repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
Agnishom/kabooblydoo | lib/werkzeug/__init__.py | 116 | 7211 | # -*- coding: utf-8 -*-
"""
werkzeug
~~~~~~~~
Werkzeug is the Swiss Army knife of Python web development.
It provides useful classes and functions for any WSGI application to make
the life of a python web developer much easier. All of the provided
classes are independent from each other so you can mix it with any other
library.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from types import ModuleType
import sys
from werkzeug._compat import iteritems
# the version. Usually set automatically by a script.
__version__ = '0.10.4'
# This import magic raises concerns quite often which is why the implementation
# and motivation is explained here in detail now.
#
# The majority of the functions and classes provided by Werkzeug work on the
# HTTP and WSGI layer. There is no useful grouping for those which is why
# they are all importable from "werkzeug" instead of the modules where they are
# implemented. The downside of that is, that now everything would be loaded at
# once, even if unused.
#
# The implementation of a lazy-loading module in this file replaces the
# werkzeug package when imported from within. Attribute access to the werkzeug
# module will then lazily import from the modules that implement the objects.
# import mapping to objects in other modules.
# Keys are the implementing module names; values are the public names that
# should appear importable directly from the "werkzeug" package.
all_by_module = {
    'werkzeug.debug': ['DebuggedApplication'],
    'werkzeug.local': ['Local', 'LocalManager', 'LocalProxy',
                       'LocalStack', 'release_local'],
    'werkzeug.serving': ['run_simple'],
    'werkzeug.test': ['Client', 'EnvironBuilder', 'create_environ',
                      'run_wsgi_app'],
    'werkzeug.testapp': ['test_app'],
    'werkzeug.exceptions': ['abort', 'Aborter'],
    'werkzeug.urls': ['url_decode', 'url_encode', 'url_quote',
                      'url_quote_plus', 'url_unquote',
                      'url_unquote_plus', 'url_fix', 'Href',
                      'iri_to_uri', 'uri_to_iri'],
    'werkzeug.formparser': ['parse_form_data'],
    'werkzeug.utils': ['escape', 'environ_property',
                       'append_slash_redirect', 'redirect',
                       'cached_property', 'import_string',
                       'dump_cookie', 'parse_cookie', 'unescape',
                       'format_string', 'find_modules', 'header_property',
                       'html', 'xhtml', 'HTMLBuilder',
                       'validate_arguments', 'ArgumentValidationError',
                       'bind_arguments', 'secure_filename'],
    'werkzeug.wsgi': ['get_current_url', 'get_host', 'pop_path_info',
                      'peek_path_info', 'SharedDataMiddleware',
                      'DispatcherMiddleware', 'ClosingIterator',
                      'FileWrapper', 'make_line_iter', 'LimitedStream',
                      'responder', 'wrap_file', 'extract_path_info'],
    'werkzeug.datastructures': ['MultiDict', 'CombinedMultiDict', 'Headers',
                                'EnvironHeaders', 'ImmutableList',
                                'ImmutableDict', 'ImmutableMultiDict',
                                'TypeConversionDict',
                                'ImmutableTypeConversionDict',
                                'Accept', 'MIMEAccept', 'CharsetAccept',
                                'LanguageAccept', 'RequestCacheControl',
                                'ResponseCacheControl', 'ETags', 'HeaderSet',
                                'WWWAuthenticate', 'Authorization',
                                'FileMultiDict', 'CallbackDict', 'FileStorage',
                                'OrderedMultiDict', 'ImmutableOrderedMultiDict'],
    'werkzeug.useragents': ['UserAgent'],
    'werkzeug.http': ['parse_etags', 'parse_date', 'http_date',
                      'cookie_date', 'parse_cache_control_header',
                      'is_resource_modified', 'parse_accept_header',
                      'parse_set_header', 'quote_etag', 'unquote_etag',
                      'generate_etag', 'dump_header',
                      'parse_list_header', 'parse_dict_header',
                      'parse_authorization_header',
                      'parse_www_authenticate_header',
                      'remove_entity_headers', 'is_entity_header',
                      'remove_hop_by_hop_headers', 'parse_options_header',
                      'dump_options_header', 'is_hop_by_hop_header',
                      'unquote_header_value',
                      'quote_header_value', 'HTTP_STATUS_CODES'],
    'werkzeug.wrappers': ['BaseResponse', 'BaseRequest', 'Request',
                          'Response', 'AcceptMixin', 'ETagRequestMixin',
                          'ETagResponseMixin', 'ResponseStreamMixin',
                          'CommonResponseDescriptorsMixin',
                          'UserAgentMixin', 'AuthorizationMixin',
                          'WWWAuthenticateMixin',
                          'CommonRequestDescriptorsMixin'],
    'werkzeug.security': ['generate_password_hash', 'check_password_hash'],
    # the undocumented easteregg ;-)
    'werkzeug._internal': ['_easteregg']
}

# modules that should be imported when accessed as attributes of werkzeug
# (e.g. ``werkzeug.exceptions``), rather than single objects copied over.
attribute_modules = frozenset(['exceptions', 'routing', 'script'])
# Invert the export table: for every public name, remember which module
# actually implements it, so __getattr__ can import lazily on first access.
object_origins = {
    exported_name: module_name
    for module_name, exported_names in iteritems(all_by_module)
    for exported_name in exported_names
}
class module(ModuleType):
    """Automatically import objects from the modules."""

    def __getattr__(self, name):
        # Only invoked when normal attribute lookup fails, i.e. on the
        # first access to a lazily provided name.
        if name in object_origins:
            # Import the module that implements *name*, then copy all of
            # that module's exported names onto this package object so
            # later accesses are ordinary (fast) attribute hits.
            module = __import__(object_origins[name], None, None, [name])
            for extra_name in all_by_module[module.__name__]:
                setattr(self, extra_name, getattr(module, extra_name))
            return getattr(module, name)
        elif name in attribute_modules:
            # Importing the submodule binds it as an attribute of this
            # package, so the lookup below then succeeds.
            __import__('werkzeug.' + name)
        # Fall back to normal lookup (also the AttributeError path).
        return ModuleType.__getattribute__(self, name)

    def __dir__(self):
        """Just show what we want to show."""
        # Relies on the module-level ``new_module`` instance created at
        # import time further down in this file.
        result = list(new_module.__all__)
        result.extend(('__file__', '__path__', '__doc__', '__all__',
                       '__docformat__', '__name__', '__path__',
                       '__package__', '__version__'))
        return result
# keep a reference to this module so that it's not garbage collected
old_module = sys.modules['werkzeug']

# setup the new module and patch it into the dict of loaded modules;
# from now on attribute access on "werkzeug" goes through module.__getattr__.
new_module = sys.modules['werkzeug'] = module('werkzeug')
new_module.__dict__.update({
    '__file__': __file__,
    '__package__': 'werkzeug',
    '__path__': __path__,
    '__doc__': __doc__,
    '__version__': __version__,
    # everything lazily importable plus the attribute submodules
    '__all__': tuple(object_origins) + tuple(attribute_modules),
    '__docformat__': 'restructuredtext en'
})

# Due to bootstrapping issues we need to import exceptions here.
# Don't ask :-(
__import__('werkzeug.exceptions')
| mit |
novafloss/django-agnocomplete | demo/tests/test_admin_views.py | 1 | 1715 | from django.test import TestCase
from django.urls import reverse
from django.contrib.auth.models import User
from ..admin import FavoriteColorModelForm
class AdminTest(TestCase):
    """Smoke-test the Django admin pages of the demo application.

    Verifies that the admin index, the changelist/add views for the demo
    models, and the custom agnocomplete form (with its selectize media)
    are all served correctly to an authenticated superuser.
    """

    def setUp(self):
        super(AdminTest, self).setUp()
        # create a superuser to be logged in
        self.admin = User.objects.create_superuser(
            'admin', 'admin@example.com', 'abcd1234')
        # login() returns False on failure; assert so a broken fixture
        # fails here instead of as a confusing 302 in every test.
        self.assertTrue(
            self.client.login(username='admin', password='abcd1234'))

    def test_home(self):
        # Admin index must be reachable.
        response = self.client.get(reverse('admin:index'))
        self.assertEqual(response.status_code, 200)

    def test_demo_person(self):
        # Changelist and add views for the Person model.
        response = self.client.get(reverse('admin:demo_person_changelist'))
        self.assertEqual(response.status_code, 200)
        response = self.client.get(reverse('admin:demo_person_add'))
        self.assertEqual(response.status_code, 200)

    def test_demo_color(self):
        # Changelist and add views for the FavoriteColor model.
        response = self.client.get(
            reverse('admin:demo_favoritecolor_changelist'))
        self.assertEqual(response.status_code, 200)
        response = self.client.get(reverse('admin:demo_favoritecolor_add'))
        self.assertEqual(response.status_code, 200)

    def test_demo_color_form(self):
        # The add view must use the custom agnocomplete model form and
        # pull the selectize assets in through the form media.
        response = self.client.get(reverse('admin:demo_favoritecolor_add'))
        self.assertIn('adminform', response.context)
        adminform = response.context['adminform']
        form = adminform.form
        # assertIsInstance gives a clearer failure message than
        # assertTrue(isinstance(...)).
        self.assertIsInstance(form, FavoriteColorModelForm)
        self.assertIn('media', response.context)
        media = response.context['media']
        media = "{}".format(media)
        self.assertIn('selectize.js', media)
        self.assertIn('selectize.css', media)
| mit |
argv0/cloudstack | tools/marvin/marvin/sandbox/basic/basic_env.py | 2 | 4360 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''
############################################################
# Experimental state of scripts
# * Need to be reviewed
# * Only a sandbox
############################################################
'''
import random
import marvin
from ConfigParser import SafeConfigParser
from optparse import OptionParser
from marvin.configGenerator import *
def getGlobalSettings(config):
    """Yield one ``configuration`` object per entry of the [globals] section.

    *config* is a parsed properties file (ConfigParser-style); each
    key/value pair becomes a cloudstack global setting.
    """
    globals_section = dict(config.items('globals'))
    for setting_name, setting_value in globals_section.iteritems():
        setting = configuration()
        setting.name = setting_name
        setting.value = setting_value
        yield setting
def describeResources(config):
    """Build a basic-zone cloudstack deployment description from *config*.

    Reads the ``environment`` and ``cloudstack`` sections of the parsed
    properties file and assembles a zone -> pod -> cluster -> host
    hierarchy, storage pools, management server, database and loggers.
    Returns the populated ``cloudstackConfiguration`` object.
    """
    zs = cloudstackConfiguration()

    # Basic networking zone with security groups enabled.
    z = zone()
    z.dns1 = config.get('environment', 'dns')
    z.internaldns1 = config.get('environment', 'dns')
    z.name = 'Sandbox-%s'%(config.get('cloudstack', 'hypervisor'))
    z.networktype = 'Basic'
    z.securitygroupenabled = 'true'

    # Single pod spanning the configured private management IP range.
    p = pod()
    p.name = 'POD0'
    p.gateway = config.get('cloudstack', 'private.gateway')
    p.startip = config.get('cloudstack', 'private.pod.startip')
    p.endip = config.get('cloudstack', 'private.pod.endip')
    p.netmask = config.get('cloudstack', 'private.netmask')

    # Guest IP range for the pod (basic zone: guests share the pod network).
    v = iprange()
    v.gateway = config.get('cloudstack', 'public.gateway')
    v.startip = config.get('cloudstack', 'public.vlan.startip')
    v.endip = config.get('cloudstack', 'public.vlan.endip')
    v.netmask = config.get('cloudstack', 'public.netmask')
    p.guestIpRanges.append(v)

    # One cloudstack-managed cluster with a single host.
    c = cluster()
    c.clustername = 'C0'
    c.hypervisor = config.get('cloudstack', 'hypervisor')
    c.clustertype = 'CloudManaged'

    h = host()
    h.username = 'root'
    h.password = config.get('cloudstack', 'host.password')
    h.url = 'http://%s'%(config.get('cloudstack', 'host'))
    c.hosts.append(h)

    # Primary storage pool attached to the cluster.
    ps = primaryStorage()
    ps.name = 'PS0'
    ps.url = config.get('cloudstack', 'primary.pool')
    c.primaryStorages.append(ps)

    p.clusters.append(c)
    z.pods.append(p)

    # Zone-wide secondary storage.
    secondary = secondaryStorage()
    secondary.url = config.get('cloudstack', 'secondary.pool')
    z.secondaryStorages.append(secondary)

    '''Add zone'''
    zs.zones.append(z)

    '''Add mgt server'''
    mgt = managementServer()
    mgt.mgtSvrIp = config.get('environment', 'mshost')
    zs.mgtSvr.append(mgt)

    '''Add a database'''
    db = dbServer()
    db.dbSvr = config.get('environment', 'mysql.host')
    db.user = config.get('environment', 'mysql.cloud.user')
    db.passwd = config.get('environment', 'mysql.cloud.passwd')
    zs.dbSvr = db

    '''Add some configuration'''
    # NOTE: list comprehension used purely for its side effect (append).
    [zs.globalConfig.append(cfg) for cfg in getGlobalSettings(config)]

    ''''add loggers'''
    testClientLogger = logger()
    testClientLogger.name = 'TestClient'
    testClientLogger.file = '/var/log/testclient.log'

    testCaseLogger = logger()
    testCaseLogger.name = 'TestCase'
    testCaseLogger.file = '/var/log/testcase.log'

    zs.logger.append(testClientLogger)
    zs.logger.append(testCaseLogger)

    return zs
if __name__ == '__main__':
    # Command line entry point: read a properties file describing the
    # environment and emit the generated marvin sandbox configuration.
    parser = OptionParser()
    parser.add_option('-i', '--input', action='store', default='setup.properties', \
        dest='input', help='file containing environment setup information')
    parser.add_option('-o', '--output', action='store', default='./sandbox.cfg', \
        dest='output', help='path where environment json will be generated')

    (opts, args) = parser.parse_args()

    cfg_parser = SafeConfigParser()
    cfg_parser.read(opts.input)
    cfg = describeResources(cfg_parser)
    generate_setup_config(cfg, opts.output)
| apache-2.0 |
ManuSchmi88/landlab | landlab/plot/imshow.py | 3 | 21050 | #! /usr/bin/env python
"""
Methods to plot data defined on Landlab grids.
Plotting functions
++++++++++++++++++
.. autosummary::
:toctree: generated/
~landlab.plot.imshow.imshow_grid
~landlab.plot.imshow.imshow_grid_at_cell
~landlab.plot.imshow.imshow_grid_at_node
"""
import numpy as np
import inspect
from landlab.field.scalar_data_fields import FieldError
try:
import matplotlib.pyplot as plt
except ImportError:
import warnings
warnings.warn('matplotlib not found', ImportWarning)
from landlab.grid import CLOSED_BOUNDARY
from landlab.grid.raster import RasterModelGrid
from landlab.grid.voronoi import VoronoiDelaunayGrid
from landlab.utils.decorators import deprecated
def imshow_grid_at_node(grid, values, **kwds):
    """Prepare a map view of data over all nodes in the grid.

    Data is plotted as cells shaded with the value at the node at its center.
    Outer edges of perimeter cells are extrapolated. Closed elements are
    colored uniformly (default black, overridden with kwd 'color_for_closed');
    other open boundary nodes get their actual values.

    *values* can be a field name, a regular array, or a masked array. If a
    masked array is provided, masked entries will be treated as if they were
    Landlab CLOSED_BOUNDARYs. Used together with the color_at_closed=None
    keyword (i.e., "transparent"), this can allow for construction of overlay
    layers in a figure (e.g., only defining values in a river network, and
    overlaying it on another landscape).

    Use matplotlib functions like xlim, ylim to modify your plot after calling
    :func:`imshow_grid`, as desired.

    This function happily works with both regular and irregular grids.

    Construction ::

        imshow_grid_at_node(grid, values, plot_name=None, var_name=None,
                            var_units=None, grid_units=None,
                            symmetric_cbar=False, cmap='pink',
                            limits=(values.min(), values.max()),
                            vmin=values.min(), vmax=values.max(),
                            allow_colorbar=True,
                            norm=[linear], shrink=1.,
                            color_for_closed='black',
                            color_for_background=None,
                            show_elements=False, output=None)

    Parameters
    ----------
    grid : ModelGrid
        Grid containing the field to plot, or describing the geometry of the
        provided array.
    values : array_like, masked_array, or str
        Node values, or a field name as a string from which to draw the data.
    plot_name : str, optional
        String to put as the plot title.
    var_name : str, optional
        Variable name, to use as a colorbar label.
    var_units : str, optional
        Units for the variable being plotted, for the colorbar.
    grid_units : tuple of str, optional
        Units for y, and x dimensions. If None, component will look to the
        gri property `axis_units` for this information. If no units are
        specified there, no entry is made.
    symmetric_cbar : bool
        Make the colormap symetric about 0.
    cmap : str
        Name of a colormap
    limits : tuple of float
        Minimum and maximum of the colorbar.
    vmin, vmax: floats
        Alternatives to limits.
    allow_colorbar : bool
        If True, include the colorbar.
    colorbar_label : str or None
        The string with which to label the colorbar.
    norm : matplotlib.colors.Normalize
        The normalizing object which scales data, typically into the interval
        [0, 1]. Ignore in most cases.
    shrink : float
        Fraction by which to shrink the colorbar.
    color_for_closed : str or None
        Color to use for closed nodes (default 'black'). If None, closed
        (or masked) nodes will be transparent.
    color_for_background : color str or other color declaration, or None
        Color to use for closed elements (default None). If None, the
        background will be transparent, and appear white.
    show_elements : bool
        If True, and grid is a Voronoi, the faces will be plotted in black
        along with just the colour of the cell, defining the cell outlines
        (defaults False).
    output : None, string, or bool
        If None (or False), the image is sent to the imaging buffer to await
        an explicit call to show() or savefig() from outside this function.
        If a string, the string should be the path to a save location, and the
        filename (with file extension). The function will then call
        plt.savefig([string]) itself. If True, the function will call
        plt.show() itself once plotting is complete.
    """
    # Resolve a field name into the node data array.
    if isinstance(values, str):
        values_at_node = grid.at_node[values]
    else:
        values_at_node = values

    if values_at_node.size != grid.number_of_nodes:
        raise ValueError('number of values does not match number of nodes')

    # Mask closed-boundary nodes so they are drawn with color_for_closed.
    values_at_node = np.ma.masked_where(
        grid.status_at_node == CLOSED_BOUNDARY, values_at_node)

    try:
        shape = grid.shape
    except AttributeError:
        # Irregular grids (e.g. Voronoi) have no 2D shape; keep data flat.
        shape = (-1, )

    _imshow_grid_values(grid, values_at_node.reshape(shape), **kwds)

    if isinstance(values, str):
        # Use the field name as the default plot title.
        plt.title(values)
@deprecated(use='imshow_grid_at_node', version='0.5')
def imshow_node_grid(grid, values, **kwds):
    """Deprecated alias for :func:`imshow_grid_at_node`."""
    imshow_grid_at_node(grid, values, **kwds)
def imshow_grid_at_cell(grid, values, **kwds):
    """Map view of grid data over all grid cells.

    Prepares a map view of data over all cells in the grid.
    Method can take any of the same ``**kwds`` as :func:`imshow_grid_at_node`.

    Construction ::

        imshow_grid_at_cell(grid, values, plot_name=None, var_name=None,
                            var_units=None, grid_units=None,
                            symmetric_cbar=False, cmap='pink',
                            limits=(values.min(), values.max()),
                            vmin=values.min(), vmax=values.max(),
                            allow_colorbar=True, colorbar_label=None,
                            norm=[linear], shrink=1.,
                            color_for_closed='black',
                            color_for_background=None,
                            show_elements=False, output=None)

    Parameters
    ----------
    grid : ModelGrid
        Grid containing the field to plot, or describing the geometry of the
        provided array.
    values : array_like, masked_array, or str
        Values at the cells on the grid. Alternatively, can be a field name
        (string) from which to draw the data from the grid.
    plot_name : str, optional
        String to put as the plot title.
    var_name : str, optional
        Variable name, to use as a colorbar label.
    var_units : str, optional
        Units for the variable being plotted, for the colorbar.
    grid_units : tuple of str, optional
        Units for y, and x dimensions. If None, component will look to the
        gri property `axis_units` for this information. If no units are
        specified there, no entry is made.
    symmetric_cbar : bool
        Make the colormap symetric about 0.
    cmap : str
        Name of a colormap
    limits : tuple of float
        Minimum and maximum of the colorbar.
    vmin, vmax: floats
        Alternatives to limits.
    allow_colorbar : bool
        If True, include the colorbar.
    colorbar_label : str or None
        The string with which to label the colorbar.
    norm : matplotlib.colors.Normalize
        The normalizing object which scales data, typically into the interval
        [0, 1]. Ignore in most cases.
    shrink : float
        Fraction by which to shrink the colorbar.
    color_for_closed : str or None
        Color to use for closed elements (default 'black'). If None, closed
        (or masked) elements will be transparent.
    color_for_background : color str or other color declaration, or None
        Color to use for closed elements (default None). If None, the
        background will be transparent, and appear white.
    show_elements : bool
        If True, and grid is a Voronoi, the faces will be plotted in black
        along with just the colour of the cell, defining the cell outlines
        (defaults False).
    output : None, string, or bool
        If None (or False), the image is sent to the imaging buffer to await
        an explicit call to show() or savefig() from outside this function.
        If a string, the string should be the path to a save location, and the
        filename (with file extension). The function will then call
        plt.savefig([string]) itself. If True, the function will call
        plt.show() itself once plotting is complete.

    Raises
    ------
    ValueError
        If input grid is not uniform rectilinear.
    """
    # Resolve a field name, preferring a cell field but falling back to a
    # node field of the same name.
    if isinstance(values, str):
        try:
            values_at_cell = grid.at_cell[values]
        except FieldError:
            values_at_cell = grid.at_node[values]
    else:
        values_at_cell = values

    # Node-sized data is subsetted down to the cell-bearing nodes.
    if values_at_cell.size == grid.number_of_nodes:
        values_at_cell = values_at_cell[grid.node_at_cell]

    if values_at_cell.size != grid.number_of_cells:
        raise ValueError('number of values must match number of cells or '
                         'number of nodes')

    # Mask everything, then unmask core cells only.
    # NOTE(review): np.ma.asarray followed by mask = True discards any mask
    # already present on a masked-array input — confirm this is intended.
    values_at_cell = np.ma.asarray(values_at_cell)
    values_at_cell.mask = True
    values_at_cell.mask[grid.core_cells] = False

    myimage = _imshow_grid_values(grid,
                                  values_at_cell.reshape(grid.cell_grid_shape),
                                  **kwds)

    if isinstance(values, str):
        # Use the field name as the default plot title.
        plt.title(values)

    return myimage
@deprecated(use='imshow_grid_at_cell', version='0.5')
def imshow_cell_grid(grid, values, **kwds):
    """Deprecated alias for :func:`imshow_grid_at_cell`.

    Returns whatever :func:`imshow_grid_at_cell` returns; the original
    wrapper silently discarded that return value, unlike the function it
    wraps, so callers of the alias could not get the image object.
    """
    return imshow_grid_at_cell(grid, values, **kwds)
def _imshow_grid_values(grid, values, plot_name=None, var_name=None,
                        var_units=None, grid_units=(None, None),
                        symmetric_cbar=False, cmap='pink', limits=None,
                        colorbar_label=None,
                        allow_colorbar=True, vmin=None, vmax=None,
                        norm=None, shrink=1., color_for_closed='black',
                        color_for_background=None, show_elements=False,
                        output=None):
    """Shared plotting backend for the public ``imshow_grid*`` functions.

    Dispatches on the grid type: raster grids are drawn with pcolormesh,
    Voronoi grids by filling the Voronoi regions.  See the public wrappers
    for parameter documentation.
    """
    gridtypes = inspect.getmro(grid.__class__)

    cmap = plt.get_cmap(cmap)
    # Masked (closed) entries are drawn in a dedicated color, or made
    # transparent when color_for_closed is None.
    if color_for_closed is not None:
        cmap.set_bad(color=color_for_closed)
    else:
        cmap.set_bad(alpha=0.)

    if isinstance(grid, RasterModelGrid):
        if values.ndim != 2:
            raise ValueError('values must have ndim == 2')

        # Cell-edge coordinates: node coordinates shifted by half a cell.
        y = np.arange(values.shape[0] + 1) * grid.dy - grid.dy * .5
        x = np.arange(values.shape[1] + 1) * grid.dx - grid.dx * .5

        kwds = dict(cmap=cmap)
        # Default color limits are the full data range, possibly overridden
        # below by limits / vmin / vmax / symmetric_cbar.
        (kwds['vmin'], kwds['vmax']) = (values.min(), values.max())
        if (limits is None) and ((vmin is None) and (vmax is None)):
            if symmetric_cbar:
                (var_min, var_max) = (values.min(), values.max())
                limit = max(abs(var_min), abs(var_max))
                (kwds['vmin'], kwds['vmax']) = (- limit, limit)
        elif limits is not None:
            (kwds['vmin'], kwds['vmax']) = (limits[0], limits[1])
        else:
            if vmin is not None:
                kwds['vmin'] = vmin
            if vmax is not None:
                kwds['vmax'] = vmax

        if np.isclose(grid.dx, grid.dy):
            if values.size == grid.number_of_nodes:
                myimage = plt.imshow(
                    values.reshape(grid.shape), origin='lower',
                    extent=(x[0], x[-1], y[0], y[-1]), **kwds)
            else:  # this is a cell grid, and has been reshaped already...
                myimage = plt.imshow(values, origin='lower',
                                     extent=(x[0], x[-1], y[0], y[-1]), **kwds)
        # NOTE(review): this pcolormesh call runs unconditionally and
        # rebinds ``myimage``, so the imshow result above appears to be
        # dead code — confirm whether an ``else:`` was intended here.
        myimage = plt.pcolormesh(x, y, values, **kwds)

        plt.gca().set_aspect(1.)
        plt.autoscale(tight=True)

        if allow_colorbar:
            cb = plt.colorbar(norm=norm, shrink=shrink)
            if colorbar_label:
                cb.set_label(colorbar_label)
    elif VoronoiDelaunayGrid in gridtypes:
        # This is still very much ad-hoc, and needs prettifying.
        # We should save the modifications needed to plot color all the way
        # to the diagram edge *into* the grid, for faster plotting.
        # (see http://stackoverflow.com/questions/20515554/...
        # colorize-voronoi-diagram)
        # (This technique is not implemented yet)
        from scipy.spatial import voronoi_plot_2d
        import matplotlib.colors as colors
        import matplotlib.cm as cmx
        cm = plt.get_cmap(cmap)

        if (limits is None) and ((vmin is None) and (vmax is None)):
            # only want to work with NOT CLOSED nodes
            # (status 4 is the closed-boundary code; see CLOSED_BOUNDARY)
            open_nodes = grid.status_at_node != 4
            if symmetric_cbar:
                (var_min, var_max) = (values.flat[
                    open_nodes].min(), values.flat[open_nodes].max())
                limit = max(abs(var_min), abs(var_max))
                (vmin, vmax) = (- limit, limit)
            else:
                (vmin, vmax) = (values.flat[
                    open_nodes].min(), values.flat[open_nodes].max())
        elif limits is not None:
            (vmin, vmax) = (limits[0], limits[1])
        else:
            open_nodes = grid.status_at_node != 4
            if vmin is None:
                vmin = values.flat[open_nodes].min()
            if vmax is None:
                vmax = values.flat[open_nodes].max()

        cNorm = colors.Normalize(vmin, vmax)
        scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cm)
        colorVal = scalarMap.to_rgba(values)

        if show_elements:
            myimage = voronoi_plot_2d(grid.vor, show_vertices=False,
                                      show_points=False)
        # show_points to be supported in scipy0.18, but harmless for now

        # Fill each (finite) Voronoi region with the color of its value.
        mycolors = (i for i in colorVal)
        for order in grid.vor.point_region:
            region = grid.vor.regions[order]
            colortouse = next(mycolors)
            if -1 not in region:
                polygon = [grid.vor.vertices[i] for i in region]
                plt.fill(*zip(*polygon), color=colortouse)

        plt.gca().set_aspect(1.)
        # plt.autoscale(tight=True)
        # Tempting though it is to move the boundary outboard of the outermost
        # nodes (e.g., to the outermost corners), this is a bad idea, as the
        # outermost cells tend to have highly elongated shapes which make the
        # plot look stupid
        plt.xlim((np.min(grid.node_x), np.max(grid.node_x)))
        plt.ylim((np.min(grid.node_y), np.max(grid.node_y)))

        scalarMap.set_array(values)
        if allow_colorbar:
            cb = plt.colorbar(scalarMap, shrink=shrink)

    # Axis labels: fall back to the grid's own axis_units when none given.
    if grid_units[1] is None and grid_units[0] is None:
        grid_units = grid.axis_units
        if grid_units[1] == '-' and grid_units[0] == '-':
            plt.xlabel('X')
            plt.ylabel('Y')
        else:
            plt.xlabel('X (%s)' % grid_units[1])
            plt.ylabel('Y (%s)' % grid_units[0])
    else:
        plt.xlabel('X (%s)' % grid_units[1])
        plt.ylabel('Y (%s)' % grid_units[0])

    if plot_name is not None:
        plt.title('%s' % (plot_name))

    # Build a colorbar label from var_name/var_units if provided; requires
    # the colorbar to exist (hence the allow_colorbar assert below).
    if var_name is not None or var_units is not None:
        if var_name is not None:
            assert type(var_name) is str
            if var_units is not None:
                assert type(var_units) is str
                colorbar_label = var_name + ' (' + var_units + ')'
            else:
                colorbar_label = var_name
        else:
            assert type(var_units) is str
            colorbar_label = '(' + var_units + ')'
        assert type(colorbar_label) is str
        assert allow_colorbar
        cb.set_label(colorbar_label)

    if color_for_background is not None:
        plt.gca().set_axis_bgcolor(color_for_background)

    # output: str -> save to path and clear; truthy -> show interactively;
    # None/False -> leave the figure in the buffer for the caller.
    if output is not None:
        if type(output) is str:
            plt.savefig(output)
            plt.clf()
        elif output:
            plt.show()
def imshow_grid(grid, values, **kwds):
    """Prepare a map view of data over all nodes or cells in the grid.

    Data is plotted as colored cells. If at='node', the surrounding cell is
    shaded with the value at the node at its center. If at='cell', the cell
    is shaded with its own value. Outer edges of perimeter cells are
    extrapolated. Closed elements are colored uniformly (default black,
    overridden with kwd 'color_for_closed'); other open boundary nodes get
    their actual values.

    *values* can be a field name, a regular array, or a masked array. If a
    masked array is provided, masked entries will be treated as if they were
    Landlab CLOSED_BOUNDARYs. Used together with the color_at_closed=None
    keyword (i.e., "transparent"), this can allow for construction of overlay
    layers in a figure (e.g., only defining values in a river network, and
    overlaying it on another landscape).

    Use matplotlib functions like xlim, ylim to modify your plot after calling
    :func:`imshow_grid`, as desired.

    This function happily works with both regular and irregular grids.

    Construction ::

        imshow_grid(grid, values, plot_name=None, var_name=None,
                    var_units=None, grid_units=None,
                    symmetric_cbar=False, cmap='pink',
                    limits=(values.min(), values.max()),
                    vmin=values.min(), vmax=values.max(),
                    allow_colorbar=True, colorbar_label=None,
                    norm=[linear], shrink=1.,
                    color_for_closed='black',
                    color_for_background=None,
                    show_elements=False)

    Parameters
    ----------
    grid : ModelGrid
        Grid containing the field to plot, or describing the geometry of the
        provided array.
    values : array_like, masked_array, or str
        Node or cell values, or a field name as a string from which to draw
        the data.
    at : str, {'node', 'cell'}
        Tells plotter where values are defined.
    plot_name : str, optional
        String to put as the plot title.
    var_name : str, optional
        Variable name, to use as a colorbar label.
    var_units : str, optional
        Units for the variable being plotted, for the colorbar.
    grid_units : tuple of str, optional
        Units for y, and x dimensions. If None, component will look to the
        gri property `axis_units` for this information. If no units are
        specified there, no entry is made.
    symmetric_cbar : bool
        Make the colormap symetric about 0.
    cmap : str
        Name of a colormap
    limits : tuple of float
        Minimum and maximum of the colorbar.
    vmin, vmax: floats
        Alternatives to limits.
    allow_colorbar : bool
        If True, include the colorbar.
    colorbar_label : str or None
        The string with which to label the colorbar.
    norm : matplotlib.colors.Normalize
        The normalizing object which scales data, typically into the interval
        [0, 1]. Ignore in most cases.
    shrink : float
        Fraction by which to shrink the colorbar.
    color_for_closed : str or None
        Color to use for closed elements (default 'black'). If None, closed
        (or masked) elements will be transparent.
    color_for_background : color str or other color declaration, or None
        Color to use for closed elements (default None). If None, the
        background will be transparent, and appear white.
    show_elements : bool
        If True, and grid is a Voronoi, the faces will be plotted in black
        along with just the colour of the cell, defining the cell outlines
        (defaults False).
    output : None, string, or bool
        If None (or False), the image is sent to the imaging buffer to await
        an explicit call to show() or savefig() from outside this function.
        If a string, the string should be the path to a save location, and the
        filename (with file extension). The function will then call
        plt.savefig([string]) itself. If True, the function will call
        plt.show() itself once plotting is complete.
    """
    show = kwds.pop('show', False)
    # 'at' is the documented keyword; 'values_at' is accepted for backwards
    # compatibility, with 'at' taking precedence when both are supplied.
    values_at = kwds.pop('values_at', 'node')
    values_at = kwds.pop('at', values_at)

    # Resolve a field name into the actual data array.  (The original code
    # performed this identical lookup twice in a row; the second copy was
    # redundant because *values* is no longer a str after the first.)
    if isinstance(values, str):
        values = grid.field_values(values_at, values)

    if values_at == 'node':
        imshow_grid_at_node(grid, values, **kwds)
    elif values_at == 'cell':
        imshow_grid_at_cell(grid, values, **kwds)
    else:
        raise TypeError('value location %s not understood' % values_at)

    # retained for backwards compatibility:
    if show:
        plt.show()
| mit |
ToBaer94/pygame_platformer | main.py | 1 | 2485 | import sys
import pygame as pg
from states.splash_state import SplashScreen
from states.map_state import Map
from states.gameplay_state import GamePlay
from states.level_start_state import LevelOpening
from states.gameover_state import GameOver
class Game(object):
    """Top-level game object owning the screen, clock and state machine.

    States are objects held in the *states* dict; the active one receives
    events, fixed-size update steps, and draw calls until it flags itself
    ``done`` (switch to ``next_state``) or ``quit`` (end the main loop).
    """

    def __init__(self, screen, states, start_state):
        self.done = False
        self.screen = screen
        self.clock = pg.time.Clock()
        self.fps = 60.0
        # Timestep bookkeeping: updates are expressed as multiples of the
        # ideal frame (1000 ms / 60 fps), capped at max_step per call.
        self.target_fps = 60.0
        self.ms_per_sec = 1000.0
        self.desired_frame_time = self.ms_per_sec / self.target_fps
        self.max_step = 1.0
        self.states = states
        self.state_name = start_state
        self.state = self.states[self.state_name]
        # Custom user event posted once per frame for states that want it.
        self.stuff = pg.USEREVENT + 1
        self.custom_event = pg.event.Event(self.stuff)

    def event_loop(self):
        """Post the per-frame custom event, then forward all pending events."""
        pg.event.post(self.custom_event)
        for pending in pg.event.get():
            self.state.get_event(pending)

    def flip_state(self):
        """Replace the active state with the one it names as successor."""
        upcoming = self.state.next_state
        self.state.done = False
        self.state_name = upcoming
        carried = self.state.persist
        self.state = self.states[upcoming]
        self.state.screen = self.screen
        self.state.startup(carried)

    def update(self, dt):
        """Advance the game by *dt*; honour quit/done flags of the state."""
        if self.state.quit:
            self.done = True
        elif self.state.done:
            self.flip_state()
        self.state.update(dt)

    def draw(self):
        """Let the active state render itself onto the screen."""
        self.state.draw(self.screen)

    def run(self):
        """Main loop: events, fixed-step updates, draw, flip — until done."""
        while not self.done:
            self.event_loop()
            frame_time = self.clock.tick(self.fps)
            total_dt = frame_time / self.desired_frame_time
            # Consume the elapsed time in steps no larger than max_step.
            while total_dt > 0.0:
                step = min(total_dt, self.max_step)
                self.update(step)
                total_dt -= step
            self.draw()
            pg.display.flip()
if __name__ == "__main__":
pg.mixer.pre_init(frequency=22050, size=-16, channels=2, buffer=512)
pg.mixer.init()
pg.init()
pg.display.set_caption("Platform Jumper 2")
screen = pg.display.set_mode((800, 600))
states = {"SPLASH": SplashScreen(),
"MAP": Map(),
"LEVEL": GamePlay(),
"LEVELPREVIEW": LevelOpening(),
"GAMEOVER": GameOver()
}
game = Game(screen, states, "SPLASH")
game.run()
pg.quit()
sys.exit()
| lgpl-3.0 |
darkleons/odoo | addons/document/wizard/document_configuration.py | 381 | 4895 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class document_configuration(osv.osv_memory):
    """res.config wizard that seeds "resource"-backed directories in the
    Document module for the optional applications that are installed
    (sale orders/quotations, products, analytic accounts)."""
    _name='document.configuration'
    _description = 'Directory Configuration'
    _inherit = 'res.config'
    def execute(self, cr, uid, ids, context=None):
        """Bind the well-known XML-id directories to their models.

        For each optional model present in the registry, look up the
        matching ``document.directory`` via ir.model.data and rewrite it
        as a dynamic directory of type ``ressource``; sale orders also
        get PDF "print" content entries based on the sale order report.
        """
        dir_pool = self.pool.get('document.directory')
        data_pool = self.pool.get('ir.model.data')
        model_pool = self.pool.get('ir.model')
        content_pool = self.pool.get('document.directory.content')
        if self.pool.get('sale.order'):
            # Sale order directory (all orders)
            dir_data_id = data_pool._get_id(cr, uid, 'document', 'dir_sale_order_all')
            if dir_data_id:
                sale_dir_id = data_pool.browse(cr, uid, dir_data_id, context=context).res_id
            else:
                # NOTE(review): this creates an ir.model.data record and then
                # uses its id as a document.directory id in the write() below
                # — presumably relies on the XML-id always existing; confirm.
                sale_dir_id = data_pool.create(cr, uid, {'name': 'Sale Orders'})
            mid = model_pool.search(cr, uid, [('model','=','sale.order')])
            dir_pool.write(cr, uid, [sale_dir_id], {
                'type':'ressource',
                'ressource_type_id': mid[0],
                'domain': '[]',
            })
            # Quotation directory: same model, restricted to draft orders
            dir_data_id = data_pool._get_id(cr, uid, 'document', 'dir_sale_order_quote')
            if dir_data_id:
                quta_dir_id = data_pool.browse(cr, uid, dir_data_id, context=context).res_id
            else:
                quta_dir_id = data_pool.create(cr, uid, {'name': 'Sale Quotations'})
            # `mid` still holds the sale.order model id found above.
            dir_pool.write(cr, uid, [quta_dir_id], {
                'type':'ressource',
                'ressource_type_id': mid[0],
                'domain': "[('state','=','draft')]",
            })
            # Sale Order Report: attach printable PDF content entries to
            # both directories when the report's XML-id resolves.
            order_report_data_id = data_pool._get_id(cr, uid, 'sale', 'report_sale_order')
            if order_report_data_id:
                order_report_id = data_pool.browse(cr, uid, order_report_data_id, context=context).res_id
                content_pool.create(cr, uid, {
                    'name': "Print Order",
                    'suffix': "_print",
                    'report_id': order_report_id,
                    'extension': '.pdf',
                    'include_name': 1,
                    'directory_id': sale_dir_id,
                })
                content_pool.create(cr, uid, {
                    'name': "Print Quotation",
                    'suffix': "_print",
                    'report_id': order_report_id,
                    'extension': '.pdf',
                    'include_name': 1,
                    'directory_id': quta_dir_id,
                })
        if self.pool.get('product.product'):
            # Product directory (no domain restriction)
            dir_data_id = data_pool._get_id(cr, uid, 'document', 'dir_product')
            if dir_data_id:
                product_dir_id = data_pool.browse(cr, uid, dir_data_id, context=context).res_id
            else:
                product_dir_id = data_pool.create(cr, uid, {'name': 'Products'})
            mid = model_pool.search(cr, uid, [('model','=','product.product')])
            dir_pool.write(cr, uid, [product_dir_id], {
                'type':'ressource',
                'ressource_type_id': mid[0],
            })
        if self.pool.get('account.analytic.account'):
            # Project directory: tree structure follows the analytic tree
            dir_data_id = data_pool._get_id(cr, uid, 'document', 'dir_project')
            if dir_data_id:
                project_dir_id = data_pool.browse(cr, uid, dir_data_id, context=context).res_id
            else:
                project_dir_id = data_pool.create(cr, uid, {'name': 'Projects'})
            mid = model_pool.search(cr, uid, [('model','=','account.analytic.account')])
            dir_pool.write(cr, uid, [project_dir_id], {
                'type':'ressource',
                'ressource_type_id': mid[0],
                'domain': '[]',
                'ressource_tree': 1
            })
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
carlye566/IoT-POX | pox/samples/mixed_switches.py | 72 | 2085 | # Copyright 2012 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A quick example of treating different datapaths differently.
Although it's not currently particularly well supported, there's
nothing to stop one from using different components with particular
switches. There are multiple ways to do this, but this component
demonstrates a pretty straightforward one.
When components are loaded from the commandline, their launch()
function is run. In many cases, this launch() function sets up
a listener for openflow.ConnectionUp events. When one is raised,
the component handles it by setting up more event listeners on
that connection.
If we want to have some switches behave one way and others
behave another way, we simply don't let them set up their own
ConnectionUp handlers and take care of initializing the rest
of the component ourself.
Here we demonstrate that by making switches with odd-numbered
DPIDs be l2_pairs switches and even-numbered DPIDs be l2_learning
switches.
"""
from pox.core import core
import pox.forwarding.l2_pairs as l2p
import pox.forwarding.l2_learning as l2l
log = core.getLogger()


def _handle_ConnectionUp (event):
  """Wire up a newly connected switch based on the parity of its DPID.

  Odd DPIDs get the stateless l2_pairs PacketIn handler; even DPIDs get
  a full l2_learning LearningSwitch instance.
  """
  if event.dpid & 1 == 1:
    log.info("Treating %s as l2_pairs", event.connection)
    event.connection.addListenerByName("PacketIn", l2p._handle_PacketIn)
    return
  log.info("Treating %s as l2_learning", event.connection)
  l2l.LearningSwitch(event.connection, False)
def launch ():
  """POX entry point: register the per-switch ConnectionUp dispatcher."""
  core.openflow.addListenerByName("ConnectionUp", _handle_ConnectionUp)
  log.info("Mixed switches demo running.")
| apache-2.0 |
neuroelectro/neuroelectro_org | db_functions/country_codes.py | 2 | 9916 | #!/usr/bin/python
# coding: utf-8
""" This file needs to be maintained by hand.
see: http://en.wikipedia.org/wiki/ISO_3166-1_alpha-2
see: http://www.iso.org/iso/iso3166_en_code_lists.txt
"""
# ISO 3166-1 alpha-2 code -> English country name.  Hand-maintained; see
# the module docstring for the upstream references.
country_code = {
    'AD': 'Andorra',
    'AE': 'United Arab Emirates',
    'AF': 'Afghanistan',
    'AG': 'Antigua and Barbuda',
    'AI': 'Anguilla',
    'AL': 'Albania',
    'AM': 'Armenia',
    'AN': 'Netherlands Antilles',
    'AO': 'Angola',
    'AQ': 'Antarctica',
    'AR': 'Argentina',
    'AS': 'American Samoa',
    'AT': 'Austria',
    'AU': 'Australia',
    'AW': 'Aruba',
    'AX': 'Aland Islands',  # Åland Islands
    'AZ': 'Azerbaijan',
    'BA': 'Bosnia and Herzegovina',
    'BB': 'Barbados',
    'BD': 'Bangladesh',
    'BE': 'Belgium',
    'BF': 'Burkina Faso',
    'BG': 'Bulgaria',
    'BH': 'Bahrain',
    'BI': 'Burundi',
    'BJ': 'Benin',
    'BM': 'Bermuda',
    'BN': 'Brunei Darussalam',
    'BO': 'Bolivia',
    'BR': 'Brazil',
    'BS': 'Bahamas',
    'BT': 'Bhutan',
    'BV': 'Bouvet Island',
    'BW': 'Botswana',
    'BY': 'Belarus',
    'BZ': 'Belize',
    'CA': 'Canada',
    'CC': 'Cocos (Keeling) Islands',
    'CD': 'Congo, the Democratic Republic of the',
    'CF': 'Central African Republic',
    'CG': 'Congo',
    'CH': 'Switzerland',
    'CI': 'Cote d\'Ivoire',  # Côte d'Ivoire
    'CK': 'Cook Islands',
    'CL': 'Chile',
    'CM': 'Cameroon',
    'CN': 'China',
    'CO': 'Colombia',
    'CR': 'Costa Rica',
    'CU': 'Cuba',
    'CV': 'Cape Verde',
    'CX': 'Christmas Island',
    'CY': 'Cyprus',
    'CZ': 'Czech Republic',
    'DE': 'Germany',
    'DJ': 'Djibouti',
    'DK': 'Denmark',
    'DM': 'Dominica',
    'DO': 'Dominican Republic',
    'DZ': 'Algeria',
    'EC': 'Ecuador',
    'EE': 'Estonia',
    'EG': 'Egypt',
    'EH': 'Western Sahara',
    'ER': 'Eritrea',
    'ES': 'Spain',
    'ET': 'Ethiopia',
    'FI': 'Finland',
    'FJ': 'Fiji',
    'FK': 'Falkland Islands (Malvinas)',
    'FM': 'Micronesia, Federated States of',
    'FO': 'Faroe Islands',
    'FR': 'France',
    'FX': 'France, Metropolitan',
    'GA': 'Gabon',
    'GB': 'United Kingdom',
    'GD': 'Grenada',
    'GE': 'Georgia',
    'GF': 'French Guiana',
    'GG': 'Guernsey',
    'GH': 'Ghana',
    'GI': 'Gibraltar',
    'GL': 'Greenland',
    'GM': 'Gambia',
    'GN': 'Guinea',
    'GP': 'Guadeloupe',
    'GQ': 'Equatorial Guinea',
    'GR': 'Greece',
    'GS': 'South Georgia and the South Sandwich Islands',
    'GT': 'Guatemala',
    'GU': 'Guam',
    'GW': 'Guinea-Bissau',
    'GY': 'Guyana',
    'HK': 'Hong Kong',
    'HM': 'Heard Island and McDonald Islands',
    'HN': 'Honduras',
    'HR': 'Croatia',
    'HT': 'Haiti',
    'HU': 'Hungary',
    'ID': 'Indonesia',
    'IE': 'Ireland',
    'IL': 'Israel',
    'IM': 'Isle of Man',
    'IN': 'India',
    'IO': 'British Indian Ocean Territory',
    'IQ': 'Iraq',
    'IR': 'Iran, Islamic Republic of',
    'IS': 'Iceland',
    'IT': 'Italy',
    'JE': 'Jersey',
    'JM': 'Jamaica',
    'JO': 'Jordan',
    'JP': 'Japan',
    'KE': 'Kenya',
    'KG': 'Kyrgyzstan',
    'KH': 'Cambodia',
    'KI': 'Kiribati',
    'KM': 'Comoros',
    'KN': 'Saint Kitts and Nevis',
    'KP': 'Korea, Democratic People\'s Republic of',
    'KR': 'Korea, Republic of',
    'KW': 'Kuwait',
    'KY': 'Cayman Islands',
    'KZ': 'Kazakhstan',
    'LA': 'Lao People\'s Democratic Republic',
    'LB': 'Lebanon',
    'LC': 'Saint Lucia',
    'LI': 'Liechtenstein',
    'LK': 'Sri Lanka',
    'LR': 'Liberia',
    'LS': 'Lesotho',
    'LT': 'Lithuania',
    'LU': 'Luxembourg',
    'LV': 'Latvia',
    'LY': 'Libyan Arab Jamahiriya',
    'MA': 'Morocco',
    'MC': 'Monaco',
    'MD': 'Moldova, Republic of',
    'ME': 'Montenegro',
    'MG': 'Madagascar',
    'MH': 'Marshall Islands',
    'MK': 'Macedonia, the former Yugoslav Republic of',
    'ML': 'Mali',
    'MM': 'Myanmar',
    'MN': 'Mongolia',
    'MO': 'Macao',
    'MP': 'Northern Mariana Islands',
    'MQ': 'Martinique',
    'MR': 'Mauritania',
    'MS': 'Montserrat',
    'MT': 'Malta',
    'MU': 'Mauritius',
    'MV': 'Maldives',
    'MW': 'Malawi',
    'MX': 'Mexico',
    'MY': 'Malaysia',
    'MZ': 'Mozambique',
    'NA': 'Namibia',
    'NC': 'New Caledonia',
    'NE': 'Niger',
    'NF': 'Norfolk Island',
    'NG': 'Nigeria',
    'NI': 'Nicaragua',
    'NL': 'Netherlands',
    'NO': 'Norway',
    'NP': 'Nepal',
    'NR': 'Nauru',
    'NU': 'Niue',
    'NZ': 'New Zealand',
    'OM': 'Oman',
    'PA': 'Panama',
    'PE': 'Peru',
    'PF': 'French Polynesia',
    'PG': 'Papua New Guinea',
    'PH': 'Philippines',
    'PK': 'Pakistan',
    'PL': 'Poland',
    'PM': 'Saint Pierre and Miquelon',
    'PN': 'Pitcairn',
    'PR': 'Puerto Rico',
    'PS': 'Palestinian Territory, Occupied',
    'PT': 'Portugal',
    'PW': 'Palau',
    'PY': 'Paraguay',
    'QA': 'Qatar',
    'RE': 'Reunion',  # Réunion
    'RO': 'Romania',
    'RS': 'Serbia',
    'RU': 'Russian Federation',
    'RW': 'Rwanda',
    'SA': 'Saudi Arabia',
    'SB': 'Solomon Islands',
    'SC': 'Seychelles',
    'SD': 'Sudan',
    'SE': 'Sweden',
    'SG': 'Singapore',
    'SH': 'Saint Helena',
    'SI': 'Slovenia',
    'SJ': 'Svalbard and Jan Mayen',
    'SK': 'Slovakia',
    'SL': 'Sierra Leone',
    'SM': 'San Marino',
    'SN': 'Senegal',
    'SO': 'Somalia',
    'SR': 'Suriname',
    'ST': 'Sao Tome and Principe',
    'SV': 'El Salvador',
    'SY': 'Syrian Arab Republic',
    'SZ': 'Swaziland',
    'TC': 'Turks and Caicos Islands',
    'TD': 'Chad',
    'TF': 'French Southern Territories',
    'TG': 'Togo',
    'TH': 'Thailand',
    'TJ': 'Tajikistan',
    'TK': 'Tokelau',
    'TL': 'Timor-Leste',
    'TM': 'Turkmenistan',
    'TN': 'Tunisia',
    'TO': 'Tonga',
    'TR': 'Turkey',
    'TT': 'Trinidad and Tobago',
    'TV': 'Tuvalu',
    'TW': 'Taiwan, Province of China',
    'TZ': 'Tanzania, United Republic of',
    'UA': 'Ukraine',
    'UG': 'Uganda',
    #'UK': 'United Kingdom',
    'UM': 'United States Minor Outlying Islands',
    'US': 'United States',
    'UY': 'Uruguay',
    'UZ': 'Uzbekistan',
    'VA': 'Holy See (Vatican City State)',
    'VC': 'Saint Vincent and the Grenadines',
    'VE': 'Venezuela',
    'VG': 'Virgin Islands, British',
    'VI': 'Virgin Islands, U.S.',
    'VN': 'Viet Nam',
    'VU': 'Vanuatu',
    'WF': 'Wallis and Futuna',
    'WS': 'Samoa',
    'YE': 'Yemen',
    'YT': 'Mayotte',
    'YU': 'Yugoslavia',
    'ZA': 'South Africa',
    'ZM': 'Zambia',
    'ZW': 'Zimbabwe'}

# (code, name) pairs sorted by country name for the generated module.
# This file is Python 2 (iteritems / string-escape below).
country_list = list(country_code.iteritems())
country_list.sort(key=lambda x: x[1])
# country_list.insert(1, country_list.pop(-1)) # fix Åland Islands' position

# Write the generated Django choices module.  The context manager
# guarantees the output file is closed (and flushed) even if a write
# fails part-way through — the previous open()/close() pair leaked the
# handle on error.
with open('countries.py', 'w') as output:
    output.write('from django.utils.translation import ugettext as _\n\n')
    output.write('COUNTRIES = (\n')
    for cc, name in country_list:
        # string-escape protects embedded apostrophes (e.g. "Cote
        # d'Ivoire") so the generated source stays valid Python.
        output.write("    ('%s', _('%s')),\n" % (cc, name.encode('string-escape')))
    output.write("    ('ZZ', _('Unknown or unspecified country')),\n")
    output.write(')\n')
| gpl-2.0 |
denny820909/builder | lib/python2.7/site-packages/buildbot-0.8.8-py2.7.egg/buildbot/test/unit/test_util_bbcollections.py | 4 | 2388 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from twisted.trial import unittest
from buildbot.util import bbcollections
class KeyedSets(unittest.TestCase):
    """Tests for bbcollections.KeyedSets, a mapping whose missing keys
    read back as empty sets."""

    def setUp(self):
        self.ks = bbcollections.KeyedSets()

    def test_getitem_default(self):
        self.assertEqual(self.ks['x'], set())

    # every remaining test reads its results back through __getitem__

    def test_add(self):
        self.ks.add('y', 2)
        self.assertEqual(self.ks['y'], {2})

    def test_add_twice(self):
        self.ks.add('z', 2)
        self.ks.add('z', 4)
        self.assertEqual(self.ks['z'], {2, 4})

    def test_discard_noError(self):
        self.ks.add('full', 12)
        self.ks.discard('empty', 13)  # unknown key: must not raise
        self.ks.discard('full', 13)   # absent element: must not raise
        self.assertEqual(self.ks['full'], {12})

    def test_discard_existing(self):
        self.ks.add('yarn', 'red')
        self.ks.discard('yarn', 'red')
        self.assertEqual(self.ks['yarn'], set())

    def test_contains_true(self):
        self.ks.add('yarn', 'red')
        self.assertTrue('yarn' in self.ks)

    def test_contains_false(self):
        self.assertFalse('yarn' in self.ks)

    def test_contains_setNamesNotContents(self):
        # membership is over the set names, not over the stored values
        self.ks.add('yarn', 'red')
        self.assertFalse('red' in self.ks)

    def test_pop_exists(self):
        for flavor in ('pop', 'coke', 'soda'):
            self.ks.add('names', flavor)
        popped = self.ks.pop('names')
        remaining = self.ks['names']
        self.assertEqual((popped, remaining),
                         ({'pop', 'coke', 'soda'}, set()))

    def test_pop_missing(self):
        self.assertEqual(self.ks.pop('flavors'), set())
| mit |
yawnosnorous/python-for-android | python3-alpha/python3-src/Tools/demo/markov.py | 107 | 3685 | #!/usr/bin/env python3
"""
Markov chain simulation of words or characters.
"""
class Markov:
    """Order-`histsize` Markov chain over slices of a sequence.

    `choice` is a callable that picks one element from a list of
    candidate continuations (e.g. random.choice).  Training sequences
    and generated output share the same type (str, tuple, ...).
    """

    def __init__(self, histsize, choice):
        self.histsize = histsize
        self.choice = choice
        # history slice -> list of observed continuations
        self.trans = {}

    def add(self, state, next):
        """Record that `next` may follow the history `state`."""
        self.trans.setdefault(state, []).append(next)

    def put(self, seq):
        """Feed one training sequence into the transition table."""
        n = self.histsize
        # An empty slice of seq marks the start state; None marks the end.
        self.add(None, seq[:0])
        for i in range(len(seq)):
            self.add(seq[max(0, i - n):i], seq[i:i + 1])
        self.add(seq[len(seq) - n:], None)

    def get(self):
        """Generate one sequence by walking the transition table."""
        n = self.histsize
        seq = self.choice(self.trans[None])
        while True:
            tail = seq[max(0, len(seq) - n):]
            step = self.choice(self.trans[tail])
            if not step:
                # None (or an empty continuation) terminates generation.
                return seq
            seq = seq + step
def test():
    """Command-line driver: train a Markov model on the input files (or
    stdin for '-') and print generated paragraphs forever.

    Options: -#  single-digit history size (default 2), -c characters
    (default), -w words, -d more debug output, -q quiet.
    """
    import sys, random, getopt
    args = sys.argv[1:]
    try:
        opts, args = getopt.getopt(args, '0123456789cdwq')
    except getopt.error:
        print('Usage: %s [-#] [-cddqw] [file] ...' % sys.argv[0])
        print('Options:')
        print('-#: 1-digit history size (default 2)')
        print('-c: characters (default)')
        print('-w: words')
        print('-d: more debugging output')
        print('-q: no debugging output')
        print('Input files (default stdin) are split in paragraphs')
        print('separated blank lines and each paragraph is split')
        print('in words by whitespace, then reconcatenated with')
        print('exactly one space separating words.')
        print('Output consists of paragraphs separated by blank')
        print('lines, where lines are no longer than 72 characters.')
        sys.exit(2)
    histsize = 2
    do_words = False
    debug = 1
    for o, a in opts:
        if '-0' <= o <= '-9': histsize = int(o[1:])
        if o == '-c': do_words = False
        if o == '-d': debug += 1
        if o == '-q': debug = 0
        if o == '-w': do_words = True
    if not args:
        args = ['-']
    m = Markov(histsize, random.choice)
    try:
        for filename in args:
            if filename == '-':
                f = sys.stdin
                if f.isatty():
                    print('Sorry, need stdin from file')
                    continue
            else:
                f = open(filename, 'r')
            if debug: print('processing', filename, '...')
            text = f.read()
            # Bug fix: only close files this loop opened.  Closing
            # sys.stdin here made a later '-' argument raise ValueError
            # on the isatty() check above.
            if f is not sys.stdin:
                f.close()
            paralist = text.split('\n\n')
            for para in paralist:
                if debug > 1: print('feeding ...')
                words = para.split()
                if words:
                    if do_words:
                        data = tuple(words)
                    else:
                        data = ' '.join(words)
                    m.put(data)
    except KeyboardInterrupt:
        print('Interrupted -- continue with data read so far')
    if not m.trans:
        print('No valid input files')
        return
    if debug: print('done.')
    if debug > 1:
        # Dump the short-history entries of the transition table.
        for key in m.trans.keys():
            if key is None or len(key) < histsize:
                print(repr(key), m.trans[key])
        if histsize == 0: print(repr(''), m.trans[''])
        print()
    while True:
        data = m.get()
        if do_words:
            words = data
        else:
            words = data.split()
        # Re-wrap the generated words at 72 columns.
        n = 0
        limit = 72
        for w in words:
            if n + len(w) > limit:
                print()
                n = 0
            print(w, end=' ')
            n += len(w) + 1
        print()
        print()
# Run the command-line demo when executed as a script.
if __name__ == "__main__":
    test()
| apache-2.0 |
barnsnake351/nova | nova/pci/devspec.py | 69 | 6570 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import re
from nova import exception
from nova.pci import utils
# Upper bounds for the numeric fields of a PCI identity/address, and the
# wildcard token accepted in whitelist entries.
MAX_VENDOR_ID = 0xFFFF
MAX_PRODUCT_ID = 0xFFFF
MAX_FUNC = 0x7
MAX_DOMAIN = 0xFFFF
MAX_BUS = 0xFF
MAX_SLOT = 0x1F
ANY = '*'
VIRTFN_RE = re.compile("virtfn\d+")


def get_value(v):
    """Parse a hexadecimal field value (without the '0x' prefix)."""
    return ast.literal_eval("0x%s" % v)


def get_pci_dev_info(pci_obj, property, max, hex_value):
    """Validate and normalize one field of `pci_obj` in place.

    The wildcard is left untouched; any other value is parsed as hex,
    range-checked against `max`, and written back formatted with
    `hex_value` (a zero-padded format string).
    """
    raw = getattr(pci_obj, property)
    if raw == ANY:
        return
    parsed = get_value(raw)
    if parsed > max:
        raise exception.PciConfigInvalidWhitelist(
            reason="invalid %s %s" % (property, raw))
    setattr(pci_obj, property, hex_value % parsed)
class PciAddress(object):
    """Manages the address fields of the whitelist.
    This class checks the address fields of the pci_passthrough_whitelist
    configuration option, validating the address fields.
    Example config are:
    | pci_passthrough_whitelist = {"address":"*:0a:00.*",
    |                              "physical_network":"physnet1"}
    | pci_passthrough_whitelist = {"vendor_id":"1137","product_id":"0071"}
    This function class will validate the address fields, check for wildcards,
    and insert wildcards where the field is left blank.
    """
    def __init__(self, pci_addr, is_physical_function):
        # Every field starts as the wildcard; _init_address_fields fills
        # in whatever the spec string actually pins down.
        self.domain = ANY
        self.bus = ANY
        self.slot = ANY
        self.func = ANY
        self.is_physical_function = is_physical_function
        self._init_address_fields(pci_addr)
    def _check_physical_function(self):
        # A spec with any wildcard field cannot name a single physical
        # function, so only fully-specified addresses are probed.
        if ANY in (self.domain, self.bus, self.slot, self.func):
            return
        self.is_physical_function = utils.is_physical_function(self)
    def _init_address_fields(self, pci_addr):
        """Parse `pci_addr` ("[[domain:]bus:]slot.func", fields optional
        or '*') into the four address attributes."""
        if self.is_physical_function:
            # Full addresses are split directly, no wildcard handling.
            (self.domain, self.bus, self.slot,
             self.func) = utils.get_pci_address_fields(pci_addr)
            return
        dbs, sep, func = pci_addr.partition('.')
        if func:
            fstr = func.strip()
            if fstr != ANY:
                try:
                    f = get_value(fstr)
                except SyntaxError:
                    # literal_eval raises SyntaxError on malformed hex
                    raise exception.PciDeviceWrongAddressFormat(
                        address=pci_addr)
                if f > MAX_FUNC:
                    raise exception.PciDeviceInvalidAddressField(
                        address=pci_addr, field="function")
                self.func = "%1x" % f
        if dbs:
            dbs_fields = dbs.split(':')
            if len(dbs_fields) > 3:
                raise exception.PciDeviceWrongAddressFormat(address=pci_addr)
            # If we got a partial address like ":00.", we need to to turn this
            # into a domain of ANY, a bus of ANY, and a slot of 00. This code
            # allows the address bus and/or domain to be left off
            dbs_all = [ANY for x in range(3 - len(dbs_fields))]
            dbs_all.extend(dbs_fields)
            dbs_checked = [s.strip() or ANY for s in dbs_all]
            self.domain, self.bus, self.slot = dbs_checked
            # Normalize each field to fixed-width lowercase hex.
            get_pci_dev_info(self, 'domain', MAX_DOMAIN, '%04x')
            get_pci_dev_info(self, 'bus', MAX_BUS, '%02x')
            get_pci_dev_info(self, 'slot', MAX_SLOT, '%02x')
        self._check_physical_function()
    def match(self, pci_addr, pci_phys_addr):
        """Return True when the device at `pci_addr` (with parent
        physical address `pci_phys_addr`, may be None) matches."""
        # Assume this is called given pci_add and pci_phys_addr from libvirt,
        # no attempt is made to verify pci_addr is a VF of pci_phys_addr
        if self.is_physical_function:
            # A PF spec matches the VF's parent address exactly.
            if not pci_phys_addr:
                return False
            domain, bus, slot, func = (
                utils.get_pci_address_fields(pci_phys_addr))
            return (self.domain == domain and self.bus == bus and
                    self.slot == slot and self.func == func)
        else:
            domain, bus, slot, func = (
                utils.get_pci_address_fields(pci_addr))
            conditions = [
                self.domain in (ANY, domain),
                self.bus in (ANY, bus),
                self.slot in (ANY, slot),
                self.func in (ANY, func)
            ]
            return all(conditions)
class PciDeviceSpec(object):
    """One entry of the PCI passthrough whitelist: vendor/product ids
    plus either an address pattern or a device (interface) name."""
    def __init__(self, dev_spec):
        self.tags = dev_spec
        self._init_dev_details()
        self.dev_count = 0
    def _init_dev_details(self):
        # Pop the identity keys out of the tag dict; whatever remains is
        # kept as free-form tags (see get_tags()).
        self.vendor_id = self.tags.pop("vendor_id", ANY)
        self.product_id = self.tags.pop("product_id", ANY)
        self.address = self.tags.pop("address", None)
        self.dev_name = self.tags.pop("devname", None)
        self.vendor_id = self.vendor_id.strip()
        # Normalize the ids to 4-digit hex (wildcard passes through).
        get_pci_dev_info(self, 'vendor_id', MAX_VENDOR_ID, '%04x')
        get_pci_dev_info(self, 'product_id', MAX_PRODUCT_ID, '%04x')
        pf = False
        # "address" and "devname" are mutually exclusive.
        if self.address and self.dev_name:
            raise exception.PciDeviceInvalidDeviceName()
        if not self.address:
            if self.dev_name:
                # Resolve the interface name to a PCI address; pf says
                # whether the name denotes a physical function.
                self.address, pf = utils.get_function_by_ifname(
                    self.dev_name)
                if not self.address:
                    raise exception.PciDeviceNotFoundById(id=self.dev_name)
            else:
                # Neither given: match any address.
                self.address = "*:*:*.*"
        self.address = PciAddress(self.address, pf)
    def match(self, dev_dict):
        """Return True if the device description dict satisfies this spec."""
        conditions = [
            self.vendor_id in (ANY, dev_dict['vendor_id']),
            self.product_id in (ANY, dev_dict['product_id']),
            self.address.match(dev_dict['address'],
                               dev_dict.get('phys_function'))
        ]
        return all(conditions)
    def match_pci_obj(self, pci_obj):
        """Like match(), but takes a PciDevice object."""
        if pci_obj.extra_info:
            phy_func = pci_obj.extra_info.get('phys_function')
        else:
            phy_func = None
        return self.match({'vendor_id': pci_obj.vendor_id,
                           'product_id': pci_obj.product_id,
                           'address': pci_obj.address,
                           'phys_function': phy_func})
    def get_tags(self):
        """Return the remaining (non-identity) whitelist tags."""
        return self.tags
| apache-2.0 |
leiferikb/bitpop | src/tools/prepare-bisect-perf-regression.py | 35 | 2332 | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prepare Performance Test Bisect Tool
This script is used by a trybot to create a working directory and sync an
initial copy of the depot for use in bisecting performance regressions.
An example usage:
./tools/prepare-bisect-perf-regressions.py --working_directory "~/builds"
--output_buildbot_annotations
Would result in creating ~/builds/bisect and then populating it with a copy of
the depot.
"""
import optparse
import sys
import bisect_utils
def main():
usage = ('%prog [options] [-- chromium-options]\n'
'Prepares a temporary depot for use on a trybot.')
parser = optparse.OptionParser(usage=usage)
parser.add_option('-w', '--working_directory',
type='str',
help='Path to the working directory where the script will '
'do an initial checkout of the chromium depot. The '
'files will be placed in a subdirectory "bisect" under '
'working_directory and that will be used to perform the '
'bisection.')
parser.add_option('--output_buildbot_annotations',
action="store_true",
help='Add extra annotation output for buildbot.')
parser.add_option('--target_platform',
type='choice',
choices=['chromium', 'cros', 'android'],
default='chromium',
help='The target platform. Choices are "chromium" (current '
'platform), "cros", or "android". If you specify something '
'other than "chromium", you must be properly set up to '
'build that platform.')
(opts, args) = parser.parse_args()
if not opts.working_directory:
print 'Error: missing required parameter: --working_directory'
print
parser.print_help()
return 1
if not bisect_utils.CheckIfBisectDepotExists(opts):
try:
bisect_utils.CreateBisectDirectoryAndSetupDepot(opts,
bisect_utils.DEFAULT_GCLIENT_CUSTOM_DEPS)
except RuntimeError:
return 1
return 0
# Script entry point: the exit status mirrors main()'s return value.
if __name__ == '__main__':
  sys.exit(main())
| gpl-3.0 |
bdang2012/taiga-back-casting | taiga/export_import/service.py | 1 | 21533 | # Copyright (C) 2014-2015 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014-2015 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2015 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import base64
import gc
import resource
import os
import os.path as path
import uuid
from unidecode import unidecode
from django.template.defaultfilters import slugify
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.core.files.storage import default_storage
from taiga.base.utils import json
from taiga.projects.history.services import make_key_from_model_object, take_snapshot
from taiga.timeline.service import build_project_namespace
from taiga.projects.references import sequences as seq
from taiga.projects.references import models as refs
from taiga.projects.userstories.models import RolePoints
from taiga.projects.services import find_invited_user
from taiga.base.api.fields import get_component
from . import serializers
# Module-level accumulator for import errors, keyed by section name.
_errors_log = {}


def get_errors(clear=True):
    """Return a (shallow) copy of the accumulated errors; by default the
    accumulator is emptied afterwards."""
    snapshot = dict(_errors_log)
    if clear:
        _errors_log.clear()
    return snapshot


def add_errors(section, errors):
    """Append `errors` to the list kept for `section`."""
    _errors_log.setdefault(section, []).append(errors)
def render_project(project, outfile, chunk_size = 8190):
    """Stream a project export as JSON into `outfile`.

    The JSON object is written field by field instead of serializing the
    whole project at once, so memory stays bounded: the four item lists
    (wiki pages, user stories, tasks, issues) are iterated one row at a
    time and their attachments' files are base64-encoded in
    `chunk_size`-byte chunks straight into the output.
    """
    serializer = serializers.ProjectExportSerializer(project)
    outfile.write('{\n')
    first_field = True
    for field_name in serializer.fields.keys():
        # Avoid writing "," in the last element
        if not first_field:
            outfile.write(",\n")
        else:
            first_field = False
        field = serializer.fields.get(field_name)
        field.initialize(parent=serializer, field_name=field_name)
        # These four "special" fields have attachments so we serialize
        # them row by row instead of through the project serializer.
        if field_name in ["wiki_pages", "user_stories", "tasks", "issues"]:
            value = get_component(project, field_name)
            outfile.write('"{}": [\n'.format(field_name))
            # The attachments sub-field is popped so to_native() below
            # does not serialize the attachments (we stream them here).
            attachments_field = field.fields.pop("attachments", None)
            if attachments_field:
                attachments_field.initialize(parent=field, field_name="attachments")
            first_item = True
            for item in value.iterator():
                # Avoid writing "," in the last element
                if not first_item:
                    outfile.write(",\n")
                else:
                    first_item = False
                dumped_value = json.dumps(field.to_native(item))
                # Strip the closing '}' and splice in the attachments key.
                writing_value = dumped_value[:-1]+ ',\n "attachments": [\n'
                outfile.write(writing_value)
                first_attachment = True
                for attachment in item.attachments.iterator():
                    # Avoid writing "," in the last element
                    if not first_attachment:
                        outfile.write(",\n")
                    else:
                        first_attachment = False
                    # Write all the data except the serialized file
                    attachment_serializer = serializers.AttachmentExportSerializer(instance=attachment)
                    attached_file_serializer = attachment_serializer.fields.pop("attached_file")
                    dumped_value = json.dumps(attachment_serializer.data)
                    dumped_value = dumped_value[:-1] + ',\n "attached_file":{\n "data":"'
                    outfile.write(dumped_value)
                    # We write the attached_files by chunks so the memory used is not increased
                    attachment_file = attachment.attached_file
                    with default_storage.open(attachment_file.name) as f:
                        while True:
                            bin_data = f.read(chunk_size)
                            if not bin_data:
                                break
                            b64_data = base64.b64encode(bin_data).decode('utf-8')
                            outfile.write(b64_data)
                    outfile.write('", \n "name":"{}"}}\n}}'.format(os.path.basename(attachment_file.name)))
                # Close this item's attachments list and the item object,
                # then flush and collect so buffers don't accumulate.
                outfile.write(']}')
                outfile.flush()
                gc.collect()
            outfile.write(']')
        else:
            # Plain field: serialize in one shot.
            value = field.field_to_native(project, field_name)
            outfile.write('"{}": {}'.format(field_name, json.dumps(value)))
    outfile.write('}\n')
# Fields of the export payload that are imported by their own dedicated
# store_* helpers and must not reach the project serializer.  Hoisted out
# of the per-key loop (it used to be rebuilt on every iteration) and made
# a set for O(1) membership tests.
_PROJECT_EXCLUDED_FIELDS = frozenset([
    "default_points", "default_us_status", "default_task_status",
    "default_priority", "default_severity", "default_issue_status",
    "default_issue_type", "memberships", "points", "us_statuses",
    "task_statuses", "issue_statuses", "priorities", "severities",
    "issue_types", "userstorycustomattributes", "taskcustomattributes",
    "issuecustomattributes", "roles", "milestones", "wiki_pages",
    "wiki_links", "notify_policies", "user_stories", "issues", "tasks",
])


def store_project(data):
    """Create the Project object from the exported `data` dict.

    Returns the serializer on success; on validation failure the errors
    are recorded under the "project" section and None is returned.
    """
    project_data = {key: value for key, value in data.items()
                    if key not in _PROJECT_EXCLUDED_FIELDS}
    serialized = serializers.ProjectExportSerializer(data=project_data)
    if serialized.is_valid():
        serialized.object._importing = True
        serialized.object.save()
        serialized.save_watchers()
        return serialized
    add_errors("project", serialized.errors)
    return None
def _store_choice(project, data, field, serializer):
    """Deserialize one choice row and attach it to `project`.

    Returns the saved object, or None after recording the validation
    errors under `field`.
    """
    instance = serializer(data=data)
    if not instance.is_valid():
        add_errors(field, instance.errors)
        return None
    instance.object.project = project
    instance.object._importing = True
    instance.save()
    return instance.object
def store_choices(project, data, field, serializer):
    """Import every entry under `field`; the result list holds the saved
    objects (None for entries that failed validation)."""
    return [_store_choice(project, choice_data, field, serializer)
            for choice_data in data.get(field, [])]
def _store_custom_attribute(project, data, field, serializer):
    """Deserialize one custom-attribute row and attach it to `project`.

    Returns the saved object, or None after recording the validation
    errors under `field`.
    """
    row = serializer(data=data)
    if not row.is_valid():
        add_errors(field, row.errors)
        return None
    row.object.project = project
    row.object._importing = True
    row.save()
    return row.object
def store_custom_attributes(project, data, field, serializer):
    """Import every custom attribute under `field`; the result list holds
    the saved objects (None for entries that failed validation)."""
    return [_store_custom_attribute(project, attribute_data, field, serializer)
            for attribute_data in data.get(field, [])]
def store_custom_attributes_values(obj, data_values, obj_field, serializer_class):
    """Create or update the custom-attribute values attached to `obj`.

    `obj_field` names the serializer field holding the owner's id.
    Returns the serializer on success, None after recording errors.
    """
    payload = {
        obj_field: obj.id,
        "attributes_values": data_values,
    }
    try:
        # Update the existing values record when the object has one...
        existing = obj.custom_attributes_values
    except ObjectDoesNotExist:
        # ...otherwise create it from scratch.
        serializer = serializer_class(data=payload)
    else:
        serializer = serializer_class(existing, data=payload)
    if not serializer.is_valid():
        add_errors("custom_attributes_values", serializer.errors)
        return None
    serializer.save()
    return serializer
def _use_id_instead_name_as_key_in_custom_attributes_values(custom_attributes, values):
ret = {}
for attr in custom_attributes:
value = values.get(attr["name"], None)
if value is not None:
ret[str(attr["id"])] = value
return ret
def store_role(project, role):
    """Import one role row for `project`.

    Returns the serializer on success, None after recording errors
    under "roles".
    """
    serialized = serializers.RoleExportSerializer(data=role)
    if not serialized.is_valid():
        add_errors("roles", serialized.errors)
        return None
    serialized.object.project = project
    serialized.object._importing = True
    serialized.save()
    return serialized
def store_roles(project, data):
    """Import every role in the payload; failed rows are logged by
    store_role and skipped from the result."""
    imported = []
    for role_data in data.get("roles", []):
        stored = store_role(project, role_data)
        if stored:
            imported.append(stored)
    return imported
def store_default_choices(project, data):
    """Point the project's default_* fields at the named choice rows.

    When the export carries no name for a field, the first existing row
    of the related choice table is used instead.
    """
    def _resolve(field, related):
        # One-line purpose: pick the row named in `data`, else the first.
        if field in data:
            return related.all().get(name=data[field])
        return related.all().first()

    project.default_points = _resolve("default_points", project.points)
    project.default_issue_type = _resolve("default_issue_type", project.issue_types)
    project.default_issue_status = _resolve("default_issue_status", project.issue_statuses)
    project.default_us_status = _resolve("default_us_status", project.us_statuses)
    project.default_task_status = _resolve("default_task_status", project.task_statuses)
    project.default_priority = _resolve("default_priority", project.priorities)
    project.default_severity = _resolve("default_severity", project.severities)
    project._importing = True
    project.save()
def store_membership(project, membership):
    """Import one membership row for `project`, minting a fresh
    invitation token.

    The stored email is matched against already-invited users so the
    membership links to an existing account when possible.  Returns the
    serializer on success, None after recording errors.
    """
    serialized = serializers.MembershipExportSerializer(
        data=membership, context={"project": project})
    if not serialized.is_valid():
        add_errors("memberships", serialized.errors)
        return None
    member = serialized.object
    member.project = project
    member._importing = True
    member.token = str(uuid.uuid1())
    member.user = find_invited_user(member.email, default=member.user)
    serialized.save()
    return serialized
def store_memberships(project, data):
    """Import every membership in the payload; the result list holds one
    entry per row (None for rows that failed validation)."""
    return [store_membership(project, membership_data)
            for membership_data in data.get("memberships", [])]
def store_task(project, data):
    # Persist one task from an export dump into ``project``.
    #
    # Falls back to the project's default task status when the dump does not
    # name one, then stores the task together with its attachments, history
    # entries and custom-attribute values.  Returns the serializer on
    # success; on validation failure the errors are recorded under "tasks"
    # and None is returned.
    if "status" not in data and project.default_task_status:
        data["status"] = project.default_task_status.name
    serialized = serializers.TaskExportSerializer(data=data, context={"project": project})
    if serialized.is_valid():
        serialized.object.project = project
        if serialized.object.owner is None:
            serialized.object.owner = serialized.object.project.owner
        # Import-mode flags — presumably checked elsewhere (signals/history)
        # to suppress side effects and notifications; TODO confirm.
        serialized.object._importing = True
        serialized.object._not_notify = True
        serialized.save()
        serialized.save_watchers()
        if serialized.object.ref:
            # Dump provided a reference: make sure the project's reference
            # sequence is at least that high so future refs don't collide.
            sequence_name = refs.make_sequence_name(project)
            if not seq.exists(sequence_name):
                seq.create(sequence_name)
            seq.set_max(sequence_name, serialized.object.ref)
        else:
            # No reference in the dump: allocate a fresh one.
            serialized.object.ref, _ = refs.make_reference(serialized.object, project)
            serialized.object.save()
        for task_attachment in data.get("attachments", []):
            store_attachment(project, serialized.object, task_attachment)
        history_entries = data.get("history", [])
        for history in history_entries:
            store_history(project, serialized.object, history)
        if not history_entries:
            # No imported history: snapshot the current state so later
            # history diffs have a baseline.
            take_snapshot(serialized.object, user=serialized.object.owner)
        custom_attributes_values = data.get("custom_attributes_values", None)
        if custom_attributes_values:
            # Dumps key custom attributes by name; the storage layer wants ids.
            custom_attributes = serialized.object.project.taskcustomattributes.all().values('id', 'name')
            custom_attributes_values = _use_id_instead_name_as_key_in_custom_attributes_values(custom_attributes,
                                                                                               custom_attributes_values)
            store_custom_attributes_values(serialized.object, custom_attributes_values,
                                           "task", serializers.TaskCustomAttributesValuesExportSerializer)
        return serialized
    add_errors("tasks", serialized.errors)
    return None
def store_milestone(project, milestone):
    """Deserialize and persist one milestone (sprint) for ``project``.

    Tasks attached to the milestone but not to any user story are stored
    too, with their ``user_story`` forced to None.  Returns the serializer
    on success, None on validation failure.
    """
    serialized = serializers.MilestoneExportSerializer(data=milestone, project=project)
    if not serialized.is_valid():
        add_errors("milestones", serialized.errors)
        return None
    serialized.object.project = project
    serialized.object._importing = True
    serialized.save()
    serialized.save_watchers()
    for task_data in milestone.get("tasks_without_us", []):
        task_data["user_story"] = None
        store_task(project, task_data)
    return serialized
def store_attachment(project, obj, attachment):
    """Persist one attachment and link it to ``obj`` via generic FK.

    Size and file name are derived from the attached file itself.  Note
    that, unlike most ``store_*`` helpers, the serializer is returned even
    when validation fails (after recording the errors).
    """
    serialized = serializers.AttachmentExportSerializer(data=attachment)
    if serialized.is_valid():
        attached = serialized.object
        attached.content_type = ContentType.objects.get_for_model(obj.__class__)
        attached.object_id = obj.id
        attached.project = project
        if attached.owner is None:
            attached.owner = attached.project.owner
        attached._importing = True
        attached.size = attached.attached_file.size
        attached.name = path.basename(attached.attached_file.name)
        serialized.save()
        return serialized
    add_errors("attachments", serialized.errors)
    return serialized
def store_timeline_entry(project, timeline):
    """Persist one project-scoped timeline entry.

    The serializer is returned whether or not validation succeeded; on
    failure the errors are recorded under "timeline" first.
    """
    serialized = serializers.TimelineExportSerializer(data=timeline, context={"project": project})
    if not serialized.is_valid():
        add_errors("timeline", serialized.errors)
        return serialized
    entry = serialized.object
    entry.project = project
    entry.namespace = build_project_namespace(project)
    entry.object_id = project.id
    entry._importing = True
    serialized.save()
    return serialized
def store_history(project, obj, history):
    """Persist one history entry keyed to model instance ``obj``.

    A missing diff is normalized to an empty list.  The serializer is
    returned whether or not validation succeeded; on failure the errors
    are recorded under "history" first.
    """
    serialized = serializers.HistoryExportSerializer(data=history, context={"project": project})
    if not serialized.is_valid():
        add_errors("history", serialized.errors)
        return serialized
    entry = serialized.object
    entry.key = make_key_from_model_object(obj)
    if entry.diff is None:
        entry.diff = []
    entry._importing = True
    serialized.save()
    return serialized
def store_wiki_page(project, wiki_page):
    """Persist one wiki page plus its attachments and history.

    The slug is normalized (transliterated and slugified) before
    deserialization.  Returns the serializer on success, None on
    validation failure.
    """
    wiki_page["slug"] = slugify(unidecode(wiki_page.get("slug", "")))
    serialized = serializers.WikiPageExportSerializer(data=wiki_page)
    if not serialized.is_valid():
        add_errors("wiki_pages", serialized.errors)
        return None
    page = serialized.object
    page.project = project
    if page.owner is None:
        page.owner = page.project.owner
    page._importing = True
    page._not_notify = True
    serialized.save()
    serialized.save_watchers()
    for attachment in wiki_page.get("attachments", []):
        store_attachment(project, page, attachment)
    history_entries = wiki_page.get("history", [])
    for history in history_entries:
        store_history(project, page, history)
    if not history_entries:
        # No imported history: snapshot so future history diffs have a base.
        take_snapshot(page, user=page.owner)
    return serialized
def store_wiki_link(project, wiki_link):
    """Persist one wiki link for ``project``.

    Returns the serializer on success, None on validation failure.
    """
    serialized = serializers.WikiLinkExportSerializer(data=wiki_link)
    if not serialized.is_valid():
        add_errors("wiki_links", serialized.errors)
        return None
    serialized.object.project = project
    serialized.object._importing = True
    serialized.save()
    return serialized
def store_role_point(project, us, role_point):
    # Persist one role/points assignment for user story ``us``.
    #
    # If the story already has a points row for that role, the existing row
    # is updated in place; otherwise a new one is created.  Note the mixed
    # return types: the existing RolePoints model on update, the freshly
    # saved serializer object on creation, or None on validation failure.
    serialized = serializers.RolePointsExportSerializer(data=role_point, context={"project": project})
    if serialized.is_valid():
        try:
            existing_role_point = us.role_points.get(role=serialized.object.role)
            existing_role_point.points = serialized.object.points
            existing_role_point.save()
            return existing_role_point
        except RolePoints.DoesNotExist:
            serialized.object.user_story = us
            serialized.save()
            return serialized.object
    add_errors("role_points", serialized.errors)
    return None
def store_user_story(project, data):
    # Persist one user story from an export dump into ``project``.
    #
    # Falls back to the project's default US status when the dump does not
    # name one, then stores the story with its attachments, role points,
    # history entries and custom-attribute values.  Returns the serializer
    # on success; on validation failure the errors are recorded under
    # "user_stories" and None is returned.
    if "status" not in data and project.default_us_status:
        data["status"] = project.default_us_status.name
    # role_points and custom_attributes_values are stored separately below,
    # so strip them from the payload handed to the serializer.
    us_data = {key: value for key, value in data.items() if key not in ["role_points", "custom_attributes_values"]}
    serialized = serializers.UserStoryExportSerializer(data=us_data, context={"project": project})
    if serialized.is_valid():
        serialized.object.project = project
        if serialized.object.owner is None:
            serialized.object.owner = serialized.object.project.owner
        # Import-mode flags — presumably checked elsewhere (signals/history)
        # to suppress side effects and notifications; TODO confirm.
        serialized.object._importing = True
        serialized.object._not_notify = True
        serialized.save()
        serialized.save_watchers()
        if serialized.object.ref:
            # Dump provided a reference: keep the project's reference
            # sequence at least that high so future refs don't collide.
            sequence_name = refs.make_sequence_name(project)
            if not seq.exists(sequence_name):
                seq.create(sequence_name)
            seq.set_max(sequence_name, serialized.object.ref)
        else:
            # No reference in the dump: allocate a fresh one.
            serialized.object.ref, _ = refs.make_reference(serialized.object, project)
            serialized.object.save()
        for us_attachment in data.get("attachments", []):
            store_attachment(project, serialized.object, us_attachment)
        for role_point in data.get("role_points", []):
            store_role_point(project, serialized.object, role_point)
        history_entries = data.get("history", [])
        for history in history_entries:
            store_history(project, serialized.object, history)
        if not history_entries:
            # No imported history: snapshot the current state as a baseline.
            take_snapshot(serialized.object, user=serialized.object.owner)
        custom_attributes_values = data.get("custom_attributes_values", None)
        if custom_attributes_values:
            # Dumps key custom attributes by name; the storage layer wants ids.
            custom_attributes = serialized.object.project.userstorycustomattributes.all().values('id', 'name')
            custom_attributes_values = _use_id_instead_name_as_key_in_custom_attributes_values(custom_attributes,
                                                                                               custom_attributes_values)
            store_custom_attributes_values(serialized.object, custom_attributes_values,
                                           "user_story", serializers.UserStoryCustomAttributesValuesExportSerializer)
        return serialized
    add_errors("user_stories", serialized.errors)
    return None
def store_issue(project, data):
    """Persist one issue from an export dump into ``project``.

    Missing type/status/priority/severity fall back to the project's
    defaults.  Attachments, history entries and custom-attribute values
    are stored along with the issue.  Returns the serializer on success;
    on validation failure the errors are recorded under "issues" and
    None is returned.
    """
    # Fill in the project-level defaults *before* building the serializer so
    # they are guaranteed to be part of the validated data.  The previous
    # ordering (serializer first, defaults after) only worked because the
    # serializer keeps a reference to ``data``; this also makes the function
    # consistent with store_task/store_user_story.
    if "type" not in data and project.default_issue_type:
        data["type"] = project.default_issue_type.name
    if "status" not in data and project.default_issue_status:
        data["status"] = project.default_issue_status.name
    if "priority" not in data and project.default_priority:
        data["priority"] = project.default_priority.name
    if "severity" not in data and project.default_severity:
        data["severity"] = project.default_severity.name
    serialized = serializers.IssueExportSerializer(data=data, context={"project": project})
    if serialized.is_valid():
        serialized.object.project = project
        if serialized.object.owner is None:
            serialized.object.owner = serialized.object.project.owner
        # Import-mode flags: suppress side effects and notifications.
        serialized.object._importing = True
        serialized.object._not_notify = True
        serialized.save()
        serialized.save_watchers()
        if serialized.object.ref:
            # Dump provided a reference: keep the project's reference
            # sequence at least that high so future refs don't collide.
            sequence_name = refs.make_sequence_name(project)
            if not seq.exists(sequence_name):
                seq.create(sequence_name)
            seq.set_max(sequence_name, serialized.object.ref)
        else:
            # No reference in the dump: allocate a fresh one.
            serialized.object.ref, _ = refs.make_reference(serialized.object, project)
            serialized.object.save()
        for attachment in data.get("attachments", []):
            store_attachment(project, serialized.object, attachment)
        history_entries = data.get("history", [])
        for history in history_entries:
            store_history(project, serialized.object, history)
        if not history_entries:
            # No imported history: snapshot the current state as a baseline.
            take_snapshot(serialized.object, user=serialized.object.owner)
        custom_attributes_values = data.get("custom_attributes_values", None)
        if custom_attributes_values:
            # Dumps key custom attributes by name; the storage layer wants ids.
            custom_attributes = serialized.object.project.issuecustomattributes.all().values('id', 'name')
            custom_attributes_values = _use_id_instead_name_as_key_in_custom_attributes_values(custom_attributes,
                                                                                               custom_attributes_values)
            store_custom_attributes_values(serialized.object, custom_attributes_values,
                                           "issue", serializers.IssueCustomAttributesValuesExportSerializer)
        return serialized
    add_errors("issues", serialized.errors)
    return None
| agpl-3.0 |
ajmarks/wal-e | wal_e/worker/s3/s3_deleter.py | 14 | 1104 | from wal_e import exception
from wal_e import retries
from wal_e.worker.base import _Deleter
class Deleter(_Deleter):
    """Deletes S3 keys in batches, with retries."""

    @retries.retry()
    def _delete_batch(self, page):
        # All keys in a page must belong to one bucket: a single
        # multi-object delete is issued against that bucket, so a key from
        # another bucket would silently delete a similarly named key from
        # the wrong place.
        #
        # In wal-e's use, homogeneity of the bucket retaining the keys
        # is presumed to be always the case.
        bucket = page[0].bucket
        names = []
        for key in page:
            if key.bucket.name != bucket.name:
                raise exception.UserCritical(
                    msg='submitted keys are not part of the same bucket',
                    detail=('The clashing bucket names are {0} and {1}.'
                            .format(key.bucket.name, bucket.name)),
                    hint='This should be reported as a bug.')
            names.append(key.name)
        bucket.delete_keys(names)
| bsd-3-clause |
fpt-software/seniot-data-workflow | nodes/core/hardware/nrgpio.py | 38 | 6200 | #!/usr/bin/python
#
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Import library functions we need
import RPi.GPIO as GPIO
import sys
bounce = 20 # bounce time in mS to apply
# This script is driven by Node-RED over stdin/stdout: each mode below reads
# commands from stdin ("close" terminates) and/or prints pin events.
# Written for Python 2 (print statements, raw_input) and it enforces that.
if sys.version_info >= (3,0):
    print("Sorry - currently only configured to work with python 2.x")
    sys.exit(1)
if len(sys.argv) > 1:
    # argv[1] = mode, argv[2] = physical (BOARD-numbered) pin.
    cmd = sys.argv[1].lower()
    pin = int(sys.argv[2])
    GPIO.setmode(GPIO.BOARD)
    GPIO.setwarnings(False)
    if cmd == "pwm":
        # PWM output: each stdin line is a duty cycle 0-100.
        #print "Initialised pin "+str(pin)+" to PWM"
        GPIO.setup(pin,GPIO.OUT)
        p = GPIO.PWM(pin, 100)
        p.start(0)
        while True:
            try:
                data = raw_input()
                if 'close' in data:
                    sys.exit(0)
                p.ChangeDutyCycle(float(data))
            except (EOFError, SystemExit): # hopefully always caused by us sigint'ing the program
                GPIO.cleanup(pin)
                sys.exit(0)
            except Exception as ex:
                print "bad data: "+data
    elif cmd == "buzz":
        # Buzzer: each stdin line is a frequency in Hz; 0 stops the tone.
        #print "Initialised pin "+str(pin)+" to Buzz"
        GPIO.setup(pin,GPIO.OUT)
        p = GPIO.PWM(pin, 100)
        p.stop()
        while True:
            try:
                data = raw_input()
                if 'close' in data:
                    sys.exit(0)
                elif float(data) == 0:
                    p.stop()
                else:
                    p.start(50)
                    p.ChangeFrequency(float(data))
            except (EOFError, SystemExit): # hopefully always caused by us sigint'ing the program
                GPIO.cleanup(pin)
                sys.exit(0)
            except Exception as ex:
                print "bad data: "+data
    elif cmd == "out":
        # Digital output: optional argv[3] sets the initial level; then each
        # stdin line is coerced to 0/1 (any non-zero or unparsable -> 0/1).
        #print "Initialised pin "+str(pin)+" to OUT"
        GPIO.setup(pin,GPIO.OUT)
        if len(sys.argv) == 4:
            GPIO.output(pin,int(sys.argv[3]))
        while True:
            try:
                data = raw_input()
                if 'close' in data:
                    sys.exit(0)
                data = int(data)
            except (EOFError, SystemExit): # hopefully always caused by us sigint'ing the program
                GPIO.cleanup(pin)
                sys.exit(0)
            except:
                data = 0
            if data != 0:
                data = 1
            GPIO.output(pin,data)
    elif cmd == "in":
        # Digital input: optional argv[3] selects pull-up/down; prints the
        # initial level, then every debounced edge via the event callback.
        #print "Initialised pin "+str(pin)+" to IN"
        def handle_callback(chan):
            print GPIO.input(chan)
        if len(sys.argv) == 4:
            if sys.argv[3].lower() == "up":
                GPIO.setup(pin,GPIO.IN,GPIO.PUD_UP)
            elif sys.argv[3].lower() == "down":
                GPIO.setup(pin,GPIO.IN,GPIO.PUD_DOWN)
            else:
                GPIO.setup(pin,GPIO.IN)
        else:
            GPIO.setup(pin,GPIO.IN)
        print GPIO.input(pin)
        GPIO.add_event_detect(pin, GPIO.BOTH, callback=handle_callback, bouncetime=bounce)
        while True:
            try:
                data = raw_input()
                if 'close' in data:
                    sys.exit(0)
            except (EOFError, SystemExit): # hopefully always caused by us sigint'ing the program
                GPIO.cleanup(pin)
                sys.exit(0)
    elif cmd == "byte":
        # Byte output: drive 8 fixed pins from an integer's bits; here
        # argv[2] ("pin") selects bit order (1 = MSB first).
        # NOTE(review): "list" shadows the builtin; left as-is (comments only).
        #print "Initialised BYTE mode - "+str(pin)+
        list = [7,11,13,12,15,16,18,22]
        GPIO.setup(list,GPIO.OUT)
        while True:
            try:
                data = raw_input()
                if 'close' in data:
                    sys.exit(0)
                data = int(data)
            except (EOFError, SystemExit): # hopefully always caused by us sigint'ing the program
                GPIO.cleanup()
                sys.exit(0)
            except:
                data = 0
            for bit in range(8):
                if pin == 1:
                    mask = 1 << (7 - bit)
                else:
                    mask = 1 << bit
                GPIO.output(list[bit], data & mask)
    elif cmd == "borg":
        # RGB LED ("borg") mode: each stdin line is "r,g,b" duty cycles.
        #print "Initialised BORG mode - "+str(pin)+
        GPIO.setup(11,GPIO.OUT)
        GPIO.setup(13,GPIO.OUT)
        GPIO.setup(15,GPIO.OUT)
        r = GPIO.PWM(11, 100)
        g = GPIO.PWM(13, 100)
        b = GPIO.PWM(15, 100)
        r.start(0)
        g.start(0)
        b.start(0)
        while True:
            try:
                data = raw_input()
                if 'close' in data:
                    sys.exit(0)
                c = data.split(",")
                r.ChangeDutyCycle(float(c[0]))
                g.ChangeDutyCycle(float(c[1]))
                b.ChangeDutyCycle(float(c[2]))
            except (EOFError, SystemExit): # hopefully always caused by us sigint'ing the program
                GPIO.cleanup()
                sys.exit(0)
            except:
                data = 0
    elif cmd == "rev":
        # Report board revision / library version, then exit.
        print GPIO.RPI_REVISION
    elif cmd == "ver":
        print GPIO.VERSION
    elif cmd == "mouse": # catch mice button events
        # Reads raw 3-byte PS/2-style packets; here "pin" is reused as a
        # button mask rather than a GPIO pin.
        file = open( "/dev/input/mice", "rb" )
        oldbutt = 0
        def getMouseEvent():
            global oldbutt
            global pin
            buf = file.read(3)
            pin = pin & 0x07
            button = ord( buf[0] ) & pin # mask out just the required button(s)
            if button != oldbutt: # only send if changed
                oldbutt = button
                print button
        while True:
            try:
                getMouseEvent()
            except:
                file.close()
                sys.exit(0)
    else:
        print "Bad parameters - {in|out|pwm} {pin} {value|up|down}"
| apache-2.0 |
MwanzanFelipe/rockletonfortune | mysite/settings.py | 2 | 3895 | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.8.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository; rotate it and load
# it from an environment variable before any real deployment.
SECRET_KEY = '-c&qt=71oi^e5s8(ene*$b89^#%*0xeve$x_trs91veok9#0h0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Must list the served hostnames once DEBUG is False, or Django returns 400s.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'polls',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
# [START db_setup]
# NOTE(review): duplicate of the module-level "import os" above; harmless.
import os
# On App Engine, connect through the Cloud SQL unix socket; elsewhere
# (e.g. local development) use a plain TCP MySQL connection.  The
# <your-...> placeholders must be filled in before this will connect.
if os.getenv('SERVER_SOFTWARE', '').startswith('Google App Engine'):
    # Running on production App Engine, so use a Google Cloud SQL database.
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.mysql',
            'HOST': '/cloudsql/<your-project-id>:<your-cloud-sql-instance>',
            'NAME': '<your-database-name>',
            'USER': 'root',
        }
    }
else:
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.mysql',
            'NAME': '<your-database-name>',
            'USER': '<your-database-user>',
            'PASSWORD': '<your-database-password>',
            'HOST': '<your-database-host>',
            'PORT': '3306',
        }
    }
# [END db_setup]
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_ROOT='static'
STATIC_URL = '/static/'
| bsd-3-clause |
niwinz/Green-Mine | src/greenmine/urls.py | 1 | 2396 | # -*- coding: utf-8 -*-
from django.conf.urls.defaults import patterns, include, url
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf import settings
# Packages whose translations are exposed to the javascript_catalog view.
js_info_dict = {
    'packages': ('greenmine',),
}
from greenmine.base.views import api
from greenmine.base.views import config
from greenmine.scrum.views import main
# Internal AJAX-style API endpoints (mounted under /intern-api/ below).
internapi_urlpatterns = patterns('',
    url(r'^autocomplete/user/list/$', api.UserListApiView.as_view(), name='user-list'),
    url(r'^i18n/lang/$', api.I18NLangChangeApiView.as_view(),
        name='i18n-setlang'),
)
# FIXME: project administration is pending to refactor.
main_patterns = patterns('',
    url(r'^$', main.HomeView.as_view(), name='projects'),
    url(r'^users/$', main.UserList.as_view(), name='users'),
    url(r'^users/create/$', main.UserCreateView.as_view(), name='users-create'),
    url(r'^users/(?P<uid>\d+)/view/$', main.UserView.as_view(), name='users-view'),
    url(r'^users/(?P<uid>\d+)/edit/$', main.UserEditView.as_view(), name='users-edit'),
    url(r'^users/(?P<uid>\d+)/delete/$', main.UserDelete.as_view(), name='users-delete'),
    url(r'^users/(?P<uid>\d+)/send/recovery/password/',
        main.SendRecoveryPasswordView.as_view(), name='users-recovery-password'),
)
# Top-level URL map: app includes first, then the catch-all main patterns.
urlpatterns = patterns('',
    url(r"^auth/", include("greenmine.profile.urls")),
    url(r"^project/", include("greenmine.scrum.urls")),
    url(r"^project/(?P<pslug>[\w\d\-]+)/wiki/", include("greenmine.wiki.urls")),
    url(r"^project/(?P<pslug>[\w\d\-]+)/questions/", include("greenmine.questions.urls")),
    url(r"^project/(?P<pslug>[\w\d\-]+)/documents/", include("greenmine.documents.urls")),
    url(r'^search/', include('greenmine.search.urls')),
    url(r'^jsi18n/$', 'django.views.i18n.javascript_catalog', js_info_dict, name='jsi18n'),
    url(r"^intern-api/", include(internapi_urlpatterns, namespace='api')),
    url(r"^", include(main_patterns)),
)
def mediafiles_urlpatterns():
    """Build URL patterns that serve MEDIA_ROOT files via runserver."""
    from django.views.static import serve
    media_url = settings.MEDIA_URL
    # The URL regex must not start with a slash, so drop a single leading one.
    if media_url.startswith('/'):
        media_url = media_url[1:]
    return patterns('',
        (r'^%s(?P<path>.*)$' % media_url, serve,
            {'document_root': settings.MEDIA_ROOT})
    )
# Serve static and media assets when running under the development server.
urlpatterns += staticfiles_urlpatterns()
urlpatterns += mediafiles_urlpatterns()
| bsd-3-clause |
sominn/android_kernel_samsung_golden | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
# Read packed little-endian u32 values from stdin and print them as a
# space-separated "index=value" list (index in hex), for the sysfs
# adsl_config attribute.
# NOTE(review): written for Python 2 — on Python 3 sys.stdin.read returns
# str, not bytes, so struct.unpack would need sys.stdin.buffer instead.
i = 0
while True:
    buf = sys.stdin.read(4)
    if len(buf) == 0:
        # Clean EOF on a 4-byte boundary: done.
        break
    elif len(buf) != 4:
        # Truncated trailing value: terminate the output line and fail.
        sys.stdout.write("\n")
        sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
        sys.exit(1)
    if i > 0:
        sys.stdout.write(" ")
    sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
    i += 1
sys.stdout.write("\n")
| gpl-2.0 |
40223226/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/collections/__init__.py | 625 | 25849 | #__all__ = ['deque', 'defaultdict', 'Counter']
from _collections import deque, defaultdict
#from itertools import repeat as _repeat, chain as _chain, starmap as _starmap
__all__ = ['deque', 'defaultdict', 'namedtuple', 'UserDict', 'UserList',
'UserString', 'Counter', 'OrderedDict']
# For bootstrapping reasons, the collection ABCs are defined in _abcoll.py.
# They should however be considered an integral part of collections.py.
# fixme brython.. there is an issue with _abcoll
#from _abcoll import *
#from _abcoll import Set
from _abcoll import MutableMapping
#import _abcoll
#__all__ += _abcoll.__all__
from collections.abc import *
import collections.abc
__all__ += collections.abc.__all__
from _collections import deque, defaultdict, namedtuple
from operator import itemgetter as _itemgetter
from keyword import iskeyword as _iskeyword
import sys as _sys
import heapq as _heapq
#fixme brython
#from weakref import proxy as _proxy
from itertools import repeat as _repeat, chain as _chain, starmap as _starmap
from reprlib import recursive_repr as _recursive_repr
class Set(set):
    """Trivial subclass of the built-in set.

    Stand-in for the abstract Set normally re-exported from _abcoll (see
    the commented-out imports above, disabled for Brython).
    """
class Sequence(list):
    """Trivial subclass of the built-in list.

    Stand-in for the abstract Sequence normally re-exported from _abcoll
    (see the commented-out imports above, disabled for Brython).
    """
def _proxy(obj):
return obj
################################################################################
### OrderedDict
################################################################################
class _Link(object):
    # Doubly-linked-list node used by OrderedDict to track insertion order.
    # The __weakref__ slot lets the sentinel root node be wrapped in a
    # weakref proxy (see OrderedDict.__init__).
    __slots__ = 'prev', 'next', 'key', '__weakref__'
class OrderedDict(dict):
    'Dictionary that remembers insertion order'
    # An inherited dict maps keys to values.
    # The inherited dict provides __getitem__, __len__, __contains__, and get.
    # The remaining methods are order-aware.
    # Big-O running times for all methods are the same as regular dictionaries.
    # The internal self.__map dict maps keys to links in a doubly linked list.
    # The circular doubly linked list starts and ends with a sentinel element.
    # The sentinel element never gets deleted (this simplifies the algorithm).
    # The sentinel is in self.__hardroot with a weakref proxy in self.__root.
    # The prev links are weakref proxies (to prevent circular references).
    # Individual links are kept alive by the hard reference in self.__map.
    # Those hard references disappear when a key is deleted from an OrderedDict.
    # NOTE: in this Brython port _proxy is the identity function, so the
    # weakref-based cycle avoidance is effectively a no-op.
    def __init__(self, *args, **kwds):
        '''Initialize an ordered dictionary.  The signature is the same as
        regular dictionaries, but keyword arguments are not recommended because
        their insertion order is arbitrary.
        '''
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            self.__root
        except AttributeError:
            # First initialization: create the sentinel and empty key map.
            self.__hardroot = _Link()
            self.__root = root = _proxy(self.__hardroot)
            root.prev = root.next = root
            self.__map = {}
        self.__update(*args, **kwds)
    def __setitem__(self, key, value,
                    dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link):
        'od.__setitem__(i, y) <==> od[i]=y'
        # Setting a new item creates a new link at the end of the linked list,
        # and the inherited dictionary is updated with the new key/value pair.
        if key not in self:
            self.__map[key] = link = Link()
            root = self.__root
            last = root.prev
            link.prev, link.next, link.key = last, root, key
            last.next = link
            root.prev = proxy(link)
        dict_setitem(self, key, value)
    def __delitem__(self, key, dict_delitem=dict.__delitem__):
        'od.__delitem__(y) <==> del od[y]'
        # Deleting an existing item uses self.__map to find the link which gets
        # removed by updating the links in the predecessor and successor nodes.
        dict_delitem(self, key)
        link = self.__map.pop(key)
        link_prev = link.prev
        link_next = link.next
        link_prev.next = link_next
        link_next.prev = link_prev
    def __iter__(self):
        'od.__iter__() <==> iter(od)'
        # Traverse the linked list in order.
        root = self.__root
        curr = root.next
        while curr is not root:
            yield curr.key
            curr = curr.next
    def __reversed__(self):
        'od.__reversed__() <==> reversed(od)'
        # Traverse the linked list in reverse order.
        root = self.__root
        curr = root.prev
        while curr is not root:
            yield curr.key
            curr = curr.prev
    def clear(self):
        'od.clear() -> None.  Remove all items from od.'
        root = self.__root
        root.prev = root.next = root
        self.__map.clear()
        dict.clear(self)
    def popitem(self, last=True):
        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
        Pairs are returned in LIFO order if last is true or FIFO order if false.
        '''
        if not self:
            raise KeyError('dictionary is empty')
        root = self.__root
        # Unlink either the tail (LIFO) or the head (FIFO) of the list.
        if last:
            link = root.prev
            link_prev = link.prev
            link_prev.next = root
            root.prev = link_prev
        else:
            link = root.next
            link_next = link.next
            root.next = link_next
            link_next.prev = root
        key = link.key
        del self.__map[key]
        value = dict.pop(self, key)
        return key, value
    def move_to_end(self, key, last=True):
        '''Move an existing element to the end (or beginning if last==False).
        Raises KeyError if the element does not exist.
        When last=True, acts like a fast version of self[key]=self.pop(key).
        '''
        # Unlink the node, then re-splice it at the chosen end.
        link = self.__map[key]
        link_prev = link.prev
        link_next = link.next
        link_prev.next = link_next
        link_next.prev = link_prev
        root = self.__root
        if last:
            last = root.prev
            link.prev = last
            link.next = root
            last.next = root.prev = link
        else:
            first = root.next
            link.prev = root
            link.next = first
            root.next = first.prev = link
    def __sizeof__(self):
        sizeof = _sys.getsizeof
        n = len(self) + 1                       # number of links including root
        size = sizeof(self.__dict__)            # instance dictionary
        size += sizeof(self.__map) * 2          # internal dict and inherited dict
        size += sizeof(self.__hardroot) * n     # link objects
        size += sizeof(self.__root) * n         # proxy objects
        return size
    #fixme brython..  Issue with _abcoll, which contains MutableMapping
    # Bulk/update helpers are borrowed directly from MutableMapping.
    update = __update = MutableMapping.update
    keys = MutableMapping.keys
    values = MutableMapping.values
    items = MutableMapping.items
    __ne__ = MutableMapping.__ne__
    # Private sentinel so pop() can distinguish "no default" from default=None.
    __marker = object()
    def pop(self, key, default=__marker):
        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding
        value.  If key is not found, d is returned if given, otherwise KeyError
        is raised.
        '''
        if key in self:
            result = self[key]
            del self[key]
            return result
        if default is self.__marker:
            raise KeyError(key)
        return default
    def setdefault(self, key, default=None):
        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
        if key in self:
            return self[key]
        self[key] = default
        return default
    #fixme, brython issue
    #@_recursive_repr()
    def __repr__(self):
        'od.__repr__() <==> repr(od)'
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, list(self.items()))
    def __reduce__(self):
        'Return state information for pickling'
        items = [[k, self[k]] for k in self]
        inst_dict = vars(self).copy()
        # Strip the implementation attributes (root, map, ...) from the
        # pickled instance dict; they are rebuilt on unpickling.
        for k in vars(OrderedDict()):
            inst_dict.pop(k, None)
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)
    def copy(self):
        'od.copy() -> a shallow copy of od'
        return self.__class__(self)
    @classmethod
    def fromkeys(cls, iterable, value=None):
        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S.
        If not specified, the value defaults to None.
        '''
        self = cls()
        for key in iterable:
            self[key] = value
        return self
    def __eq__(self, other):
        '''od.__eq__(y) <==> od==y.  Comparison to another OD is order-sensitive
        while comparison to a regular mapping is order-insensitive.
        '''
        if isinstance(other, OrderedDict):
            return len(self)==len(other) and \
                   all(p==q for p, q in zip(self.items(), other.items()))
        return dict.__eq__(self, other)
########################################################################
### Counter
########################################################################
def _count_elements(mapping, iterable):
'Tally elements from the iterable.'
mapping_get = mapping.get
for elem in iterable:
mapping[elem] = mapping_get(elem, 0) + 1
#try: # Load C helper function if available
# from _collections import _count_elements
#except ImportError:
# pass
class Counter(dict):
'''Dict subclass for counting hashable items. Sometimes called a bag
or multiset. Elements are stored as dictionary keys and their counts
are stored as dictionary values.
>>> c = Counter('abcdeabcdabcaba') # count elements from a string
>>> c.most_common(3) # three most common elements
[('a', 5), ('b', 4), ('c', 3)]
>>> sorted(c) # list all unique elements
['a', 'b', 'c', 'd', 'e']
>>> ''.join(sorted(c.elements())) # list elements with repetitions
'aaaaabbbbcccdde'
>>> sum(c.values()) # total of all counts
15
>>> c['a'] # count of letter 'a'
5
>>> for elem in 'shazam': # update counts from an iterable
... c[elem] += 1 # by adding 1 to each element's count
>>> c['a'] # now there are seven 'a'
7
>>> del c['b'] # remove all 'b'
>>> c['b'] # now there are zero 'b'
0
>>> d = Counter('simsalabim') # make another counter
>>> c.update(d) # add in the second counter
>>> c['a'] # now there are nine 'a'
9
>>> c.clear() # empty the counter
>>> c
Counter()
Note: If a count is set to zero or reduced to zero, it will remain
in the counter until the entry is deleted or the counter is cleared:
>>> c = Counter('aaabbc')
>>> c['b'] -= 2 # reduce the count of 'b' by two
>>> c.most_common() # 'b' is still in, but its count is zero
[('a', 3), ('c', 1), ('b', 0)]
'''
# References:
# http://en.wikipedia.org/wiki/Multiset
# http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html
# http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm
# http://code.activestate.com/recipes/259174/
# Knuth, TAOCP Vol. II section 4.6.3
def __init__(self, iterable=None, **kwds):
'''Create a new, empty Counter object. And if given, count elements
from an input iterable. Or, initialize the count from another mapping
of elements to their counts.
>>> c = Counter() # a new, empty counter
>>> c = Counter('gallahad') # a new counter from an iterable
>>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
>>> c = Counter(a=4, b=2) # a new counter from keyword args
'''
#super().__init__() #BE modified since super not supported
dict.__init__(self)
self.update(iterable, **kwds)
def __missing__(self, key):
'The count of elements not in the Counter is zero.'
# Needed so that self[missing_item] does not raise KeyError
return 0
def most_common(self, n=None):
'''List the n most common elements and their counts from the most
common to the least. If n is None, then list all element counts.
>>> Counter('abcdeabcdabcaba').most_common(3)
[('a', 5), ('b', 4), ('c', 3)]
'''
# Emulate Bag.sortedByCount from Smalltalk
if n is None:
return sorted(self.items(), key=_itemgetter(1), reverse=True)
return _heapq.nlargest(n, self.items(), key=_itemgetter(1))
def elements(self):
'''Iterator over elements repeating each as many times as its count.
>>> c = Counter('ABCABC')
>>> sorted(c.elements())
['A', 'A', 'B', 'B', 'C', 'C']
# Knuth's example for prime factors of 1836: 2**2 * 3**3 * 17**1
>>> prime_factors = Counter({2: 2, 3: 3, 17: 1})
>>> product = 1
>>> for factor in prime_factors.elements(): # loop over factors
... product *= factor # and multiply them
>>> product
1836
Note, if an element's count has been set to zero or is a negative
number, elements() will ignore it.
'''
# Emulate Bag.do from Smalltalk and Multiset.begin from C++.
return _chain.from_iterable(_starmap(_repeat, self.items()))
# Override dict methods where necessary
@classmethod
def fromkeys(cls, iterable, v=None):
# There is no equivalent method for counters because setting v=1
# means that no element can have a count greater than one.
raise NotImplementedError(
'Counter.fromkeys() is undefined. Use Counter(iterable) instead.')
def update(self, iterable=None, **kwds):
'''Like dict.update() but add counts instead of replacing them.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.update('witch') # add elements from another iterable
>>> d = Counter('watch')
>>> c.update(d) # add elements from another counter
>>> c['h'] # four 'h' in which, witch, and watch
4
'''
# The regular dict.update() operation makes no sense here because the
# replace behavior results in the some of original untouched counts
# being mixed-in with all of the other counts for a mismash that
# doesn't have a straight-forward interpretation in most counting
# contexts. Instead, we implement straight-addition. Both the inputs
# and outputs are allowed to contain zero and negative counts.
if iterable is not None:
if isinstance(iterable, Mapping):
if self:
self_get = self.get
for elem, count in iterable.items():
self[elem] = count + self_get(elem, 0)
else:
super().update(iterable) # fast path when counter is empty
else:
_count_elements(self, iterable)
if kwds:
self.update(kwds)
def subtract(self, iterable=None, **kwds):
'''Like dict.update() but subtracts counts instead of replacing them.
Counts can be reduced below zero. Both the inputs and outputs are
allowed to contain zero and negative counts.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.subtract('witch') # subtract elements from another iterable
>>> c.subtract(Counter('watch')) # subtract elements from another counter
>>> c['h'] # 2 in which, minus 1 in witch, minus 1 in watch
0
>>> c['w'] # 1 in which, minus 1 in witch, minus 1 in watch
-1
'''
if iterable is not None:
self_get = self.get
if isinstance(iterable, Mapping):
for elem, count in iterable.items():
self[elem] = self_get(elem, 0) - count
else:
for elem in iterable:
self[elem] = self_get(elem, 0) - 1
if kwds:
self.subtract(kwds)
def copy(self):
'Return a shallow copy.'
return self.__class__(self)
def __reduce__(self):
return self.__class__, (dict(self),)
def __delitem__(self, elem):
'Like dict.__delitem__() but does not raise KeyError for missing values.'
if elem in self:
super().__delitem__(elem)
def __repr__(self):
if not self:
return '%s()' % self.__class__.__name__
try:
items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
return '%s({%s})' % (self.__class__.__name__, items)
except TypeError:
# handle case where values are not orderable
return '{0}({1!r})'.format(self.__class__.__name__, dict(self))
# Multiset-style mathematical operations discussed in:
# Knuth TAOCP Volume II section 4.6.3 exercise 19
# and at http://en.wikipedia.org/wiki/Multiset
#
# Outputs guaranteed to only include positive counts.
#
# To strip negative and zero counts, add-in an empty counter:
# c += Counter()
def __add__(self, other):
'''Add counts from two counters.
>>> Counter('abbb') + Counter('bcc')
Counter({'b': 4, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
newcount = count + other[elem]
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count > 0:
result[elem] = count
return result
def __sub__(self, other):
''' Subtract count, but keep only results with positive counts.
>>> Counter('abbbc') - Counter('bccd')
Counter({'b': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
newcount = count - other[elem]
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count < 0:
result[elem] = 0 - count
return result
def __or__(self, other):
'''Union is the maximum of value in either of the input counters.
>>> Counter('abbb') | Counter('bcc')
Counter({'b': 3, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
other_count = other[elem]
newcount = other_count if count < other_count else count
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count > 0:
result[elem] = count
return result
def __and__(self, other):
''' Intersection is the minimum of corresponding counts.
>>> Counter('abbb') & Counter('bcc')
Counter({'b': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
other_count = other[elem]
newcount = count if count < other_count else other_count
if newcount > 0:
result[elem] = newcount
return result
########################################################################
### ChainMap (helper for configparser)
########################################################################
class ChainMap(MutableMapping):
    ''' A ChainMap groups multiple dicts (or other mappings) together
    to create a single, updateable view.

    The underlying mappings are stored in a list.  That list is public and
    can be accessed or updated using the *maps* attribute.  There is no
    other state.

    Lookups search the underlying mappings successively until a key is
    found.  In contrast, writes, updates, and deletions only operate on the
    first mapping.
    '''

    def __init__(self, *maps):
        '''Initialize a ChainMap by setting *maps* to the given mappings.
        If no mappings are provided, a single empty dictionary is used.
        '''
        self.maps = list(maps) or [{}]          # always at least one map

    def __missing__(self, key):
        # Hook for subclasses; called by __getitem__ when no map has the key.
        raise KeyError(key)

    def __getitem__(self, key):
        for mapping in self.maps:
            try:
                return mapping[key]             # can't use 'key in mapping' with defaultdict
            except KeyError:
                pass
        return self.__missing__(key)            # support subclasses that define __missing__

    def get(self, key, default=None):
        return self[key] if key in self else default

    def __len__(self):
        return len(set().union(*self.maps))     # reuses stored hash values if possible

    def __iter__(self):
        return iter(set().union(*self.maps))

    def __contains__(self, key):
        return any(key in m for m in self.maps)

    def __bool__(self):
        return any(self.maps)

    # NOTE: upstream CPython wraps __repr__ with reprlib.recursive_repr,
    # which is unavailable here (see the original "fixme, brython" note).
    # This class previously contained TWO __repr__ definitions; the first
    # (standard "ChainMap({...}, {...})" format) was dead code, silently
    # shadowed by this comma-joined variant.  The dead duplicate has been
    # removed and the effective behavior kept unchanged.
    def __repr__(self):
        return ','.join(str(_map) for _map in self.maps)

    @classmethod
    def fromkeys(cls, iterable, *args):
        'Create a ChainMap with a single dict created from the iterable.'
        return cls(dict.fromkeys(iterable, *args))

    def copy(self):
        'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]'
        return self.__class__(self.maps[0].copy(), *self.maps[1:])

    __copy__ = copy

    def new_child(self):                        # like Django's Context.push()
        'New ChainMap with a new dict followed by all previous maps.'
        return self.__class__({}, *self.maps)

    @property
    def parents(self):                          # like Django's Context.pop()
        'New ChainMap from maps[1:].'
        return self.__class__(*self.maps[1:])

    def __setitem__(self, key, value):
        self.maps[0][key] = value

    def __delitem__(self, key):
        try:
            del self.maps[0][key]
        except KeyError:
            raise KeyError('Key not found in the first mapping: {!r}'.format(key))

    def popitem(self):
        'Remove and return an item pair from maps[0].  Raise KeyError if maps[0] is empty.'
        try:
            return self.maps[0].popitem()
        except KeyError:
            raise KeyError('No keys found in the first mapping.')

    def pop(self, key, *args):
        'Remove *key* from maps[0] and return its value.  Raise KeyError if *key* not in maps[0].'
        try:
            return self.maps[0].pop(key, *args)
        except KeyError:
            #raise KeyError('Key not found in the first mapping: {!r}'.format(key))
            raise KeyError('Key not found in the first mapping: %s' % key)

    def clear(self):
        'Clear maps[0], leaving maps[1:] intact.'
        self.maps[0].clear()
################################################################################
### UserDict
################################################################################
class UserDict(MutableMapping):
    '''A dict-like class that keeps its contents in the ``data`` attribute
    and is designed to be easy to subclass.
    '''

    # ---- abstract methods required by MutableMapping ----

    def __init__(self, dict=None, **kwargs):
        # The positional parameter is named ``dict`` for backward
        # compatibility with the stdlib signature.
        self.data = {}
        if dict is not None:
            self.update(dict)
        if len(kwargs):
            self.update(kwargs)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, key):
        if key in self.data:
            return self.data[key]
        klass = self.__class__
        # Mirror dict: defer to __missing__ when a subclass defines it.
        if hasattr(klass, "__missing__"):
            return klass.__missing__(self, key)
        raise KeyError(key)

    def __setitem__(self, key, item):
        self.data[key] = item

    def __delitem__(self, key):
        del self.data[key]

    def __iter__(self):
        return iter(self.data)

    # Membership consults ``data`` directly so that a __missing__ hook
    # does not make absent keys look present.
    def __contains__(self, key):
        return key in self.data

    # ---- dict conveniences not provided by MutableMapping ----

    def __repr__(self):
        return repr(self.data)

    def copy(self):
        if self.__class__ is UserDict:
            return UserDict(self.data.copy())
        import copy
        data = self.data
        try:
            # Temporarily detach ``data`` so copy.copy does not clone it...
            self.data = {}
            c = copy.copy(self)
        finally:
            self.data = data
        # ...then refill the copy from our own items.
        c.update(self)
        return c

    @classmethod
    def fromkeys(cls, iterable, value=None):
        d = cls()
        for key in iterable:
            d[key] = value
        return d
################################################################################
### UserList
################################################################################
################################################################################
### UserString
################################################################################
| gpl-3.0 |
Erkan-Yilmaz/twister-core | libtorrent/tools/parse_disk_access.py | 57 | 2523 | #! /usr/bin/env python
# Parse a libtorrent disk-access log and render it with gnuplot.
# Usage: parse_disk_access.py <logfile>
# NOTE: this is a Python 2 script (print statements and ``print >>file``).
import os, sys, time
lines = open(sys.argv[1], 'rb').readlines()

# logfile format:
# <time(us)> <key>: <value>
# example:
# 16434 read cache: 17

# Plottable series; the parallel lists below give each series' gnuplot
# color, draw style, y-axis assignment, and whether it is enabled.
keys = ['read', 'write', 'head movement', 'seek per read byte', 'seek per written byte',
	'read operations per second', 'write operations per second']
colors = ['305030', '503030', '3030f0', '10a010', 'a01010', 'd0d040', 'd040d0']
style = ['dots', 'points', 'lines', 'lines', 'lines', 'lines', 'lines']
axis = ['x1y1', 'x1y1', 'x1y2', 'x1y2', 'x1y2', 'x1y2', 'x1y2']
plot = [True, False, False, False, False, True, False]

out = open('disk_access_log.dat', 'w+')
# Aggregation window in microseconds.
# NOTE(review): this assignment shadows the ``time`` module imported above,
# which is otherwise unused.
time = 1000000
# Running aggregates for the current window.
last_pos = 0
last_t = 0
cur_movement = 0
cur_read = 0
cur_write = 0
cur_read_ops = 0
cur_write_ops = 0
for l in lines:
	try:
		# strip newline
		l = l[0:-1].split(' ')
		t = int(l[0])
		k = l[1]
		n = int(l[2])
	except:
		# Echo and skip malformed lines.
		print l
		continue

	# Per-line column placeholders; '-' means "no sample this row".
	read = '-'
	write = '-'
	movement = '-'
	amount_read = '-'
	amount_write = '-'
	read_ops = '-'
	write_ops = '-'

	if k == 'read':
		read = '%d' % n
		cur_read_ops += 1
	if k == 'write':
		write = '%d' % n
		cur_write_ops += 1
	# *_end events carry the final offset; the delta is the bytes transferred.
	if k == 'read_end': cur_read += n - last_pos
	if k == 'write_end': cur_write += n - last_pos

	# Head movement is the absolute distance between consecutive offsets.
	cur_movement += abs(last_pos - n)
	last_pos = n

	# Emit one aggregate sample per time window, then reset the counters.
	if last_t + time <= t:
		movement = '%d' % cur_movement
		if cur_read > 0:
			amount_read = '%d' % (cur_movement / cur_read)
		if cur_write > 0:
			amount_write = '%d' % (cur_movement / cur_write)
		read_ops = '%d' % cur_read_ops
		write_ops = '%d' % cur_write_ops
		cur_movement = 0
		cur_read = 0
		cur_write = 0
		last_t = t
		cur_read_ops = 0
		cur_write_ops = 0

	print >>out, '%d\t%s\t%s\t%s\t%s\t%s\t%s\t%s' % (t, read, write, movement, amount_read, amount_write, read_ops, write_ops)

out.close()

# Generate the gnuplot script that renders the .dat file to a PNG.
out = open('disk_access.gnuplot', 'wb')
print >>out, "set term png size 1200,700"
print >>out, 'set output "disk_access.png"'
print >>out, 'set xrange [*:*]'
#print >>out, 'set y2range [0:*]'
print >>out, 'set xlabel "time (us)"'
print >>out, 'set ylabel "drive offset"'
#print >>out, 'set y2label "bytes / %d second(s)"' % (time / 1000)
print >>out, "set key box"
print >>out, "set tics nomirror"
print >>out, "set y2tics auto"
print >>out, 'plot',
# Column 1 of the .dat file is the timestamp; series start at column 2.
count = 1
for k in keys:
	count += 1
	if not plot[count-2]: continue
	print >>out, ' "disk_access_log.dat" using 1:%d title "%s" with %s lt rgb "#%s" axis %s,' \
		% (count, k, style[count-2], colors[count-2], axis[count-2]),
# Terminate the trailing comma of the plot command with a dummy expression.
print >>out, 'x=0'
out.close()

os.system('gnuplot disk_access.gnuplot')
| mit |
jlmadurga/django-telegram-bot | tests/settings.py | 1 | 1578 | # flake8: noqa
# Minimal Django settings used by the django-telegram-bot test suite.
#
# Fixed defect: ``DEBUG=True,`` (note the trailing comma) previously bound
# DEBUG to the tuple ``(True,)`` rather than the boolean True.
DEBUG = True
USE_TZ = True

# File-less SQLite database; sufficient for the test run.
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.sqlite3",
    }
}

ROOT_URLCONF = "tests.urls"

INSTALLED_APPS = [
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sites",
    'django.contrib.sessions',
    "rest_framework",
    "telegrambot",
    "tests"
]

SITE_ID = 1

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
)

SECRET_KEY = "shds8dfyhskdfhskdfhskdf"

# Console-only logging; the telegrambot.views logger is kept at DEBUG so
# webhook handling can be inspected during test runs.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
    },
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'verbose'
        },
    },
    'loggers': {
        'telegrambot.views': {
            'handlers': ['console'],
            'propagate': False,
            'level': 'DEBUG',
        },
    }
}

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True
    },
]

TELEGRAM_BOT_HANDLERS_CONF = "tests.bot_handlers"
TELEGRAM_BOT_TOKEN_EXPIRATION = "2"  # two hours before a token expires
| bsd-3-clause |
c-o-m-m-a-n-d-e-r/CouchPotatoServer | libs/tornado/httpserver.py | 66 | 11199 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A non-blocking, single-threaded HTTP server.
Typical applications have little direct interaction with the `HTTPServer`
class except to start a server at the beginning of the process
(and even that is often done indirectly via `tornado.web.Application.listen`).
.. versionchanged:: 4.0
The ``HTTPRequest`` class that used to live in this module has been moved
to `tornado.httputil.HTTPServerRequest`. The old name remains as an alias.
"""
from __future__ import absolute_import, division, print_function, with_statement
import socket
from tornado.escape import native_str
from tornado.http1connection import HTTP1ServerConnection, HTTP1ConnectionParameters
from tornado import gen
from tornado import httputil
from tornado import iostream
from tornado import netutil
from tornado.tcpserver import TCPServer
class HTTPServer(TCPServer, httputil.HTTPServerConnectionDelegate):
    r"""A non-blocking, single-threaded HTTP server.

    A server is defined by a subclass of `.HTTPServerConnectionDelegate`,
    or, for backwards compatibility, a callback that takes an
    `.HTTPServerRequest` as an argument. The delegate is usually a
    `tornado.web.Application`.

    `HTTPServer` supports keep-alive connections by default
    (automatically for HTTP/1.1, or for HTTP/1.0 when the client
    requests ``Connection: keep-alive``).

    If ``xheaders`` is ``True``, we support the
    ``X-Real-Ip``/``X-Forwarded-For`` and
    ``X-Scheme``/``X-Forwarded-Proto`` headers, which override the
    remote IP and URI scheme/protocol for all requests. These headers
    are useful when running Tornado behind a reverse proxy or load
    balancer. The ``protocol`` argument can also be set to ``https``
    if Tornado is run behind an SSL-decoding proxy that does not set one of
    the supported ``xheaders``.

    To make this server serve SSL traffic, send the ``ssl_options`` dictionary
    argument with the arguments required for the `ssl.wrap_socket` method,
    including ``certfile`` and ``keyfile``. (In Python 3.2+ you can pass
    an `ssl.SSLContext` object instead of a dict)::

       HTTPServer(application, ssl_options={
           "certfile": os.path.join(data_dir, "mydomain.crt"),
           "keyfile": os.path.join(data_dir, "mydomain.key"),
       })

    `HTTPServer` initialization follows one of three patterns (the
    initialization methods are defined on `tornado.tcpserver.TCPServer`):

    1. `~tornado.tcpserver.TCPServer.listen`: simple single-process::

            server = HTTPServer(app)
            server.listen(8888)
            IOLoop.instance().start()

       In many cases, `tornado.web.Application.listen` can be used to avoid
       the need to explicitly create the `HTTPServer`.

    2. `~tornado.tcpserver.TCPServer.bind`/`~tornado.tcpserver.TCPServer.start`:
       simple multi-process::

            server = HTTPServer(app)
            server.bind(8888)
            server.start(0)  # Forks multiple sub-processes
            IOLoop.instance().start()

       When using this interface, an `.IOLoop` must *not* be passed
       to the `HTTPServer` constructor.  `~.TCPServer.start` will always start
       the server on the default singleton `.IOLoop`.

    3. `~tornado.tcpserver.TCPServer.add_sockets`: advanced multi-process::

            sockets = tornado.netutil.bind_sockets(8888)
            tornado.process.fork_processes(0)
            server = HTTPServer(app)
            server.add_sockets(sockets)
            IOLoop.instance().start()

       The `~.TCPServer.add_sockets` interface is more complicated,
       but it can be used with `tornado.process.fork_processes` to
       give you more flexibility in when the fork happens.
       `~.TCPServer.add_sockets` can also be used in single-process
       servers if you want to create your listening sockets in some
       way other than `tornado.netutil.bind_sockets`.

    .. versionchanged:: 4.0
       Added ``decompress_request``, ``chunk_size``, ``max_header_size``,
       ``idle_connection_timeout``, ``body_timeout``, ``max_body_size``
       arguments.  Added support for `.HTTPServerConnectionDelegate`
       instances as ``request_callback``.

    .. versionchanged:: 4.1
       `.HTTPServerConnectionDelegate.start_request` is now called with
       two arguments ``(server_conn, request_conn)`` (in accordance with the
       documentation) instead of one ``(request_conn)``.
    """
    def __init__(self, request_callback, no_keep_alive=False, io_loop=None,
                 xheaders=False, ssl_options=None, protocol=None,
                 decompress_request=False,
                 chunk_size=None, max_header_size=None,
                 idle_connection_timeout=None, body_timeout=None,
                 max_body_size=None, max_buffer_size=None):
        # The delegate (or legacy callback) that receives finished requests.
        self.request_callback = request_callback
        self.no_keep_alive = no_keep_alive
        self.xheaders = xheaders
        self.protocol = protocol
        # Shared HTTP/1.x parsing limits and timeouts for every connection.
        self.conn_params = HTTP1ConnectionParameters(
            decompress=decompress_request,
            chunk_size=chunk_size,
            max_header_size=max_header_size,
            header_timeout=idle_connection_timeout or 3600,  # default: 1 hour
            max_body_size=max_body_size,
            body_timeout=body_timeout)
        TCPServer.__init__(self, io_loop=io_loop, ssl_options=ssl_options,
                           max_buffer_size=max_buffer_size,
                           read_chunk_size=chunk_size)
        # Live HTTP1ServerConnection objects, tracked so that
        # close_all_connections can drain them; entries are removed
        # in on_close.
        self._connections = set()

    @gen.coroutine
    def close_all_connections(self):
        # Close connections one at a time; each completed close removes the
        # connection from the set (via on_close), shrinking the loop.
        while self._connections:
            # Peek at an arbitrary element of the set
            conn = next(iter(self._connections))
            yield conn.close()

    def handle_stream(self, stream, address):
        # TCPServer hook: called once per accepted connection.
        context = _HTTPRequestContext(stream, address,
                                      self.protocol)
        conn = HTTP1ServerConnection(
            stream, self.conn_params, context)
        self._connections.add(conn)
        conn.start_serving(self)

    def start_request(self, server_conn, request_conn):
        # HTTPServerConnectionDelegate hook: one adapter per request.
        return _ServerRequestAdapter(self, server_conn, request_conn)

    def on_close(self, server_conn):
        # HTTPServerConnectionDelegate hook: forget finished connections.
        self._connections.remove(server_conn)
class _HTTPRequestContext(object):
    """Connection-level context (peer address and protocol) for requests.

    Keeps both the effective and the original ``remote_ip``/``protocol``
    values so that per-request ``X-*`` header overrides can be undone
    between requests on a keep-alive connection.
    """
    def __init__(self, stream, address, protocol):
        self.address = address
        self.protocol = protocol
        # Capture the address family up front: the stream's socket
        # attribute is replaced with None once the stream is closed.
        sock = stream.socket
        self.address_family = None if sock is None else sock.family
        # HTTPServerRequest wants a bare IP rather than a full address tuple.
        if address is not None and self.address_family in (socket.AF_INET,
                                                           socket.AF_INET6):
            self.remote_ip = address[0]
        else:
            # Unix (or other) socket; fake the remote address.
            self.remote_ip = '0.0.0.0'
        # Protocol precedence: explicit argument, then TLS detection,
        # then plain "http".
        if protocol:
            self.protocol = protocol
        elif isinstance(stream, iostream.SSLIOStream):
            self.protocol = "https"
        else:
            self.protocol = "http"
        self._orig_remote_ip = self.remote_ip
        self._orig_protocol = self.protocol

    def __str__(self):
        if self.address_family in (socket.AF_INET, socket.AF_INET6):
            return self.remote_ip
        if isinstance(self.address, bytes):
            # Unix socket addresses are str on mac but bytes on linux;
            # Python 3's -bb option warns on str(bytes), so convert
            # explicitly.
            return native_str(self.address)
        return str(self.address)

    def _apply_xheaders(self, headers):
        """Rewrite the ``remote_ip`` and ``protocol`` fields."""
        # Squid sets X-Forwarded-For; others use X-Real-Ip, which takes
        # precedence.  Only the last hop of a forwarded chain is used.
        forwarded = headers.get("X-Forwarded-For", self.remote_ip)
        forwarded = forwarded.split(',')[-1].strip()
        candidate = headers.get("X-Real-Ip", forwarded)
        if netutil.is_valid_ip(candidate):
            self.remote_ip = candidate
        # AWS load balancers use X-Forwarded-Proto; X-Scheme wins if present.
        scheme = headers.get(
            "X-Scheme", headers.get("X-Forwarded-Proto",
                                    self.protocol))
        if scheme in ("http", "https"):
            self.protocol = scheme

    def _unapply_xheaders(self):
        """Undo changes from `_apply_xheaders`.

        Xheaders are per-request, so they must not leak to the next
        request on the same connection.
        """
        self.remote_ip = self._orig_remote_ip
        self.protocol = self._orig_protocol
class _ServerRequestAdapter(httputil.HTTPMessageDelegate):
    """Adapts the `HTTPMessageDelegate` interface to the interface expected
    by our clients.

    Two modes: with a new-style `HTTPServerConnectionDelegate`, events are
    forwarded straight through; with a legacy callback, body chunks are
    buffered and a complete `HTTPServerRequest` is built before the
    callback is invoked.
    """
    def __init__(self, server, server_conn, request_conn):
        self.server = server
        self.connection = request_conn
        self.request = None
        if isinstance(server.request_callback,
                      httputil.HTTPServerConnectionDelegate):
            # New-style API: let the delegate handle every event.
            self.delegate = server.request_callback.start_request(
                server_conn, request_conn)
            self._chunks = None
        else:
            # Legacy callback API: accumulate body chunks locally.
            self.delegate = None
            self._chunks = []

    def headers_received(self, start_line, headers):
        if self.server.xheaders:
            # Apply X-Real-Ip / X-Forwarded-* overrides for this request.
            self.connection.context._apply_xheaders(headers)
        if self.delegate is None:
            self.request = httputil.HTTPServerRequest(
                connection=self.connection, start_line=start_line,
                headers=headers)
        else:
            return self.delegate.headers_received(start_line, headers)

    def data_received(self, chunk):
        if self.delegate is None:
            self._chunks.append(chunk)
        else:
            return self.delegate.data_received(chunk)

    def finish(self):
        if self.delegate is None:
            # Assemble the buffered body and hand the request to the callback.
            self.request.body = b''.join(self._chunks)
            self.request._parse_body()
            self.server.request_callback(self.request)
        else:
            self.delegate.finish()
        self._cleanup()

    def on_connection_close(self):
        if self.delegate is None:
            # Drop buffered chunks; the request will never complete.
            self._chunks = None
        else:
            self.delegate.on_connection_close()
        self._cleanup()

    def _cleanup(self):
        if self.server.xheaders:
            # Restore the original remote_ip/protocol so header overrides
            # do not leak into the next request on this connection.
            self.connection.context._unapply_xheaders()
# Backwards-compatible alias: the HTTPRequest class that used to live in
# this module moved to tornado.httputil.HTTPServerRequest in Tornado 4.0.
HTTPRequest = httputil.HTTPServerRequest
| gpl-3.0 |
YukinoHayakawa/mtasa-blue | vendor/google-breakpad/src/tools/gyp/test/msvs/external_builder/gyptest-all.py | 260 | 1878 | #!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that msvs_external_builder being set will invoke the provided
msvs_external_builder_build_cmd and msvs_external_builder_clean_cmd, and will
not invoke MSBuild actions and rules.
"""
# Regression test: when msvs_external_builder is set, gyp must invoke the
# supplied external build/clean commands and must NOT run MSBuild actions
# or rules; without the flag, the MSBuild path must still work.
import os
import sys

import TestGyp

# The msvs_external_builder feature requires Visual Studio 2010 or newer.
if int(os.environ.get('GYP_MSVS_VERSION', 0)) < 2010:
  sys.exit(0)

test = TestGyp.TestGyp(formats=['msvs'], workdir='workarea_all')

# without the flag set
test.run_gyp('external.gyp')
test.build('external.gyp', target='external')
test.must_not_exist('external_builder.out')
test.must_exist('msbuild_rule.out')
test.must_exist('msbuild_action.out')
test.must_match('msbuild_rule.out', 'msbuild_rule.py hello.z a b c')
test.must_match('msbuild_action.out', 'msbuild_action.py x y z')
os.remove('msbuild_rule.out')
os.remove('msbuild_action.out')

# with the flag set, using Build
try:
  os.environ['GYP_DEFINES'] = 'use_external_builder=1'
  test.run_gyp('external.gyp')
  test.build('external.gyp', target='external')
finally:
  # Always restore the environment so later runs are unaffected.
  del os.environ['GYP_DEFINES']
test.must_not_exist('msbuild_rule.out')
test.must_not_exist('msbuild_action.out')
test.must_exist('external_builder.out')
test.must_match('external_builder.out', 'external_builder.py build 1 2 3')
os.remove('external_builder.out')

# with the flag set, using Clean
try:
  os.environ['GYP_DEFINES'] = 'use_external_builder=1'
  test.run_gyp('external.gyp')
  test.build('external.gyp', target='external', clean=True)
finally:
  del os.environ['GYP_DEFINES']
test.must_not_exist('msbuild_rule.out')
test.must_not_exist('msbuild_action.out')
test.must_exist('external_builder.out')
test.must_match('external_builder.out', 'external_builder.py clean 4 5')
os.remove('external_builder.out')

test.pass_test()
| gpl-3.0 |
shiquanwang/caffe | python/caffe/detector.py | 25 | 3902 | #!/usr/bin/env python
"""
Do windowed detection by classifying a number of images/crops at once,
optionally using the selective search window proposal method.
This implementation follows ideas in
Ross Girshick, Jeff Donahue, Trevor Darrell, Jitendra Malik.
Rich feature hierarchies for accurate object detection and semantic
segmentation.
http://arxiv.org/abs/1311.2524
The selective_search_ijcv_with_python code required for the selective search
proposal mode is available at
https://github.com/sergeyk/selective_search_ijcv_with_python
TODO
- R-CNN crop mode / crop with context.
- Bundle with R-CNN model for example.
"""
import numpy as np
import os
import caffe
class Detector(caffe.Net):
    """
    Detector extends Net for windowed detection by a list of crops or
    selective search proposals.
    """
    def __init__(self, model_file, pretrained_file, gpu=False, mean_file=None,
                 input_scale=None, channel_swap=None):
        """
        Take
        gpu, mean_file, input_scale, channel_swap: convenience params for
        setting mode, mean, input scale, and channel order.
        """
        caffe.Net.__init__(self, model_file, pretrained_file)
        # Inference-only configuration; no training phase.
        self.set_phase_test()

        if gpu:
            self.set_mode_gpu()
        else:
            self.set_mode_cpu()

        # Optional preprocessing settings, all applied to the first input blob.
        if mean_file:
            self.set_mean(self.inputs[0], mean_file)
        if input_scale:
            self.set_input_scale(self.inputs[0], input_scale)
        if channel_swap:
            self.set_channel_swap(self.inputs[0], channel_swap)

    def detect_windows(self, images_windows):
        """
        Do windowed detection over given images and windows. Windows are
        extracted then warped to the input dimensions of the net.

        Take
        images_windows: (image filename, window list) iterable.
            Each window is indexed (ymin, xmin, ymax, xmax), as shown by
            the row/column slicing below.
            NOTE(review): this argument is iterated twice, so it must be a
            reusable sequence rather than a one-shot generator.

        Give
        detections: list of {filename: image filename, window: crop coordinates,
            predictions: prediction vector} dicts.
        """
        # Extract windows.
        window_inputs = []
        for image_fname, windows in images_windows:
            image = caffe.io.load_image(image_fname).astype(np.float32)
            for window in windows:
                # Rows window[0]:window[2], columns window[1]:window[3].
                window_inputs.append(image[window[0]:window[2],
                                           window[1]:window[3]])

        # Run through the net (warping windows to input dimensions).
        caffe_in = np.asarray([self.preprocess(self.inputs[0], window_in)
                               for window_in in window_inputs])
        out = self.forward_all(**{self.inputs[0]: caffe_in})
        # Collapse the singleton spatial dimensions of the output blob,
        # leaving one prediction vector per window.
        predictions = out[self.outputs[0]].squeeze(axis=(2,3))

        # Package predictions with images and windows.  The second pass
        # over images_windows must visit windows in the same order as the
        # first pass so that predictions[ix] lines up.
        detections = []
        ix = 0
        for image_fname, windows in images_windows:
            for window in windows:
                detections.append({
                    'window': window,
                    'prediction': predictions[ix],
                    'filename': image_fname
                })
                ix += 1
        return detections

    def detect_selective_search(self, image_fnames):
        """
        Do windowed detection over Selective Search proposals by extracting
        the crop and warping to the input dimensions of the net.

        Take
        image_fnames: list

        Give
        detections: list of {filename: image filename, window: crop coordinates,
            predictions: prediction vector} dicts.
        """
        import selective_search_ijcv_with_python as selective_search
        # Make absolute paths so MATLAB can find the files.
        image_fnames = [os.path.abspath(f) for f in image_fnames]
        windows_list = selective_search.get_windows(image_fnames)
        # Run windowed detection on the selective search list.
        # NOTE(review): detect_windows iterates its argument twice; on
        # Python 3 this zip would be exhausted after the first pass, so this
        # relies on Python 2's list-returning zip.
        return self.detect_windows(zip(image_fnames, windows_list))
| bsd-2-clause |
PulsePod/old-www-do-not-use | lib/python2.7/site-packages/flask/templating.py | 783 | 4707 | # -*- coding: utf-8 -*-
"""
flask.templating
~~~~~~~~~~~~~~~~
Implements the bridge to Jinja2.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import posixpath
from jinja2 import BaseLoader, Environment as BaseEnvironment, \
TemplateNotFound
from .globals import _request_ctx_stack, _app_ctx_stack
from .signals import template_rendered
from .module import blueprint_is_module
from ._compat import itervalues, iteritems
def _default_template_ctx_processor():
    """Default template context processor.  Injects `request`,
    `session` and `g`.
    """
    values = {}
    app_ctx = _app_ctx_stack.top
    req_ctx = _request_ctx_stack.top
    # Only expose objects whose owning context is actually pushed.
    if app_ctx is not None:
        values['g'] = app_ctx.g
    if req_ctx is not None:
        values['request'] = req_ctx.request
        values['session'] = req_ctx.session
    return values
class Environment(BaseEnvironment):
    """A Jinja2 environment that knows about Flask blueprints, so that
    referenced templates can be resolved against blueprint folders when
    necessary.
    """

    def __init__(self, app, **options):
        # Fall back to the app-wide loader, constructing it only when the
        # caller did not provide one (setdefault would build it eagerly).
        if 'loader' not in options:
            options['loader'] = app.create_global_jinja_loader()
        BaseEnvironment.__init__(self, **options)
        self.app = app
class DispatchingJinjaLoader(BaseLoader):
    """A loader that looks for templates in the application and all
    the blueprint folders.
    """

    def __init__(self, app):
        self.app = app

    def get_source(self, environment, template):
        # Try each candidate loader in resolution order; the first loader
        # that knows the template wins.
        for loader, local_name in self._iter_loaders(template):
            try:
                return loader.get_source(environment, local_name)
            except TemplateNotFound:
                pass
        raise TemplateNotFound(template)

    def _iter_loaders(self, template):
        # Resolution order: 1) the application's own loader, 2) an
        # old-style module blueprint whose name matches the path prefix
        # (stripped of that prefix), 3) every new-style blueprint's loader
        # with the unmodified template name.
        loader = self.app.jinja_loader
        if loader is not None:
            yield loader, template

        # old style module based loaders in case we are dealing with a
        # blueprint that is an old style module
        try:
            module, local_name = posixpath.normpath(template).split('/', 1)
            blueprint = self.app.blueprints[module]
            if blueprint_is_module(blueprint):
                loader = blueprint.jinja_loader
                if loader is not None:
                    yield loader, local_name
        except (ValueError, KeyError):
            # No '/' in the template name, or no blueprint by that prefix.
            pass

        for blueprint in itervalues(self.app.blueprints):
            if blueprint_is_module(blueprint):
                continue
            loader = blueprint.jinja_loader
            if loader is not None:
                yield loader, template

    def list_templates(self):
        # Union of all template names reachable through the same loaders
        # that get_source consults; module blueprints get a name prefix.
        result = set()
        loader = self.app.jinja_loader
        if loader is not None:
            result.update(loader.list_templates())

        for name, blueprint in iteritems(self.app.blueprints):
            loader = blueprint.jinja_loader
            if loader is not None:
                for template in loader.list_templates():
                    prefix = ''
                    if blueprint_is_module(blueprint):
                        prefix = name + '/'
                    result.add(prefix + template)

        return list(result)
def _render(template, context, app):
    """Render *template* with *context* and fire ``template_rendered``."""
    rendered = template.render(context)
    # Notify signal subscribers after a successful render.
    template_rendered.send(app, template=template, context=context)
    return rendered
def render_template(template_name_or_list, **context):
    """Render a template from the template folder with the given context.

    :param template_name_or_list: the name of the template to be rendered,
                                  or an iterable of template names -- the
                                  first one that exists will be rendered.
    :param context: the variables that should be available in the
                    context of the template.
    """
    ctx = _app_ctx_stack.top
    ctx.app.update_template_context(context)
    template = ctx.app.jinja_env.get_or_select_template(template_name_or_list)
    return _render(template, context, ctx.app)
def render_template_string(source, **context):
    """Render a template from the given source string with the given context.

    :param source: the source code of the template to be rendered.
    :param context: the variables that should be available in the
                    context of the template.
    """
    ctx = _app_ctx_stack.top
    ctx.app.update_template_context(context)
    template = ctx.app.jinja_env.from_string(source)
    return _render(template, context, ctx.app)
| mit |
weibocom/opendcp | octans/octans/worker/executor.py | 5 | 6833 | #!/usr/bin/env python
#
# Copyright (C) 2016 Weibo Inc.
#
# This file is part of Opendcp.
#
# Opendcp is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# Opendcp is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Opendcp. if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
#
# -*- coding: utf-8 -*-
# Author: WhiteBlue
# Time : 2016/07/26
import os
from multiprocessing import Process
from threading import Thread
import signal
import sys
from octans.logger import LogManager
from octans.worker.task import Task
from multiprocessing import Queue
'''
daemon thread to run task
'''
# Commands understood by the executor's dispatch queue
# (consumed by Executor._loop_for_queue).
_CMD_NEWTASK = 0
_CMD_STOPTASK = 1
_CMD_CLEARTASK = 2
Logger = LogManager.get_logger("Executor")
# Sink used to silence stdout/stderr inside worker processes.
# NOTE(review): opened at import time and never closed; POSIX-only path.
_output_null = open("/dev/null", "w")
def _find_child_process(pid):
    """Return the pids (as strings) of the direct children of *pid*.

    Shells out to ``ps`` and keeps the rows whose ppid equals *pid*.
    :param pid: parent process id (int)
    :return: list of child pid strings (possibly empty)
    """
    pid_str = str(pid)
    # awk prints matching child pids joined by commas, e.g. "123,456,".
    sh = "ps -e -o pid,ppid | awk '{ if($1!=" + pid_str + "&&$2==" + pid_str + "){printf $1\",\"}}'"
    t = os.popen(sh)
    child_arr = t.read().split(",")
    ret = []
    for child in child_arr:
        # BUG FIX: the original tested ``child.strip() is not ""`` which is an
        # object-identity comparison (only accidentally true/false depending on
        # string interning).  Truthiness of the stripped string is the correct
        # and equivalent intent, and it also subsumes the len() check.
        if child.strip():
            ret.append(child)
    return ret
def _clear_remain_process(pid):
    """
    kill the child processes that Ansible forks
    :param pid: process pid
    :return: none
    """
    try:
        # Freeze the main process first so it cannot spawn new children
        # while we walk its process tree.
        os.kill(pid, signal.SIGSTOP)
        kill_list = []
        find_list = [pid]
        count = 0
        # Walk the descendants, capped at 20 iterations as a safety valve
        # against runaway process trees.
        # BUG FIX: the original condition was ``len(find_list) is not 0`` --
        # an identity comparison on an int, which works only by accident for
        # small interned integers.  Use the list's truthiness instead.
        while find_list and count < 20:
            t_pid = find_list.pop()
            ret_list = _find_child_process(t_pid)
            kill_list.extend(ret_list)
            find_list.extend(ret_list)
            count += 1
        Logger.debug("kill Ansible fork process: {}".format(str(kill_list)))
        for p in kill_list:
            try:
                os.kill(int(p), signal.SIGKILL)
            except Exception as e:
                Logger.error("kill process error, error:{}".format(str(e)))
    except Exception as e:
        Logger.error("clear process error, error:{}".format(str(e)))
class Executor:
    """
    a process executor with queue

    Each submitted task runs in its own child process; a daemon thread
    drains the command queue and starts, stops, and cleans up those
    processes.
    """
    def __init__(self, service):
        # Unbounded queue of (command, payload) tuples; commands are the
        # module-level _CMD_* constants.
        self._queue = Queue(-1)
        self._thread = None
        # Running multiprocessing.Process objects, one per active task;
        # Process.name carries the task id.
        self._task_list = []
        self.service = service
    def start(self):
        """
        start the loop thread
        :return: none
        """
        t = Thread(target=self._loop_for_queue)
        t.setDaemon(True)
        t.start()
    def submit(self, task):
        """
        submit a new task to queue
        :param task: Task instance
        :return: none
        :raises AttributeError: if task is not a Task instance
        """
        if not isinstance(task, Task):
            raise AttributeError("'task' must instance of Type Task")
        self._queue.put((_CMD_NEWTASK, task,))
    def stop(self, task_id):
        """
        enqueue a stop request for the task's process by task_id
        (the dispatch loop delivers SIGINT to the process)
        :param task_id: task id
        :return: none
        """
        self._queue.put((_CMD_STOPTASK, task_id,))
    def list(self):
        """
        list task in running
        :return: list of task ids (process names) currently tracked
        """
        ret = []
        for p in self._task_list:
            ret.append(p.name)
        return ret
    def _handle(self, task):
        """
        run in a new process
        :param task: task obj
        :return: none
        """
        try:
            # Silence the child process; Ansible and task code may write
            # directly to stdout/stderr.
            sys.stdout = _output_null
            sys.stderr = _output_null
            try:
                Logger.info("task run start, task_id: {}".format(task.get_id()))
                ret = task.run()
                # NOTE(review): the "start-----------" wording looks like
                # leftover debug text; this line actually logs the result.
                Logger.info("task run start-----------, task_ret: {}".format(ret))
                task.success(ret)
                Logger.info("task run success, task_id: {}".format(task.get_id()))
            except AttributeError as ae:
                # Known Ansible teardown quirk; deliberately ignored (see link).
                Logger.info("ansible error see https://github.com/ansible/ansible/issues/14408 we pass --pengtao, ae: {}".format(str(ae)))
                pass
            except Exception as e:
                Logger.error("task run failed, task_id: {} ,error: {}".format(task.get_id(), str(e)))
                task.failed(e)
            finally:
                task.final()
        except Exception as out:
            # NOTE(review): Exception.message exists only on Python 2.
            Logger.error("task error, error: {}".format(out.message))
        finally:
            # Always ask the dispatch loop to drop this task from its list.
            self._queue.put((_CMD_CLEARTASK, task.get_id(),))
    def _append_to_list(self, process):
        # Track a newly started task process.
        self._task_list.append(process)
    def _remove_from_list(self, process_name):
        # Remove and return the tracked process whose name equals
        # process_name, or None when it is not tracked.
        for p in self._task_list:
            if p.name == process_name:
                self._task_list.remove(p)
                return p
        return None
    def _loop_for_queue(self):
        """
        run in a thread receive cmd & arg
        """
        while True:
            try:
                command, obj = self._queue.get()
                flag = True
                if command == _CMD_NEWTASK:
                    # Refuse duplicate submissions of an id already running.
                    for p in self._task_list:
                        if obj.get_id() == p.name:
                            Logger.info("task already in list, task_id: {}".format(obj.get_id()))
                            flag = False
                    if flag:
                        Logger.debug("submit new task, task_id: {}".format(obj.get_id()))
                        p = Process(name=obj.get_id(), target=self._handle, args=(obj,))
                        p.daemon = False
                        p.start()
                        self._append_to_list(p)
                elif command == _CMD_STOPTASK:
                    # remove process from list
                    p = self._remove_from_list(obj)
                    if p is not None and isinstance(p, Process):
                        Logger.info("stop task, task_id: {}, pid: {}".format(p.name, p.pid))
                        # clear Ansible's fork process
                        os.kill(int(p.pid), signal.SIGINT)
                    else:
                        Logger.info("task not found, task_id: {}".format(obj))
                elif command == _CMD_CLEARTASK:
                    p = self._remove_from_list(obj)
                    if p is not None:
                        Logger.debug("task removed ,task_id: {}".format(obj))
                    else:
                        Logger.debug("task not removed ,task_id: {}".format(obj))
            except Exception as e:
                Logger.error("loop thread error, error: {}".format(str(e)))
| gpl-2.0 |
azoft-dev-team/imagrium | env/Lib/unittest/test/test_setups.py | 152 | 16460 | import sys
from cStringIO import StringIO
import unittest
def resultFactory(*_):
    """Ignore all arguments and hand back a fresh ``unittest.TestResult``."""
    result = unittest.TestResult()
    return result
class TestSetups(unittest.TestCase):
    """Exercise unittest's class- and module-level fixtures
    (setUpClass/tearDownClass, setUpModule/tearDownModule): call counts,
    error propagation, skipping, and execution ordering.
    """
    def getRunner(self):
        # Runner that swallows its output and records results in a plain
        # TestResult (via resultFactory).
        return unittest.TextTestRunner(resultclass=resultFactory,
                                          stream=StringIO())
    def runTests(self, *cases):
        # Load all tests from the given TestCase classes and run them
        # through a deliberately awkward suite structure.
        suite = unittest.TestSuite()
        for case in cases:
            tests = unittest.defaultTestLoader.loadTestsFromTestCase(case)
            suite.addTests(tests)
        runner = self.getRunner()
        # creating a nested suite exposes some potential bugs
        realSuite = unittest.TestSuite()
        realSuite.addTest(suite)
        # adding empty suites to the end exposes potential bugs
        suite.addTest(unittest.TestSuite())
        realSuite.addTest(unittest.TestSuite())
        return runner.run(realSuite)
    def test_setup_class(self):
        # setUpClass must run exactly once for a class with two tests.
        class Test(unittest.TestCase):
            setUpCalled = 0
            @classmethod
            def setUpClass(cls):
                Test.setUpCalled += 1
                unittest.TestCase.setUpClass()
            def test_one(self):
                pass
            def test_two(self):
                pass
        result = self.runTests(Test)
        self.assertEqual(Test.setUpCalled, 1)
        self.assertEqual(result.testsRun, 2)
        self.assertEqual(len(result.errors), 0)
    def test_teardown_class(self):
        # tearDownClass must run exactly once per class.
        class Test(unittest.TestCase):
            tearDownCalled = 0
            @classmethod
            def tearDownClass(cls):
                Test.tearDownCalled += 1
                unittest.TestCase.tearDownClass()
            def test_one(self):
                pass
            def test_two(self):
                pass
        result = self.runTests(Test)
        self.assertEqual(Test.tearDownCalled, 1)
        self.assertEqual(result.testsRun, 2)
        self.assertEqual(len(result.errors), 0)
    def test_teardown_class_two_classes(self):
        # Each class gets its own tearDownClass call.
        class Test(unittest.TestCase):
            tearDownCalled = 0
            @classmethod
            def tearDownClass(cls):
                Test.tearDownCalled += 1
                unittest.TestCase.tearDownClass()
            def test_one(self):
                pass
            def test_two(self):
                pass
        class Test2(unittest.TestCase):
            tearDownCalled = 0
            @classmethod
            def tearDownClass(cls):
                Test2.tearDownCalled += 1
                unittest.TestCase.tearDownClass()
            def test_one(self):
                pass
            def test_two(self):
                pass
        result = self.runTests(Test, Test2)
        self.assertEqual(Test.tearDownCalled, 1)
        self.assertEqual(Test2.tearDownCalled, 1)
        self.assertEqual(result.testsRun, 4)
        self.assertEqual(len(result.errors), 0)
    def test_error_in_setupclass(self):
        # A raising setUpClass skips all tests and surfaces one error.
        class BrokenTest(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                raise TypeError('foo')
            def test_one(self):
                pass
            def test_two(self):
                pass
        result = self.runTests(BrokenTest)
        self.assertEqual(result.testsRun, 0)
        self.assertEqual(len(result.errors), 1)
        error, _ = result.errors[0]
        self.assertEqual(str(error),
                    'setUpClass (%s.BrokenTest)' % __name__)
    def test_error_in_teardown_class(self):
        # Tests still run; the tearDownClass failure is reported per class.
        class Test(unittest.TestCase):
            tornDown = 0
            @classmethod
            def tearDownClass(cls):
                Test.tornDown += 1
                raise TypeError('foo')
            def test_one(self):
                pass
            def test_two(self):
                pass
        class Test2(unittest.TestCase):
            tornDown = 0
            @classmethod
            def tearDownClass(cls):
                Test2.tornDown += 1
                raise TypeError('foo')
            def test_one(self):
                pass
            def test_two(self):
                pass
        result = self.runTests(Test, Test2)
        self.assertEqual(result.testsRun, 4)
        self.assertEqual(len(result.errors), 2)
        self.assertEqual(Test.tornDown, 1)
        self.assertEqual(Test2.tornDown, 1)
        error, _ = result.errors[0]
        self.assertEqual(str(error),
                    'tearDownClass (%s.Test)' % __name__)
    def test_class_not_torndown_when_setup_fails(self):
        # tearDownClass must not run if setUpClass raised.
        class Test(unittest.TestCase):
            tornDown = False
            @classmethod
            def setUpClass(cls):
                raise TypeError
            @classmethod
            def tearDownClass(cls):
                Test.tornDown = True
                raise TypeError('foo')
            def test_one(self):
                pass
        self.runTests(Test)
        self.assertFalse(Test.tornDown)
    def test_class_not_setup_or_torndown_when_skipped(self):
        # A class-level skip suppresses both fixtures.
        class Test(unittest.TestCase):
            classSetUp = False
            tornDown = False
            @classmethod
            def setUpClass(cls):
                Test.classSetUp = True
            @classmethod
            def tearDownClass(cls):
                Test.tornDown = True
            def test_one(self):
                pass
        Test = unittest.skip("hop")(Test)
        self.runTests(Test)
        self.assertFalse(Test.classSetUp)
        self.assertFalse(Test.tornDown)
    def test_setup_teardown_order_with_pathological_suite(self):
        # Fixtures must fire at module/class transitions even when each
        # test lives in its own nested sub-suite.
        results = []
        class Module1(object):
            @staticmethod
            def setUpModule():
                results.append('Module1.setUpModule')
            @staticmethod
            def tearDownModule():
                results.append('Module1.tearDownModule')
        class Module2(object):
            @staticmethod
            def setUpModule():
                results.append('Module2.setUpModule')
            @staticmethod
            def tearDownModule():
                results.append('Module2.tearDownModule')
        class Test1(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                results.append('setup 1')
            @classmethod
            def tearDownClass(cls):
                results.append('teardown 1')
            def testOne(self):
                results.append('Test1.testOne')
            def testTwo(self):
                results.append('Test1.testTwo')
        class Test2(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                results.append('setup 2')
            @classmethod
            def tearDownClass(cls):
                results.append('teardown 2')
            def testOne(self):
                results.append('Test2.testOne')
            def testTwo(self):
                results.append('Test2.testTwo')
        class Test3(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                results.append('setup 3')
            @classmethod
            def tearDownClass(cls):
                results.append('teardown 3')
            def testOne(self):
                results.append('Test3.testOne')
            def testTwo(self):
                results.append('Test3.testTwo')
        Test1.__module__ = Test2.__module__ = 'Module'
        Test3.__module__ = 'Module2'
        sys.modules['Module'] = Module1
        sys.modules['Module2'] = Module2
        first = unittest.TestSuite((Test1('testOne'),))
        second = unittest.TestSuite((Test1('testTwo'),))
        third = unittest.TestSuite((Test2('testOne'),))
        fourth = unittest.TestSuite((Test2('testTwo'),))
        fifth = unittest.TestSuite((Test3('testOne'),))
        sixth = unittest.TestSuite((Test3('testTwo'),))
        suite = unittest.TestSuite((first, second, third, fourth, fifth, sixth))
        runner = self.getRunner()
        result = runner.run(suite)
        self.assertEqual(result.testsRun, 6)
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(results,
                         ['Module1.setUpModule', 'setup 1',
                          'Test1.testOne', 'Test1.testTwo', 'teardown 1',
                          'setup 2', 'Test2.testOne', 'Test2.testTwo',
                          'teardown 2', 'Module1.tearDownModule',
                          'Module2.setUpModule', 'setup 3',
                          'Test3.testOne', 'Test3.testTwo',
                          'teardown 3', 'Module2.tearDownModule'])
    def test_setup_module(self):
        # setUpModule runs once for the (fake) module holding the tests.
        class Module(object):
            moduleSetup = 0
            @staticmethod
            def setUpModule():
                Module.moduleSetup += 1
        class Test(unittest.TestCase):
            def test_one(self):
                pass
            def test_two(self):
                pass
        Test.__module__ = 'Module'
        sys.modules['Module'] = Module
        result = self.runTests(Test)
        self.assertEqual(Module.moduleSetup, 1)
        self.assertEqual(result.testsRun, 2)
        self.assertEqual(len(result.errors), 0)
    def test_error_in_setup_module(self):
        # A raising setUpModule skips every class and test in the module.
        class Module(object):
            moduleSetup = 0
            moduleTornDown = 0
            @staticmethod
            def setUpModule():
                Module.moduleSetup += 1
                raise TypeError('foo')
            @staticmethod
            def tearDownModule():
                Module.moduleTornDown += 1
        class Test(unittest.TestCase):
            classSetUp = False
            classTornDown = False
            @classmethod
            def setUpClass(cls):
                Test.classSetUp = True
            @classmethod
            def tearDownClass(cls):
                Test.classTornDown = True
            def test_one(self):
                pass
            def test_two(self):
                pass
        class Test2(unittest.TestCase):
            def test_one(self):
                pass
            def test_two(self):
                pass
        Test.__module__ = 'Module'
        Test2.__module__ = 'Module'
        sys.modules['Module'] = Module
        result = self.runTests(Test, Test2)
        self.assertEqual(Module.moduleSetup, 1)
        self.assertEqual(Module.moduleTornDown, 0)
        self.assertEqual(result.testsRun, 0)
        self.assertFalse(Test.classSetUp)
        self.assertFalse(Test.classTornDown)
        self.assertEqual(len(result.errors), 1)
        error, _ = result.errors[0]
        self.assertEqual(str(error), 'setUpModule (Module)')
    def test_testcase_with_missing_module(self):
        # Tests still run when their __module__ is not importable.
        class Test(unittest.TestCase):
            def test_one(self):
                pass
            def test_two(self):
                pass
        Test.__module__ = 'Module'
        sys.modules.pop('Module', None)
        result = self.runTests(Test)
        self.assertEqual(result.testsRun, 2)
    def test_teardown_module(self):
        # tearDownModule runs once after the module's tests.
        class Module(object):
            moduleTornDown = 0
            @staticmethod
            def tearDownModule():
                Module.moduleTornDown += 1
        class Test(unittest.TestCase):
            def test_one(self):
                pass
            def test_two(self):
                pass
        Test.__module__ = 'Module'
        sys.modules['Module'] = Module
        result = self.runTests(Test)
        self.assertEqual(Module.moduleTornDown, 1)
        self.assertEqual(result.testsRun, 2)
        self.assertEqual(len(result.errors), 0)
    def test_error_in_teardown_module(self):
        # Tests and class fixtures complete; the tearDownModule failure is
        # reported as a single error.
        class Module(object):
            moduleTornDown = 0
            @staticmethod
            def tearDownModule():
                Module.moduleTornDown += 1
                raise TypeError('foo')
        class Test(unittest.TestCase):
            classSetUp = False
            classTornDown = False
            @classmethod
            def setUpClass(cls):
                Test.classSetUp = True
            @classmethod
            def tearDownClass(cls):
                Test.classTornDown = True
            def test_one(self):
                pass
            def test_two(self):
                pass
        class Test2(unittest.TestCase):
            def test_one(self):
                pass
            def test_two(self):
                pass
        Test.__module__ = 'Module'
        Test2.__module__ = 'Module'
        sys.modules['Module'] = Module
        result = self.runTests(Test, Test2)
        self.assertEqual(Module.moduleTornDown, 1)
        self.assertEqual(result.testsRun, 4)
        self.assertTrue(Test.classSetUp)
        self.assertTrue(Test.classTornDown)
        self.assertEqual(len(result.errors), 1)
        error, _ = result.errors[0]
        self.assertEqual(str(error), 'tearDownModule (Module)')
    def test_skiptest_in_setupclass(self):
        # SkipTest raised in setUpClass records a skip, not an error.
        class Test(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                raise unittest.SkipTest('foo')
            def test_one(self):
                pass
            def test_two(self):
                pass
        result = self.runTests(Test)
        self.assertEqual(result.testsRun, 0)
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(len(result.skipped), 1)
        skipped = result.skipped[0][0]
        self.assertEqual(str(skipped), 'setUpClass (%s.Test)' % __name__)
    def test_skiptest_in_setupmodule(self):
        # SkipTest raised in setUpModule records a skip, not an error.
        class Test(unittest.TestCase):
            def test_one(self):
                pass
            def test_two(self):
                pass
        class Module(object):
            @staticmethod
            def setUpModule():
                raise unittest.SkipTest('foo')
        Test.__module__ = 'Module'
        sys.modules['Module'] = Module
        result = self.runTests(Test)
        self.assertEqual(result.testsRun, 0)
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(len(result.skipped), 1)
        skipped = result.skipped[0][0]
        self.assertEqual(str(skipped), 'setUpModule (Module)')
    def test_suite_debug_executes_setups_and_teardowns(self):
        # TestSuite.debug() must honour module/class fixtures in order.
        ordering = []
        class Module(object):
            @staticmethod
            def setUpModule():
                ordering.append('setUpModule')
            @staticmethod
            def tearDownModule():
                ordering.append('tearDownModule')
        class Test(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                ordering.append('setUpClass')
            @classmethod
            def tearDownClass(cls):
                ordering.append('tearDownClass')
            def test_something(self):
                ordering.append('test_something')
        Test.__module__ = 'Module'
        sys.modules['Module'] = Module
        suite = unittest.defaultTestLoader.loadTestsFromTestCase(Test)
        suite.debug()
        expectedOrder = ['setUpModule', 'setUpClass', 'test_something', 'tearDownClass', 'tearDownModule']
        self.assertEqual(ordering, expectedOrder)
    def test_suite_debug_propagates_exceptions(self):
        # Each fixture/test raises in exactly one phase; debug() must let
        # the exception propagate.  Note the deliberate late binding of
        # ``phase`` -- the closures read the loop variable set below.
        class Module(object):
            @staticmethod
            def setUpModule():
                if phase == 0:
                    raise Exception('setUpModule')
            @staticmethod
            def tearDownModule():
                if phase == 1:
                    raise Exception('tearDownModule')
        class Test(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                if phase == 2:
                    raise Exception('setUpClass')
            @classmethod
            def tearDownClass(cls):
                if phase == 3:
                    raise Exception('tearDownClass')
            def test_something(self):
                if phase == 4:
                    raise Exception('test_something')
        Test.__module__ = 'Module'
        sys.modules['Module'] = Module
        _suite = unittest.defaultTestLoader.loadTestsFromTestCase(Test)
        suite = unittest.TestSuite()
        suite.addTest(_suite)
        messages = ('setUpModule', 'tearDownModule', 'setUpClass', 'tearDownClass', 'test_something')
        for phase, msg in enumerate(messages):
            with self.assertRaisesRegexp(Exception, msg):
                suite.debug()
# Allow running this test module directly: python test_setups.py
if __name__ == '__main__':
    unittest.main()
| mit |
peoplepower/composer-sdk-python | com.ppc.Bot/devices/thermostat/thermostat_sensibo_sky.py | 1 | 3433 | '''
Created on March 27, 2017
This file is subject to the terms and conditions defined in the
file 'LICENSE.txt', which is part of this source code package.
@author: David Moss
'''
# Device Model
# https://presence.atlassian.net/wiki/display/devices/Thermostat
from devices.thermostat.thermostat import ThermostatDevice
# Set the default Decimal context precision.
# NOTE(review): prec = 1 below keeps only ONE significant digit, not the
# "3 numbers" the old comment claimed -- confirm which precision is intended.
from decimal import *
getcontext().prec = 1
class ThermostatSensiboSkyDevice(ThermostatDevice):
    """Sensibo Sky Thermostat Device

    Controls the unit through the 'powerStatus' measurement/command
    ("1" = on, "0" = off) and supports save/restore of the power state.
    """
    # Name of the measurement / command parameter carrying the power state.
    MEASUREMENT_NAME_POWER_STATUS = 'powerStatus'
    # List of Device Types this class is compatible with
    DEVICE_TYPES = [4220]
    # Minimum setpoint in Celsius
    MIN_SETPOINT_C = 7.0
    # Maximum setpoint in Celsius
    MAX_SETPOINT_C = 30.0
    def __init__(self, botengine, device_id, device_type, device_description, precache_measurements=True):
        """
        Constructor
        :param botengine: BotEngine environment
        :param device_id: Device ID
        :param device_type: Device type
        :param device_description: Device description
        :param precache_measurements: True to pre-cache measurements
        """
        ThermostatDevice.__init__(self, botengine, device_id, device_type, device_description, precache_measurements=precache_measurements)
        # The boolean on/off state of this device that was saved; None when
        # no state has been saved.
        self.saved_state = None
    def initialize(self, botengine):
        """
        Initialization
        :param botengine:
        :return:
        """
        ThermostatDevice.initialize(self, botengine)
        # Instances restored from before this attribute existed may lack it.
        if not hasattr(self, 'saved_state'):
            self.saved_state = None
    def get_device_type_name(self, language):
        """
        :return: the name of this device type in the given language, for example, "Entry Sensor"
        """
        # NOTE: Device type name
        return _("Sensibo Sky Thermostat")
    def is_on(self, botengine=None):
        """
        :param botengine:
        :return: True if the unit is on
        """
        if ThermostatSensiboSkyDevice.MEASUREMENT_NAME_POWER_STATUS in self.measurements:
            # Most recent cached powerStatus value.
            return self.measurements[ThermostatSensiboSkyDevice.MEASUREMENT_NAME_POWER_STATUS][0][0]
        return False
    def on(self, botengine):
        """
        Turn the A/C on
        :param botengine:
        :return:
        """
        botengine.get_logger().info("Sensibo: on()")
        botengine.send_command(self.device_id, ThermostatSensiboSkyDevice.MEASUREMENT_NAME_POWER_STATUS, "1")
    def off(self, botengine):
        """
        Turn the A/C off
        :param botengine:
        :return:
        """
        botengine.get_logger().info("Sensibo: off()")
        botengine.send_command(self.device_id, ThermostatSensiboSkyDevice.MEASUREMENT_NAME_POWER_STATUS, "0")
    def save(self, botengine=None):
        """
        Save the current state
        :param botengine:
        :return:
        """
        botengine.get_logger().info("Sensibo: save()")
        self.saved_state = self.is_on(botengine)
    def restore(self, botengine):
        """
        Restore any previously saved state, then forget it
        :param botengine:
        :return:
        """
        if self.saved_state is not None:
            botengine.get_logger().info("Sensibo: restore()")
            # BUG FIX: on()/off() send the command strings "1"/"0", but the
            # original sent str(self.saved_state), which yields "True"/"False"
            # when is_on() returned a boolean.  Translate the saved state into
            # the same representation on()/off() use.
            # NOTE(review): assumes the server expects "1"/"0" exactly as in
            # on()/off() -- confirm against the device command schema.
            botengine.send_command(self.device_id, ThermostatSensiboSkyDevice.MEASUREMENT_NAME_POWER_STATUS, "1" if self.saved_state else "0")
            self.saved_state = None
    def is_saved(self, botengine=None):
        """
        :param botengine:
        :return: True if this device's state is already saved
        """
        # BUG FIX: the original returned the saved on/off value itself, so a
        # device saved while OFF reported is_saved() == False.  A state is
        # "saved" whenever saved_state holds any value at all.
        return self.saved_state is not None
ychen820/microblog | y/google-cloud-sdk/platform/google_appengine/lib/django-0.96/django/db/backends/sqlite3/introspection.py | 33 | 3296 | from django.db.backends.sqlite3.base import quote_name
def get_table_list(cursor):
    """Return the names of all user tables in the current database, sorted.

    The ``sqlite_sequence`` system table (used by SQLite for AUTOINCREMENT
    bookkeeping) is excluded.
    """
    cursor.execute("""
        SELECT name FROM sqlite_master
        WHERE type='table' AND NOT name='sqlite_sequence'
        ORDER BY name""")
    tables = []
    for row in cursor.fetchall():
        tables.append(row[0])
    return tables
def get_table_description(cursor, table_name):
    """Describe *table_name* in the DB-API ``cursor.description`` shape.

    Each entry is the 7-tuple ``(name, type_code, display_size,
    internal_size, precision, scale, null_ok)``; the size/precision slots
    are unknown for SQLite and therefore ``None``.
    """
    description = []
    for info in _table_info(cursor, table_name):
        description.append((info['name'], info['type'], None, None, None,
                            None, info['null_ok']))
    return description
def get_relations(cursor, table_name):
    """Relation introspection is not supported by this SQLite backend."""
    raise NotImplementedError
def get_indexes(cursor, table_name):
    """
    Returns a dictionary of fieldname -> infodict for the given table,
    where each infodict is in the format:
        {'primary_key': boolean representing whether it's the primary key,
         'unique': boolean representing whether it's a unique index}
    """
    field_info = {}
    for column in _table_info(cursor, table_name):
        field_info[column['name']] = {'primary_key': column['pk'] != 0,
                                      'unique': False}
    # PRAGMA index_list rows are (seq, name, unique).
    cursor.execute('PRAGMA index_list(%s)' % quote_name(table_name))
    for row in cursor.fetchall():
        index_name, is_unique = row[1], row[2]
        if not is_unique:
            continue
        # PRAGMA index_info rows are (seqno, cid, name).
        cursor.execute('PRAGMA index_info(%s)' % quote_name(index_name))
        members = cursor.fetchall()
        if len(members) != 1:
            # Skip indexes that span multiple fields.
            continue
        field_info[members[0][2]]['unique'] = True
    return field_info
def _table_info(cursor, name):
    """Return ``PRAGMA table_info`` rows for *name* as dictionaries.

    Each PRAGMA row is ``(cid, name, type, notnull, dflt_value, pk)``.
    """
    cursor.execute('PRAGMA table_info(%s)' % quote_name(name))
    rows = cursor.fetchall()
    result = []
    for row in rows:
        result.append({'name': row[1],
                       'type': row[2],
                       'null_ok': not row[3],
                       'pk': row[5]  # undocumented
                       })
    return result
# Maps SQL types to Django Field types. Some of the SQL types have multiple
# entries here because SQLite allows for anything and doesn't normalize the
# field type; it uses whatever was given.
BASE_DATA_TYPES_REVERSE = {
    'bool': 'BooleanField',
    'boolean': 'BooleanField',
    'smallint': 'SmallIntegerField',
    'smallinteger': 'SmallIntegerField',
    'int': 'IntegerField',
    'integer': 'IntegerField',
    'text': 'TextField',
    'char': 'CharField',
    'date': 'DateField',
    'datetime': 'DateTimeField',
    'time': 'TimeField',
}
class FlexibleFieldLookupDict:
    """Dictionary-like lookup from an SQLite column type to a Django field.

    A plain dict is not enough because SQLite column types may embed
    parameters -- e.g. ``varchar(30)`` -- which cannot be matched by a
    simple key lookup.
    """
    def __getitem__(self, key):
        key = key.lower()
        if key in BASE_DATA_TYPES_REVERSE:
            return BASE_DATA_TYPES_REVERSE[key]
        import re
        match = re.search(r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$', key)
        if match is None:
            raise KeyError
        return ('CharField', {'maxlength': int(match.group(1))})
DATA_TYPES_REVERSE = FlexibleFieldLookupDict()
| bsd-3-clause |
dev4dev-io/pynt-aws | pyntaws/services/_cloudformation.py | 1 | 11141 | import boto3, botocore
import os, uuid, json, sys
from urlparse import urlparse
from boto3.s3.transfer import S3Transfer
from pyntaws.services._session import AWSSession
import __builtin__
# Process-wide registry of template files that have already passed
# validation, shared through __builtin__ so every AWSCloudFormation
# instance sees (and appends to) the same list.
__builtin__.validated_templates = list()
class AWSCloudFormation(AWSSession):
    """CloudFormation helper bound to one stack: validate/upload templates
    to S3, create/update/delete the stack, and read stack outputs.

    Python 2 module (print statements, ``raise E, msg, tb`` syntax).
    """
    def __init__(self, **kwargs):
        """
        :param profile_name: AWS profile to open the boto3 session with (required)
        :param stack_name: CloudFormation stack name (required)
        :param template: main template filename under ./src/main/cloudformation/ (full service only)
        :param includes: optional list of additional template files
        :param resources: optional list of resource files under ./src/main/resources/
        :param parameters: optional list of {'ParameterKey', 'ParameterValue'} dicts
        :param s3_uri: s3://bucket/key destination for the uploaded templates
        :param on_failure: CloudFormation OnFailure behaviour, defaults to ROLLBACK
        """
        super(self.__class__, self).__init__(kwargs['profile_name'])
        self.stack_name = kwargs['stack_name']
        if len(kwargs) == 2 and 'profile_name' in kwargs and 'stack_name' in kwargs:
            # Easy service, for lookups only
            self.easy_service = True
        else:
            self.easy_service = False
            self.on_failure = kwargs.get('on_failure', 'ROLLBACK')
            if 'template' in kwargs and type(kwargs['template']) == str:
                self.template = kwargs['template']
            else:
                raise Exception('Missing or wrong parameter: template')
            self.includes = list()
            if 'includes' in kwargs:
                if type(kwargs['includes']) == list:
                    self.includes = kwargs['includes']
                else:
                    raise Exception('Wrong parameter type: includes = {}'.format(type(kwargs['includes'])))
            self.resources = list()
            if 'resources' in kwargs:
                if type(kwargs['resources']) == list:
                    self.resources = kwargs['resources']
                else:
                    raise Exception('Wrong parameter type: resources = {}'.format(type(kwargs['resources'])))
            if 'parameters' in kwargs:
                self.parameters = kwargs['parameters']
            else:
                self.parameters = None
            # Resolve the main template relative to the conventional source layout.
            self.template = os.path.abspath(os.path.join(os.getcwd(), './src/main/cloudformation/', self.template))
            # Resolve include paths: explicit relative paths stay relative to
            # the cwd, bare names resolve under ./src/main/cloudformation/.
            for idx, template in enumerate(self.includes):
                if not os.path.isabs(template):
                    if template.startswith('./') or template.startswith('../'):
                        self.includes[idx] = os.path.abspath(os.path.join(os.getcwd(), template))
                    else:
                        self.includes[idx] = os.path.abspath(os.path.join(os.getcwd(), './src/main/cloudformation/', template))
                if not os.path.isfile(self.includes[idx]):
                    raise Exception("Can't find template file '{}' ({})".format(template, self.includes[idx]))
            # Resource files resolve the same way, but under ./src/main/resources/.
            for idx, file in enumerate(self.resources):
                if not os.path.isabs(file):
                    if file.startswith('./') or file.startswith('../'):
                        self.resources[idx] = os.path.abspath(os.path.join(os.getcwd(), file))
                    else:
                        self.resources[idx] = os.path.abspath(os.path.join(os.getcwd(), './src/main/resources/', file))
                if not os.path.isfile(self.resources[idx]):
                    raise Exception("Can't find resource file '{}' ({})".format(file, self.resources[idx]))
            # Split s3://bucket/key; a key ending in '/' gets the template's
            # basename appended, and a leading '/' is stripped.
            url = urlparse(kwargs['s3_uri'])
            self.s3_bucket = url.netloc
            self.s3_key = url.path
            if self.s3_key.endswith('/'):
                self.s3_key = "%s%s" % (self.s3_key, os.path.basename(self.template))
            if self.s3_key.startswith('/'):
                self.s3_key = self.s3_key[1:]
            # Register this (full) service globally for other tasks to reuse.
            __builtin__.aws_cloudformation = self
    # @property
    # def s3_uri(self):
    #     return self._s3_uri
    def exists(self):
        """Return True when the stack exists in a stable (``*_COMPLETE``) state.

        NOTE(review): the ``return False`` inside the ``while`` body means only
        the first describe_stacks page is ever examined, and the ClientError
        path can fall through returning None (falsy) -- confirm intent.
        """
        cloudformation = self.session.client('cloudformation')
        try:
            stack = None
            nextToken = None
            while not stack:
                resp = None
                if nextToken:
                    resp = cloudformation.describe_stacks(StackName = self.stack_name, NextToken = nextToken)
                else:
                    resp = cloudformation.describe_stacks(StackName = self.stack_name)
                for stack in resp['Stacks']:
                    if stack['StackStatus'] in ['CREATE_COMPLETE', 'ROLLBACK_COMPLETE','UPDATE_COMPLETE','UPDATE_ROLLBACK_COMPLETE']:
                        return True
                if 'NextToken' in stack:
                    nextToken = stack['NextToken']
                return False
        except botocore.exceptions.ClientError as err:
            err_msg = err.response['Error']['Message']
            err_code = err.response['Error']['Code']
            # "Stack does not exist" surfaces as a ValidationError ClientError.
            if err_msg != "Stack with id {} does not exist".format(self.stack_name) and err_code != 'ValidationError':
                return False
    def outputs(self, output_key, **kwargs):
        """Alias for :meth:`output` (plural spelling kept for callers)."""
        return self.output(output_key, **kwargs)
    def output(self, output_key, **kwargs):
        """Return the value of stack output *output_key*, or None.

        :param output_key: OutputKey to look up in the stack's Outputs
        :param no_fail: when truthy, print instead of raising on lookup errors

        NOTE(review): the exception-handler condition appears inverted -- it
        prints/raises "does not exist" precisely when the error is NOT the
        does-not-exist ValidationError.  Confirm against the original intent.
        """
        cloudformation = self.session.client('cloudformation')
        no_fail = False
        if kwargs:
            no_fail = kwargs.get('no_fail', False)
        try:
            stack = None
            nextToken = None
            while not stack:
                resp = None
                if nextToken:
                    resp = cloudformation.describe_stacks(StackName = self.stack_name, NextToken = nextToken)
                else:
                    resp = cloudformation.describe_stacks(StackName = self.stack_name)
                for stack in resp['Stacks']:
                    if stack['StackStatus'] in ['CREATE_COMPLETE', 'ROLLBACK_COMPLETE','UPDATE_COMPLETE','UPDATE_ROLLBACK_COMPLETE']:
                        break
                if 'NextToken' in stack:
                    nextToken = stack['NextToken']
            # output_value = None
            if 'Outputs' in stack:
                for output in stack['Outputs']:
                    if output['OutputKey'] == output_key:
                        return output['OutputValue']
        except botocore.exceptions.ClientError as err:
            err_msg = err.response['Error']['Message']
            err_code = err.response['Error']['Code']
            if err_msg != "Stack with id {} does not exist".format(self.stack_name) and err_code != 'ValidationError':
                if no_fail:
                    print "Stack with id {} does not exist".format(self.stack_name)
                else:
                    raise Exception, "Stack with id {} does not exist".format(self.stack_name), sys.exc_info()[2]
        print "Can't find output parameter %s in stack %s under %s profile" % (output_key, self.stack_name, self.profile_name)
        return None
    def validate(self, details = False):
        """Upload each template to a temp S3 key, run validate_template on it,
        then delete the temp object.  Templates already validated in this
        process (tracked in __builtin__.validated_templates) are skipped.

        :param details: when True, pretty-print the validation response
        """
        s3 = self.session.client('s3')
        for template in ([self.template] + self.includes):
            if template in __builtin__.validated_templates:
                print 'Template {} has been validated already'.format(template)
                continue
            else:
                __builtin__.validated_templates.append(template)
            # Unique temp key avoids collisions between concurrent runs.
            temp_filename = "temp/%s-%s" % (uuid.uuid4(), os.path.basename(template))
            print "Uploading %s to temporary location s3://%s/%s" % (template, self.s3_bucket, temp_filename)
            S3Transfer(s3).upload_file(
                template,
                self.s3_bucket,
                temp_filename,
                extra_args={'ACL': 'bucket-owner-full-control'}
            )
            template_url = "https://s3.amazonaws.com/%s/%s" % (self.s3_bucket, temp_filename)
            print "Validating template %s" % template_url
            resp = self.session.client('cloudformation').validate_template(
                TemplateURL = template_url
            )
            if details:
                print 'Template {} details: {}'.format(template, json.dumps(resp, indent=2, separators=(',', ': ')))
            print "Removing temporary file /%s from s3" % temp_filename
            s3.delete_object(
                Bucket = self.s3_bucket,
                Key = temp_filename,
            )
    def create(self, **kwargs):
        """Upload the templates and create the stack, blocking until
        stack_create_complete.

        :param stack_name: optional override of the configured stack name
        :param parameters: extra parameters merged over the configured ones
        """
        self._upload()
        cloudformation = self.session.client('cloudformation')
        stack_name = self.stack_name
        if 'stack_name' in kwargs:
            stack_name = kwargs.get('stack_name')
        template_url = "https://s3.amazonaws.com/%s/%s" % (self.s3_bucket, self.s3_key)
        print "Creating stack {}".format(stack_name)
        resp = cloudformation.create_stack(
            StackName = stack_name,
            TemplateURL = template_url,
            Capabilities = ['CAPABILITY_NAMED_IAM'],
            OnFailure = self.on_failure,
            Parameters = self._join_parameters(self.parameters, kwargs.get('parameters', None))
        )
        waiter = cloudformation.get_waiter('stack_create_complete')
        waiter.wait(
            StackName = resp['StackId']
        )
        return
    def update(self, **kwargs):
        """Upload the templates and update the stack, blocking until
        stack_update_complete.

        :param stack_name: optional override of the configured stack name
        :param parameters: extra parameters merged over the configured ones
        """
        self._upload()
        cloudformation = self.session.client('cloudformation')
        stack_name = self.stack_name
        if 'stack_name' in kwargs:
            stack_name = kwargs.get('stack_name')
        template_url = "https://s3.amazonaws.com/%s/%s" % (self.s3_bucket, self.s3_key)
        print "Updating stack {}".format(stack_name)
        resp = cloudformation.update_stack(
            StackName = stack_name,
            TemplateURL = template_url,
            Capabilities = ['CAPABILITY_NAMED_IAM'],
            Parameters = self._join_parameters(self.parameters, kwargs.get('parameters', None))
        )
        waiter = cloudformation.get_waiter('stack_update_complete')
        waiter.wait(
            StackName = resp['StackId']
        )
        return
    def delete(self, **kwargs):
        """Delete the stack and block until stack_delete_complete.

        :param stack_name: optional override of the configured stack name
        """
        cloudformation = self.session.client('cloudformation')
        stack_name = self.stack_name
        if 'stack_name' in kwargs:
            stack_name = kwargs.get('stack_name')
        cloudformation.delete_stack(
            StackName = stack_name
        )
        waiter = cloudformation.get_waiter('stack_delete_complete')
        waiter.wait(
            StackName = stack_name
        )
        return
    def estimate_cost(self, **kwargs):
        """Upload the templates and print the URL of the AWS cost estimate
        for this stack's template/parameters.

        :param stack_name: optional override (currently only read, not sent)
        :param parameters: extra parameters merged over the configured ones
        """
        self._upload()
        cloudformation = self.session.client('cloudformation')
        stack_name = self.stack_name
        if 'stack_name' in kwargs:
            stack_name = kwargs.get('stack_name')
        template_url = "https://s3.amazonaws.com/%s/%s" % (self.s3_bucket, self.s3_key)
        print "Estimating template s3://{}/{}".format(self.s3_bucket, self.s3_key)
        resp = cloudformation.estimate_template_cost(
            TemplateURL = template_url,
            Parameters = self._join_parameters(self.parameters, kwargs.get('parameters', None))
        )
        print 'Check URL to see your template costs estimateion:\n{}'.format(resp['Url'])
        return
    def _upload(self):
        """Upload the main template to its configured key, and every include
        and resource file next to it (same key prefix, original basename)."""
        print "Uploading %s to s3://%s/%s" % (self.template, self.s3_bucket, self.s3_key)
        S3Transfer(self.session.client('s3')).upload_file(
            self.template,
            self.s3_bucket,
            self.s3_key,
            extra_args={'ACL': 'bucket-owner-full-control'}
        )
        # Reduce the main key to its directory prefix for the sibling files.
        s3_key = self.s3_key
        if not s3_key.endswith('/'):
            s3_key = s3_key[:s3_key.rfind('/')+1]
        for file in (self.includes + self.resources):
            file_s3_key = '{}{}'.format(s3_key, os.path.basename(file))
            print "Uploading %s to s3://%s/%s" % (file, self.s3_bucket, file_s3_key)
            S3Transfer(self.session.client('s3')).upload_file(
                file,
                self.s3_bucket,
                file_s3_key,
                extra_args={'ACL': 'bucket-owner-full-control'}
            )
    def _join_parameters(self, params1, params2):
        """Merge two CloudFormation parameter lists; entries in *params2*
        override same-keyed entries in *params1*.  Either may be None.

        :return: merged list of {'ParameterKey', 'ParameterValue'} dicts
        """
        if (params1 and type(params1) != list) or (params2 and type(params2) != list):
            raise Exception("Parameters argument should be a list() or None")
        if not params1 and params2:
            return params2
        elif params1 and not params2:
            return params1
        elif params1 and params2:
            result_d = dict()
            for param in params1:
                result_d[param['ParameterKey']] = param['ParameterValue']
            for param in params2:
                result_d[param['ParameterKey']] = param['ParameterValue']
            result = list()
            for key in result_d:
                result.append({
                    'ParameterKey': key,
                    'ParameterValue': result_d[key]
                })
            return result
        else:
            return list()
| apache-2.0 |
Nick-OpusVL/odoo | openerp/addons/base/report/preview_report.py | 447 | 1457 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2011 OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.report import report_sxw
class rmlparser(report_sxw.rml_parse):
    """RML parser that binds the report's company to the first record."""

    def set_context(self, objects, data, ids, report_type=None):
        # Let the base parser build its context first, then take the
        # company from the first object in the report.
        super(rmlparser, self).set_context(objects, data, ids, report_type)
        self.setCompany(objects[0])
# Register the preview report for res.company, rendered from the RML
# template with an external header and the company-aware parser.
report_sxw.report_sxw('report.preview.report', 'res.company',
       'addons/base/report/preview_report.rml', parser=rmlparser, header='external')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ksrajkumar/openerp-6.1 | openerp/addons/base_report_designer/__openerp__.py | 9 | 1886 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP 6.1 addon manifest: metadata, dependencies and data files for
# the OpenOffice report designer plugin.
{
    'name': 'OpenOffice Report Designer',
    'version': '0.1',
    'category': 'Reporting',
    'complexity': "normal",
    'description': """
This module is used along with OpenERP OpenOffice Plugin.
=========================================================
This module adds wizards to Import/Export .sxw report that
you can modify in OpenOffice. Once you have modified it you can
upload the report using the same wizard.
""",
    'author': 'OpenERP SA',
    'website': 'http://www.openerp.com',
    'depends': ['base'],
    # XML data files shipped with the module (wizard view + installer).
    'init_xml': ['wizard/base_report_design_view.xml'],
    'update_xml': ['base_report_designer_installer.xml'],
    'demo_xml': [],
    'installable': True,
    # Not installed automatically when its dependencies are present.
    'auto_install': False,
    'certificate': '0056379010493',
    'images': ['images/base_report_designer1.jpeg','images/base_report_designer2.jpeg',],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
tobspr/panda3d | direct/src/leveleditor/ActionMgr.py | 6 | 14542 | from pandac.PandaModules import *
from . import ObjectGlobals as OG
class ActionMgr:
    """Keeps undo/redo stacks of executed editor actions."""

    def __init__(self):
        # Most recent action sits at the end of each list.
        self.undoList = []
        self.redoList = []

    def reset(self):
        """Destroy every recorded action and empty both stacks."""
        while self.undoList:
            self.undoList.pop().destroy()
        while self.redoList:
            self.redoList.pop().destroy()

    def push(self, action):
        """Record a newly executed action on the undo stack."""
        self.undoList.append(action)
        # Drop the most recent redo entry, if there is one.
        if self.redoList:
            self.redoList.pop()

    def undo(self):
        """Undo the most recent action, moving it onto the redo stack."""
        if not self.undoList:
            print('No more undo')
            return
        action = self.undoList.pop()
        self.redoList.append(action)
        action.undo()

    def redo(self):
        """Redo the most recently undone action, moving it back to undo."""
        if not self.redoList:
            print('No more redo')
            return
        action = self.redoList.pop()
        self.undoList.append(action)
        action.redo()
class ActionBase(Functor):
    """ Base class for user actions """
    def __init__(self, function, *args, **kargs):
        # NOTE(review): self.function is captured before the None fallback
        # below, so it can stay None while Functor receives nullFunc --
        # confirm this is intended.
        self.function = function
        if function is None:
            # Substitute a no-op so Functor always wraps something callable.
            def nullFunc():
                pass
            function = nullFunc
        Functor.__init__(self, function, *args, **kargs)
        # Result of the most recent invocation (None until first call).
        self.result = None
    def _do__call__(self, *args, **kargs):
        # Template method: snapshot state, run the wrapped function, then
        # let the subclass post-process; the result is kept for undo/redo.
        self.saveStatus()
        self.result = Functor._do__call__(self, *args, **kargs)
        self.postCall()
        return self.result
    # needed this line to override _do__call__
    __call__ = _do__call__
    def redo(self):
        # Re-run the wrapped function with no arguments.
        self.result = self._do__call__()
        return self.result
    def saveStatus(self):
        # save object status for undo here
        pass
    def postCall(self):
        # implement post process here
        pass
    def undo(self):
        print("undo method is not defined for this action")
class ActionAddNewObj(ActionBase):
    """ Action class for adding new object """
    def __init__(self, editor, *args, **kargs):
        self.editor = editor
        function = self.editor.objectMgr.addNewObject
        ActionBase.__init__(self, function, *args, **kargs)
        # uid of the created object, filled in by postCall so the add can
        # be redone under the same id.
        self.uid = None
    def postCall(self):
        # Remember the uid of the object that was just created.
        obj = self.editor.objectMgr.findObjectByNodePath(self.result)
        if obj:
            self.uid = obj[OG.OBJ_UID]
    def redo(self):
        if self.uid is None:
            print("Can't redo this add")
        else:
            # Re-create the object under its original uid.
            self.result = self._do__call__(uid=self.uid)
            return self.result
    def undo(self):
        if self.result is None:
            print("Can't undo this add")
        else:
            print("Undo: addNewObject")
            # Prefer the recorded uid; fall back to resolving the NodePath.
            if self.uid:
                obj = self.editor.objectMgr.findObjectById(self.uid)
            else:
                obj = self.editor.objectMgr.findObjectByNodePath(self.result)
            if obj:
                self.uid = obj[OG.OBJ_UID]
                # Remove the object from the scene graph UI and the scene.
                self.editor.ui.sceneGraphUI.delete(self.uid)
                base.direct.deselect(obj[OG.OBJ_NP])
                base.direct.removeNodePath(obj[OG.OBJ_NP])
                self.result = None
            else:
                print("Can't undo this add")
class ActionDeleteObj(ActionBase):
    """Undoable action wrapping 'delete all selected objects'."""

    def __init__(self, editor, *args, **kargs):
        self.editor = editor
        function = base.direct.removeAllSelected
        ActionBase.__init__(self, function, *args, **kargs)
        # uids that were directly selected when the deletion happened
        self.selectedUIDs = []
        # uid -> parent uid (None when parented directly to render)
        self.hierarchy = {}
        # uid -> object record / transform, captured before deletion
        self.objInfos = {}
        self.objTransforms = {}

    def saveStatus(self):
        """Record every selected object and its subtree so undo can rebuild it."""
        selectedNPs = base.direct.selected.getSelectedAsList()

        def saveObjStatus(np, isRecursive=True):
            obj = self.editor.objectMgr.findObjectByNodePath(np)
            if obj:
                uid = obj[OG.OBJ_UID]
                if not isRecursive:
                    # Only remember the top-level selection for re-selecting.
                    self.selectedUIDs.append(uid)
                objNP = obj[OG.OBJ_NP]
                self.objInfos[uid] = obj
                self.objTransforms[uid] = objNP.getMat()
                parentNP = objNP.getParent()
                if parentNP == render:
                    self.hierarchy[uid] = None
                else:
                    parentObj = self.editor.objectMgr.findObjectByNodePath(parentNP)
                    if parentObj:
                        self.hierarchy[uid] = parentObj[OG.OBJ_UID]
                # Recurse into children tagged as object roots.
                for child in np.getChildren():
                    if child.hasTag('OBJRoot'):
                        saveObjStatus(child)

        for np in selectedNPs:
            saveObjStatus(np, False)

    def undo(self):
        """Recreate the deleted objects (parents first) and restore selection."""
        if len(self.hierarchy) == 0 or\
           len(self.objInfos) == 0:
            print("Can't undo this deletion")
        else:
            print("Undo: deleteObject")

            def restoreObject(uid, parentNP):
                obj = self.objInfos[uid]
                objDef = obj[OG.OBJ_DEF]
                objProp = obj[OG.OBJ_PROP]
                objRGBA = obj[OG.OBJ_RGBA]
                objNP = self.editor.objectMgr.addNewObject(objDef.name,
                                                           uid,
                                                           obj[OG.OBJ_MODEL],
                                                           parentNP)
                self.editor.objectMgr.updateObjectColor(objRGBA[0], objRGBA[1], objRGBA[2], objRGBA[3], objNP)
                self.editor.objectMgr.updateObjectProperties(objNP, objProp)
                objNP.setMat(self.objTransforms[uid])

            # Restore in passes: a child is only restored once its parent
            # exists.  Iterate over a snapshot of the keys -- deleting from
            # the dict while iterating it directly raises RuntimeError.
            while len(self.hierarchy) > 0:
                for uid in list(self.hierarchy):
                    if self.hierarchy[uid] is None:
                        restoreObject(uid, None)
                        del self.hierarchy[uid]
                    else:
                        parentObj = self.editor.objectMgr.findObjectById(self.hierarchy[uid])
                        if parentObj:
                            restoreObject(uid, parentObj[OG.OBJ_NP])
                            del self.hierarchy[uid]

            base.direct.deselectAllCB()
            for uid in self.selectedUIDs:
                obj = self.editor.objectMgr.findObjectById(uid)
                if obj:
                    self.editor.select(obj[OG.OBJ_NP], fMultiSelect=1, fUndo=0)

            # Fixed typo: was 'self.selecteUIDs', which created a new
            # attribute and left the real list populated.
            self.selectedUIDs = []
            self.hierarchy = {}
            self.objInfos = {}
class ActionDeleteObjById(ActionBase):
    """Undoable action wrapping 'delete the object with a given uid'."""

    def __init__(self, editor, uid):
        self.editor = editor
        function = self.editor.objectMgr.removeObjectById
        self.uid = uid
        ActionBase.__init__(self, function, self.uid)
        # uid -> parent uid / object record / transform, captured pre-delete
        self.hierarchy = {}
        self.objInfos = {}
        self.objTransforms = {}

    def saveStatus(self):
        """Record the target object and its subtree so undo can rebuild it."""
        def saveObjStatus(uid_np, isUID=False):
            # The root is identified by uid; children by NodePath.
            if isUID:
                obj = self.editor.objectMgr.findObjectById(uid_np)
            else:
                obj = self.editor.objectMgr.findObjectByNodePath(uid_np)
            if obj:
                uid = obj[OG.OBJ_UID]
                objNP = obj[OG.OBJ_NP]
                self.objInfos[uid] = obj
                self.objTransforms[uid] = objNP.getMat()
                parentNP = objNP.getParent()
                if parentNP == render:
                    self.hierarchy[uid] = None
                else:
                    parentObj = self.editor.objectMgr.findObjectByNodePath(parentNP)
                    if parentObj:
                        self.hierarchy[uid] = parentObj[OG.OBJ_UID]
                for child in objNP.getChildren():
                    if child.hasTag('OBJRoot'):
                        saveObjStatus(child)
        saveObjStatus(self.uid, True)

    def undo(self):
        """Recreate the deleted subtree, parents before children."""
        if len(self.hierarchy) == 0 or\
           len(self.objInfos) == 0:
            print("Can't undo this deletion")
        else:
            print("Undo: deleteObjectById")

            def restoreObject(uid, parentNP):
                obj = self.objInfos[uid]
                objDef = obj[OG.OBJ_DEF]
                objProp = obj[OG.OBJ_PROP]
                objRGBA = obj[OG.OBJ_RGBA]
                objNP = self.editor.objectMgr.addNewObject(objDef.name,
                                                           uid,
                                                           obj[OG.OBJ_MODEL],
                                                           parentNP)
                self.editor.objectMgr.updateObjectColor(objRGBA[0], objRGBA[1], objRGBA[2], objRGBA[3], objNP)
                self.editor.objectMgr.updateObjectProperties(objNP, objProp)
                objNP.setMat(self.objTransforms[uid])

            # Iterate over a snapshot of the keys: deleting from the dict
            # while iterating it directly raises RuntimeError.
            while len(self.hierarchy) > 0:
                for uid in list(self.hierarchy):
                    if self.hierarchy[uid] is None:
                        restoreObject(uid, None)
                        del self.hierarchy[uid]
                    else:
                        parentObj = self.editor.objectMgr.findObjectById(self.hierarchy[uid])
                        if parentObj:
                            restoreObject(uid, parentObj[OG.OBJ_NP])
                            del self.hierarchy[uid]

            self.hierarchy = {}
            self.objInfos = {}
class ActionChangeHierarchy(ActionBase):
    """Undoable action wrapping a scene-graph reparenting."""

    def __init__(self, editor, oldGrandParentId, oldParentId, newParentId, childName, *args, **kargs):
        self.editor = editor
        # Remember enough context to reverse the reparenting later.
        self.oldGrandParentId = oldGrandParentId
        self.oldParentId = oldParentId
        self.newParentId = newParentId
        self.childName = childName
        ActionBase.__init__(self, self.editor.ui.sceneGraphUI.parent,
                            self.oldParentId, self.newParentId, self.childName, **kargs)

    def undo(self):
        # Reattach the child to its previous parent under the old grandparent.
        self.editor.ui.sceneGraphUI.parent(self.oldParentId, self.oldGrandParentId, self.childName)
class ActionSelectObj(ActionBase):
    """Undoable action wrapping object selection."""

    def __init__(self, editor, *args, **kargs):
        self.editor = editor
        ActionBase.__init__(self, base.direct.selectCB, *args, **kargs)
        self.selectedUIDs = []

    def saveStatus(self):
        # Snapshot the uids of everything currently selected.
        for node in base.direct.selected.getSelectedAsList():
            found = self.editor.objectMgr.findObjectByNodePath(node)
            if found:
                self.selectedUIDs.append(found[OG.OBJ_UID])

    def undo(self):
        print("Undo : selectObject")
        base.direct.deselectAllCB()
        # Re-select whatever was selected before this action ran.
        for uid in self.selectedUIDs:
            found = self.editor.objectMgr.findObjectById(uid)
            if found:
                self.editor.select(found[OG.OBJ_NP], fMultiSelect=1, fUndo=0)
        self.selectedUIDs = []
class ActionTransformObj(ActionBase):
    """ Action class for object transformation """
    def __init__(self, editor, *args, **kargs):
        self.editor = editor
        function = self.editor.objectMgr.setObjectTransform
        ActionBase.__init__(self, function, *args, **kargs)
        # args[0] is the uid of the object being transformed.
        self.uid = args[0]
        #self.xformMat = Mat4(args[1])
        # Transform in effect before the action ran; set by saveStatus.
        self.origMat = None
    def saveStatus(self):
        obj = self.editor.objectMgr.findObjectById(self.uid)
        if obj:
            # Copy the last recorded transform so undo can restore it.
            self.origMat = Mat4(self.editor.objectMgr.objectsLastXform[obj[OG.OBJ_UID]])
            #self.origMat = Mat4(obj[OG.OBJ_NP].getMat())
    def _do__call__(self, *args, **kargs):
        self.result = ActionBase._do__call__(self, *args, **kargs)
        obj = self.editor.objectMgr.findObjectById(self.uid)
        if obj:
            # Record the applied transform as the object's last known xform.
            self.editor.objectMgr.objectsLastXform[self.uid] = Mat4(obj[OG.OBJ_NP].getMat())
        return self.result
    def undo(self):
        if self.origMat is None:
            print("Can't undo this transform")
        else:
            print("Undo: transformObject")
            obj = self.editor.objectMgr.findObjectById(self.uid)
            if obj:
                # Restore both the node and the last-xform bookkeeping.
                obj[OG.OBJ_NP].setMat(self.origMat)
                self.editor.objectMgr.objectsLastXform[self.uid] = Mat4(self.origMat)
            del self.origMat
            self.origMat = None
class ActionDeselectAll(ActionBase):
    """Undoable action wrapping 'deselect everything'."""

    def __init__(self, editor, *args, **kargs):
        self.editor = editor
        ActionBase.__init__(self, base.direct.deselectAllCB, *args, **kargs)
        self.selectedUIDs = []

    def saveStatus(self):
        # Snapshot the uids of everything selected before the clear.
        for node in base.direct.selected.getSelectedAsList():
            found = self.editor.objectMgr.findObjectByNodePath(node)
            if found:
                self.selectedUIDs.append(found[OG.OBJ_UID])

    def undo(self):
        print("Undo : deselectAll")
        base.direct.deselectAllCB()
        # Restore the previous selection.
        for uid in self.selectedUIDs:
            found = self.editor.objectMgr.findObjectById(uid)
            if found:
                self.editor.select(found[OG.OBJ_NP], fMultiSelect=1, fUndo=0)
        self.selectedUIDs = []
class ActionUpdateObjectProp(ActionBase):
    """ Action class for updating object property """
    def __init__(self, editor, fSelectObject, obj, propName, val, oldVal, function, undoFunc, *args, **kargs):
        self.editor = editor
        # When true, re-select the object after (re)applying the change.
        self.fSelectObject = fSelectObject
        self.obj = obj
        self.propName = propName
        self.newVal = val
        self.oldVal = oldVal
        # Callable that reverses the property change on the scene side.
        self.undoFunc = undoFunc
        ActionBase.__init__(self, function, *args, **kargs)
    def saveStatus(self):
        # Write the new value into the object's property dict up front.
        self.obj[OG.OBJ_PROP][self.propName] = self.newVal
    def redo(self):
        self.result = self._do__call__()#uid=self.uid, xformMat=self.xformMat)
        if self.editor and self.fSelectObject:
            base.direct.select(self.obj[OG.OBJ_NP], fUndo=0)
        return self.result
    def undo(self):
        print("Undo : updateObjectProp")
        # NOTE(review): this truthiness test skips restoring falsy old
        # values such as 0 or '' -- confirm whether `is not None` was meant.
        if self.oldVal:
            self.obj[OG.OBJ_PROP][self.propName] = self.oldVal
            if self.undoFunc:
                self.undoFunc()
        if self.editor and self.fSelectObject:
            base.direct.select(self.obj[OG.OBJ_NP], fUndo=0)
| bsd-3-clause |
simonwittber/middleman | clients/py/serviceprovider.py | 1 | 5656 | #!/usr/bin/env python
import asyncio
import websockets
import io
import uuid
SERVER_KEY = "xyzzy"
class ServiceProvider:
    """Base class for middleman services speaking over one websocket.

    Handlers are discovered by naming convention (see subscribe_to_events):
    ``PUB_<ev>`` methods handle published events, ``REQ_<ev>`` methods
    answer requests, and ``TASK_<name>`` coroutines are started as
    background tasks.
    """

    def init(self, ws, require_uid):
        # Two-step initialisation: service() constructs the instance and
        # calls init() once the websocket handshake has completed.
        self.requests = {}      # request id -> Future resolved by recv_res
        self.pub_handlers = {}  # event name -> PUB_ handler
        self.req_handlers = {}  # event name -> REQ_ handler
        self.ws = ws
        self.public_pub_events = []
        self.require_uid = require_uid

    async def subscribe_to_events(self):
        """Register PUB_/REQ_ handlers, start TASK_ coroutines, announce."""
        prefix = self.__class__.__name__
        for name in dir(self):
            value = getattr(self, name)
            if callable(value):
                ev = name[4:]   # event name after the 4-char PUB_/REQ_ prefix
                if name.startswith("PUB_"):
                    print("Subscribing to External event: " + ev)
                    self.pub_handlers[ev] = value
                if name.startswith("REQ_"):
                    print("Responding to External request: " + ev)
                    self.req_handlers[ev] = value
                if name.startswith("TASK_"):
                    print("Starting Task:", name[5:])
                    asyncio.ensure_future(value())
        await self.epub(prefix)
        await self.ereq(prefix)
        await self.sub(prefix)

    # ----- outgoing protocol frames -----------------------------------

    async def ereq(self, name):
        # Announce that this service answers requests under *name*.
        await self.ws.outbox.put("EREQ "+name+"\n\n")

    async def epub(self, name):
        # Announce that this service publishes events under *name*.
        await self.ws.outbox.put("EPUB "+name+"\n\n")

    async def esub(self, name):
        await self.ws.outbox.put("ESUB "+name+"\n\n")

    async def internal(self, name, headers):
        await self.ws.outbox.put("INT "+name+"\n"+self.headerText(headers)+"\n")

    async def pub(self, name, headers, msg):
        """Publish *msg* on channel *name*."""
        await self.ws.outbox.put("PUB "+name+"\n"+self.headerText(headers)+"\n"+self.encode(msg))

    async def sub(self, name, headers=None):
        await self.ws.outbox.put("SUB "+name+"\n"+self.headerText(headers)+"\n")

    async def uns(self, name, headers=None):
        await self.ws.outbox.put("UNS "+name+"\n"+self.headerText(headers)+"\n")

    async def req(self, name, headers, msg):
        """Send a request and await the correlated response payload."""
        if headers is None:
            headers = {}
        # The response is matched back to this call by a unique "rid".
        request_id = headers["rid"] = uuid.uuid1().hex
        future = self.requests[request_id] = asyncio.Future()
        await self.ws.outbox.put("REQ "+name+"\n"+self.headerText(headers)+"\n"+self.encode(msg))
        # Fixed: an unreachable `return future.result()` used to follow
        # this return statement; it has been removed.
        return await future

    async def res(self, name, headers, msg):
        """Send the response to a previously received request."""
        await self.ws.outbox.put("RES "+name+"\n"+self.headerText(headers)+"\n"+self.encode(msg))

    # ----- incoming dispatch ------------------------------------------

    async def recv_pub(self, name, headers, stream):
        # Route a published event to its PUB_ handler.
        method = headers["cmd"]
        await self.pub_handlers[method](headers, self.decode(stream))

    async def recv_req(self, name, headers, stream):
        # Route a request to its REQ_ handler and return the handler result.
        method = headers["cmd"]
        return await self.req_handlers[method](headers, self.decode(stream))

    async def recv_res(self, name, headers, stream):
        # Resolve the Future created by req() for this response's rid.
        future = self.requests.pop(headers["rid"])
        future.set_result(self.decode(stream))

    def headerText(self, headers):
        """Serialize a header dict as 'key:value' lines ('' for None)."""
        return "" if headers is None else "".join("{}:{}\n".format(*i) for i in headers.items())

    async def handle_incoming(self, stream):
        """Parse one incoming frame and dispatch it by command word."""
        try:
            # First line: "<CMD> <name>".
            header = stream.readline().strip().split(" ")
            cmd = header[0].strip()
            name = header[1].strip()
            # Then "key:value" header lines until a blank line; keys are
            # lower-cased, values may themselves contain ':'.
            headers = {}
            while True:
                header = stream.readline().strip()
                if header == "" or header is None:
                    break
                parts = header.split(":")
                headers[parts[0].strip().lower()] = ":".join(parts[1:]).strip()
            uid = headers.get("uid", "")
            if uid == "":
                uid = None
            try:
                if self.require_uid and uid is None:
                    raise Exception("No UID")
                if cmd == "REQ":
                    result = await self.recv_req(name, headers, stream)
                    await self.res(name, headers, result)
                elif cmd == "PUB":
                    await self.recv_pub(name, headers, stream)
                elif cmd == "RES":
                    await self.recv_res(name, headers, stream)
            except Exception as e:
                # Report handler failures on the sender's MSG channel
                # (keyed by the 'cid' header).
                await self.pub("MSG:"+headers["cid"], dict(cmd="error", type=e.__class__.__name__, msg=str(e)), None)
        except Exception as e:
            print(type(e), e)
            raise
async def handle_outgoing_queue(ws):
    """Drain ws.outbox while the socket is open, terminating each
    message with CRLF '.' CRLF."""
    while ws.open:
        payload = await ws.outbox.get()
        await ws.send(payload + "\r\n.\r\n")
async def service(service_provider_class, require_uid=True):
    """Connect to the middleman server and run a service provider."""
    # Retry the connection every 5 seconds until the server is reachable.
    ws = None
    while ws is None:
        try:
            ws = await websockets.connect('ws://127.0.0.1:8764/')
        except ConnectionRefusedError as e:
            print(e)
            await asyncio.sleep(5)
            ws = None
    # Authenticate with the shared key; the server must answer "MM OK".
    await ws.send(SERVER_KEY)
    ok = await ws.recv()
    if ok != "MM OK":
        raise Exception("Bad Key")
    myConnID = await ws.recv()
    print("My Conn ID: " + myConnID)
    # Outgoing messages are queued here and drained by handle_outgoing_queue.
    ws.outbox = asyncio.Queue()
    send_task = asyncio.ensure_future(handle_outgoing_queue(ws))
    sp = service_provider_class()
    sp.init(ws, require_uid)
    await sp.subscribe_to_events()
    # Dispatch each incoming frame to the provider until the socket closes.
    while True:
        msg = await ws.recv()
        if msg is None:
            break
        if isinstance(msg, bytes):
            msg = msg.decode()
        stream = io.StringIO(msg)
        asyncio.ensure_future(sp.handle_incoming(stream))
    send_task.cancel()
    await ws.close()
def run(service_provider_class, *tasks, require_uid=True):
    """Schedule extra *tasks*, then run the service loop to completion."""
    for task in tasks:
        asyncio.ensure_future(task())
    loop = asyncio.get_event_loop()
    loop.run_until_complete(service(service_provider_class, require_uid=require_uid))
| mit |
Fireblend/chromium-crosswalk | native_client_sdk/src/build_tools/tests/verify_filelist_test.py | 132 | 3854 | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import unittest
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
BUILD_TOOLS_DIR = os.path.dirname(SCRIPT_DIR)
sys.path.append(BUILD_TOOLS_DIR)
import verify_filelist
def Verify(platform, rules_contents, directory_list):
    """Parse *rules_contents* for *platform* and verify *directory_list*."""
    ruleset = verify_filelist.Rules('test', platform, rules_contents)
    ruleset.VerifyDirectoryList(directory_list)
class VerifyFilelistTestCase(unittest.TestCase):
  """Tests for verify_filelist rule parsing and directory verification."""
  def testBasic(self):
    rules = """\
foo/file1
foo/file2
foo/file3
bar/baz/other
"""
    dirlist = ['foo/file1', 'foo/file2', 'foo/file3', 'bar/baz/other']
    Verify('linux', rules, dirlist)

  def testGlob(self):
    # A glob matches files and nested subdirectories alike.
    rules = 'foo/*'
    dirlist = ['foo/file1', 'foo/file2', 'foo/file3/and/subdir']
    Verify('linux', rules, dirlist)

  def testPlatformVar(self):
    rules = 'dir/${PLATFORM}/blah'
    dirlist = ['dir/linux/blah']
    Verify('linux', rules, dirlist)

  def testPlatformVarGlob(self):
    rules = 'dir/${PLATFORM}/*'
    dirlist = ['dir/linux/file1', 'dir/linux/file2']
    Verify('linux', rules, dirlist)

  def testPlatformRule(self):
    # A [platform] prefix restricts a rule to that platform only.
    rules = """\
[linux]dir/linux/only
all/platforms
"""
    linux_dirlist = ['dir/linux/only', 'all/platforms']
    other_dirlist = ['all/platforms']
    Verify('linux', rules, linux_dirlist)
    Verify('mac', rules, other_dirlist)

  def testMultiPlatformRule(self):
    rules = """\
[linux,win]dir/no/macs
all/platforms
"""
    nonmac_dirlist = ['dir/no/macs', 'all/platforms']
    mac_dirlist = ['all/platforms']
    Verify('linux', rules, nonmac_dirlist)
    Verify('win', rules, nonmac_dirlist)
    Verify('mac', rules, mac_dirlist)

  def testPlatformRuleBadPlatform(self):
    rules = '[frob]bad/platform'
    self.assertRaises(verify_filelist.ParseException, Verify,
                      'linux', rules, [])

  def testMissingFile(self):
    rules = """\
foo/file1
foo/missing
"""
    dirlist = ['foo/file1']
    self.assertRaises(verify_filelist.VerifyException, Verify,
                      'linux', rules, dirlist)

  def testExtraFile(self):
    rules = 'foo/file1'
    dirlist = ['foo/file1', 'foo/extra_file']
    self.assertRaises(verify_filelist.VerifyException, Verify,
                      'linux', rules, dirlist)

  def testEmptyGlob(self):
    rules = 'foo/*'
    dirlist = ['foo'] # Directory existing is not enough!
    self.assertRaises(verify_filelist.VerifyException, Verify,
                      'linux', rules, dirlist)

  def testBadGlob(self):
    rules = '*/foo/bar'
    dirlist = []
    self.assertRaises(verify_filelist.ParseException, Verify,
                      'linux', rules, dirlist)

  def testUnknownPlatform(self):
    rules = 'foo'
    dirlist = ['foo']
    for platform in ('linux', 'mac', 'win'):
      Verify(platform, rules, dirlist)
    self.assertRaises(verify_filelist.ParseException, Verify,
                      'foobar', rules, dirlist)

  def testUnexpectedPlatformFile(self):
    rules = '[mac,win]foo/file1'
    dirlist = ['foo/file1']
    self.assertRaises(verify_filelist.VerifyException, Verify,
                      'linux', rules, dirlist)

  def testWindowsPaths(self):
    # Path separator handling differs between Windows and POSIX hosts.
    if os.path.sep != '/':
      rules = 'foo/bar/baz'
      dirlist = ['foo\\bar\\baz']
      Verify('win', rules, dirlist)
    else:
      rules = 'foo/bar/baz\\foo'
      dirlist = ['foo/bar/baz\\foo']
      Verify('linux', rules, dirlist)

  def testNestedGlobs(self):
    # More-specific globs may appear before or after broader ones.
    rules = """\
foo/*
foo/bar/*"""
    dirlist = ['foo/file', 'foo/bar/file']
    Verify('linux', rules, dirlist)
    rules = """\
foo/bar/*
foo/*"""
    dirlist = ['foo/file', 'foo/bar/file']
    Verify('linux', rules, dirlist)
if __name__ == '__main__':
  # Run the tests when executed directly.
  unittest.main()
| bsd-3-clause |
huiren/ece511 | configs/common/cpu2000.py | 48 | 22462 | # Copyright (c) 2006-2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
import os
import sys
from os.path import basename, exists, join as joinpath, normpath
from os.path import isdir, isfile, islink
spec_dist = os.environ.get('M5_CPU2000', '/dist/m5/cpu2000')
def copyfiles(srcdir, dstdir):
    """Mirror srcdir into dstdir, copying only new or changed files.

    Also creates 'input' and 'output' symlinks in dstdir pointing at '.',
    because some SPEC benchmarks expect to be run from one directory up.
    """
    from filecmp import cmp as filecmp
    from shutil import copyfile

    srcdir = normpath(srcdir)
    dstdir = normpath(dstdir)

    if not isdir(dstdir):
        os.mkdir(dstdir)

    for root, dirs, files in os.walk(srcdir):
        root = normpath(root)
        # Path of this directory relative to srcdir.
        prefix = os.path.commonprefix([root, srcdir])
        relroot = root[len(prefix):].lstrip('/')

        for name in dirs:
            subdir = joinpath(dstdir, relroot, name)
            if not isdir(subdir):
                os.mkdir(subdir)

        for name in files:
            src = normpath(joinpath(srcdir, relroot, name))
            dest = normpath(joinpath(dstdir, relroot, name))
            # Skip the copy when an identical file is already in place.
            if not isfile(dest) or not filecmp(src, dest):
                copyfile(src, dest)

    # some of the spec benchmarks expect to be run from one directory up.
    # just create some symlinks that solve the problem
    for link in (joinpath(dstdir, 'input'), joinpath(dstdir, 'output')):
        if not exists(link):
            os.symlink('.', link)
class Benchmark(object):
    """One SPEC CPU2000 benchmark plus a chosen input set.

    Subclasses set class attributes (name, binary, args, output, simpoint)
    and define one method per supported input set (ref/test/train/...).
    Construction locates the binary and the input/output data directories,
    then invokes the input-set method to finish configuration.
    """

    def __init__(self, isa, os, input_set):
        # Fill in defaults for any class attribute the subclass omitted.
        if not hasattr(self.__class__, 'name'):
            self.name = self.__class__.__name__

        if not hasattr(self.__class__, 'binary'):
            self.binary = self.name

        if not hasattr(self.__class__, 'args'):
            self.args = []

        if not hasattr(self.__class__, 'output'):
            self.output = '%s.out' % self.name

        if not hasattr(self.__class__, 'simpoint'):
            self.simpoint = None

        try:
            func = getattr(self.__class__, input_set)
        except AttributeError:
            # Call-form raise: the old `raise E, msg` statement form was
            # Python-2-only syntax; this form works on both 2 and 3.
            raise AttributeError(
                'The benchmark %s does not have the %s input set' %
                (self.name, input_set))

        executable = joinpath(spec_dist, 'binaries', isa, os, self.binary)
        if not isfile(executable):
            raise AttributeError('%s not found' % executable)
        self.executable = executable

        # root of tree for input & output data files
        data_dir = joinpath(spec_dist, 'data', self.name)
        # optional subtree with files shared across input sets
        all_dir = joinpath(data_dir, 'all')
        # dirs for input & output files for this input set
        inputs_dir = joinpath(data_dir, input_set, 'input')
        outputs_dir = joinpath(data_dir, input_set, 'output')
        # keep around which input set was specified
        self.input_set = input_set

        if not isdir(inputs_dir):
            raise AttributeError('%s not found' % inputs_dir)

        self.inputs_dir = [ inputs_dir ]
        if isdir(all_dir):
            self.inputs_dir += [ joinpath(all_dir, 'input') ]
        if isdir(outputs_dir):
            self.outputs_dir = outputs_dir

        # Default stdin/stdout redirection files, when present on disk.
        if not hasattr(self.__class__, 'stdin'):
            self.stdin = joinpath(inputs_dir, '%s.in' % self.name)
            if not isfile(self.stdin):
                self.stdin = None

        if not hasattr(self.__class__, 'stdout'):
            self.stdout = joinpath(outputs_dir, '%s.out' % self.name)
            if not isfile(self.stdout):
                self.stdout = None

        # Let the input-set method apply its specific configuration.
        func(self, isa, os)

    def makeLiveProcessArgs(self, **kwargs):
        """Build the keyword dict for a LiveProcess; kwargs override defaults."""
        # set up default args for LiveProcess object
        process_args = {}
        process_args['cmd'] = [ self.name ] + self.args
        process_args['executable'] = self.executable
        if self.stdin:
            process_args['input'] = self.stdin
        if self.stdout:
            process_args['output'] = self.stdout
        if self.simpoint:
            process_args['simpoint'] = self.simpoint
        # explicit keywords override defaults
        process_args.update(kwargs)

        return process_args

    def makeLiveProcess(self, **kwargs):
        """Create a LiveProcess, staging the input files into its cwd."""
        process_args = self.makeLiveProcessArgs(**kwargs)

        # figure out working directory: use m5's outdir unless
        # overridden by LiveProcess's cwd param
        cwd = process_args.get('cwd')

        if not cwd:
            from m5 import options
            cwd = options.outdir
            process_args['cwd'] = cwd
        if not isdir(cwd):
            os.makedirs(cwd)

        # copy input files to working directory
        for d in self.inputs_dir:
            copyfiles(d, cwd)

        # generate LiveProcess object
        from m5.objects import LiveProcess
        return LiveProcess(**process_args)

    def __str__(self):
        return self.name
class DefaultBenchmark(Benchmark):
    """Benchmark whose standard input sets need no extra configuration."""

    def ref(self, isa, os):
        pass

    def test(self, isa, os):
        pass

    def train(self, isa, os):
        pass
class MinneDefaultBenchmark(DefaultBenchmark):
    """DefaultBenchmark plus no-op smred/mdred/lgred reduced input sets."""

    def smred(self, isa, os):
        pass

    def mdred(self, isa, os):
        pass

    def lgred(self, isa, os):
        pass
# Constant-only benchmark definitions: each class pins the SPEC name,
# number, source language, and simpoint (<interval> * 100M instructions).
class ammp(MinneDefaultBenchmark):
    name = 'ammp'
    number = 188
    lang = 'C'
    simpoint = 108*100E6
class applu(MinneDefaultBenchmark):
    name = 'applu'
    number = 173
    lang = 'F77'
    simpoint = 2179*100E6
class apsi(MinneDefaultBenchmark):
    name = 'apsi'
    number = 301
    lang = 'F77'
    simpoint = 3408*100E6
class art(DefaultBenchmark):
    # 179.art (C): each input set supplies the full scan/train command
    # line; the reference runs live in the art110/art470 subclasses.
    name = 'art'
    number = 179
    lang = 'C'
    def test(self, isa, os):
        self.args = [ '-scanfile', 'c756hel.in',
                      '-trainfile1', 'a10.img',
                      '-stride', '2',
                      '-startx', '134',
                      '-starty', '220',
                      '-endx', '139',
                      '-endy', '225',
                      '-objects', '1' ]
        self.output = 'test.out'
    def train(self, isa, os):
        self.args = [ '-scanfile', 'c756hel.in',
                      '-trainfile1', 'a10.img',
                      '-stride', '2',
                      '-startx', '134',
                      '-starty', '220',
                      '-endx', '184',
                      '-endy', '240',
                      '-objects', '3' ]
        self.output = 'train.out'
    def lgred(self, isa, os):
        self.args = ['-scanfile', 'c756hel.in',
                     '-trainfile1', 'a10.img',
                     '-stride', '5',
                     '-startx', '134',
                     '-starty', '220',
                     '-endx', '184',
                     '-endy', '240',
                     '-objects', '1' ]
        self.output = 'lgred.out'
class art110(art):
    # Reference run #1: scan window starting at (110, 200).
    def ref(self, isa, os):
        self.args = [ '-scanfile', 'c756hel.in',
                      '-trainfile1', 'a10.img',
                      '-trainfile2', 'hc.img',
                      '-stride', '2',
                      '-startx', '110',
                      '-starty', '200',
                      '-endx', '160',
                      '-endy', '240',
                      '-objects', '10' ]
        self.output = 'ref.1.out'
        self.simpoint = 340*100E6
class art470(art):
    # Reference run #2: scan window starting at (470, 140).
    def ref(self, isa, os):
        self.args = [ '-scanfile', 'c756hel.in',
                      '-trainfile1', 'a10.img',
                      '-trainfile2', 'hc.img',
                      '-stride', '2',
                      '-startx', '470',
                      '-starty', '140',
                      '-endx', '520',
                      '-endy', '180',
                      '-objects', '10' ]
        self.output = 'ref.2.out'
        self.simpoint = 365*100E6
class equake(DefaultBenchmark):
    # 183.equake (C); lgred needs no extra configuration.
    name = 'equake'
    number = 183
    lang = 'C'
    simpoint = 812*100E6
    def lgred(self, isa, os): pass
# The Fortran benchmarks below are constant-only definitions.
class facerec(MinneDefaultBenchmark):
    name = 'facerec'
    number = 187
    lang = 'F'
    simpoint = 375*100E6
class fma3d(MinneDefaultBenchmark):
    name = 'fma3d'
    number = 191
    lang = 'F'
    simpoint = 2541*100E6
class galgel(MinneDefaultBenchmark):
    name = 'galgel'
    number = 178
    lang = 'F'
    simpoint = 2491*100E6
class lucas(MinneDefaultBenchmark):
    name = 'lucas'
    number = 189
    lang = 'F'
    simpoint = 545*100E6
class mesa(Benchmark):
    # 177.mesa (C): the input sets differ only in the frame count, so the
    # methods share the (name-mangled) __set_args helper.
    name = 'mesa'
    number = 177
    lang = 'C'
    stdin = None
    def __set_args(self, frames):
        self.args = [ '-frames', frames, '-meshfile', '%s.in' % self.name,
                      '-ppmfile', '%s.ppm' % self.name ]
    def test(self, isa, os):
        self.__set_args('10')
    def train(self, isa, os):
        self.__set_args('500')
    def ref(self, isa, os):
        self.__set_args('1000')
        self.simpoint = 1135*100E6
    def lgred(self, isa, os):
        self.__set_args('1')
class mgrid(MinneDefaultBenchmark):
    name = 'mgrid'
    number = 172
    lang = 'F77'
    simpoint = 3292*100E6
class sixtrack(DefaultBenchmark):
    # lgred needs no extra configuration.
    name = 'sixtrack'
    number = 200
    lang = 'F77'
    simpoint = 3043*100E6
    def lgred(self, isa, os): pass
class swim(MinneDefaultBenchmark):
    name = 'swim'
    number = 171
    lang = 'F77'
    simpoint = 2079*100E6
class wupwise(DefaultBenchmark):
    # lgred needs no extra configuration.
    name = 'wupwise'
    number = 168
    lang = 'F77'
    simpoint = 3237*100E6
    def lgred(self, isa, os): pass
class bzip2(DefaultBenchmark):
    # 256.bzip2 (C); each subclass picks the ref input file and simpoint.
    name = 'bzip2'
    number = 256
    lang = 'C'
    def test(self, isa, os):
        self.args = [ 'input.random' ]
    def train(self, isa, os):
        self.args = [ 'input.compressed' ]
class bzip2_source(bzip2):
    def ref(self, isa, os):
        self.simpoint = 977*100E6
        self.args = [ 'input.source', '58' ]
    def lgred(self, isa, os):
        self.args = [ 'input.source', '1' ]
class bzip2_graphic(bzip2):
    def ref(self, isa, os):
        self.simpoint = 718*100E6
        self.args = [ 'input.graphic', '58' ]
    def lgred(self, isa, os):
        self.args = [ 'input.graphic', '1' ]
class bzip2_program(bzip2):
    def ref(self, isa, os):
        self.simpoint = 458*100E6
        self.args = [ 'input.program', '58' ]
    def lgred(self, isa, os):
        self.args = [ 'input.program', '1' ]
class crafty(MinneDefaultBenchmark):
name = 'crafty'
number = 186
lang = 'C'
simpoint = 774*100E6
class eon(MinneDefaultBenchmark):
name = 'eon'
number = 252
lang = 'CXX'
stdin = None
class eon_kajiya(eon):
args = [ 'chair.control.kajiya', 'chair.camera', 'chair.surfaces',
'chair.kajiya.ppm', 'ppm', 'pixels_out.kajiya']
output = 'kajiya_log.out'
class eon_cook(eon):
args = [ 'chair.control.cook', 'chair.camera', 'chair.surfaces',
'chair.cook.ppm', 'ppm', 'pixels_out.cook' ]
output = 'cook_log.out'
class eon_rushmeier(eon):
args = [ 'chair.control.rushmeier', 'chair.camera', 'chair.surfaces',
'chair.rushmeier.ppm', 'ppm', 'pixels_out.rushmeier' ]
output = 'rushmeier_log.out'
simpoint = 403*100E6
class gap(DefaultBenchmark):
    """254.gap: input sets differ only in the '-m' size argument."""
    name = 'gap'
    number = 254
    lang = 'C'
    def __set_args(self, size):
        self.args = [ '-l', './', '-q', '-m', size ]
    def test(self, isa, os):
        self.__set_args('64M')
    def train(self, isa, os):
        self.__set_args('128M')
    def ref(self, isa, os):
        self.__set_args('192M')
        self.simpoint = 674*100E6
    def lgred(self, isa, os):
        self.__set_args('64M')
    def mdred(self, isa, os):
        self.__set_args('64M')
    def smred(self, isa, os):
        self.__set_args('64M')
class gcc(DefaultBenchmark):
    """176.gcc base class: compile a preprocessed .i file into assembly.
    Subclasses supply the reference translation unit."""
    name = 'gcc'
    number = 176
    lang = 'C'
    def test(self, isa, os):
        self.args = [ 'cccp.i', '-o', 'cccp.s' ]
    def train(self, isa, os):
        self.args = [ 'cp-decl.i', '-o', 'cp-decl.s' ]
    def smred(self, isa, os):
        self.args = [ 'c-iterate.i', '-o', 'c-iterate.s' ]
    def mdred(self, isa, os):
        self.args = [ 'rdlanal.i', '-o', 'rdlanal.s' ]
    def lgred(self, isa, os):
        # lgred reuses the train input.
        self.args = [ 'cp-decl.i', '-o', 'cp-decl.s' ]
class gcc_166(gcc):
    def ref(self, isa, os):
        self.simpoint = 389*100E6
        self.args = [ '166.i', '-o', '166.s' ]
class gcc_200(gcc):
    def ref(self, isa, os):
        self.simpoint = 736*100E6
        self.args = [ '200.i', '-o', '200.s' ]
class gcc_expr(gcc):
    def ref(self, isa, os):
        self.simpoint = 36*100E6
        self.args = [ 'expr.i', '-o', 'expr.s' ]
class gcc_integrate(gcc):
    def ref(self, isa, os):
        self.simpoint = 4*100E6
        self.args = [ 'integrate.i', '-o', 'integrate.s' ]
class gcc_scilab(gcc):
    def ref(self, isa, os):
        self.simpoint = 207*100E6
        self.args = [ 'scilab.i', '-o', 'scilab.s' ]
class gzip(DefaultBenchmark):
    """164.gzip base class; subclasses choose the reference input file.
    NOTE: the class name shadows the stdlib 'gzip' module inside this file."""
    name = 'gzip'
    number = 164
    lang = 'C'
    def test(self, isa, os):
        self.args = [ 'input.compressed', '2' ]
    def train(self, isa, os):
        self.args = [ 'input.combined', '32' ]
class gzip_source(gzip):
    def ref(self, isa, os):
        self.simpoint = 334*100E6
        self.args = [ 'input.source', '1' ]
    def smred(self, isa, os):
        self.args = [ 'input.source', '1' ]
    def mdred(self, isa, os):
        self.args = [ 'input.source', '1' ]
    def lgred(self, isa, os):
        self.args = [ 'input.source', '1' ]
class gzip_log(gzip):
    def ref(self, isa, os):
        self.simpoint = 265*100E6
        self.args = [ 'input.log', '60' ]
    def smred(self, isa, os):
        self.args = [ 'input.log', '1' ]
    def mdred(self, isa, os):
        self.args = [ 'input.log', '1' ]
    def lgred(self, isa, os):
        self.args = [ 'input.log', '1' ]
class gzip_graphic(gzip):
    def ref(self, isa, os):
        self.simpoint = 653*100E6
        self.args = [ 'input.graphic', '60' ]
    def smred(self, isa, os):
        self.args = [ 'input.graphic', '1' ]
    def mdred(self, isa, os):
        self.args = [ 'input.graphic', '1' ]
    def lgred(self, isa, os):
        self.args = [ 'input.graphic', '1' ]
class gzip_random(gzip):
    def ref(self, isa, os):
        self.simpoint = 623*100E6
        self.args = [ 'input.random', '60' ]
    def smred(self, isa, os):
        self.args = [ 'input.random', '1' ]
    def mdred(self, isa, os):
        self.args = [ 'input.random', '1' ]
    def lgred(self, isa, os):
        self.args = [ 'input.random', '1' ]
class gzip_program(gzip):
    def ref(self, isa, os):
        self.simpoint = 1189*100E6
        self.args = [ 'input.program', '60' ]
    def smred(self, isa, os):
        self.args = [ 'input.program', '1' ]
    def mdred(self, isa, os):
        self.args = [ 'input.program', '1' ]
    def lgred(self, isa, os):
        self.args = [ 'input.program', '1' ]
class mcf(MinneDefaultBenchmark):
    """181.mcf (C); fixed argument list for every input set."""
    name = 'mcf'
    number = 181
    lang = 'C'
    args = [ 'mcf.in' ]
    simpoint = 553*100E6
class parser(MinneDefaultBenchmark):
    """197.parser (C); fixed argument list for every input set."""
    name = 'parser'
    number = 197
    lang = 'C'
    args = [ '2.1.dict', '-batch' ]
    simpoint = 1146*100E6
class perlbmk(DefaultBenchmark):
    """253.perlbmk base class; subclasses run different Perl scripts."""
    name = 'perlbmk'
    number = 253
    lang = 'C'
    def test(self, isa, os):
        self.args = [ '-I.', '-I', 'lib', 'test.pl' ]
        self.stdin = 'test.in'
class perlbmk_diffmail(perlbmk):
    def ref(self, isa, os):
        self.simpoint = 141*100E6
        self.args = [ '-I', 'lib', 'diffmail.pl', '2', '550', '15', '24',
                      '23', '100' ]
    def train(self, isa, os):
        self.args = [ '-I', 'lib', 'diffmail.pl', '2', '350', '15', '24',
                      '23', '150' ]
class perlbmk_scrabbl(perlbmk):
    # Train-only variant: no ref input, and it is excluded from 'all' below.
    def train(self, isa, os):
        self.args = [ '-I.', '-I', 'lib', 'scrabbl.pl' ]
        self.stdin = 'scrabbl.in'
class perlbmk_makerand(perlbmk):
    def ref(self, isa, os):
        self.simpoint = 11*100E6
        self.args = [ '-I', 'lib', 'makerand.pl' ]
    def lgred(self, isa, os):
        self.args = [ '-I.', '-I', 'lib', 'lgred.makerand.pl' ]
    def mdred(self, isa, os):
        self.args = [ '-I.', '-I', 'lib', 'mdred.makerand.pl' ]
    def smred(self, isa, os):
        self.args = [ '-I.', '-I', 'lib', 'smred.makerand.pl' ]
class perlbmk_perfect(perlbmk):
    def ref(self, isa, os):
        self.simpoint = 5*100E6
        self.args = [ '-I', 'lib', 'perfect.pl', 'b', '3', 'm', '4' ]
    def train(self, isa, os):
        self.args = [ '-I', 'lib', 'perfect.pl', 'b', '3' ]
class perlbmk_splitmail1(perlbmk):
    def ref(self, isa, os):
        self.simpoint = 405*100E6
        self.args = [ '-I', 'lib', 'splitmail.pl', '850', '5', '19',
                      '18', '1500' ]
class perlbmk_splitmail2(perlbmk):
    # No simpoint is set for splitmail variants 2-4.
    def ref(self, isa, os):
        self.args = [ '-I', 'lib', 'splitmail.pl', '704', '12', '26',
                      '16', '836' ]
class perlbmk_splitmail3(perlbmk):
    def ref(self, isa, os):
        self.args = [ '-I', 'lib', 'splitmail.pl', '535', '13', '25',
                      '24', '1091' ]
class perlbmk_splitmail4(perlbmk):
    def ref(self, isa, os):
        self.args = [ '-I', 'lib', 'splitmail.pl', '957', '12', '23',
                      '26', '1014' ]
class twolf(Benchmark):
    """300.twolf: every input set is a single positional argument naming it."""
    name = 'twolf'
    number = 300
    lang = 'C'
    stdin = None
    def test(self, isa, os):
        self.args = [ 'test' ]
    def train(self, isa, os):
        self.args = [ 'train' ]
    def ref(self, isa, os):
        self.simpoint = 1066*100E6
        self.args = [ 'ref' ]
    def smred(self, isa, os):
        self.args = [ 'smred' ]
    def mdred(self, isa, os):
        self.args = [ 'mdred' ]
    def lgred(self, isa, os):
        self.args = [ 'lgred' ]
class vortex(Benchmark):
    """255.vortex base class: the input database file is endian-specific,
    so the file name prefix is chosen from the ISA at construction time.
    NOTE: this file targets Python 2 (comma-style raise below)."""
    name = 'vortex'
    number = 255
    lang = 'C'
    stdin = None
    def __init__(self, isa, os, input_set):
        # Pick the endianness of the pre-built input database from the ISA.
        if (isa in ('alpha', 'arm', 'thumb', 'aarch64')):
            self.endian = 'lendian'
        elif (isa == 'sparc' or isa == 'sparc32'):
            self.endian = 'bendian'
        else:
            raise AttributeError, "unknown ISA %s" % isa
        super(vortex, self).__init__(isa, os, input_set)
    def test(self, isa, os):
        self.args = [ '%s.raw' % self.endian ]
        self.output = 'vortex.out'
    def train(self, isa, os):
        self.args = [ '%s.raw' % self.endian ]
        self.output = 'vortex.out'
    def smred(self, isa, os):
        self.args = [ '%s.raw' % self.endian ]
        self.output = 'vortex.out'
    def mdred(self, isa, os):
        self.args = [ '%s.raw' % self.endian ]
        self.output = 'vortex.out'
    def lgred(self, isa, os):
        self.args = [ '%s.raw' % self.endian ]
        self.output = 'vortex.out'
class vortex1(vortex):
    # Reference run #1; ref is the only input set defined per subclass.
    def ref(self, isa, os):
        self.args = [ '%s1.raw' % self.endian ]
        self.output = 'vortex1.out'
        self.simpoint = 271*100E6
class vortex2(vortex):
    def ref(self, isa, os):
        self.simpoint = 1024*100E6
        self.args = [ '%s2.raw' % self.endian ]
        self.output = 'vortex2.out'
class vortex3(vortex):
    def ref(self, isa, os):
        self.simpoint = 564*100E6
        self.args = [ '%s3.raw' % self.endian ]
        self.output = 'vortex3.out'
class vpr(MinneDefaultBenchmark):
    """175.vpr base class; place/route variants below supply args."""
    name = 'vpr'
    number = 175
    lang = 'C'
    # not sure about vpr minnespec place.in
class vpr_place(vpr):
    args = [ 'net.in', 'arch.in', 'place.out', 'dum.out', '-nodisp',
             '-place_only', '-init_t', '5', '-exit_t', '0.005',
             '-alpha_t', '0.9412', '-inner_num', '2' ]
    output = 'place_log.out'
class vpr_route(vpr):
    # Only the route variant defines a simulation point.
    simpoint = 476*100E6
    args = [ 'net.in', 'arch.in', 'place.in', 'route.out', '-nodisp',
             '-route_only', '-route_chan_width', '15',
             '-pres_fac_mult', '2', '-acc_fac', '1',
             '-first_iter_pres_fac', '4', '-initial_pres_fac', '8' ]
    output = 'route_log.out'
# Every benchmark configuration with a complete ref input set.
# (perlbmk_scrabbl is train-only and the bzip2/gzip/gcc/eon/vortex/vpr base
# classes are abstract, so they are excluded.  NOTE: shadows builtin all().)
all = [ ammp, applu, apsi, art, art110, art470, equake, facerec, fma3d, galgel,
        lucas, mesa, mgrid, sixtrack, swim, wupwise, bzip2_source,
        bzip2_graphic, bzip2_program, crafty, eon_kajiya, eon_cook,
        eon_rushmeier, gap, gcc_166, gcc_200, gcc_expr, gcc_integrate,
        gcc_scilab, gzip_source, gzip_log, gzip_graphic, gzip_random,
        gzip_program, mcf, parser, perlbmk_diffmail, perlbmk_makerand,
        perlbmk_perfect, perlbmk_splitmail1, perlbmk_splitmail2,
        perlbmk_splitmail3, perlbmk_splitmail4, twolf, vortex1, vortex2,
        vortex3, vpr_place, vpr_route ]
__all__ = [ x.__name__ for x in all ]
if __name__ == '__main__':
    # Smoke test (Python 2 print statements): instantiate every benchmark
    # for each input set and dump the process arguments it would produce.
    from pprint import pprint
    for bench in all:
        for input_set in 'ref', 'test', 'train':
            print 'class: %s' % bench.__name__
            x = bench('alpha', 'tru64', input_set)
            print '%s: %s' % (x, input_set)
            pprint(x.makeLiveProcessArgs())
            print
| bsd-3-clause |
ridfrustum/lettuce | tests/integration/lib/Django-1.3/django/contrib/admin/widgets.py | 156 | 12061 | """
Form Widget classes specific to the Django admin site.
"""
import django.utils.copycompat as copy
from django import forms
from django.forms.widgets import RadioFieldRenderer
from django.forms.util import flatatt
from django.utils.html import escape
from django.utils.text import truncate_words
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
from django.utils.encoding import force_unicode
from django.conf import settings
from django.core.urlresolvers import reverse, NoReverseMatch
class FilteredSelectMultiple(forms.SelectMultiple):
    """
    A SelectMultiple with a JavaScript filter interface.

    Note that the resulting JavaScript assumes that the jsi18n
    catalog has been loaded in the page
    """
    class Media:
        js = (settings.ADMIN_MEDIA_PREFIX + "js/core.js",
              settings.ADMIN_MEDIA_PREFIX + "js/SelectBox.js",
              settings.ADMIN_MEDIA_PREFIX + "js/SelectFilter2.js")
    def __init__(self, verbose_name, is_stacked, attrs=None, choices=()):
        # verbose_name: label the JS filter displays for this field.
        # is_stacked: whether the JS renders the two boxes stacked.
        self.verbose_name = verbose_name
        self.is_stacked = is_stacked
        super(FilteredSelectMultiple, self).__init__(attrs, choices)
    def render(self, name, value, attrs=None, choices=()):
        if attrs is None: attrs = {}
        attrs['class'] = 'selectfilter'
        # NOTE(review): no separator is added, yielding class
        # "selectfilterstacked" -- presumably the JS hook expects exactly
        # that; confirm against SelectFilter2.js before changing.
        if self.is_stacked: attrs['class'] += 'stacked'
        output = [super(FilteredSelectMultiple, self).render(name, value, attrs, choices)]
        output.append(u'<script type="text/javascript">addEvent(window, "load", function(e) {')
        # TODO: "id_" is hard-coded here. This should instead use the correct
        # API to determine the ID dynamically.
        output.append(u'SelectFilter.init("id_%s", "%s", %s, "%s"); });</script>\n' % \
            (name, self.verbose_name.replace('"', '\\"'), int(self.is_stacked), settings.ADMIN_MEDIA_PREFIX))
        return mark_safe(u''.join(output))
class AdminDateWidget(forms.DateInput):
    """DateInput styled for the admin: vDateField CSS class plus the
    calendar/DateTimeShortcuts JavaScript media."""
    class Media:
        js = (settings.ADMIN_MEDIA_PREFIX + "js/calendar.js",
              settings.ADMIN_MEDIA_PREFIX + "js/admin/DateTimeShortcuts.js")
    def __init__(self, attrs=None, format=None):
        # Fix: the old signature used a mutable `{}` default and then
        # silently discarded any caller-supplied attrs.  Merge them over the
        # admin defaults instead, matching AdminTextareaWidget and the other
        # Admin*Widget classes at the bottom of this module.
        final_attrs = {'class': 'vDateField', 'size': '10'}
        if attrs is not None:
            final_attrs.update(attrs)
        super(AdminDateWidget, self).__init__(attrs=final_attrs, format=format)
class AdminTimeWidget(forms.TimeInput):
    """TimeInput styled for the admin: vTimeField CSS class plus the
    calendar/DateTimeShortcuts JavaScript media."""
    class Media:
        js = (settings.ADMIN_MEDIA_PREFIX + "js/calendar.js",
              settings.ADMIN_MEDIA_PREFIX + "js/admin/DateTimeShortcuts.js")
    def __init__(self, attrs=None, format=None):
        # Fix: the old signature used a mutable `{}` default and then
        # silently discarded any caller-supplied attrs.  Merge them over the
        # admin defaults instead, matching AdminTextareaWidget and the other
        # Admin*Widget classes at the bottom of this module.
        final_attrs = {'class': 'vTimeField', 'size': '8'}
        if attrs is not None:
            final_attrs.update(attrs)
        super(AdminTimeWidget, self).__init__(attrs=final_attrs, format=format)
class AdminSplitDateTime(forms.SplitDateTimeWidget):
    """
    A SplitDateTime Widget that has some admin-specific styling.
    """
    def __init__(self, attrs=None):
        widgets = [AdminDateWidget, AdminTimeWidget]
        # Note that we're calling MultiWidget, not SplitDateTimeWidget, because
        # we want to define widgets.
        forms.MultiWidget.__init__(self, widgets, attrs)
    def format_output(self, rendered_widgets):
        # Wrap the date and time sub-widgets with translated labels.
        return mark_safe(u'<p class="datetime">%s %s<br />%s %s</p>' % \
            (_('Date:'), rendered_widgets[0], _('Time:'), rendered_widgets[1]))
class AdminRadioFieldRenderer(RadioFieldRenderer):
    """Renderer that emits the radio inputs inside a <ul> for admin CSS."""
    def render(self):
        """Outputs a <ul> for this set of radio fields."""
        return mark_safe(u'<ul%s>\n%s\n</ul>' % (
            flatatt(self.attrs),
            u'\n'.join([u'<li>%s</li>' % force_unicode(w) for w in self]))
        )
class AdminRadioSelect(forms.RadioSelect):
    # RadioSelect variant using the <ul>-based admin renderer.
    renderer = AdminRadioFieldRenderer
class AdminFileWidget(forms.ClearableFileInput):
    """ClearableFileInput whose templates are wrapped in admin CSS hooks."""
    template_with_initial = (u'<p class="file-upload">%s</p>'
                            % forms.ClearableFileInput.template_with_initial)
    template_with_clear = (u'<span class="clearable-file-input">%s</span>'
                          % forms.ClearableFileInput.template_with_clear)
def url_params_from_lookup_dict(lookups):
    """
    Converts the type of lookups specified in a ForeignKey limit_choices_to
    attribute to a dictionary of query parameters
    """
    # Anything without an .items() mapping interface yields no parameters.
    if not (lookups and hasattr(lookups, 'items')):
        return {}
    params = {}
    for field, constraint in lookups.items():
        if isinstance(constraint, list):
            # Lists become a comma-separated value string.
            encoded = u','.join([str(item) for item in constraint])
        elif isinstance(constraint, bool):
            # See django.db.fields.BooleanField.get_prep_lookup
            encoded = ('0', '1')[constraint]
        else:
            encoded = unicode(constraint)
        params[field] = encoded
    return params
class ForeignKeyRawIdWidget(forms.TextInput):
    """
    A Widget for displaying ForeignKeys in the "raw_id" interface rather than
    in a <select> box.
    """
    def __init__(self, rel, attrs=None, using=None):
        # rel: the ForeignKey relation descriptor; using: optional DB alias.
        self.rel = rel
        self.db = using
        super(ForeignKeyRawIdWidget, self).__init__(attrs)
    def render(self, name, value, attrs=None):
        if attrs is None:
            attrs = {}
        # Relative URL of the related model's changelist page.
        related_url = '../../../%s/%s/' % (self.rel.to._meta.app_label, self.rel.to._meta.object_name.lower())
        params = self.url_parameters()
        if params:
            url = u'?' + u'&'.join([u'%s=%s' % (k, v) for k, v in params.items()])
        else:
            url = u''
        if "class" not in attrs:
            attrs['class'] = 'vForeignKeyRawIdAdminField' # The JavaScript looks for this hook.
        output = [super(ForeignKeyRawIdWidget, self).render(name, value, attrs)]
        # TODO: "id_" is hard-coded here. This should instead use the correct
        # API to determine the ID dynamically.
        output.append(u'<a href="%s%s" class="related-lookup" id="lookup_id_%s" onclick="return showRelatedObjectLookupPopup(this);"> ' % \
            (related_url, url, name))
        output.append(u'<img src="%simg/admin/selector-search.gif" width="16" height="16" alt="%s" /></a>' % (settings.ADMIN_MEDIA_PREFIX, _('Lookup')))
        if value:
            output.append(self.label_for_value(value))
        return mark_safe(u''.join(output))
    def base_url_parameters(self):
        # Query parameters derived from the field's limit_choices_to.
        return url_params_from_lookup_dict(self.rel.limit_choices_to)
    def url_parameters(self):
        from django.contrib.admin.views.main import TO_FIELD_VAR
        params = self.base_url_parameters()
        params.update({TO_FIELD_VAR: self.rel.get_related_field().name})
        return params
    def label_for_value(self, value):
        # Show a short text label for the currently selected related object;
        # any lookup failure degrades to an empty label.
        key = self.rel.get_related_field().name
        try:
            obj = self.rel.to._default_manager.using(self.db).get(**{key: value})
            return ' <strong>%s</strong>' % escape(truncate_words(obj, 14))
        except (ValueError, self.rel.to.DoesNotExist):
            return ''
class ManyToManyRawIdWidget(ForeignKeyRawIdWidget):
    """
    A Widget for displaying ManyToMany ids in the "raw_id" interface rather than
    in a <select multiple> box.
    """
    def render(self, name, value, attrs=None):
        if attrs is None:
            attrs = {}
        attrs['class'] = 'vManyToManyRawIdAdminField'
        if value:
            # Multiple PKs are flattened into one comma-separated string.
            value = ','.join([force_unicode(v) for v in value])
        else:
            value = ''
        return super(ManyToManyRawIdWidget, self).render(name, value, attrs)
    def url_parameters(self):
        # Unlike the FK widget, no TO_FIELD_VAR parameter is appended.
        return self.base_url_parameters()
    def label_for_value(self, value):
        # Never render a text label for the (possibly many) selected objects.
        return ''
    def value_from_datadict(self, data, files, name):
        value = data.get(name)
        if value:
            return value.split(',')
        # Implicitly returns None when the field is empty or missing.
    def _has_changed(self, initial, data):
        # Compare PK lists element-wise as unicode; None counts as empty.
        if initial is None:
            initial = []
        if data is None:
            data = []
        if len(initial) != len(data):
            return True
        for pk1, pk2 in zip(initial, data):
            if force_unicode(pk1) != force_unicode(pk2):
                return True
        return False
class RelatedFieldWidgetWrapper(forms.Widget):
    """
    This class is a wrapper to a given widget to add the add icon for the
    admin interface.
    """
    def __init__(self, widget, rel, admin_site, can_add_related=None):
        # Mirror the wrapped widget's public attributes so this wrapper can
        # stand in for it transparently.
        self.is_hidden = widget.is_hidden
        self.needs_multipart_form = widget.needs_multipart_form
        self.attrs = widget.attrs
        self.choices = widget.choices
        self.widget = widget
        self.rel = rel
        # Backwards compatible check for whether a user can add related
        # objects.
        if can_add_related is None:
            can_add_related = rel.to in admin_site._registry
        self.can_add_related = can_add_related
        # so we can check if the related object is registered with this AdminSite
        self.admin_site = admin_site
    def __deepcopy__(self, memo):
        # Deep-copy only the wrapped widget; rel/admin_site stay shared.
        obj = copy.copy(self)
        obj.widget = copy.deepcopy(self.widget, memo)
        obj.attrs = self.widget.attrs
        memo[id(self)] = obj
        return obj
    def _media(self):
        return self.widget.media
    media = property(_media)
    def render(self, name, value, *args, **kwargs):
        rel_to = self.rel.to
        info = (rel_to._meta.app_label, rel_to._meta.object_name.lower())
        try:
            related_url = reverse('admin:%s_%s_add' % info, current_app=self.admin_site.name)
        except NoReverseMatch:
            # Fall back to the path-based admin URL layout.
            info = (self.admin_site.root_path, rel_to._meta.app_label, rel_to._meta.object_name.lower())
            related_url = '%s%s/%s/add/' % info
        self.widget.choices = self.choices
        output = [self.widget.render(name, value, *args, **kwargs)]
        if self.can_add_related:
            # TODO: "id_" is hard-coded here. This should instead use the correct
            # API to determine the ID dynamically.
            output.append(u'<a href="%s" class="add-another" id="add_id_%s" onclick="return showAddAnotherPopup(this);"> ' % \
                (related_url, name))
            output.append(u'<img src="%simg/admin/icon_addlink.gif" width="10" height="10" alt="%s"/></a>' % (settings.ADMIN_MEDIA_PREFIX, _('Add Another')))
        return mark_safe(u''.join(output))
    def build_attrs(self, extra_attrs=None, **kwargs):
        "Helper function for building an attribute dictionary."
        # NOTE(review): extra_attrs is accepted but forwarded as None --
        # looks like an upstream defect; kept as-is to preserve behavior.
        self.attrs = self.widget.build_attrs(extra_attrs=None, **kwargs)
        return self.attrs
    def value_from_datadict(self, data, files, name):
        return self.widget.value_from_datadict(data, files, name)
    def _has_changed(self, initial, data):
        return self.widget._has_changed(initial, data)
    def id_for_label(self, id_):
        return self.widget.id_for_label(id_)
class AdminTextareaWidget(forms.Textarea):
    """Textarea tagged with the admin's vLargeTextField CSS class."""
    def __init__(self, attrs=None):
        merged = {'class': 'vLargeTextField'}
        merged.update(attrs or {})
        super(AdminTextareaWidget, self).__init__(attrs=merged)
class AdminTextInputWidget(forms.TextInput):
    """Text input tagged with the admin's vTextField CSS class."""
    def __init__(self, attrs=None):
        merged = {'class': 'vTextField'}
        merged.update(attrs or {})
        super(AdminTextInputWidget, self).__init__(attrs=merged)
class AdminURLFieldWidget(forms.TextInput):
    """Text input tagged with the admin's vURLField CSS class."""
    def __init__(self, attrs=None):
        merged = {'class': 'vURLField'}
        merged.update(attrs or {})
        super(AdminURLFieldWidget, self).__init__(attrs=merged)
class AdminIntegerFieldWidget(forms.TextInput):
    """Text input tagged with the admin's vIntegerField CSS class."""
    def __init__(self, attrs=None):
        merged = {'class': 'vIntegerField'}
        merged.update(attrs or {})
        super(AdminIntegerFieldWidget, self).__init__(attrs=merged)
class AdminCommaSeparatedIntegerFieldWidget(forms.TextInput):
    """Text input tagged with the admin's vCommaSeparatedIntegerField class."""
    def __init__(self, attrs=None):
        merged = {'class': 'vCommaSeparatedIntegerField'}
        merged.update(attrs or {})
        super(AdminCommaSeparatedIntegerFieldWidget, self).__init__(attrs=merged)
| gpl-3.0 |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.2/Lib/plat-sunos5/TYPES.py | 3 | 5807 | # Generated by h2py from /usr/include/sys/types.h
# Included from sys/isa_defs.h
_CHAR_ALIGNMENT = 1
_SHORT_ALIGNMENT = 2
_INT_ALIGNMENT = 4
_LONG_ALIGNMENT = 8
_LONG_LONG_ALIGNMENT = 8
_DOUBLE_ALIGNMENT = 8
_LONG_DOUBLE_ALIGNMENT = 16
_POINTER_ALIGNMENT = 8
_MAX_ALIGNMENT = 16
_ALIGNMENT_REQUIRED = 1
_CHAR_ALIGNMENT = 1
_SHORT_ALIGNMENT = 2
_INT_ALIGNMENT = 4
_LONG_ALIGNMENT = 4
_LONG_LONG_ALIGNMENT = 4
_DOUBLE_ALIGNMENT = 4
_LONG_DOUBLE_ALIGNMENT = 4
_POINTER_ALIGNMENT = 4
_MAX_ALIGNMENT = 4
_ALIGNMENT_REQUIRED = 0
_CHAR_ALIGNMENT = 1
_SHORT_ALIGNMENT = 2
_INT_ALIGNMENT = 4
_LONG_LONG_ALIGNMENT = 8
_DOUBLE_ALIGNMENT = 8
_ALIGNMENT_REQUIRED = 1
_LONG_ALIGNMENT = 4
_LONG_DOUBLE_ALIGNMENT = 8
_POINTER_ALIGNMENT = 4
_MAX_ALIGNMENT = 8
_LONG_ALIGNMENT = 8
_LONG_DOUBLE_ALIGNMENT = 16
_POINTER_ALIGNMENT = 8
_MAX_ALIGNMENT = 16
# Included from sys/feature_tests.h
_POSIX_C_SOURCE = 1
_LARGEFILE64_SOURCE = 1
_LARGEFILE_SOURCE = 1
_FILE_OFFSET_BITS = 64
_FILE_OFFSET_BITS = 32
_POSIX_C_SOURCE = 199506L
_POSIX_PTHREAD_SEMANTICS = 1
_XOPEN_VERSION = 500
_XOPEN_VERSION = 4
_XOPEN_VERSION = 3
# Included from sys/machtypes.h
# Included from sys/inttypes.h
# Included from sys/int_types.h
# Included from sys/int_limits.h
INT8_MAX = (127)
INT16_MAX = (32767)
INT32_MAX = (2147483647)
INTMAX_MAX = INT32_MAX
INT_LEAST8_MAX = INT8_MAX
INT_LEAST16_MAX = INT16_MAX
INT_LEAST32_MAX = INT32_MAX
INT8_MIN = (-128)
INT16_MIN = (-32767-1)
INT32_MIN = (-2147483647-1)
INTMAX_MIN = INT32_MIN
INT_LEAST8_MIN = INT8_MIN
INT_LEAST16_MIN = INT16_MIN
INT_LEAST32_MIN = INT32_MIN
# Included from sys/int_const.h
def INT8_C(c): return (c)
def INT16_C(c): return (c)
def INT32_C(c): return (c)
def INT64_C(c): return __CONCAT__(c,l)
def INT64_C(c): return __CONCAT__(c,ll)
def UINT8_C(c): return __CONCAT__(c,u)
def UINT16_C(c): return __CONCAT__(c,u)
def UINT32_C(c): return __CONCAT__(c,u)
def UINT64_C(c): return __CONCAT__(c,ul)
def UINT64_C(c): return __CONCAT__(c,ull)
def INTMAX_C(c): return __CONCAT__(c,l)
def UINTMAX_C(c): return __CONCAT__(c,ul)
def INTMAX_C(c): return __CONCAT__(c,ll)
def UINTMAX_C(c): return __CONCAT__(c,ull)
def INTMAX_C(c): return (c)
def UINTMAX_C(c): return (c)
# Included from sys/int_fmtio.h
PRId8 = "d"
PRId16 = "d"
PRId32 = "d"
PRId64 = "ld"
PRId64 = "lld"
PRIdLEAST8 = "d"
PRIdLEAST16 = "d"
PRIdLEAST32 = "d"
PRIdLEAST64 = "ld"
PRIdLEAST64 = "lld"
PRIi8 = "i"
PRIi16 = "i"
PRIi32 = "i"
PRIi64 = "li"
PRIi64 = "lli"
PRIiLEAST8 = "i"
PRIiLEAST16 = "i"
PRIiLEAST32 = "i"
PRIiLEAST64 = "li"
PRIiLEAST64 = "lli"
PRIo8 = "o"
PRIo16 = "o"
PRIo32 = "o"
PRIo64 = "lo"
PRIo64 = "llo"
PRIoLEAST8 = "o"
PRIoLEAST16 = "o"
PRIoLEAST32 = "o"
PRIoLEAST64 = "lo"
PRIoLEAST64 = "llo"
PRIx8 = "x"
PRIx16 = "x"
PRIx32 = "x"
PRIx64 = "lx"
PRIx64 = "llx"
PRIxLEAST8 = "x"
PRIxLEAST16 = "x"
PRIxLEAST32 = "x"
PRIxLEAST64 = "lx"
PRIxLEAST64 = "llx"
PRIX8 = "X"
PRIX16 = "X"
PRIX32 = "X"
PRIX64 = "lX"
PRIX64 = "llX"
PRIXLEAST8 = "X"
PRIXLEAST16 = "X"
PRIXLEAST32 = "X"
PRIXLEAST64 = "lX"
PRIXLEAST64 = "llX"
PRIu8 = "u"
PRIu16 = "u"
PRIu32 = "u"
PRIu64 = "lu"
PRIu64 = "llu"
PRIuLEAST8 = "u"
PRIuLEAST16 = "u"
PRIuLEAST32 = "u"
PRIuLEAST64 = "lu"
PRIuLEAST64 = "llu"
SCNd16 = "hd"
SCNd32 = "d"
SCNd64 = "ld"
SCNd64 = "lld"
SCNi16 = "hi"
SCNi32 = "i"
SCNi64 = "li"
SCNi64 = "lli"
SCNo16 = "ho"
SCNo32 = "o"
SCNo64 = "lo"
SCNo64 = "llo"
SCNu16 = "hu"
SCNu32 = "u"
SCNu64 = "lu"
SCNu64 = "llu"
SCNx16 = "hx"
SCNx32 = "x"
SCNx64 = "lx"
SCNx64 = "llx"
PRIdMAX = "ld"
PRIoMAX = "lo"
PRIxMAX = "lx"
PRIuMAX = "lu"
PRIdMAX = "lld"
PRIoMAX = "llo"
PRIxMAX = "llx"
PRIuMAX = "llu"
PRIdMAX = "d"
PRIoMAX = "o"
PRIxMAX = "x"
PRIuMAX = "u"
SCNiMAX = "li"
SCNdMAX = "ld"
SCNoMAX = "lo"
SCNxMAX = "lx"
SCNiMAX = "lli"
SCNdMAX = "lld"
SCNoMAX = "llo"
SCNxMAX = "llx"
SCNiMAX = "i"
SCNdMAX = "d"
SCNoMAX = "o"
SCNxMAX = "x"
# Included from sys/types32.h
SHRT_MIN = (-32768)
SHRT_MAX = 32767
USHRT_MAX = 65535
INT_MIN = (-2147483647-1)
INT_MAX = 2147483647
LONG_MIN = (-9223372036854775807L-1L)
LONG_MAX = 9223372036854775807L
LONG_MIN = (-2147483647L-1L)
LONG_MAX = 2147483647L
P_MYID = (-1)
# Included from sys/select.h
# Included from sys/time.h
TIME32_MAX = INT32_MAX
TIME32_MIN = INT32_MIN
def TIMEVAL_OVERFLOW(tv): return \
from TYPES import *
DST_NONE = 0
DST_USA = 1
DST_AUST = 2
DST_WET = 3
DST_MET = 4
DST_EET = 5
DST_CAN = 6
DST_GB = 7
DST_RUM = 8
DST_TUR = 9
DST_AUSTALT = 10
ITIMER_REAL = 0
ITIMER_VIRTUAL = 1
ITIMER_PROF = 2
ITIMER_REALPROF = 3
def ITIMERVAL_OVERFLOW(itv): return \
SEC = 1
MILLISEC = 1000
MICROSEC = 1000000
NANOSEC = 1000000000
# Included from sys/time_impl.h
def TIMESPEC_OVERFLOW(ts): return \
def ITIMERSPEC_OVERFLOW(it): return \
__CLOCK_REALTIME0 = 0
CLOCK_VIRTUAL = 1
CLOCK_PROF = 2
__CLOCK_REALTIME3 = 3
CLOCK_HIGHRES = 4
CLOCK_MAX = 5
CLOCK_REALTIME = __CLOCK_REALTIME3
CLOCK_REALTIME = __CLOCK_REALTIME0
TIMER_RELTIME = 0x0
TIMER_ABSTIME = 0x1
# Included from sys/mutex.h
from TYPES import *
def MUTEX_HELD(x): return (mutex_owned(x))
def TICK_TO_SEC(tick): return ((tick) / hz)
def SEC_TO_TICK(sec): return ((sec) * hz)
def TICK_TO_MSEC(tick): return \
def MSEC_TO_TICK(msec): return \
def MSEC_TO_TICK_ROUNDUP(msec): return \
def TICK_TO_USEC(tick): return ((tick) * usec_per_tick)
def USEC_TO_TICK(usec): return ((usec) / usec_per_tick)
def USEC_TO_TICK_ROUNDUP(usec): return \
def TICK_TO_NSEC(tick): return ((tick) * nsec_per_tick)
def NSEC_TO_TICK(nsec): return ((nsec) / nsec_per_tick)
def NSEC_TO_TICK_ROUNDUP(nsec): return \
def TIMEVAL_TO_TICK(tvp): return \
def TIMESTRUC_TO_TICK(tsp): return \
# Included from time.h
from TYPES import *
# Included from iso/time_iso.h
NULL = 0L
NULL = 0
CLOCKS_PER_SEC = 1000000
FD_SETSIZE = 65536
FD_SETSIZE = 1024
_NBBY = 8
NBBY = _NBBY
def FD_ZERO(p): return bzero((p), sizeof (*(p)))
| mit |
deandunbar/bitwave | hackathon_version/venv/lib/python2.7/site-packages/PIL/_binary.py | 19 | 1575 | #
# The Python Imaging Library.
# $Id$
#
# Binary input/output support routines.
#
# Copyright (c) 1997-2003 by Secret Labs AB
# Copyright (c) 1995-2003 by Fredrik Lundh
# Copyright (c) 2012 by Brian Crowell
#
# See the README file for information on usage and redistribution.
#
# Byte accessors: i8/o8 bridge the Python 2 (str-based) and Python 3
# (int-based) views of a byte string.
if bytes is not str:
    # Python 3: indexing bytes already yields ints.
    def i8(c):
        return c if c.__class__ is int else c[0]
    def o8(i):
        return bytes((i & 255,))
else:
    # Python 2: bytes is an alias of str, so go through ord()/chr().
    def i8(c):
        return ord(c)
    def o8(i):
        return chr(i & 255)
# Input, le = little endian, be = big endian
# TODO: replace with more readable struct.unpack equivalent
def i16le(c, o=0):
    """Decode an unsigned little-endian 16-bit integer from c[o:o+2]."""
    return i8(c[o]) + (i8(c[o + 1]) << 8)
def i32le(c, o=0):
    """Decode an unsigned little-endian 32-bit integer from c[o:o+4]."""
    b0, b1, b2, b3 = i8(c[o]), i8(c[o + 1]), i8(c[o + 2]), i8(c[o + 3])
    return b0 + (b1 << 8) + (b2 << 16) + (b3 << 24)
def i16be(c, o=0):
    """Decode an unsigned big-endian 16-bit integer from c[o:o+2]."""
    return (i8(c[o]) << 8) + i8(c[o + 1])
def i32be(c, o=0):
    """Decode an unsigned big-endian 32-bit integer from c[o:o+4]."""
    b0, b1, b2, b3 = i8(c[o]), i8(c[o + 1]), i8(c[o + 2]), i8(c[o + 3])
    return (b0 << 24) + (b1 << 16) + (b2 << 8) + b3
# Output, le = little endian, be = big endian
def o16le(i):
    """Encode the low 16 bits of *i* as little-endian bytes."""
    lo, hi = o8(i), o8(i >> 8)
    return lo + hi
def o32le(i):
    """Encode the low 32 bits of *i* as little-endian bytes."""
    return b"".join([o8(i >> shift) for shift in (0, 8, 16, 24)])
def o16be(i):
    """Encode the low 16 bits of *i* as big-endian bytes."""
    hi, lo = o8(i >> 8), o8(i)
    return hi + lo
def o32be(i):
    """Encode the low 32 bits of *i* as big-endian bytes."""
    return b"".join([o8(i >> shift) for shift in (24, 16, 8, 0)])
# End of file
| mit |
webmasterraj/GaSiProMo | flask/lib/python2.7/site-packages/pip/_vendor/cachecontrol/serialize.py | 317 | 6189 | import base64
import io
import json
import zlib
from pip._vendor.requests.structures import CaseInsensitiveDict
from .compat import HTTPResponse, pickle
def _b64_encode_bytes(b):
return base64.b64encode(b).decode("ascii")
def _b64_encode_str(s):
return _b64_encode_bytes(s.encode("utf8"))
def _b64_decode_bytes(b):
return base64.b64decode(b.encode("ascii"))
def _b64_decode_str(s):
return _b64_decode_bytes(s).decode("utf8")
class Serializer(object):
    """Converts (request, response) pairs to and from the cache wire format.

    v2 entries are b"cc=2," followed by zlib-compressed JSON whose binary
    fields are base64-wrapped; v1 (pickle) is still loadable; v0 is always
    treated as a cache miss.
    """
    def dumps(self, request, response, body=None):
        """Serialize *response* (plus the request's Vary header values)."""
        response_headers = CaseInsensitiveDict(response.headers)
        if body is None:
            body = response.read(decode_content=False)
            # NOTE: 99% sure this is dead code. I'm only leaving it
            #       here b/c I don't have a test yet to prove
            #       it. Basically, before using
            #       `cachecontrol.filewrapper.CallbackFileWrapper`,
            #       this made an effort to reset the file handle. The
            #       `CallbackFileWrapper` short circuits this code by
            #       setting the body as the content is consumed, the
            #       result being a `body` argument is *always* passed
            #       into cache_response, and in turn,
            #       `Serializer.dump`.
            response._fp = io.BytesIO(body)
        data = {
            "response": {
                "body": _b64_encode_bytes(body),
                "headers": dict(
                    (_b64_encode_str(k), _b64_encode_str(v))
                    for k, v in response.headers.items()
                ),
                "status": response.status,
                "version": response.version,
                "reason": _b64_encode_str(response.reason),
                "strict": response.strict,
                "decode_content": response.decode_content,
            },
        }
        # Construct our vary headers
        data["vary"] = {}
        if "vary" in response_headers:
            varied_headers = response_headers['vary'].split(',')
            for header in varied_headers:
                header = header.strip()
                data["vary"][header] = request.headers.get(header, None)
        # Encode our Vary headers to ensure they can be serialized as JSON
        data["vary"] = dict(
            (_b64_encode_str(k), _b64_encode_str(v) if v is not None else v)
            for k, v in data["vary"].items()
        )
        return b",".join([
            b"cc=2",
            zlib.compress(
                json.dumps(
                    data, separators=(",", ":"), sort_keys=True,
                ).encode("utf8"),
            ),
        ])
    def loads(self, request, data):
        """Deserialize cached *data* for *request*; None means cache miss."""
        # Short circuit if we've been given an empty set of data
        if not data:
            return
        # Determine what version of the serializer the data was serialized
        # with
        try:
            ver, data = data.split(b",", 1)
        except ValueError:
            ver = b"cc=0"
        # Make sure that our "ver" is actually a version and isn't a false
        # positive from a , being in the data stream.
        if ver[:3] != b"cc=":
            data = ver + data
            ver = b"cc=0"
        # Get the version number out of the cc=N
        ver = ver.split(b"=", 1)[-1].decode("ascii")
        # Dispatch to the actual load method for the given version
        try:
            return getattr(self, "_loads_v{0}".format(ver))(request, data)
        except AttributeError:
            # This is a version we don't have a loads function for, so we'll
            # just treat it as a miss and return None
            return
    def prepare_response(self, request, cached):
        """Verify our vary headers match and construct a real urllib3
        HTTPResponse object.
        """
        # Special case the '*' Vary value as it means we cannot actually
        # determine if the cached response is suitable for this request.
        if "*" in cached.get("vary", {}):
            return
        # Ensure that the Vary headers for the cached response match our
        # request
        for header, value in cached.get("vary", {}).items():
            if request.headers.get(header, None) != value:
                return
        body_raw = cached["response"].pop("body")
        try:
            body = io.BytesIO(body_raw)
        except TypeError:
            # This can happen if cachecontrol serialized to v1 format (pickle)
            # using Python 2. A Python 2 str(byte string) will be unpickled as
            # a Python 3 str (unicode string), which will cause the above to
            # fail with:
            #
            #     TypeError: 'str' does not support the buffer interface
            body = io.BytesIO(body_raw.encode('utf8'))
        return HTTPResponse(
            body=body,
            preload_content=False,
            **cached["response"]
        )
    def _loads_v0(self, request, data):
        # The original legacy cache data. This doesn't contain enough
        # information to construct everything we need, so we'll treat this as
        # a miss.
        return
    def _loads_v1(self, request, data):
        # v1: a pickled dict.  A corrupt payload is simply a cache miss.
        try:
            cached = pickle.loads(data)
        except ValueError:
            return
        return self.prepare_response(request, cached)
    def _loads_v2(self, request, data):
        # v2: zlib-compressed JSON; undo the base64 wrapping applied by
        # dumps() before handing the dict to prepare_response().
        try:
            cached = json.loads(zlib.decompress(data).decode("utf8"))
        except ValueError:
            return
        # We need to decode the items that we've base64 encoded
        cached["response"]["body"] = _b64_decode_bytes(
            cached["response"]["body"]
        )
        cached["response"]["headers"] = dict(
            (_b64_decode_str(k), _b64_decode_str(v))
            for k, v in cached["response"]["headers"].items()
        )
        cached["response"]["reason"] = _b64_decode_str(
            cached["response"]["reason"],
        )
        cached["vary"] = dict(
            (_b64_decode_str(k), _b64_decode_str(v) if v is not None else v)
            for k, v in cached["vary"].items()
        )
        return self.prepare_response(request, cached)
| gpl-2.0 |
pFernbach/hpp-rbprm-corba | script/scenarios/sandbox/dynamic/stair_bauzil_hrp2_interp_noRamp.py | 1 | 7684 | from hpp.corbaserver.rbprm.rbprmbuilder import Builder
# Interpolation benchmark for HRP-2 climbing the Bauzil stairs (no ramp):
# loads the guide path computed by stair_bauzil_hrp2_path_noRamp (imported
# as `tp`), builds the full-body model, generates start/goal contact states,
# interpolates a contact sequence along the guide path, and logs how many
# transitions have no dynamically feasible CoM trajectory.
from hpp.corbaserver.rbprm.rbprmfullbody import FullBody
from hpp.gepetto import Viewer
import omniORB.any
import stair_bauzil_hrp2_path_noRamp as tp
import time
from hpp.corbaserver.rbprm.rbprmstate import State,StateHelper
from display_tools import *
from constraint_to_dae import *
packageName = "hrp2_14_description"
meshPackageName = "hrp2_14_description"
rootJointType = "freeflyer"
##
# Information to retrieve urdf and srdf files.
urdfName = "hrp2_14"
urdfSuffix = "_reduced"
srdfSuffix = ""
# Load the reduced HRP-2 model and bound its root translation to the stairs
# area; the extra config space holds root velocity/acceleration (see below).
fullBody = FullBody ()
fullBody.loadFullBodyModel(urdfName, rootJointType, meshPackageName, packageName, urdfSuffix, srdfSuffix)
fullBody.setJointBounds ("base_joint_xyz", [0,1.55, -0.25, -0.15, 0.2, 2])
fullBody.client.basic.robot.setDimensionExtraConfigSpace(tp.extraDof)
fullBody.client.basic.robot.setExtraConfigSpaceBounds([0,0,0,0,0,0,0,0,0,0,0,0])
# The problem solver reuses the dynamic bounds (aMax/vMax/friction) of the
# guide planner so the whole-body motion stays consistent with the guide.
ps = tp.ProblemSolver( fullBody )
ps.client.problem.setParameter("aMax",omniORB.any.to_any(tp.aMax))
ps.client.problem.setParameter("aMaxZ",omniORB.any.to_any(1.))
ps.client.problem.setParameter("vMax",omniORB.any.to_any(tp.vMax))
ps.client.problem.setParameter("friction",tp.mu)
r = tp.Viewer (ps,viewerClient=tp.r.client,displayArrows = True, displayCoM = True)
#~ AFTER loading obstacles
# Contact limbs: both feet as 6-DOF effectors, 50000 samples each, ranked by
# the "manipulability" heuristic. The arm limbs below are kept commented out.
lLegId = 'hrp2_lleg_rom'
lLeg = 'LLEG_JOINT0'
lLegOffset = [0,0,-0.105]
lLegNormal = [0,0,1]
lLegx = 0.09; lLegy = 0.05
fullBody.addLimb(lLegId,lLeg,'',lLegOffset,lLegNormal, lLegx, lLegy, 50000, "manipulability", 0.01,"_6_DOF")
"""
rarmId = 'hrp2_rarm_rom'
rarm = 'RARM_JOINT0'
rHand = 'RARM_JOINT5'
rArmOffset = [0,0,-0.1]
rArmNormal = [0,0,1]
rArmx = 0.024; rArmy = 0.024
#disabling collision for hook
fullBody.addLimb(rarmId,rarm,rHand,rArmOffset,rArmNormal, rArmx, rArmy, 100000, "manipulability", 0.01, "_6_DOF", True)
"""
rLegId = 'hrp2_rleg_rom'
rLeg = 'RLEG_JOINT0'
rLegOffset = [0,0,-0.105]
rLegNormal = [0,0,1]
rLegx = 0.09; rLegy = 0.05
fullBody.addLimb(rLegId,rLeg,'',rLegOffset,rLegNormal, rLegx, rLegy, 50000, "manipulability", 0.01,"_6_DOF")
"""
#~ AFTER loading obstacles
larmId = '4Larm'
larm = 'LARM_JOINT0'
lHand = 'LARM_JOINT5'
lArmOffset = [-0.05,-0.050,-0.050]
lArmNormal = [1,0,0]
lArmx = 0.024; lArmy = 0.024
#~ fullBody.addLimb(larmId,larm,lHand,lArmOffset,lArmNormal, lArmx, lArmy, 10000, 0.05)
rKneeId = '0RKnee'
rLeg = 'RLEG_JOINT0'
rKnee = 'RLEG_JOINT3'
rLegOffset = [0.105,0.055,0.017]
rLegNormal = [-1,0,0]
rLegx = 0.05; rLegy = 0.05
#~ fullBody.addLimb(rKneeId, rLeg,rKnee,rLegOffset,rLegNormal, rLegx, rLegy, 10000, 0.01)
#~
lKneeId = '1LKnee'
lLeg = 'LLEG_JOINT0'
lKnee = 'LLEG_JOINT3'
lLegOffset = [0.105,0.055,0.017]
lLegNormal = [-1,0,0]
lLegx = 0.05; lLegy = 0.05
#~ fullBody.addLimb(lKneeId,lLeg,lKnee,lLegOffset,lLegNormal, lLegx, lLegy, 10000, 0.01)
#~
#~ fullBody.runLimbSampleAnalysis(rLegId, "jointLimitsDistance", True)
#~ fullBody.runLimbSampleAnalysis(lLegId, "jointLimitsDistance", True)
#~ fullBody.client.basic.robot.setJointConfig('LARM_JOINT0',[1])
#~ fullBody.client.basic.robot.setJointConfig('RARM_JOINT0',[-1])
"""
q_0 = fullBody.getCurrentConfig();
#~ fullBody.createOctreeBoxes(r.client.gui, 1, rarmId, q_0,)
# Reference whole-body posture (arms bent, legs slightly flexed); also used
# as the postural regularization target for sample analysis below.
q_init =[0.1, -0.82, 0.648702, 1.0, 0.0 , 0.0, 0.0,0.0, 0.0, 0.0, 0.0,0.261799388, 0.174532925, 0.0, -0.523598776, 0.0, 0.0, 0.17,0.261799388, -0.174532925, 0.0, -0.523598776, 0.0, 0.0, 0.17,0.0, 0.0, -0.453785606, 0.872664626, -0.41887902, 0.0,0.0, 0.0, -0.453785606, 0.872664626, -0.41887902, 0.0,0,0,0,0,0,0]; r (q_init)
q_ref = q_init[::]
fullBody.setCurrentConfig (q_init)
fullBody.setReferenceConfig (q_ref)
# configSize = index of the first extra-config entry in a full configuration.
# NOTE(review): the code below writes dir (3 values) + acc (3 values) into the
# extra config, i.e. it assumes tp.extraDof >= 6 — confirm against tp.
configSize = fullBody.getConfigSize() -fullBody.client.basic.robot.getDimensionExtraConfigSpace()
# Take start/goal root poses and velocity/acceleration from the guide path.
q_init = fullBody.getCurrentConfig(); q_init[0:7] = tp.ps.configAtParam(0,0.01)[0:7] # use this to get the correct orientation
q_goal = fullBody.getCurrentConfig(); q_goal[0:7] = tp.ps.configAtParam(0,tp.ps.pathLength(0))[0:7]
dir_init = tp.ps.configAtParam(0,0.01)[tp.indexECS:tp.indexECS+3]
acc_init = tp.ps.configAtParam(0,0.01)[tp.indexECS+3:tp.indexECS+6]
dir_goal = tp.ps.configAtParam(0,tp.ps.pathLength(0))[tp.indexECS:tp.indexECS+3]
acc_goal = tp.ps.configAtParam(0,tp.ps.pathLength(0))[tp.indexECS+3:tp.indexECS+6]
fullBody.runLimbSampleAnalysis(rLegId, "ReferenceConfiguration", True)
fullBody.runLimbSampleAnalysis(lLegId, "ReferenceConfiguration", True)
# FIXME : test
q_init[2] = q_init[2]+0.02
q_goal[2] = q_goal[2]+0.02
#q_init[0:3]=[0.28994563306701016,-0.82,0.6191688248477717]
fullBody.setStaticStability(True)
# Randomly generating a contact configuration at q_init
fullBody.setCurrentConfig (q_init) ; r(q_init)
# Randomly generating a contact configuration at q_end
fullBody.setCurrentConfig (q_goal)
# copy extraconfig for start and init configurations
q_init[configSize:configSize+3] = dir_init[::]
q_init[configSize+3:configSize+6] = acc_init[::]
q_goal[configSize:configSize+3] = dir_goal[::]
q_goal[configSize+3:configSize+6] = acc_goal[::]
# specifying the full body configurations as start and goal state of the problem
q_init = fullBody.generateContacts(q_init, dir_init,acc_init,1)
q_goal = fullBody.generateContacts(q_goal, dir_goal,acc_goal,1)
r(q_init)
fullBody.setStartState(q_init,[rLegId,lLegId])
fullBody.setEndState(q_goal,[rLegId,lLegId])
from hpp.gepetto import PathPlayer
pp = PathPlayer (fullBody.client.basic, r)
pp.dt=0.001
# Interpolate a quasi-static contact sequence along guide path 0.
configs = fullBody.interpolate(0.05,pathId=0,robustnessTreshold = 0, filterStates = True)
print "number of configs :", len(configs)
r(configs[-1])
# Count transitions for which no dynamically feasible CoM trajectory exists
# (the CROC/QP check returns an empty path id list on failure), and append
# the outcome to the benchmark log.
noCOQP = 0
for i in range(len(configs)-1):
    pid = fullBody.isDynamicallyReachableFromState(i,i+1)
    if len(pid)==0:
        noCOQP +=1
f = open("/local/fernbac/bench_iros18/success/log_successStairsNoRamp.log","a")
if noCOQP>0:
    f.write("fail, with "+str(noCOQP)+" infeasibles transitions\n")
else:
    f.write("all transition feasibles\n")
f.close()
"""
from fullBodyPlayerHrp2 import Player
player = Player(fullBody,pp,tp,configs,draw=False,optim_effector=False,use_velocity=False,pathId = 0)
player.displayContactPlan()
"""
"""
from check_qp import *
check_one_transition(ps,fullBody,s0,s2)
check_contact_planps,r,pp,fullBody,0,len(configs)-1)
from planning.config import *
from generate_contact_sequence import *
beginState = 0
endState = len(configs)-1
cs = generateContactSequence(fullBody,configs,beginState, endState,r)
filename = OUTPUT_DIR + "/" + OUTPUT_SEQUENCE_FILE
cs.saveAsXML(filename, "ContactSequence")
print "save contact sequence : ",filename
"""
"""
from constraint_to_dae import *
fullBody.isReachableFromState(3,4)
displayTwoStepConstraints(r,True)
removeAllConstraints(r)
s_m = fullBody.addState(fullBody.getConfigAtState(1),[rLegId,rarmId])
fullBody.isReachableFromState(1,s_m)
displayOneStepConstraints(r)
s_m = fullBody.addState(fullBody.getConfigAtState(7),[rLegId])
fullBody.isReachableFromState(7,s_m)
displayOneStepConstraints(r)
print "####################################"
print "# SOLVING P2 : #"
print "# DONE #"
print "####################################"
print "# Writing contact sequence file : #"
print "####################################"
from planning.configs.stairs_config import *
from generate_contact_sequence import *
cs = generateContactSequence(fullBody,configs[:5],r)
filename = OUTPUT_DIR + "/" + OUTPUT_SEQUENCE_FILE
cs.saveAsXML(filename, CONTACT_SEQUENCE_XML_TAG)
print "save contact sequence : ",filename
print "####################################"
print "# Writing contact sequence file : #"
print "# DONE #"
print "####################################"
"""
| lgpl-3.0 |
timm/timmnix | pypy3-v5.5.0-linux64/lib-python/3/idlelib/keybindingDialog.py | 60 | 12427 | """
Dialog for building Tkinter accelerator key bindings
"""
from tkinter import *
import tkinter.messagebox as tkMessageBox
import string
from idlelib import macosxSupport
class GetKeysDialog(Toplevel):
    """Modal dialog for composing a key binding for a named virtual event.

    Offers a 'basic' mode (modifier checkboxes + a list of final keys) and
    an 'advanced' mode (raw Tk key-sequence entry).  The chosen sequence is
    left in ``self.result`` ('' on cancel).
    """
    def __init__(self,parent,title,action,currentKeySequences):
        """
        action - string, the name of the virtual event these keys will be
                 mapped to
        currentKeys - list, a list of all key sequence lists currently mapped
                 to virtual events, for overlap checking
        """
        Toplevel.__init__(self, parent)
        self.configure(borderwidth=5)
        self.resizable(height=FALSE,width=FALSE)
        self.title(title)
        self.transient(parent)
        self.grab_set()
        self.protocol("WM_DELETE_WINDOW", self.Cancel)
        self.parent = parent
        self.action=action
        self.currentKeySequences=currentKeySequences
        self.result=''
        self.keyString=StringVar(self)
        self.keyString.set('')
        self.SetModifiersForPlatform() # set self.modifiers, self.modifier_label
        # One StringVar per modifier; '' means unchecked, the modifier name
        # means checked (see the Checkbutton onvalue/offvalue below).
        self.modifier_vars = []
        for modifier in self.modifiers:
            variable = StringVar(self)
            variable.set('')
            self.modifier_vars.append(variable)
        self.advanced = False
        self.CreateWidgets()
        self.LoadFinalKeyList()
        self.withdraw() #hide while setting geometry
        self.update_idletasks()
        self.geometry("+%d+%d" %
            ((parent.winfo_rootx()+((parent.winfo_width()/2)
                -(self.winfo_reqwidth()/2)),
              parent.winfo_rooty()+((parent.winfo_height()/2)
                -(self.winfo_reqheight()/2)) )) ) #centre dialog over parent
        self.deiconify() #geometry set, unhide
        self.wait_window()

    def CreateWidgets(self):
        """Build both the basic and the advanced panes (stacked; lift() picks one)."""
        frameMain = Frame(self,borderwidth=2,relief=SUNKEN)
        frameMain.pack(side=TOP,expand=TRUE,fill=BOTH)
        frameButtons=Frame(self)
        frameButtons.pack(side=BOTTOM,fill=X)
        self.buttonOK = Button(frameButtons,text='OK',
                               width=8,command=self.OK)
        self.buttonOK.grid(row=0,column=0,padx=5,pady=5)
        self.buttonCancel = Button(frameButtons,text='Cancel',
                               width=8,command=self.Cancel)
        self.buttonCancel.grid(row=0,column=1,padx=5,pady=5)
        self.frameKeySeqBasic = Frame(frameMain)
        self.frameKeySeqAdvanced = Frame(frameMain)
        self.frameControlsBasic = Frame(frameMain)
        self.frameHelpAdvanced = Frame(frameMain)
        # Basic and advanced frames share the same grid cells; whichever is
        # lifted last is the one the user sees.
        self.frameKeySeqAdvanced.grid(row=0,column=0,sticky=NSEW,padx=5,pady=5)
        self.frameKeySeqBasic.grid(row=0,column=0,sticky=NSEW,padx=5,pady=5)
        self.frameKeySeqBasic.lift()
        self.frameHelpAdvanced.grid(row=1,column=0,sticky=NSEW,padx=5)
        self.frameControlsBasic.grid(row=1,column=0,sticky=NSEW,padx=5)
        self.frameControlsBasic.lift()
        self.buttonLevel = Button(frameMain,command=self.ToggleLevel,
                                  text='Advanced Key Binding Entry >>')
        self.buttonLevel.grid(row=2,column=0,stick=EW,padx=5,pady=5)
        labelTitleBasic = Label(self.frameKeySeqBasic,
                                text="New keys for '"+self.action+"' :")
        labelTitleBasic.pack(anchor=W)
        labelKeysBasic = Label(self.frameKeySeqBasic,justify=LEFT,
                               textvariable=self.keyString,relief=GROOVE,borderwidth=2)
        labelKeysBasic.pack(ipadx=5,ipady=5,fill=X)
        self.modifier_checkbuttons = {}
        column = 0
        for modifier, variable in zip(self.modifiers, self.modifier_vars):
            label = self.modifier_label.get(modifier, modifier)
            check=Checkbutton(self.frameControlsBasic,
                command=self.BuildKeyString,
                text=label,variable=variable,onvalue=modifier,offvalue='')
            check.grid(row=0,column=column,padx=2,sticky=W)
            self.modifier_checkbuttons[modifier] = check
            column += 1
        labelFnAdvice=Label(self.frameControlsBasic,justify=LEFT,
                            text=\
                            "Select the desired modifier keys\n"+
                            "above, and the final key from the\n"+
                            "list on the right.\n\n" +
                            "Use upper case Symbols when using\n" +
                            "the Shift modifier. (Letters will be\n" +
                            "converted automatically.)")
        labelFnAdvice.grid(row=1,column=0,columnspan=4,padx=2,sticky=W)
        self.listKeysFinal=Listbox(self.frameControlsBasic,width=15,height=10,
                                   selectmode=SINGLE)
        self.listKeysFinal.bind('<ButtonRelease-1>',self.FinalKeySelected)
        self.listKeysFinal.grid(row=0,column=4,rowspan=4,sticky=NS)
        scrollKeysFinal=Scrollbar(self.frameControlsBasic,orient=VERTICAL,
                                  command=self.listKeysFinal.yview)
        self.listKeysFinal.config(yscrollcommand=scrollKeysFinal.set)
        scrollKeysFinal.grid(row=0,column=5,rowspan=4,sticky=NS)
        self.buttonClear=Button(self.frameControlsBasic,
                                text='Clear Keys',command=self.ClearKeySeq)
        self.buttonClear.grid(row=2,column=0,columnspan=4)
        labelTitleAdvanced = Label(self.frameKeySeqAdvanced,justify=LEFT,
                text="Enter new binding(s) for '"+self.action+"' :\n"+
                "(These bindings will not be checked for validity!)")
        labelTitleAdvanced.pack(anchor=W)
        self.entryKeysAdvanced=Entry(self.frameKeySeqAdvanced,
                textvariable=self.keyString)
        self.entryKeysAdvanced.pack(fill=X)
        labelHelpAdvanced=Label(self.frameHelpAdvanced,justify=LEFT,
            text="Key bindings are specified using Tkinter keysyms as\n"+
                 "in these samples: <Control-f>, <Shift-F2>, <F12>,\n"
                 "<Control-space>, <Meta-less>, <Control-Alt-Shift-X>.\n"
                 "Upper case is used when the Shift modifier is present!\n\n" +
                 "'Emacs style' multi-keystroke bindings are specified as\n" +
                 "follows: <Control-x><Control-y>, where the first key\n" +
                 "is the 'do-nothing' keybinding.\n\n" +
                 "Multiple separate bindings for one action should be\n"+
                 "separated by a space, eg., <Alt-v> <Meta-v>." )
        labelHelpAdvanced.grid(row=0,column=0,sticky=NSEW)

    def SetModifiersForPlatform(self):
        """Determine list of names of key modifiers for this platform.

        The names are used to build Tk bindings -- it doesn't matter if the
        keyboard has these keys, it matters if Tk understands them. The
        order is also important: key binding equality depends on it, so
        config-keys.def must use the same ordering.
        """
        import sys
        if macosxSupport.runningAsOSXApp():
            self.modifiers = ['Shift', 'Control', 'Option', 'Command']
        else:
            self.modifiers = ['Control', 'Alt', 'Shift']
        self.modifier_label = {'Control': 'Ctrl'} # short name

    def ToggleLevel(self):
        """Switch between the basic and the advanced entry panes."""
        if  self.buttonLevel.cget('text')[:8]=='Advanced':
            self.ClearKeySeq()
            self.buttonLevel.config(text='<< Basic Key Binding Entry')
            self.frameKeySeqAdvanced.lift()
            self.frameHelpAdvanced.lift()
            self.entryKeysAdvanced.focus_set()
            self.advanced = True
        else:
            self.ClearKeySeq()
            self.buttonLevel.config(text='Advanced Key Binding Entry >>')
            self.frameKeySeqBasic.lift()
            self.frameControlsBasic.lift()
            self.advanced = False

    def FinalKeySelected(self,event):
        self.BuildKeyString()

    def BuildKeyString(self):
        """Compose '<Mod-Mod-Key>' from the checked modifiers + selected key."""
        keyList = modifiers = self.GetModifiers()
        finalKey = self.listKeysFinal.get(ANCHOR)
        if finalKey:
            finalKey = self.TranslateKey(finalKey, modifiers)
            keyList.append(finalKey)
        self.keyString.set('<' + '-'.join(keyList) + '>')

    def GetModifiers(self):
        """Return the list of currently checked modifier names, in order."""
        modList = [variable.get() for variable in self.modifier_vars]
        return [mod for mod in modList if mod]

    def ClearKeySeq(self):
        """Reset listbox selection, modifier checkboxes and the key string."""
        self.listKeysFinal.select_clear(0,END)
        self.listKeysFinal.yview(MOVETO, '0.0')
        for variable in self.modifier_vars:
            variable.set('')
        self.keyString.set('')

    def LoadFinalKeyList(self):
        """Populate the 'final key' listbox with the common choices."""
        #these tuples are also available for use in validity checks
        # Bug fix: the list previously contained 'F2' twice and omitted 'F3',
        # so F3 could never be bound from the basic pane.
        self.functionKeys=('F1','F2','F3','F4','F5','F6','F7','F8','F9',
                           'F10','F11','F12')
        self.alphanumKeys=tuple(string.ascii_lowercase+string.digits)
        self.punctuationKeys=tuple('~!@#%^&*()_-+={}[]|;:,.<>/?')
        self.whitespaceKeys=('Tab','Space','Return')
        self.editKeys=('BackSpace','Delete','Insert')
        self.moveKeys=('Home','End','Page Up','Page Down','Left Arrow',
                       'Right Arrow','Up Arrow','Down Arrow')
        #make a tuple of most of the useful common 'final' keys
        keys=(self.alphanumKeys+self.punctuationKeys+self.functionKeys+
              self.whitespaceKeys+self.editKeys+self.moveKeys)
        self.listKeysFinal.insert(END, *keys)

    def TranslateKey(self, key, modifiers):
        "Translate from keycap symbol to the Tkinter keysym"
        translateDict = {'Space':'space',
                '~':'asciitilde','!':'exclam','@':'at','#':'numbersign',
                '%':'percent','^':'asciicircum','&':'ampersand','*':'asterisk',
                '(':'parenleft',')':'parenright','_':'underscore','-':'minus',
                '+':'plus','=':'equal','{':'braceleft','}':'braceright',
                '[':'bracketleft',']':'bracketright','|':'bar',';':'semicolon',
                ':':'colon',',':'comma','.':'period','<':'less','>':'greater',
                '/':'slash','?':'question','Page Up':'Prior','Page Down':'Next',
                'Left Arrow':'Left','Right Arrow':'Right','Up Arrow':'Up',
                'Down Arrow': 'Down', 'Tab':'Tab'}
        if key in translateDict:
            key = translateDict[key]
        if 'Shift' in modifiers and key in string.ascii_lowercase:
            key = key.upper()
        key = 'Key-' + key
        return key

    def OK(self, event=None):
        if self.advanced or self.KeysOK(): # doesn't check advanced string yet
            self.result=self.keyString.get()
            self.destroy()

    def Cancel(self, event=None):
        self.result=''
        self.destroy()

    def KeysOK(self):
        '''Validity check on user's 'basic' keybinding selection.

        Doesn't check the string produced by the advanced dialog because
        'modifiers' isn't set.
        '''
        # Bug fix: str.strip() returns a new string; the result was being
        # discarded, so whitespace-only input slipped past the 'not keys'
        # check below.
        keys = self.keyString.get().strip()
        finalKey = self.listKeysFinal.get(ANCHOR)
        modifiers = self.GetModifiers()
        # create a key sequence list for overlap check:
        keySequence = keys.split()
        keysOK = False
        title = 'Key Sequence Error'
        if not keys:
            tkMessageBox.showerror(title=title, parent=self,
                                   message='No keys specified.')
        elif not keys.endswith('>'):
            tkMessageBox.showerror(title=title, parent=self,
                                   message='Missing the final Key')
        elif (not modifiers
              and finalKey not in self.functionKeys + self.moveKeys):
            tkMessageBox.showerror(title=title, parent=self,
                                   message='No modifier key(s) specified.')
        elif (modifiers == ['Shift']) \
                 and (finalKey not in
                      self.functionKeys + self.moveKeys + ('Tab', 'Space')):
            msg = 'The shift modifier by itself may not be used with'\
                  ' this key symbol.'
            tkMessageBox.showerror(title=title, parent=self, message=msg)
        elif keySequence in self.currentKeySequences:
            msg = 'This key combination is already in use.'
            tkMessageBox.showerror(title=title, parent=self, message=msg)
        else:
            keysOK = True
        return keysOK
if __name__ == '__main__':
    #test the dialog
    # Manual smoke test: click the button to open the dialog; the chosen
    # sequence (or '' on cancel) is printed when it closes.
    root=Tk()
    def run():
        keySeq=''
        dlg=GetKeysDialog(root,'Get Keys','find-again',[])
        print(dlg.result)
    Button(root,text='Dialog',command=run).pack()
    root.mainloop()
| mit |
NDNUtils/NDNSIM | src/visualizer/visualizer/plugins/show_last_packets.py | 182 | 9460 | import gobject
import gtk
import ns.core
import ns.network
import ns.visualizer
from visualizer.base import InformationWindow
from visualizer.higcontainer import HIGContainer
from kiwi.ui.objectlist import ObjectList, Column
class ShowLastPackets(InformationWindow):
    """Per-node information window for the ns-3 PyViz visualizer.

    Shows the last transmitted, received and dropped packets of one node,
    with a user-editable capture filter over packet header types.
    """
    class PacketList(gtk.ScrolledWindow):
        """Scrollable table of packet samples: time, interface, size, contents."""
        (
            COLUMN_TIME,
            COLUMN_INTERFACE,
            COLUMN_SIZE,
            COLUMN_CONTENTS,
            ) = range(4)

        def __init__(self):
            super(ShowLastPackets.PacketList, self).__init__()
            self.set_properties(hscrollbar_policy=gtk.POLICY_AUTOMATIC,
                                vscrollbar_policy=gtk.POLICY_AUTOMATIC)
            # Four string columns, matching the COLUMN_* indices above.
            self.table_model = gtk.ListStore(*([str]*4))
            treeview = gtk.TreeView(self.table_model)
            treeview.show()
            self.add(treeview)

            def add_column(descr, colid):
                column = gtk.TreeViewColumn(descr, gtk.CellRendererText(), text=colid)
                treeview.append_column(column)

            add_column("Time", self.COLUMN_TIME)
            add_column("Interface", self.COLUMN_INTERFACE)
            add_column("Size", self.COLUMN_SIZE)
            add_column("Contents", self.COLUMN_CONTENTS)

        def update(self, node, packet_list):
            # Rebuild the table from scratch for the given packet samples.
            self.table_model.clear()
            for sample in packet_list:
                tree_iter = self.table_model.append()
                if sample.device is None:
                    interface_name = "(unknown)"
                else:
                    # Prefer the user-assigned device name; fall back to index.
                    interface_name = ns.core.Names.FindName(sample.device)
                    if not interface_name:
                        interface_name = "(interface %i)" % sample.device.GetIfIndex()
                self.table_model.set(tree_iter,
                                     self.COLUMN_TIME, str(sample.time.GetSeconds()),
                                     self.COLUMN_INTERFACE, interface_name,
                                     self.COLUMN_SIZE, str(sample.packet.GetSize ()),
                                     self.COLUMN_CONTENTS, str(sample.packet)
                                     )


    def __init__(self, visualizer, node_index):
        # Builds the dialog: three expandable packet lists on the left and a
        # header-type capture filter (with AND/OR combination) on the right.
        InformationWindow.__init__(self)
        self.win = gtk.Dialog(parent=visualizer.window,
                              flags=gtk.DIALOG_DESTROY_WITH_PARENT|gtk.DIALOG_NO_SEPARATOR,
                              buttons=(gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE))
        self.win.connect("response", self._response_cb)
        self.win.set_title("Last packets for node %i" % node_index)
        self.visualizer = visualizer
        self.viz_node = visualizer.get_node(node_index)
        self.node = ns.network.NodeList.GetNode(node_index)

        def smart_expand(expander, vbox):
            # Give an expanded section all the extra vertical space.
            if expander.get_expanded():
                vbox.set_child_packing(expander, expand=True, fill=True, padding=0, pack_type=gtk.PACK_START)
            else:
                vbox.set_child_packing(expander, expand=False, fill=False, padding=0, pack_type=gtk.PACK_START)

        main_hbox = gtk.HBox(False, 4)
        main_hbox.show()
        main_vbox = gtk.VBox(False, 4)
        main_vbox.show()
        self.win.vbox.add(main_hbox)
        main_hbox.add(main_vbox)

        self.tx_list = self.PacketList()
        self.tx_list.show()
        group = gtk.Expander("Last transmitted packets")
        group.show()
        group.add(self.tx_list)
        main_vbox.pack_start(group, expand=False, fill=False)
        group.connect_after("activate", smart_expand, main_vbox)

        self.rx_list = self.PacketList()
        self.rx_list.show()
        group = gtk.Expander("Last received packets")
        group.show()
        group.add(self.rx_list)
        main_vbox.pack_start(group, expand=False, fill=False)
        group.connect_after("activate", smart_expand, main_vbox)

        self.drop_list = self.PacketList()
        self.drop_list.show()
        group = gtk.Expander("Last dropped packets")
        group.show()
        group.add(self.drop_list)
        main_vbox.pack_start(group, expand=False, fill=False)
        group.connect_after("activate", smart_expand, main_vbox)


        # Packet Filter

        # - options
        self.packet_capture_options = ns.visualizer.PyViz.PacketCaptureOptions()
        self.packet_capture_options.numLastPackets = 100

        packet_filter_vbox = gtk.VBox(False, 4)
        packet_filter_vbox.show()
        main_hbox.add(packet_filter_vbox)

        sel_buttons_box = gtk.HButtonBox()
        sel_buttons_box.show()
        packet_filter_vbox.pack_start(sel_buttons_box, False, False, 4)
        select_all_button = gobject.new(gtk.Button, label="Sel. All", visible=True)
        select_none_button = gobject.new(gtk.Button, label="Sel. None", visible=True)
        sel_buttons_box.add(select_all_button)
        sel_buttons_box.add(select_none_button)

        self.packet_filter_widget = ObjectList([
                Column('selected', title="Sel.", data_type=bool, editable=True),
                Column('name', title="Header"),
                ], sortable=True)
        self.packet_filter_widget.show()
        packet_filter_vbox.pack_start(self.packet_filter_widget, True, True, 4)

        class TypeIdConfig(object):
            __slots__ = ['name', 'selected', 'typeid']

        self.packet_filter_list = [] # list of TypeIdConfig instances

        # Collect every registered TypeId that derives from Header or Trailer;
        # these are the selectable filter entries.
        Header = ns.core.TypeId.LookupByName("ns3::Header")
        Trailer = ns.core.TypeId.LookupByName("ns3::Trailer")
        for typeid_i in range(ns.core.TypeId.GetRegisteredN()):
            typeid = ns.core.TypeId.GetRegistered(typeid_i)
            # check if this is a header or trailer subtype
            typeid_tmp = typeid
            type_is_good = False
            while 1:
                if typeid_tmp == Header or typeid_tmp == Trailer:
                    type_is_good = True
                    break
                if typeid_tmp.HasParent():
                    typeid_tmp = typeid_tmp.GetParent()
                else:
                    break
            if not type_is_good:
                continue
            if typeid in [Header, Trailer]:
                continue
            c = TypeIdConfig()
            c.selected = True
            c.name = typeid.GetName()
            c.typeid = typeid
            self.packet_filter_list.append(c)
        self.packet_filter_widget.add_list(self.packet_filter_list)

        def update_capture_options():
            # Push the current filter selection down to the simulation side
            # (guarded by the simulation lock).
            if self.op_AND_button.props.active:
                self.packet_capture_options.mode = ns.visualizer.PyViz.PACKET_CAPTURE_FILTER_HEADERS_AND
            else:
                self.packet_capture_options.mode = ns.visualizer.PyViz.PACKET_CAPTURE_FILTER_HEADERS_OR
            self.packet_capture_options.numLastPackets = 100
            self.packet_capture_options.headers = [c.typeid for c in self.packet_filter_list if c.selected]
            self.visualizer.simulation.lock.acquire()
            try:
                self.visualizer.simulation.sim_helper.SetPacketCaptureOptions(
                    self.node.GetId(), self.packet_capture_options)
            finally:
                self.visualizer.simulation.lock.release()

        def sel_all_cb(bt):
            for c in self.packet_filter_list:
                c.selected = True
            self.packet_filter_widget.refresh()
            update_capture_options()

        def sel_none_cb(bt):
            for c in self.packet_filter_list:
                c.selected = False
            self.packet_filter_widget.refresh()
            update_capture_options()

        select_all_button.connect("clicked", sel_all_cb)
        select_none_button.connect("clicked", sel_none_cb)

        op_buttons_box = gtk.HButtonBox()
        op_buttons_box.show()
        packet_filter_vbox.pack_start(op_buttons_box, False, False, 4)
        self.op_AND_button = gobject.new(gtk.RadioButton, label="AND", visible=True)
        self.op_OR_button = gobject.new(gtk.RadioButton, label="OR", visible=True, group=self.op_AND_button)
        op_buttons_box.add(self.op_AND_button)
        op_buttons_box.add(self.op_OR_button)
        self.op_OR_button.props.active = True
        self.op_AND_button.connect("toggled", lambda b: update_capture_options())

        def cell_edited(l, obj, attribute):
            update_capture_options()
        self.packet_filter_widget.connect("cell-edited", cell_edited)

        update_capture_options()

        self.visualizer.add_information_window(self)
        self.win.set_default_size(600, 300)
        self.win.show()

    def _response_cb(self, win, response):
        # Close button: tear down and deregister from the visualizer.
        self.win.destroy()
        self.visualizer.remove_information_window(self)

    def update(self):
        # Called periodically by the visualizer; refresh all three tables.
        last_packets = self.visualizer.simulation.sim_helper.GetLastPackets(self.node.GetId())

        self.tx_list.update(self.node, last_packets.lastTransmittedPackets)
        self.rx_list.update(self.node, last_packets.lastReceivedPackets)
        self.drop_list.update(self.node, last_packets.lastDroppedPackets)
def populate_node_menu(viz, node, menu):
    """Append a 'Show Last Packets' entry to a node's context menu."""
    item = gtk.MenuItem("Show Last Packets")
    item.show()

    def _open_window(_menu_item):
        # Opening the window registers it with the visualizer itself.
        ShowLastPackets(viz, node.node_index)

    item.connect("activate", _open_window)
    menu.add(item)
def register(viz):
    # Plugin entry point: hook into the visualizer so every node's context
    # menu gets the 'Show Last Packets' entry.
    viz.connect("populate-node-menu", populate_node_menu)
| gpl-2.0 |
voidbridge/electron | script/test.py | 25 | 2679 | #!/usr/bin/env python
import os
import shutil
import subprocess
import sys
from lib.util import electron_gyp, rm_rf
# Repository root (two levels up from this script's directory).
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
# Project/product names come from the shared GYP configuration so this
# runner locates the same binaries the build produced.
PROJECT_NAME = electron_gyp()['project_name%']
PRODUCT_NAME = electron_gyp()['product_name%']
def main():
  """Run the Electron spec suite and return its exit code.

  Passing ``-R`` selects the Release build (default is Debug).  With
  ``--use-instrumented-asar`` the coverage-instrumented asar archive is
  swapped in for the run and restored afterwards.  If ``OUTPUT_TO_FILE``
  is set, the captured test output file is printed and deleted.
  """
  os.chdir(SOURCE_ROOT)

  config = 'D'
  if len(sys.argv) == 2 and sys.argv[1] == '-R':
    config = 'R'

  # Locate the built binary and its resources directory per platform.
  if sys.platform == 'darwin':
    electron = os.path.join(SOURCE_ROOT, 'out', config,
                            '{0}.app'.format(PRODUCT_NAME), 'Contents',
                            'MacOS', PRODUCT_NAME)
    resources_path = os.path.join(SOURCE_ROOT, 'out', config,
                                  '{0}.app'.format(PRODUCT_NAME), 'Contents',
                                  'Resources')
  elif sys.platform == 'win32':
    electron = os.path.join(SOURCE_ROOT, 'out', config,
                            '{0}.exe'.format(PROJECT_NAME))
    resources_path = os.path.join(SOURCE_ROOT, 'out', config)
  else:
    electron = os.path.join(SOURCE_ROOT, 'out', config, PROJECT_NAME)
    resources_path = os.path.join(SOURCE_ROOT, 'out', config)

  use_instrumented_asar = '--use-instrumented-asar' in sys.argv
  returncode = 0
  try:
    if use_instrumented_asar:
      install_instrumented_asar_file(resources_path)
    subprocess.check_call([electron, 'spec'] + sys.argv[1:])
  except subprocess.CalledProcessError as e:
    returncode = e.returncode
  except KeyboardInterrupt:
    # A user abort is not a test failure.
    returncode = 0

  if use_instrumented_asar:
    restore_uninstrumented_asar_file(resources_path)

  # Fix: os.environ.has_key() is Python-2-only; the 'in' test is equivalent
  # and also works on Python 3.
  if 'OUTPUT_TO_FILE' in os.environ:
    output_to_file = os.environ['OUTPUT_TO_FILE']
    with open(output_to_file, 'r') as f:
      print(f.read())
    rm_rf(output_to_file)

  return returncode
def install_instrumented_asar_file(resources_path):
  """Swap in the coverage-instrumented asar, keeping the original as backup."""
  asar_file = os.path.join(resources_path, '{0}.asar'.format(PROJECT_NAME))
  backup_file = os.path.join(resources_path,
                             '{0}-original.asar'.format(PROJECT_NAME))
  coverage_file = os.path.join(SOURCE_ROOT, 'out', 'coverage',
                               '{0}.asar'.format(PROJECT_NAME))
  shutil.move(asar_file, backup_file)
  shutil.move(coverage_file, asar_file)
def restore_uninstrumented_asar_file(resources_path):
  """Drop the instrumented asar and put the original archive back in place."""
  asar_file = os.path.join(resources_path, '{0}.asar'.format(PROJECT_NAME))
  backup_file = os.path.join(resources_path,
                             '{0}-original.asar'.format(PROJECT_NAME))
  os.remove(asar_file)
  shutil.move(backup_file, asar_file)
if __name__ == '__main__':
  # Propagate the spec runner's exit code so CI sees failures.
  sys.exit(main())
| mit |
psav/cfme_tests | cfme/tests/openstack/cloud/test_instances.py | 2 | 8199 | """Tests for Openstack cloud instances"""
import fauxfactory
import pytest
from selenium.common.exceptions import TimeoutException
from wait_for import TimedOutError
from cfme.cloud.instance.openstack import OpenStackInstance
from cfme.cloud.provider.openstack import OpenStackProvider
from cfme.exceptions import ItemNotFound
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.log import logger
from cfme.utils.version import current_version
# Applied to every test in this module: set up the provider once per module
# and run only against OpenStack cloud providers.
pytestmark = [
    pytest.mark.usefixtures("setup_provider_modscope"),
    pytest.mark.provider([OpenStackProvider], scope='module')
]
@pytest.fixture(scope='function')
def new_instance(provider):
    """Provision a fresh OpenStack instance and terminate it on teardown.

    Yields an :class:`OpenStackInstance` that has been created through the
    CFME provisioning form and has appeared in the UI.
    """
    prov_data = provider.data['provisioning']
    instance = OpenStackInstance(fauxfactory.gen_alpha(), provider,
                                 template_name=prov_data['image']['name'])
    prov_form_data = {
        'request': {'email': fauxfactory.gen_email(),
                    'first_name': fauxfactory.gen_alpha(),
                    'last_name': fauxfactory.gen_alpha()},
        'catalog': {'num_vms': '1',
                    'vm_name': instance.name},
        'environment': {'cloud_network': prov_data['cloud_network']},
        'properties': {'instance_type': prov_data['instance_type']},
    }
    instance.create(False, **prov_form_data)
    instance.wait_to_appear()
    yield instance
    try:
        instance.power_control_from_provider(OpenStackInstance.TERMINATE)
    except Exception:
        # Best-effort cleanup: the test may already have terminated the
        # instance.  A bare ``except:`` would also swallow
        # KeyboardInterrupt/SystemExit, so catch Exception and log instead.
        logger.exception('Failed to terminate instance %s', instance.name)
def test_create_instance(new_instance, soft_assert):
    """Creates an instance and verifies it appears on UI"""
    details = navigate_to(new_instance, 'Details')
    prov_data = new_instance.provider.data['provisioning']

    # The freshly provisioned instance must be powered on.
    power_state = details.entities.summary('Power Management').get_text_of('Power State')
    assert power_state == OpenStackInstance.STATE_ON

    template = details.entities.summary('Relationships').get_text_of('VM Template')
    soft_assert(template == prov_data['image']['name'])

    # Remaining relationships are checked against the provisioning data.
    checks = [('Availability Zone', 'availability_zone'),
              ('Cloud Tenants', 'cloud_tenant'),
              ('Flavor', 'instance_type')]
    if current_version() >= '5.7':
        checks.append(('Virtual Private Cloud', 'cloud_network'))
    for label, key in checks:
        shown = details.entities.summary('Relationships').get_text_of(label)
        soft_assert(shown == prov_data[key])
def test_stop_instance(new_instance):
    """Stop the instance from CFME and verify the Off power state."""
    new_instance.power_control_from_cfme(from_details=True,
                                         option=OpenStackInstance.STOP)
    new_instance.wait_for_instance_state_change(OpenStackInstance.STATE_OFF)
    details = navigate_to(new_instance, 'Details')
    power_state = details.entities.summary('Power Management').get_text_of('Power State')
    assert power_state == OpenStackInstance.STATE_OFF
def test_suspend_instance(new_instance):
    """Suspend the instance from CFME and verify the Suspended power state."""
    new_instance.power_control_from_cfme(from_details=True,
                                         option=OpenStackInstance.SUSPEND)
    new_instance.wait_for_instance_state_change(OpenStackInstance.STATE_SUSPENDED)
    details = navigate_to(new_instance, 'Details')
    power_state = details.entities.summary('Power Management').get_text_of('Power State')
    assert power_state == OpenStackInstance.STATE_SUSPENDED
def test_pause_instance(new_instance):
    """Pause the instance from CFME and verify the Paused power state."""
    new_instance.power_control_from_cfme(from_details=True,
                                         option=OpenStackInstance.PAUSE)
    new_instance.wait_for_instance_state_change(OpenStackInstance.STATE_PAUSED)
    details = navigate_to(new_instance, 'Details')
    power_state = details.entities.summary('Power Management').get_text_of('Power State')
    assert power_state == OpenStackInstance.STATE_PAUSED
def test_shelve_instance(new_instance):
    """Shelve the instance; accept either Shelved or Shelved Offloaded."""
    new_instance.power_control_from_cfme(from_details=True,
                                         option=OpenStackInstance.SHELVE)
    try:
        new_instance.wait_for_instance_state_change(OpenStackInstance.STATE_SHELVED)
    except TimedOutError:
        # The state may go straight to shelved-offloaded; tolerate the miss.
        logger.warning("Timeout when waiting for instance state: 'shelved'. Skipping")
    details = navigate_to(new_instance, 'Details')
    power_state = details.entities.summary('Power Management').get_text_of('Power State')
    assert power_state in (OpenStackInstance.STATE_SHELVED_OFFLOAD,
                           OpenStackInstance.STATE_SHELVED)
def test_shelve_offload_instance(new_instance):
    """Shelve, then offload the instance and verify the resulting state."""
    new_instance.power_control_from_cfme(from_details=True,
                                         option=OpenStackInstance.SHELVE)
    new_instance.wait_for_instance_state_change(OpenStackInstance.STATE_SHELVED)
    try:
        new_instance.power_control_from_cfme(from_details=True,
                                             option=OpenStackInstance.SHELVE_OFFLOAD)
    except TimeoutException:
        # The menu entry can be slow to appear; proceed and verify the state.
        logger.warning("Timeout when initiating power state 'Shelve Offload'. Skipping")
    new_instance.wait_for_instance_state_change(OpenStackInstance.STATE_SHELVED_OFFLOAD)
    details = navigate_to(new_instance, 'Details')
    power_state = details.entities.summary('Power Management').get_text_of('Power State')
    assert power_state == OpenStackInstance.STATE_SHELVED_OFFLOAD
def test_start_instance(new_instance):
    """Stop the instance on the provider, start it from CFME, verify On."""
    new_instance.power_control_from_provider(OpenStackInstance.STOP)
    new_instance.wait_for_instance_state_change(OpenStackInstance.STATE_OFF)
    new_instance.power_control_from_cfme(from_details=True,
                                         option=OpenStackInstance.START)
    new_instance.wait_for_instance_state_change(OpenStackInstance.STATE_ON)
    details = navigate_to(new_instance, 'Details')
    power_state = details.entities.summary('Power Management').get_text_of('Power State')
    assert power_state == OpenStackInstance.STATE_ON
def test_soft_reboot_instance(new_instance):
    """Soft-reboot the instance and verify it reboots (or is back ON)."""
    new_instance.power_control_from_cfme(from_details=True,
                                         option=OpenStackInstance.SOFT_REBOOT)
    new_instance.wait_for_instance_state_change(OpenStackInstance.STATE_REBOOTING)
    details_view = navigate_to(new_instance, 'Details')
    power_state = details_view.entities.summary('Power Management').get_text_of('Power State')
    # The reboot may already have finished by the time the page is read.
    assert power_state in (OpenStackInstance.STATE_ON,
                           OpenStackInstance.STATE_REBOOTING)
def test_hard_reboot_instance(new_instance):
    """Hard-reboot the instance and verify it reboots (or is back ON)."""
    new_instance.power_control_from_cfme(from_details=True,
                                         option=OpenStackInstance.HARD_REBOOT)
    new_instance.wait_for_instance_state_change(OpenStackInstance.STATE_REBOOTING)
    details_view = navigate_to(new_instance, 'Details')
    power_state = details_view.entities.summary('Power Management').get_text_of('Power State')
    # The reboot may already have finished by the time the page is read.
    assert power_state in (OpenStackInstance.STATE_ON,
                           OpenStackInstance.STATE_REBOOTING)
def test_delete_instance(new_instance):
    """Terminate the instance and verify it is gone from provider and UI."""
    new_instance.power_control_from_cfme(from_details=True,
                                         option=OpenStackInstance.TERMINATE)
    new_instance.wait_for_instance_state_change(OpenStackInstance.STATE_UNKNOWN)
    assert new_instance.name not in new_instance.provider.mgmt.list_vm()
    all_view = navigate_to(new_instance, 'AllForProvider')
    try:
        all_view.entities.get_entity(name=new_instance.name, surf_pages=True)
    except ItemNotFound:
        # Expected: the entity must not be found after termination.
        pass
    else:
        assert False, "entity still exists"
def test_list_vms_infra_node(appliance, provider, soft_assert):
    """Compare VM counts shown in the UI with the provider's hypervisor data."""
    if not getattr(provider, 'infra_provider', None):
        pytest.skip("Provider {prov} doesn't have infra provider set".format(prov=provider.name))
    host_collection = appliance.collections.hosts
    # Map each hypervisor's IP to its count of running VMs
    running_by_ip = {hv.host_ip: hv.running_vms for hv in provider.mgmt.api.hypervisors.list()}
    # Only compute nodes host instances; skip the other node roles
    compute_hosts = [host for host in host_collection.all(provider) if 'Compute' in host.name]
    assert compute_hosts
    for host in compute_hosts:
        details_view = navigate_to(host, 'Details')
        host_ip = details_view.entities.summary('Properties').get_text_of('IP Address')
        ui_vm_count = int(details_view.entities.summary('Relationships').get_text_of('VMs'))
        soft_assert(ui_vm_count == running_by_ip[host_ip],
                    'Number of instances on UI does not match with real value')
| gpl-2.0 |
NelisVerhoef/scikit-learn | examples/linear_model/plot_sgd_separating_hyperplane.py | 260 | 1219 | """
=========================================
SGD: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a linear Support Vector Machines classifier
trained using SGD.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.datasets.samples_generator import make_blobs
# we create 50 separable points
X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
# Evaluate the decision function for the whole grid in one vectorized call.
# The previous per-point loop passed a 1-D array ([x1, x2]) to
# decision_function, which treats a single sample as 1-D input — an API
# usage that is deprecated/removed in modern scikit-learn and O(grid) slow.
Z = clf.decision_function(np.c_[X1.ravel(), X2.ravel()]).reshape(X1.shape)
# Contours at the margin (-1, +1) and the decision boundary (0).
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
plt.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
dkindel/ece5574team7RESTAPI | lib/werkzeug/security.py | 255 | 8971 | # -*- coding: utf-8 -*-
"""
werkzeug.security
~~~~~~~~~~~~~~~~~
Security related helpers such as secure password hashing tools.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import os
import hmac
import hashlib
import posixpath
import codecs
from struct import Struct
from random import SystemRandom
from operator import xor
from itertools import starmap
from werkzeug._compat import range_type, PY2, text_type, izip, to_bytes, \
string_types, to_native
SALT_CHARS = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
DEFAULT_PBKDF2_ITERATIONS = 1000
_pack_int = Struct('>I').pack
_builtin_safe_str_cmp = getattr(hmac, 'compare_digest', None)
_sys_rng = SystemRandom()
_os_alt_seps = list(sep for sep in [os.path.sep, os.path.altsep]
if sep not in (None, '/'))
def _find_hashlib_algorithms():
algos = getattr(hashlib, 'algorithms', None)
if algos is None:
algos = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
rv = {}
for algo in algos:
func = getattr(hashlib, algo, None)
if func is not None:
rv[algo] = func
return rv
_hash_funcs = _find_hashlib_algorithms()
def pbkdf2_hex(data, salt, iterations=DEFAULT_PBKDF2_ITERATIONS,
               keylen=None, hashfunc=None):
    """Like :func:`pbkdf2_bin`, but returns a hex-encoded string.
    .. versionadded:: 0.9
    :param data: the data to derive.
    :param salt: the salt for the derivation.
    :param iterations: the number of iterations.
    :param keylen: the length of the resulting key.  If not provided,
                   the digest size will be used.
    :param hashfunc: the hash function to use.  This can either be the
                     string name of a known hash function, or a function
                     from the hashlib module.  Defaults to sha1.
    """
    raw_key = pbkdf2_bin(data, salt, iterations, keylen, hashfunc)
    # hex-encode via codecs so the same code path works on Py2 and Py3.
    return to_native(codecs.encode(raw_key, 'hex_codec'))
_has_native_pbkdf2 = hasattr(hashlib, 'pbkdf2_hmac')
def pbkdf2_bin(data, salt, iterations=DEFAULT_PBKDF2_ITERATIONS,
               keylen=None, hashfunc=None):
    """Returns a binary digest for the PBKDF2 hash algorithm of `data`
    with the given `salt`. It iterates `iterations` times and produces a
    key of `keylen` bytes. By default, SHA-1 is used as hash function;
    a different hashlib `hashfunc` can be provided.
    .. versionadded:: 0.9
    :param data: the data to derive.
    :param salt: the salt for the derivation.
    :param iterations: the number of iterations.
    :param keylen: the length of the resulting key. If not provided
                   the digest size will be used.
    :param hashfunc: the hash function to use. This can either be the
                     string name of a known hash function or a function
                     from the hashlib module. Defaults to sha1.
    """
    # Resolve hashfunc: a string names an entry in _hash_funcs; any other
    # falsy value selects the SHA-1 default.
    if isinstance(hashfunc, string_types):
        hashfunc = _hash_funcs[hashfunc]
    elif not hashfunc:
        hashfunc = hashlib.sha1
    data = to_bytes(data)
    salt = to_bytes(salt)
    # If we're on Python with pbkdf2_hmac we can try to use it for
    # compatible digests.
    if _has_native_pbkdf2:
        # hashlib.pbkdf2_hmac needs the digest *name*; only take the fast
        # path when the callable exposes one that we know about.
        _test_hash = hashfunc()
        if hasattr(_test_hash, 'name') and \
           _test_hash.name in _hash_funcs:
            return hashlib.pbkdf2_hmac(_test_hash.name,
                                       data, salt, iterations,
                                       keylen)
    # Pure-Python fallback implementing RFC 2898 section 5.2.
    mac = hmac.HMAC(data, None, hashfunc)
    if not keylen:
        keylen = mac.digest_size
    def _pseudorandom(x, mac=mac):
        # PRF: HMAC keyed with the password; copy so the base MAC is reusable.
        h = mac.copy()
        h.update(x)
        return bytearray(h.digest())
    buf = bytearray()
    # -(-a // b) is ceiling division: number of digest-sized blocks needed.
    for block in range_type(1, -(-keylen // mac.digest_size) + 1):
        # U1 = PRF(salt || INT(block)); Ui = PRF(U(i-1)); block = U1 ^ ... ^ Uc
        rv = u = _pseudorandom(salt + _pack_int(block))
        for i in range_type(iterations - 1):
            u = _pseudorandom(bytes(u))
            rv = bytearray(starmap(xor, izip(rv, u)))
        buf.extend(rv)
    # Truncate the concatenated blocks to the requested key length.
    return bytes(buf[:keylen])
def safe_str_cmp(a, b):
    """This function compares strings in somewhat constant time. This
    requires that the length of at least one string is known in advance.
    Returns `True` if the two strings are equal, or `False` if they are not.
    .. versionadded:: 0.7
    """
    # Normalize both operands to bytes so the per-byte XOR below is valid.
    if isinstance(a, text_type):
        a = a.encode('utf-8')
    if isinstance(b, text_type):
        b = b.encode('utf-8')
    # Prefer the C implementation (hmac.compare_digest) when available.
    if _builtin_safe_str_cmp is not None:
        return _builtin_safe_str_cmp(a, b)
    # Fallback: length check leaks only the length, then XOR-accumulate
    # every byte pair so the loop runs in time independent of where the
    # first mismatch occurs.
    if len(a) != len(b):
        return False
    rv = 0
    if PY2:
        for x, y in izip(a, b):
            rv |= ord(x) ^ ord(y)
    else:
        for x, y in izip(a, b):
            rv |= x ^ y
    return rv == 0
def gen_salt(length):
    """Generate a random string of SALT_CHARS with specified ``length``."""
    if length <= 0:
        raise ValueError('Salt length must be positive')
    # _sys_rng is a SystemRandom instance, i.e. OS-level randomness.
    picks = (_sys_rng.choice(SALT_CHARS) for _ in range_type(length))
    return ''.join(picks)
def _hash_internal(method, salt, password):
    """Internal password hash helper.  Supports plaintext without salt,
    unsalted and salted passwords.  In case salted passwords are used
    hmac is used.

    Returns a ``(hexdigest, actual_method)`` tuple; ``actual_method``
    includes the iteration count for PBKDF2 methods.
    """
    # 'plain' stores the password verbatim (legacy support only).
    if method == 'plain':
        return password, method
    if isinstance(password, text_type):
        password = password.encode('utf-8')
    # Method strings of the form 'pbkdf2:<digest>[:<iterations>]'.
    if method.startswith('pbkdf2:'):
        args = method[7:].split(':')
        if len(args) not in (1, 2):
            raise ValueError('Invalid number of arguments for PBKDF2')
        method = args.pop(0)
        # Missing/zero iteration count falls back to the module default.
        iterations = args and int(args[0] or 0) or DEFAULT_PBKDF2_ITERATIONS
        is_pbkdf2 = True
        actual_method = 'pbkdf2:%s:%d' % (method, iterations)
    else:
        is_pbkdf2 = False
        actual_method = method
    hash_func = _hash_funcs.get(method)
    if hash_func is None:
        raise TypeError('invalid method %r' % method)
    if is_pbkdf2:
        if not salt:
            raise ValueError('Salt is required for PBKDF2')
        rv = pbkdf2_hex(password, salt, iterations,
                        hashfunc=hash_func)
    elif salt:
        # Salted non-PBKDF2 hash: HMAC keyed with the salt.
        if isinstance(salt, text_type):
            salt = salt.encode('utf-8')
        rv = hmac.HMAC(salt, password, hash_func).hexdigest()
    else:
        # Unsalted legacy hash (plain digest of the password).
        h = hash_func()
        h.update(password)
        rv = h.hexdigest()
    return rv, actual_method
def generate_password_hash(password, method='pbkdf2:sha1', salt_length=8):
    """Hash a password with the given method and salt with with a string of
    the given length.  The format of the string returned includes the method
    that was used so that :func:`check_password_hash` can check the hash.
    The format for the hashed string looks like this::
        method$salt$hash
    This method can **not** generate unsalted passwords but it is possible
    to set the method to plain to enforce plaintext passwords.  If a salt
    is used, hmac is used internally to salt the password.
    If PBKDF2 is wanted it can be enabled by setting the method to
    ``pbkdf2:method:iterations`` where iterations is optional::
        pbkdf2:sha1:2000$salt$hash
        pbkdf2:sha1$salt$hash
    :param password: the password to hash.
    :param method: the hash method to use (one that hashlib supports). Can
                   optionally be in the format ``pbkdf2:<method>[:iterations]``
                   to enable PBKDF2.
    :param salt_length: the length of the salt in letters.
    """
    # Use a real conditional expression instead of the fragile
    # ``cond and value or default`` idiom (which breaks for falsy values).
    salt = gen_salt(salt_length) if method != 'plain' else ''
    h, actual_method = _hash_internal(method, salt, password)
    return '%s$%s$%s' % (actual_method, salt, h)
def check_password_hash(pwhash, password):
    """check a password against a given salted and hashed password value.
    In order to support unsalted legacy passwords this method supports
    plain text passwords, md5 and sha1 hashes (both salted and unsalted).
    Returns `True` if the password matched, `False` otherwise.
    :param pwhash: a hashed string like returned by
                   :func:`generate_password_hash`.
    :param password: the plaintext password to compare against the hash.
    """
    # A valid hash has at least two '$' separators: method$salt$hash.
    if pwhash.count('$') < 2:
        return False
    method, salt, hashval = pwhash.split('$', 2)
    candidate = _hash_internal(method, salt, password)[0]
    # Constant-time comparison to avoid leaking the match position.
    return safe_str_cmp(candidate, hashval)
def safe_join(directory, filename):
    """Safely join `directory` and `filename`.  If this cannot be done,
    this function returns ``None``.

    A result of ``None`` means the untrusted ``filename`` would have
    escaped ``directory`` (path traversal, absolute path, or an
    alternative path separator).

    :param directory: the base directory.
    :param filename: the untrusted filename relative to that directory.
    """
    filename = posixpath.normpath(filename)
    # Reject platform-specific alternative separators (e.g. '\\' on
    # Windows) that posixpath.normpath does not collapse.
    alt_seps = [sep for sep in (os.path.sep, os.path.altsep)
                if sep not in (None, '/')]
    for sep in alt_seps:
        if sep in filename:
            return None
    # Bug fix: a bare '..' (with no trailing '/...') also escapes the base
    # directory but was previously accepted by the startswith check.
    if os.path.isabs(filename) or filename == '..' or \
       filename.startswith('../'):
        return None
    return os.path.join(directory, filename)
| apache-2.0 |
jayme-github/CouchPotatoServer | libs/pyutil/test/out_of_shape/test_strutil.py | 106 | 1713 | #!/usr/bin/env python
# Copyright (c) 2004-2009 Zooko "Zooko" Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
import unittest
from pyutil.assertutil import _assert
from pyutil import strutil
class Teststrutil(unittest.TestCase):
    """Unit tests for the pyutil.strutil helper functions."""
    def test_short_input(self):
        # assertEqual replaces the deprecated failUnless(a == b) idiom and
        # reports both values on failure instead of just "False is not true".
        self.assertEqual(strutil.pop_trailing_newlines("\r\n"), "")
        self.assertEqual(strutil.pop_trailing_newlines("\r"), "")
        self.assertEqual(strutil.pop_trailing_newlines("x\r\n"), "x")
        self.assertEqual(strutil.pop_trailing_newlines("x\r"), "x")
    def test_split(self):
        # assertEqual also removes the duplicated expression that the old
        # _assert(cond, value) calls needed for a useful failure message.
        self.assertEqual(strutil.split_on_newlines("x\r\ny"), ["x", "y", ])
        self.assertEqual(strutil.split_on_newlines("x\r\ny\r\n"), ["x", "y", '', ])
        self.assertEqual(strutil.split_on_newlines("x\n\ny\n\n"), ["x", '', "y", '', '', ])
    def test_commonprefix(self):
        self.assertEqual(strutil.commonprefix(["foo", "foobarooo", "foosplat", ]), 'foo')
        self.assertEqual(strutil.commonprefix(["foo", "afoobarooo", "foosplat", ]), '')
    def test_commonsuffix(self):
        self.assertEqual(strutil.commonsuffix(["foo", "foobarooo", "foosplat", ]), '')
        self.assertEqual(strutil.commonsuffix(["foo", "foobarooo", "foosplato", ]), 'o')
        self.assertEqual(strutil.commonsuffix(["foo", "foobarooofoo", "foosplatofoo", ]), 'foo')
| gpl-3.0 |
CEG-FYP-OpenStack/scheduler | nova/api/openstack/compute/legacy_v2/consoles.py | 79 | 3328 | # Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from webob import exc
from nova.api.openstack import wsgi
from nova.console import api as console_api
from nova import exception
def _translate_keys(cons):
"""Coerces a console instance into proper dictionary format."""
pool = cons['pool']
info = {'id': cons['id'],
'console_type': pool['console_type']}
return dict(console=info)
def _translate_detail_keys(cons):
"""Coerces a console instance into proper dictionary format with
correctly mapped attributes.
"""
pool = cons['pool']
info = {'id': cons['id'],
'console_type': pool['console_type'],
'password': cons['password'],
'instance_name': cons['instance_name'],
'port': cons['port'],
'host': pool['public_hostname']}
return dict(console=info)
class Controller(object):
    """The Consoles controller for the OpenStack API.

    Each handler pulls the request context from ``req.environ`` and
    delegates to the nova console API, translating "not found" errors
    into HTTP 404 responses.
    """
    def __init__(self):
        self.console_api = console_api.API()
    def index(self, req, server_id):
        """Returns a list of consoles for this instance."""
        consoles = self.console_api.get_consoles(
            req.environ['nova.context'],
            server_id)
        return dict(consoles=[_translate_keys(console)
                              for console in consoles])
    def create(self, req, server_id, body):
        """Creates a new console."""
        # body is accepted for API signature compatibility but unused here.
        try:
            self.console_api.create_console(
                req.environ['nova.context'],
                server_id)
        except exception.InstanceNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
    def show(self, req, server_id, id):
        """Shows in-depth information on a specific console."""
        try:
            console = self.console_api.get_console(
                req.environ['nova.context'],
                server_id,
                int(id))
        except exception.NotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        return _translate_detail_keys(console)
    def delete(self, req, server_id, id):
        """Deletes a console."""
        try:
            self.console_api.delete_console(req.environ['nova.context'],
                                            server_id,
                                            int(id))
        except exception.NotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        # 202 Accepted: the deletion request has been submitted.
        return webob.Response(status_int=202)
def create_resource():
    """Build the WSGI resource wrapping the consoles :class:`Controller`."""
    return wsgi.Resource(Controller())
| apache-2.0 |
phil65/script.skin.info.service | default.py | 1 | 11502 | import sys
import xbmc
import xbmcgui
import xbmcaddon
from Utils import *
ADDON = xbmcaddon.Addon()
ADDON_VERSION = ADDON.getAddonInfo('version')
WND = xbmcgui.Window(12003) # Video info dialog
HOME = xbmcgui.Window(10000) # Home Window
class Daemon:
    """Background service for the skin.

    Polls the currently focused ListItem twice a second, looks up extra
    library details via JSON-RPC and publishes them as window properties
    so the skin can display them.  Runs until the home-window property
    ``skininfos_daemon_running`` is cleared or Kodi aborts.
    """
    def __init__(self):
        log("version %s started" % ADDON_VERSION)
        self._init_vars()
        self.run_backend()
    def _init_vars(self):
        # Legacy state fields; kept for compatibility.
        self.id = None
        self.type = False
        self.Artist_mbid = None
    def run_backend(self):
        """Main polling loop: dispatch on the current container content."""
        self._stop = False
        self.previousitem = ""
        log("starting backend")
        while (not self._stop) and (not xbmc.abortRequested):
            if xbmc.getCondVisibility("Container.Content(movies) | Container.Content(sets) | Container.Content(artists) | Container.Content(albums) | Container.Content(episodes) | Container.Content(musicvideos)"):
                self.selecteditem = xbmc.getInfoLabel("ListItem.DBID")
                if (self.selecteditem != self.previousitem):
                    self.previousitem = self.selecteditem
                    # Bug fix: compare with "!=" instead of "is not" --
                    # identity comparison with a string literal is undefined.
                    # NOTE(review): "> -1" compares str to int, which is only
                    # valid (and always True) on Python 2 -- confirm intent.
                    if (self.selecteditem != "") and (self.selecteditem > -1):
                        if xbmc.getCondVisibility("Container.Content(artists)"):
                            self._set_artist_details(self.selecteditem)
                        elif xbmc.getCondVisibility("Container.Content(albums)"):
                            self._set_album_details(self.selecteditem)
                        elif xbmc.getCondVisibility("SubString(ListItem.Path,videodb://movies/sets/,left)"):
                            self._set_movieset_details(self.selecteditem)
                        elif xbmc.getCondVisibility("Container.Content(movies)"):
                            self._set_movie_details(self.selecteditem)
                        elif xbmc.getCondVisibility("Container.Content(episodes)"):
                            self._set_episode_details(self.selecteditem)
                        elif xbmc.getCondVisibility("Container.Content(musicvideos)"):
                            self._set_musicvideo_details(self.selecteditem)
                        else:
                            clear_properties()
                    else:
                        clear_properties()
            elif xbmc.getCondVisibility("Container.Content(seasons) + !Window.IsActive(movieinformation)"):
                HOME.setProperty("SeasonPoster", xbmc.getInfoLabel("ListItem.Icon"))
                HOME.setProperty("SeasonID", xbmc.getInfoLabel("ListItem.DBID"))
                HOME.setProperty("SeasonNumber", xbmc.getInfoLabel("ListItem.Season"))
            elif xbmc.getCondVisibility("Window.IsActive(videos) + [Container.Content(directors) | Container.Content(actors) | Container.Content(genres) | Container.Content(years) | Container.Content(studios) | Container.Content(countries) | Container.Content(tags)]"):
                self.selecteditem = xbmc.getInfoLabel("ListItem.Label")
                if (self.selecteditem != self.previousitem):
                    clear_properties()
                    self.previousitem = self.selecteditem
                    if (self.selecteditem != "") and (self.selecteditem != ".."):
                        self.setMovieDetailsforCategory()
            elif xbmc.getCondVisibility("Container.Content(years) | Container.Content(genres)"):
                self.selecteditem = xbmc.getInfoLabel("ListItem.Label")
                if (self.selecteditem != self.previousitem):
                    clear_properties()
                    self.previousitem = self.selecteditem
                    if (self.selecteditem != "") and (self.selecteditem != ".."):
                        self.setMusicDetailsforCategory()
            elif xbmc.getCondVisibility('Window.IsActive(screensaver)'):
                xbmc.sleep(1000)
            else:
                self.previousitem = ""
                self.selecteditem = ""
                clear_properties()
            xbmc.sleep(500)
            if xbmc.getCondVisibility("IsEmpty(Window(home).Property(skininfos_daemon_running))"):
                clear_properties()
                self._stop = True
            xbmc.sleep(100)
    def _set_song_details(self, dbid):  # unused, needs fixing
        # Bug fix: the original passed the undefined name ``json_query`` to
        # set_movie_properties, which raised NameError whenever this ran.
        # NOTE(review): executeJSONRPC returns a raw JSON *string*, so the
        # membership checks below operate on text, not a parsed dict --
        # this helper still needs a proper Get_JSON_response conversion.
        json_response = xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "VideoLibrary.GetMusicVideos", "params": {"properties": ["artist", "file"], "sort": { "method": "artist" } }, "id": 1}')
        clear_properties()
        if ("result" in json_response) and ('musicvideos' in json_response['result']):
            set_movie_properties(json_response)
    def _set_artist_details(self, dbid):
        """Publish the albums of the focused artist as window properties."""
        json_response = Get_JSON_response('{"jsonrpc": "2.0", "method": "AudioLibrary.GetAlbums", "params": {"properties": ["title", "year", "albumlabel", "playcount", "thumbnail"], "sort": { "method": "label" }, "filter": {"artistid": %s} }, "id": 1}' % dbid)
        clear_properties()
        if ("result" in json_response) and ('albums' in json_response['result']):
            set_artist_properties(json_response)
    def _set_movie_details(self, dbid):
        """Publish stream/cast details of the focused movie."""
        json_response = Get_JSON_response('{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovieDetails", "params": {"properties": ["streamdetails","set","setid","cast"], "movieid":%s }, "id": 1}' % dbid)
        clear_properties()
        if ("result" in json_response) and ('moviedetails' in json_response['result']):
            self._set_properties(json_response['result']['moviedetails'])
    def _set_episode_details(self, dbid):
        """Publish stream details of the focused episode plus its season poster."""
        json_response = Get_JSON_response('{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodeDetails", "params": {"properties": ["streamdetails","tvshowid","season"], "episodeid":%s }, "id": 1}' % dbid)
        clear_properties()
        if ('result' in json_response) and ('episodedetails' in json_response['result']):
            self._set_properties(json_response['result']['episodedetails'])
            seasonnumber = json_response['result']['episodedetails']['season']
            tvshowid = json_response['result']['episodedetails']['tvshowid']
            json_response = Get_JSON_response('{"jsonrpc": "2.0", "method": "VideoLibrary.GetSeasons", "params": {"properties": ["thumbnail"], "tvshowid":%s }, "id": 1}' % tvshowid)
            # Match the season by the trailing number of its label
            # (e.g. "Season 2" -> 2).
            for season in json_response["result"]["seasons"]:
                if season["label"].split(" ")[-1] == str(seasonnumber):
                    HOME.setProperty('SeasonPoster', season["thumbnail"])
    def _set_musicvideo_details(self, dbid):
        """Publish stream details of the focused music video."""
        json_response = Get_JSON_response('{"jsonrpc": "2.0", "method": "VideoLibrary.GetMusicVideoDetails", "params": {"properties": ["streamdetails"], "musicvideoid":%s }, "id": 1}' % dbid)
        clear_properties()
        if ("result" in json_response) and ('musicvideodetails' in json_response['result']):
            self._set_properties(json_response['result']['musicvideodetails'])
    def _set_album_details(self, dbid):
        """Publish the track list of the focused album."""
        json_response = Get_JSON_response('{"jsonrpc": "2.0", "method": "AudioLibrary.GetSongs", "params": {"properties": ["title", "track", "duration", "file", "lastplayed", "disc"], "sort": { "method": "label" }, "filter": {"albumid": %s} }, "id": 1}' % dbid)
        clear_properties()
        if ("result" in json_response) and ('songs' in json_response['result']):
            set_album_properties(json_response)
    def _set_movieset_details(self, dbid):
        """Publish details of every movie in the focused movie set."""
        json_response = Get_JSON_response('{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovieSetDetails", "params": {"setid": %s, "properties": [ "thumbnail" ], "movies": { "properties": [ "rating", "art", "file", "year", "director", "writer","genre" , "thumbnail", "runtime", "studio", "plotoutline", "plot", "country", "streamdetails"], "sort": { "order": "ascending", "method": "year" }} },"id": 1 }' % dbid)
        clear_properties()
        if ("result" in json_response) and ('setdetails' in json_response['result']):
            set_movie_properties(json_response)
    def setMovieDetailsforCategory(self):
        """Publish art for up to 19 movies of the focused category node."""
        if xbmc.getInfoLabel("ListItem.Label") != "..":
            count = 1
            path = xbmc.getInfoLabel("ListItem.FolderPath")
            json_response = Get_JSON_response('{"jsonrpc": "2.0", "method": "Files.GetDirectory", "params": {"directory": "%s", "media": "video", "properties": ["art"]}, "id": 1}' % (path))
            if ("result" in json_response) and ("files" in json_response["result"]):
                for movie in json_response["result"]["files"]:
                    HOME.setProperty('Detail.Movie.%i.Path' % (count), movie["file"])
                    HOME.setProperty('Detail.Movie.%i.Art(fanart)' % (count), movie["art"].get('fanart', ''))
                    HOME.setProperty('Detail.Movie.%i.Art(poster)' % (count), movie["art"].get('poster', ''))
                    count += 1
                    if count > 19:
                        break
    def setMusicDetailsforCategory(self):
        """Publish art for up to 19 artists of the focused category node."""
        if xbmc.getInfoLabel("ListItem.Label") != "..":
            count = 1
            path = xbmc.getInfoLabel("ListItem.FolderPath")
            json_response = Get_JSON_response('{"jsonrpc": "2.0", "method": "Files.GetDirectory", "params": {"directory": "%s", "media": "music", "properties": ["fanart", "thumbnail"]}, "id": 1}' % (path))
            if ("result" in json_response) and ("files" in json_response["result"]):
                for artist in json_response["result"]["files"]:
                    if "id" in artist:
                        HOME.setProperty('Detail.Music.%i.DBID' % (count), str(artist["id"]))
                    HOME.setProperty('Detail.Music.%i.Art(fanart)' % (count), artist["fanart"])
                    HOME.setProperty('Detail.Music.%i.Art(thumb)' % (count), artist["thumbnail"])
                    count += 1
                    if count > 19:
                        break
    def _set_properties(self, results):
        """Publish audio/subtitle stream details on the video info window."""
        # Set language properties
        count = 1
        audio = results['streamdetails']['audio']
        subtitles = results['streamdetails']['subtitle']
        subs = []
        streams = []
        # Clear properties before setting new ones
        clear_properties()
        for item in audio:
            if str(item['language']) not in streams:
                streams.append(str(item['language']))
            WND.setProperty('AudioLanguage.%d' % count, item['language'])
            WND.setProperty('AudioCodec.%d' % count, item['codec'])
            WND.setProperty('AudioChannels.%d' % count, str(item['channels']))
            count += 1
        count = 1
        for item in subtitles:
            # Bug fix: dedup against the collected language list ``subs``.
            # The original tested membership in ``subtitles`` (a list of
            # dicts), which was always False, so duplicates accumulated.
            if str(item['language']) not in subs:
                subs.append(str(item['language']))
            WND.setProperty('SubtitleLanguage.%d' % count, item['language'])
            count += 1
        WND.setProperty('SubtitleLanguage', " / ".join(subs))
        WND.setProperty('AudioLanguage', " / ".join(streams))
        WND.setProperty('SubtitleLanguage.Count', str(len(subs)))
        WND.setProperty('AudioLanguage.Count', str(len(streams)))
try:
    # sys.argv[1] is expected to look like "key1=val1&key2=val2".
    params = dict(arg.split("=") for arg in sys.argv[1].split("&"))
except (IndexError, ValueError):
    # Bug fix: catch only the expected failures (missing argv[1] or a
    # malformed pair) instead of a bare ``except:`` that also swallowed
    # SystemExit/KeyboardInterrupt.
    params = {}
if xbmc.getCondVisibility("IsEmpty(Window(home).Property(skininfos_daemon_running))"):
    # Mark the daemon as running so a second invocation does not start twice.
    xbmc.executebuiltin('SetProperty(skininfos_daemon_running,True,home)')
    log("starting daemon")
    Daemon()
else:
    log("Daemon already active")
log('finished')
| gpl-2.0 |
Jasoning/namebench | nb_third_party/dns/tokenizer.py | 246 | 17962 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Tokenize DNS master file format"""
import cStringIO
import sys
import dns.exception
import dns.name
import dns.ttl
_DELIMITERS = {
' ' : True,
'\t' : True,
'\n' : True,
';' : True,
'(' : True,
')' : True,
'"' : True }
_QUOTING_DELIMITERS = { '"' : True }
EOF = 0
EOL = 1
WHITESPACE = 2
IDENTIFIER = 3
QUOTED_STRING = 4
COMMENT = 5
DELIMITER = 6
class UngetBufferFull(dns.exception.DNSException):
    """Raised when an attempt is made to unget a token or a character
    when the one-entry unget buffer is already full."""
    pass
class Token(object):
    """A DNS master file format token.
    @ivar ttype: The token type
    @type ttype: int
    @ivar value: The token value
    @type value: string
    @ivar has_escape: Does the token value contain escapes?
    @type has_escape: bool
    """
    def __init__(self, ttype, value='', has_escape=False):
        """Initialize a token instance.
        @param ttype: The token type
        @type ttype: int
        @ivar value: The token value
        @type value: string
        @ivar has_escape: Does the token value contain escapes?
        @type has_escape: bool
        """
        self.ttype = ttype
        self.value = value
        self.has_escape = has_escape
    def is_eof(self):
        return self.ttype == EOF
    def is_eol(self):
        return self.ttype == EOL
    def is_whitespace(self):
        return self.ttype == WHITESPACE
    def is_identifier(self):
        return self.ttype == IDENTIFIER
    def is_quoted_string(self):
        return self.ttype == QUOTED_STRING
    def is_comment(self):
        return self.ttype == COMMENT
    def is_delimiter(self):
        return self.ttype == DELIMITER
    def is_eol_or_eof(self):
        return (self.ttype == EOL or self.ttype == EOF)
    def __eq__(self, other):
        # Equality compares type and value only; has_escape is ignored.
        if not isinstance(other, Token):
            return False
        return (self.ttype == other.ttype and
                self.value == other.value)
    def __ne__(self, other):
        if not isinstance(other, Token):
            return True
        return (self.ttype != other.ttype or
                self.value != other.value)
    def __str__(self):
        return '%d "%s"' % (self.ttype, self.value)
    def unescape(self):
        """Return a new Token with backslash escapes resolved.

        Supports ``\\c`` (literal character) and ``\\DDD`` (exactly three
        decimal digits, converted to a single character).
        """
        if not self.has_escape:
            return self
        unescaped = ''
        l = len(self.value)
        i = 0
        while i < l:
            c = self.value[i]
            i += 1
            if c == '\\':
                # A trailing lone backslash is malformed input.
                if i >= l:
                    raise dns.exception.UnexpectedEnd
                c = self.value[i]
                i += 1
                if c.isdigit():
                    # \DDD form: require two more digits after the first.
                    if i >= l:
                        raise dns.exception.UnexpectedEnd
                    c2 = self.value[i]
                    i += 1
                    if i >= l:
                        raise dns.exception.UnexpectedEnd
                    c3 = self.value[i]
                    i += 1
                    if not (c2.isdigit() and c3.isdigit()):
                        raise dns.exception.SyntaxError
                    c = chr(int(c) * 100 + int(c2) * 10 + int(c3))
            unescaped += c
        return Token(self.ttype, unescaped)
    # compatibility for old-style tuple tokens: a Token behaves like the
    # 2-tuple (ttype, value) for len(), iteration and indexing.
    def __len__(self):
        return 2
    def __iter__(self):
        return iter((self.ttype, self.value))
    def __getitem__(self, i):
        if i == 0:
            return self.ttype
        elif i == 1:
            return self.value
        else:
            raise IndexError
class Tokenizer(object):
"""A DNS master file format tokenizer.
A token is a (type, value) tuple, where I{type} is an int, and
I{value} is a string. The valid types are EOF, EOL, WHITESPACE,
IDENTIFIER, QUOTED_STRING, COMMENT, and DELIMITER.
@ivar file: The file to tokenize
@type file: file
@ivar ungotten_char: The most recently ungotten character, or None.
@type ungotten_char: string
@ivar ungotten_token: The most recently ungotten token, or None.
@type ungotten_token: (int, string) token tuple
@ivar multiline: The current multiline level. This value is increased
by one every time a '(' delimiter is read, and decreased by one every time
a ')' delimiter is read.
@type multiline: int
@ivar quoting: This variable is true if the tokenizer is currently
reading a quoted string.
@type quoting: bool
@ivar eof: This variable is true if the tokenizer has encountered EOF.
@type eof: bool
@ivar delimiters: The current delimiter dictionary.
@type delimiters: dict
@ivar line_number: The current line number
@type line_number: int
@ivar filename: A filename that will be returned by the L{where} method.
@type filename: string
"""
def __init__(self, f=sys.stdin, filename=None):
"""Initialize a tokenizer instance.
@param f: The file to tokenize. The default is sys.stdin.
This parameter may also be a string, in which case the tokenizer
will take its input from the contents of the string.
@type f: file or string
@param filename: the name of the filename that the L{where} method
will return.
@type filename: string
"""
if isinstance(f, str):
f = cStringIO.StringIO(f)
if filename is None:
filename = '<string>'
else:
if filename is None:
if f is sys.stdin:
filename = '<stdin>'
else:
filename = '<file>'
self.file = f
self.ungotten_char = None
self.ungotten_token = None
self.multiline = 0
self.quoting = False
self.eof = False
self.delimiters = _DELIMITERS
self.line_number = 1
self.filename = filename
def _get_char(self):
"""Read a character from input.
@rtype: string
"""
if self.ungotten_char is None:
if self.eof:
c = ''
else:
c = self.file.read(1)
if c == '':
self.eof = True
elif c == '\n':
self.line_number += 1
else:
c = self.ungotten_char
self.ungotten_char = None
return c
    def where(self):
        """Return the current location in the input.
        @rtype: (string, int) tuple. The first item is the filename of
        the input, the second is the current line number.
        """
        # NOTE(review): _get_char bumps line_number when it reads '\n' but
        # _unget_char never decrements it, so with an ungotten newline the
        # reported line may be one ahead -- confirm callers tolerate this.
        return (self.filename, self.line_number)
def _unget_char(self, c):
"""Unget a character.
The unget buffer for characters is only one character large; it is
an error to try to unget a character when the unget buffer is not
empty.
@param c: the character to unget
@type c: string
@raises UngetBufferFull: there is already an ungotten char
"""
if not self.ungotten_char is None:
raise UngetBufferFull
self.ungotten_char = c
def skip_whitespace(self):
"""Consume input until a non-whitespace character is encountered.
The non-whitespace character is then ungotten, and the number of
whitespace characters consumed is returned.
If the tokenizer is in multiline mode, then newlines are whitespace.
@rtype: int
"""
skipped = 0
while True:
c = self._get_char()
if c != ' ' and c != '\t':
if (c != '\n') or not self.multiline:
self._unget_char(c)
return skipped
skipped += 1
def get(self, want_leading = False, want_comment = False):
"""Get the next token.
@param want_leading: If True, return a WHITESPACE token if the
first character read is whitespace. The default is False.
@type want_leading: bool
@param want_comment: If True, return a COMMENT token if the
first token read is a comment. The default is False.
@type want_comment: bool
@rtype: Token object
@raises dns.exception.UnexpectedEnd: input ended prematurely
@raises dns.exception.SyntaxError: input was badly formed
"""
if not self.ungotten_token is None:
token = self.ungotten_token
self.ungotten_token = None
if token.is_whitespace():
if want_leading:
return token
elif token.is_comment():
if want_comment:
return token
else:
return token
skipped = self.skip_whitespace()
if want_leading and skipped > 0:
return Token(WHITESPACE, ' ')
token = ''
ttype = IDENTIFIER
has_escape = False
while True:
c = self._get_char()
if c == '' or c in self.delimiters:
if c == '' and self.quoting:
raise dns.exception.UnexpectedEnd
if token == '' and ttype != QUOTED_STRING:
if c == '(':
self.multiline += 1
self.skip_whitespace()
continue
elif c == ')':
if not self.multiline > 0:
raise dns.exception.SyntaxError
self.multiline -= 1
self.skip_whitespace()
continue
elif c == '"':
if not self.quoting:
self.quoting = True
self.delimiters = _QUOTING_DELIMITERS
ttype = QUOTED_STRING
continue
else:
self.quoting = False
self.delimiters = _DELIMITERS
self.skip_whitespace()
continue
elif c == '\n':
return Token(EOL, '\n')
elif c == ';':
while 1:
c = self._get_char()
if c == '\n' or c == '':
break
token += c
if want_comment:
self._unget_char(c)
return Token(COMMENT, token)
elif c == '':
if self.multiline:
raise dns.exception.SyntaxError('unbalanced parentheses')
return Token(EOF)
elif self.multiline:
self.skip_whitespace()
token = ''
continue
else:
return Token(EOL, '\n')
else:
# This code exists in case we ever want a
# delimiter to be returned. It never produces
# a token currently.
token = c
ttype = DELIMITER
else:
self._unget_char(c)
break
elif self.quoting:
if c == '\\':
c = self._get_char()
if c == '':
raise dns.exception.UnexpectedEnd
if c.isdigit():
c2 = self._get_char()
if c2 == '':
raise dns.exception.UnexpectedEnd
c3 = self._get_char()
if c == '':
raise dns.exception.UnexpectedEnd
if not (c2.isdigit() and c3.isdigit()):
raise dns.exception.SyntaxError
c = chr(int(c) * 100 + int(c2) * 10 + int(c3))
elif c == '\n':
raise dns.exception.SyntaxError('newline in quoted string')
elif c == '\\':
#
# It's an escape. Put it and the next character into
# the token; it will be checked later for goodness.
#
token += c
has_escape = True
c = self._get_char()
if c == '' or c == '\n':
raise dns.exception.UnexpectedEnd
token += c
if token == '' and ttype != QUOTED_STRING:
if self.multiline:
raise dns.exception.SyntaxError('unbalanced parentheses')
ttype = EOF
return Token(ttype, token, has_escape)
def unget(self, token):
"""Unget a token.
The unget buffer for tokens is only one token large; it is
an error to try to unget a token when the unget buffer is not
empty.
@param token: the token to unget
@type token: Token object
@raises UngetBufferFull: there is already an ungotten token
"""
if not self.ungotten_token is None:
raise UngetBufferFull
self.ungotten_token = token
    def next(self):
        """Return the next item in an iteration.

        Delegates to get() and ends iteration when the EOF token is seen.
        NOTE(review): this is the Python 2 iterator protocol; under
        Python 3 an __next__ alias would be needed for iteration.
        @rtype: Token object
        @raises StopIteration: the input has been exhausted
        """
        token = self.get()
        if token.is_eof():
            raise StopIteration
        return token
    def __iter__(self):
        # The tokenizer is its own iterator; tokens come from next().
        return self
# Helpers
def get_int(self):
"""Read the next token and interpret it as an integer.
@raises dns.exception.SyntaxError:
@rtype: int
"""
token = self.get().unescape()
if not token.is_identifier():
raise dns.exception.SyntaxError('expecting an identifier')
if not token.value.isdigit():
raise dns.exception.SyntaxError('expecting an integer')
return int(token.value)
def get_uint8(self):
"""Read the next token and interpret it as an 8-bit unsigned
integer.
@raises dns.exception.SyntaxError:
@rtype: int
"""
value = self.get_int()
if value < 0 or value > 255:
raise dns.exception.SyntaxError('%d is not an unsigned 8-bit integer' % value)
return value
def get_uint16(self):
"""Read the next token and interpret it as a 16-bit unsigned
integer.
@raises dns.exception.SyntaxError:
@rtype: int
"""
value = self.get_int()
if value < 0 or value > 65535:
raise dns.exception.SyntaxError('%d is not an unsigned 16-bit integer' % value)
return value
def get_uint32(self):
"""Read the next token and interpret it as a 32-bit unsigned
integer.
@raises dns.exception.SyntaxError:
@rtype: int
"""
token = self.get().unescape()
if not token.is_identifier():
raise dns.exception.SyntaxError('expecting an identifier')
if not token.value.isdigit():
raise dns.exception.SyntaxError('expecting an integer')
value = long(token.value)
if value < 0 or value > 4294967296L:
raise dns.exception.SyntaxError('%d is not an unsigned 32-bit integer' % value)
return value
def get_string(self, origin=None):
"""Read the next token and interpret it as a string.
@raises dns.exception.SyntaxError:
@rtype: string
"""
token = self.get().unescape()
if not (token.is_identifier() or token.is_quoted_string()):
raise dns.exception.SyntaxError('expecting a string')
return token.value
def get_identifier(self, origin=None):
"""Read the next token and raise an exception if it is not an identifier.
@raises dns.exception.SyntaxError:
@rtype: string
"""
token = self.get().unescape()
if not token.is_identifier():
raise dns.exception.SyntaxError('expecting an identifier')
return token.value
def get_name(self, origin=None):
"""Read the next token and interpret it as a DNS name.
@raises dns.exception.SyntaxError:
@rtype: dns.name.Name object"""
token = self.get()
if not token.is_identifier():
raise dns.exception.SyntaxError('expecting an identifier')
return dns.name.from_text(token.value, origin)
def get_eol(self):
"""Read the next token and raise an exception if it isn't EOL or
EOF.
@raises dns.exception.SyntaxError:
@rtype: string
"""
token = self.get()
if not token.is_eol_or_eof():
raise dns.exception.SyntaxError('expected EOL or EOF, got %d "%s"' % (token.ttype, token.value))
return token.value
    def get_ttl(self):
        """Read the next token and interpret it as a DNS TTL.

        The token must be an identifier; parsing of the TTL text is
        delegated to dns.ttl.from_text.
        @raises dns.exception.SyntaxError: the token is not an identifier
        """
        token = self.get().unescape()
        if not token.is_identifier():
            raise dns.exception.SyntaxError('expecting an identifier')
        return dns.ttl.from_text(token.value)
| apache-2.0 |
kapt/django-oscar | tests/unit/shipping/method_tests.py | 10 | 2641 | from decimal import Decimal as D
from django.test import TestCase
from nose.plugins.attrib import attr
import mock
from oscar.apps.shipping import methods
from oscar.apps.basket.models import Basket
@attr('shipping')
class TestFreeShipppingForEmptyBasket(TestCase):
    """Free shipping is zero-cost, tax-known, and currency-matched for an
    empty basket."""

    def setUp(self):
        self.method = methods.Free()
        self.basket = Basket()
        self.charge = self.method.calculate(self.basket)

    def test_is_free(self):
        for amount in (self.charge.incl_tax, self.charge.excl_tax):
            self.assertEqual(D('0.00'), amount)

    def test_has_tax_known(self):
        self.assertTrue(self.charge.is_tax_known)

    def test_has_same_currency_as_basket(self):
        self.assertEqual(self.basket.currency, self.charge.currency)
@attr('shipping')
class TestFreeShipppingForNonEmptyBasket(TestCase):
    """Free shipping stays zero-cost when the basket holds items."""

    def setUp(self):
        self.method = methods.Free()
        self.basket = mock.Mock()
        self.basket.num_items = 1
        self.charge = self.method.calculate(self.basket)

    def test_is_free(self):
        for amount in (self.charge.incl_tax, self.charge.excl_tax):
            self.assertEqual(D('0.00'), amount)
@attr('shipping')
class TestNoShippingRequired(TestCase):
    """NoShippingRequired is free but keeps a code distinct from Free."""

    def setUp(self):
        self.method = methods.NoShippingRequired()
        self.charge = self.method.calculate(Basket())

    def test_is_free_for_empty_basket(self):
        for amount in (self.charge.incl_tax, self.charge.excl_tax):
            self.assertEqual(D('0.00'), amount)

    def test_has_a_different_code_to_free(self):
        self.assertNotEqual(
            methods.NoShippingRequired.code, methods.Free.code)
@attr('shipping')
class TestFixedPriceShippingWithoutTax(TestCase):
    """FixedPrice with only an ex-tax charge leaves the tax unknown."""

    def setUp(self):
        self.method = methods.FixedPrice(D('10.00'))
        self.charge = self.method.calculate(Basket())

    def test_has_correct_charge(self):
        self.assertEqual(D('10.00'), self.charge.excl_tax)

    def test_does_not_include_tax(self):
        self.assertFalse(self.charge.is_tax_known)
@attr('shipping')
class TestFixedPriceShippingWithTax(TestCase):
    """FixedPrice with both charges exposes a known tax amount."""

    def setUp(self):
        self.method = methods.FixedPrice(
            charge_excl_tax=D('10.00'),
            charge_incl_tax=D('12.00'))
        self.charge = self.method.calculate(Basket())

    def test_has_correct_charge(self):
        self.assertEqual(D('10.00'), self.charge.excl_tax)
        self.assertEqual(D('12.00'), self.charge.incl_tax)

    def test_does_include_tax(self):
        self.assertTrue(self.charge.is_tax_known)
| bsd-3-clause |
dyule/RandomBytes | ProgAsArt/Other Examples/bubblelace.py | 2 | 1065 | '''
Bubble Lace - Python.
cblouin@dal.ca
'''
# Import everything from Graphics
from graphics import *
# Import randint from random
from random import randint
# Produce a random RGB color by sampling each channel uniformly.
def RandomColor():
    channels = [randint(0, 255) for _ in range(3)]
    return color_rgb(*channels)
# This function draws a circle, then calls itself to draw two smaller circles inside
def Replicant(x,y,height, radius):
    """Draw a circle centred at (x, y), then recurse ``height`` more
    levels, drawing two half-radius circles offset left and right.

    Relies on the module-level ``win`` GraphWin being created before
    the first call.
    """
    # Main circle
    c = Circle(Point(x,y), radius)
    # Random color which depends on where you are on the screen
    relx = (x/600.)*255  # red/blue channels scale with horizontal position
    color = color_rgb(255-relx,randint(0,255),relx)
    c.setFill(color)
    # Add to the window (important)
    c.draw(win)
    # Create circles with the circle if height is more than 0
    if height > 0:
        height -= 1
        Replicant(x-(radius/2),y,height,radius/2)
        Replicant(x+(radius/2),y,height,radius/2)
# Build a window
win = GraphWin('Bubble Lace', 600, 600)
# Start the pattern with one big circle
howdeep = 6  # recursion depth passed to Replicant
howbig = 290  # radius of the outermost circle, in pixels
Replicant(300,300, howdeep, howbig)
# Wait to stop (don't modify this)
win.getMouse()
win.close()
| gpl-3.0 |
chrisbjohannsen/no-gnus | node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/win_tool.py | 1417 | 12751 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions for Windows builds.
These functions are executed via gyp-win-tool when using the ninja generator.
"""
import os
import re
import shutil
import subprocess
import stat
import string
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# A regex matching an argument corresponding to the output filename passed to
# link.exe.
_LINK_EXE_OUT_ARG = re.compile('/OUT:(?P<out>.+)$', re.IGNORECASE)
def main(args):
  """Create a WinTool, dispatch |args| to it, and exit with its status."""
  tool = WinTool()
  status = tool.Dispatch(args)
  if status is not None:
    sys.exit(status)
class WinTool(object):
  """This class performs all the Windows tooling steps. The methods can either
  be executed directly, or dispatched from an argument list.

  NOTE(review): this module uses Python 2 print statements throughout, so
  it must be run under a Python 2 interpreter.
  """

  def _UseSeparateMspdbsrv(self, env, args):
    """Allows to use a unique instance of mspdbsrv.exe per linker instead of a
    shared one."""
    if len(args) < 1:
      raise Exception("Not enough arguments")

    if args[0] != 'link.exe':
      return

    # Use the output filename passed to the linker to generate an endpoint name
    # for mspdbsrv.exe.
    endpoint_name = None
    for arg in args:
      m = _LINK_EXE_OUT_ARG.match(arg)
      if m:
        endpoint_name = re.sub(r'\W+', '',
                               '%s_%d' % (m.group('out'), os.getpid()))
        break

    if endpoint_name is None:
      return

    # Adds the appropriate environment variable. This will be read by link.exe
    # to know which instance of mspdbsrv.exe it should connect to (if it's
    # not set then the default endpoint is used).
    env['_MSPDBSRV_ENDPOINT_'] = endpoint_name

  def Dispatch(self, args):
    """Dispatches a string command to a method."""
    if len(args) < 1:
      raise Exception("Not enough arguments")

    # 'recursive-mirror' -> ExecRecursiveMirror, etc.
    method = "Exec%s" % self._CommandifyName(args[0])
    return getattr(self, method)(*args[1:])

  def _CommandifyName(self, name_string):
    """Transforms a tool name like recursive-mirror to RecursiveMirror."""
    return name_string.title().replace('-', '')

  def _GetEnv(self, arch):
    """Gets the saved environment from a file for a given architecture."""
    # The environment is saved as an "environment block" (see CreateProcess
    # and msvs_emulation for details). We convert to a dict here.
    # Drop last 2 NULs, one for list terminator, one for trailing vs. separator.
    pairs = open(arch).read()[:-2].split('\0')
    kvs = [item.split('=', 1) for item in pairs]
    return dict(kvs)

  def ExecStamp(self, path):
    """Simple stamp command."""
    open(path, 'w').close()

  def ExecRecursiveMirror(self, source, dest):
    """Emulation of rm -rf out && cp -af in out."""
    if os.path.exists(dest):
      if os.path.isdir(dest):
        def _on_error(fn, path, excinfo):
          # The operation failed, possibly because the file is set to
          # read-only. If that's why, make it writable and try the op again.
          if not os.access(path, os.W_OK):
            os.chmod(path, stat.S_IWRITE)
          fn(path)
        shutil.rmtree(dest, onerror=_on_error)
      else:
        if not os.access(dest, os.W_OK):
          # Attempt to make the file writable before deleting it.
          os.chmod(dest, stat.S_IWRITE)
        os.unlink(dest)

    if os.path.isdir(source):
      shutil.copytree(source, dest)
    else:
      shutil.copy2(source, dest)

  def ExecLinkWrapper(self, arch, use_separate_mspdbsrv, *args):
    """Filter diagnostic output from link that looks like:
    '   Creating library ui.dll.lib and object ui.dll.exp'
    This happens when there are exports from the dll or exe.
    """
    env = self._GetEnv(arch)
    if use_separate_mspdbsrv == 'True':
      self._UseSeparateMspdbsrv(env, args)
    link = subprocess.Popen([args[0].replace('/', '\\')] + list(args[1:]),
                            shell=True,
                            env=env,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    out, _ = link.communicate()
    for line in out.splitlines():
      if (not line.startswith('   Creating library ') and
          not line.startswith('Generating code') and
          not line.startswith('Finished generating code')):
        print line
    return link.returncode

  def ExecLinkWithManifests(self, arch, embed_manifest, out, ldcmd, resname,
                            mt, rc, intermediate_manifest, *manifests):
    """A wrapper for handling creating a manifest resource and then executing
    a link command."""
    # The 'normal' way to do manifests is to have link generate a manifest
    # based on gathering dependencies from the object files, then merge that
    # manifest with other manifests supplied as sources, convert the merged
    # manifest to a resource, and then *relink*, including the compiled
    # version of the manifest resource. This breaks incremental linking, and
    # is generally overly complicated. Instead, we merge all the manifests
    # provided (along with one that includes what would normally be in the
    # linker-generated one, see msvs_emulation.py), and include that into the
    # first and only link. We still tell link to generate a manifest, but we
    # only use that to assert that our simpler process did not miss anything.
    variables = {
      'python': sys.executable,
      'arch': arch,
      'out': out,
      'ldcmd': ldcmd,
      'resname': resname,
      'mt': mt,
      'rc': rc,
      'intermediate_manifest': intermediate_manifest,
      'manifests': ' '.join(manifests),
    }
    add_to_ld = ''
    if manifests:
      subprocess.check_call(
          '%(python)s gyp-win-tool manifest-wrapper %(arch)s %(mt)s -nologo '
          '-manifest %(manifests)s -out:%(out)s.manifest' % variables)
      if embed_manifest == 'True':
        subprocess.check_call(
            '%(python)s gyp-win-tool manifest-to-rc %(arch)s %(out)s.manifest'
            ' %(out)s.manifest.rc %(resname)s' % variables)
        subprocess.check_call(
            '%(python)s gyp-win-tool rc-wrapper %(arch)s %(rc)s '
            '%(out)s.manifest.rc' % variables)
        add_to_ld = ' %(out)s.manifest.res' % variables
    subprocess.check_call(ldcmd + add_to_ld)

    # Run mt.exe on the theoretically complete manifest we generated, merging
    # it with the one the linker generated to confirm that the linker
    # generated one does not add anything. This is strictly unnecessary for
    # correctness, it's only to verify that e.g. /MANIFESTDEPENDENCY was not
    # used in a #pragma comment.
    if manifests:
      # Merge the intermediate one with ours to .assert.manifest, then check
      # that .assert.manifest is identical to ours.
      subprocess.check_call(
          '%(python)s gyp-win-tool manifest-wrapper %(arch)s %(mt)s -nologo '
          '-manifest %(out)s.manifest %(intermediate_manifest)s '
          '-out:%(out)s.assert.manifest' % variables)
      assert_manifest = '%(out)s.assert.manifest' % variables
      our_manifest = '%(out)s.manifest' % variables
      # Load and normalize the manifests. mt.exe sometimes removes whitespace,
      # and sometimes doesn't unfortunately.
      with open(our_manifest, 'rb') as our_f:
        with open(assert_manifest, 'rb') as assert_f:
          our_data = our_f.read().translate(None, string.whitespace)
          assert_data = assert_f.read().translate(None, string.whitespace)
      if our_data != assert_data:
        os.unlink(out)
        def dump(filename):
          sys.stderr.write('%s\n-----\n' % filename)
          with open(filename, 'rb') as f:
            sys.stderr.write(f.read() + '\n-----\n')
        dump(intermediate_manifest)
        dump(our_manifest)
        dump(assert_manifest)
        sys.stderr.write(
            'Linker generated manifest "%s" added to final manifest "%s" '
            '(result in "%s"). '
            'Were /MANIFEST switches used in #pragma statements? ' % (
              intermediate_manifest, our_manifest, assert_manifest))
        return 1

  def ExecManifestWrapper(self, arch, *args):
    """Run manifest tool with environment set. Strip out undesirable warning
    (some XML blocks are recognized by the OS loader, but not the manifest
    tool)."""
    env = self._GetEnv(arch)
    popen = subprocess.Popen(args, shell=True, env=env,
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out, _ = popen.communicate()
    for line in out.splitlines():
      if line and 'manifest authoring warning 81010002' not in line:
        print line
    return popen.returncode

  def ExecManifestToRc(self, arch, *args):
    """Creates a resource file pointing a SxS assembly manifest.
    |args| is tuple containing path to resource file, path to manifest file
    and resource name which can be "1" (for executables) or "2" (for DLLs)."""
    manifest_path, resource_path, resource_name = args
    with open(resource_path, 'wb') as output:
      output.write('#include <windows.h>\n%s RT_MANIFEST "%s"' % (
        resource_name,
        os.path.abspath(manifest_path).replace('\\', '/')))

  def ExecMidlWrapper(self, arch, outdir, tlb, h, dlldata, iid, proxy, idl,
                      *flags):
    """Filter noisy filenames output from MIDL compile step that isn't
    quietable via command line flags.
    """
    args = ['midl', '/nologo'] + list(flags) + [
        '/out', outdir,
        '/tlb', tlb,
        '/h', h,
        '/dlldata', dlldata,
        '/iid', iid,
        '/proxy', proxy,
        idl]
    env = self._GetEnv(arch)
    popen = subprocess.Popen(args, shell=True, env=env,
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out, _ = popen.communicate()
    # Filter junk out of stdout, and write filtered versions. Output we want
    # to filter is pairs of lines that look like this:
    # Processing C:\Program Files (x86)\Microsoft SDKs\...\include\objidl.idl
    # objidl.idl
    lines = out.splitlines()
    prefixes = ('Processing ', '64 bit Processing ')
    processing = set(os.path.basename(x)
                     for x in lines if x.startswith(prefixes))
    for line in lines:
      if not line.startswith(prefixes) and line not in processing:
        print line
    return popen.returncode

  def ExecAsmWrapper(self, arch, *args):
    """Filter logo banner from invocations of asm.exe."""
    env = self._GetEnv(arch)
    popen = subprocess.Popen(args, shell=True, env=env,
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out, _ = popen.communicate()
    for line in out.splitlines():
      if (not line.startswith('Copyright (C) Microsoft Corporation') and
          not line.startswith('Microsoft (R) Macro Assembler') and
          not line.startswith(' Assembling: ') and
          line):
        print line
    return popen.returncode

  def ExecRcWrapper(self, arch, *args):
    """Filter logo banner from invocations of rc.exe. Older versions of RC
    don't support the /nologo flag."""
    env = self._GetEnv(arch)
    popen = subprocess.Popen(args, shell=True, env=env,
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out, _ = popen.communicate()
    for line in out.splitlines():
      if (not line.startswith('Microsoft (R) Windows (R) Resource Compiler') and
          not line.startswith('Copyright (C) Microsoft Corporation') and
          line):
        print line
    return popen.returncode

  def ExecActionWrapper(self, arch, rspfile, *dir):
    """Runs an action command line from a response file using the environment
    for |arch|. If |dir| is supplied, use that as the working directory."""
    env = self._GetEnv(arch)
    # TODO(scottmg): This is a temporary hack to get some specific variables
    # through to actions that are set after gyp-time. http://crbug.com/333738.
    for k, v in os.environ.iteritems():
      if k not in env:
        env[k] = v
    args = open(rspfile).read()
    dir = dir[0] if dir else None
    return subprocess.call(args, shell=True, env=env, cwd=dir)

  def ExecClCompile(self, project_dir, selected_files):
    """Executed by msvs-ninja projects when the 'ClCompile' target is used to
    build selected C/C++ files."""
    project_dir = os.path.relpath(project_dir, BASE_DIR)
    selected_files = selected_files.split(';')
    ninja_targets = [os.path.join(project_dir, filename) + '^^'
                     for filename in selected_files]
    cmd = ['ninja.exe']
    cmd.extend(ninja_targets)
    return subprocess.call(cmd, shell=True, cwd=BASE_DIR)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| gpl-3.0 |
ThatOneRoadie/stratux-cosensor | cosensor-monitor.py | 1 | 1668 | import smbus
import time
import os
import RPi.GPIO as GPIO
bus = smbus.SMBus(1)
# Set PPM limit variable to whatever would be an alarm for you
# For Uncalibrated sensors, use your better judgement or get it calibrated (390 is a fresh air outdoor baseline, depending on area).
# Sensor can also be calibrated by immersing in a pure nitrogen environment and calibrating to 10 (0 if you remove the +10 in the ppm formula below).
# My system registers car exhaust at about 1000 and Fresh air at around 390 after calibration. I've set 500 as an alarm (Baseline + 100 PPM).
# Possible future enhancement, set up a baseline on boot, then throw an alarm for a significant positive deviation from that baseline?
limit_ppm = 500
# Set up GPIO and pull Alarm Low and Silent Warning High
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(2,GPIO.OUT)
GPIO.setup(3,GPIO.OUT)
GPIO.output(2,GPIO.LOW)
GPIO.output(3,GPIO.HIGH)
# Begin the Loop!
while (1):
# ADC121C_MQ9 address, 0x50(80)
# Read data back from 0x00(00), 2 bytes
data = bus.read_i2c_block_data(0x50, 0x00, 2)
# Convert the data to 12-bits and then to ppm
raw_adc = (data[0] & 0x0F) * 256 + data[1]
ppm = (1000.0 / 4096.0) * raw_adc + 10
# Testing: Output data to screen
print "Carbon Monoxide Concentration : %.2f ppm" %ppm
#Set up Alarm loop to pull Alarm high and Silent low, but only while PPM exceeds set limit
if ppm > limit_ppm:
GPIO.output(2,GPIO.HIGH)
GPIO.output(3,GPIO.LOW)
# Testing: Write alarm status
print "Alarm is ON"
else:
GPIO.output(2,GPIO.LOW)
GPIO.output(3,GPIO.HIGH)
# Testing: Write alarm status
print "Alarm is OFF"
sleep(5)
| mit |
rue89-tech/edx-platform | common/djangoapps/student/tests/test_userstanding.py | 57 | 3823 | """
These are tests for disabling and enabling student accounts, and for making sure
that students with disabled accounts are unable to access the courseware.
"""
import unittest
from student.tests.factories import UserFactory, UserStandingFactory
from student.models import UserStanding
from django.conf import settings
from django.test import TestCase, Client
from django.core.urlresolvers import reverse
class UserStandingTest(TestCase):
    """Test suite for the user standing views that enable and disable
    accounts: staff can toggle standing, disabled accounts get 403s, and
    non-staff users cannot reach the management endpoints."""

    def setUp(self):
        # create users
        self.bad_user = UserFactory.create(
            username='bad_user',
        )
        self.good_user = UserFactory.create(
            username='good_user',
        )
        self.non_staff = UserFactory.create(
            username='non_staff',
        )
        self.admin = UserFactory.create(
            username='admin',
            is_staff=True,
        )

        # create clients
        self.bad_user_client = Client()
        self.good_user_client = Client()
        self.non_staff_client = Client()
        self.admin_client = Client()

        # log each client in as its matching user
        for user, client in [
            (self.bad_user, self.bad_user_client),
            (self.good_user, self.good_user_client),
            (self.non_staff, self.non_staff_client),
            (self.admin, self.admin_client),
        ]:
            client.login(username=user.username, password='test')

        # bad_user starts out with a disabled account
        UserStandingFactory.create(
            user=self.bad_user,
            account_status=UserStanding.ACCOUNT_DISABLED,
            changed_by=self.admin
        )

        # set stock url to test disabled accounts' access to site
        self.some_url = '/'

    # since it's only possible to disable accounts from lms, we're going
    # to skip tests for cms

    @unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
    def test_disable_account(self):
        """Staff can disable an account that previously had no standing."""
        self.assertEqual(
            UserStanding.objects.filter(user=self.good_user).count(), 0
        )
        # NOTE(review): the response is unused; only the DB side effect is
        # asserted below.
        response = self.admin_client.post(reverse('disable_account_ajax'), {
            'username': self.good_user.username,
            'account_action': 'disable',
        })
        self.assertEqual(
            UserStanding.objects.get(user=self.good_user).account_status,
            UserStanding.ACCOUNT_DISABLED
        )

    def test_disabled_account_403s(self):
        """A disabled account is forbidden from ordinary pages."""
        response = self.bad_user_client.get(self.some_url)
        self.assertEqual(response.status_code, 403)

    @unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
    def test_reenable_account(self):
        """Staff can re-enable a previously disabled account."""
        response = self.admin_client.post(reverse('disable_account_ajax'), {
            'username': self.bad_user.username,
            'account_action': 'reenable'
        })
        self.assertEqual(
            UserStanding.objects.get(user=self.bad_user).account_status,
            UserStanding.ACCOUNT_ENABLED
        )

    @unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
    def test_non_staff_cant_access_disable_view(self):
        """Non-staff users get a 404 from the management page."""
        response = self.non_staff_client.get(reverse('manage_user_standing'), {
            'user': self.non_staff,
        })
        self.assertEqual(response.status_code, 404)

    @unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
    def test_non_staff_cant_disable_account(self):
        """Non-staff users cannot change another user's standing."""
        response = self.non_staff_client.post(reverse('disable_account_ajax'), {
            'username': self.good_user.username,
            'user': self.non_staff,
            'account_action': 'disable'
        })
        self.assertEqual(response.status_code, 404)
        self.assertEqual(
            UserStanding.objects.filter(user=self.good_user).count(), 0
        )
| agpl-3.0 |
Safihre/cherrypy | cherrypy/_cpreqbody.py | 4 | 36382 | """Request body processing for CherryPy.
.. versionadded:: 3.2
Application authors have complete control over the parsing of HTTP request
entities. In short,
:attr:`cherrypy.request.body<cherrypy._cprequest.Request.body>`
is now always set to an instance of
:class:`RequestBody<cherrypy._cpreqbody.RequestBody>`,
and *that* class is a subclass of :class:`Entity<cherrypy._cpreqbody.Entity>`.
When an HTTP request includes an entity body, it is often desirable to
provide that information to applications in a form other than the raw bytes.
Different content types demand different approaches. Examples:
* For a GIF file, we want the raw bytes in a stream.
* An HTML form is better parsed into its component fields, and each text field
decoded from bytes to unicode.
* A JSON body should be deserialized into a Python dict or list.
When the request contains a Content-Type header, the media type is used as a
key to look up a value in the
:attr:`request.body.processors<cherrypy._cpreqbody.Entity.processors>` dict.
If the full media
type is not found, then the major type is tried; for example, if no processor
is found for the 'image/jpeg' type, then we look for a processor for the
'image' types altogether. If neither the full type nor the major type has a
matching processor, then a default processor is used
(:func:`default_proc<cherrypy._cpreqbody.Entity.default_proc>`). For most
types, this means no processing is done, and the body is left unread as a
raw byte stream. Processors are configurable in an 'on_start_resource' hook.
Some processors, especially those for the 'text' types, attempt to decode bytes
to unicode. If the Content-Type request header includes a 'charset' parameter,
this is used to decode the entity. Otherwise, one or more default charsets may
be attempted, although this decision is up to each processor. If a processor
successfully decodes an Entity or Part, it should set the
:attr:`charset<cherrypy._cpreqbody.Entity.charset>` attribute
on the Entity or Part to the name of the successful charset, so that
applications can easily re-encode or transcode the value if they wish.
If the Content-Type of the request entity is of major type 'multipart', then
the above parsing process, and possibly a decoding process, is performed for
each part.
For both the full entity and multipart parts, a Content-Disposition header may
be used to fill :attr:`name<cherrypy._cpreqbody.Entity.name>` and
:attr:`filename<cherrypy._cpreqbody.Entity.filename>` attributes on the
request.body or the Part.
.. _custombodyprocessors:
Custom Processors
=================
You can add your own processors for any specific or major MIME type. Simply add
it to the :attr:`processors<cherrypy._cprequest.Entity.processors>` dict in a
hook/tool that runs at ``on_start_resource`` or ``before_request_body``.
Here's the built-in JSON tool for an example::
def json_in(force=True, debug=False):
request = cherrypy.serving.request
def json_processor(entity):
'''Read application/json data into request.json.'''
if not entity.headers.get("Content-Length", ""):
raise cherrypy.HTTPError(411)
body = entity.fp.read()
try:
request.json = json_decode(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid JSON document')
if force:
request.body.processors.clear()
request.body.default_proc = cherrypy.HTTPError(
415, 'Expected an application/json content type')
request.body.processors['application/json'] = json_processor
We begin by defining a new ``json_processor`` function to stick in the
``processors`` dictionary. All processor functions take a single argument,
the ``Entity`` instance they are to process. It will be called whenever a
request is received (for those URI's where the tool is turned on) which
has a ``Content-Type`` of "application/json".
First, it checks for a valid ``Content-Length`` (raising 411 if not valid),
then reads the remaining bytes on the socket. The ``fp`` object knows its
own length, so it won't hang waiting for data that never arrives. It will
return when all data has been read. Then, we decode those bytes using
Python's built-in ``json`` module, and stick the decoded result onto
``request.json`` . If it cannot be decoded, we raise 400.
If the "force" argument is True (the default), the ``Tool`` clears the
``processors`` dict so that request entities of other ``Content-Types``
aren't parsed at all. Since there's no entry for those invalid MIME
types, the ``default_proc`` method of ``cherrypy.request.body`` is
called. But this does nothing by default (usually to provide the page
handler an opportunity to handle it.)
But in our case, we want to raise 415, so we replace
``request.body.default_proc``
with the error (``HTTPError`` instances, when called, raise themselves).
If we were defining a custom processor, we can do so without making a ``Tool``.
Just add the config entry::
request.body.processors = {'application/json': json_processor}
Note that you can only replace the ``processors`` dict wholesale this way,
not update the existing one.
"""
try:
from io import DEFAULT_BUFFER_SIZE
except ImportError:
DEFAULT_BUFFER_SIZE = 8192
import re
import sys
import tempfile
from urllib.parse import unquote
import cheroot.server
import cherrypy
from cherrypy._cpcompat import ntou
from cherrypy.lib import httputil
def unquote_plus(bs):
    """Bytes version of urllib.parse.unquote_plus.

    Replaces ``+`` with a space, then decodes every ``%XX`` escape whose
    two hex digits parse; an escape with invalid digits is emitted
    without the ``%`` but with the rest of its text intact.
    """
    plus_decoded = bs.replace(b'+', b' ')
    pieces = plus_decoded.split(b'%')
    decoded = [pieces[0]]
    for piece in pieces[1:]:
        try:
            decoded.append(bytes([int(piece[:2], 16)]) + piece[2:])
        except ValueError:
            # Not a valid two-digit hex escape; keep the remaining text.
            decoded.append(piece)
    return b''.join(decoded)
# ------------------------------- Processors -------------------------------- #
def process_urlencoded(entity):
    """Read application/x-www-form-urlencoded data into entity.params.

    Each charset in entity.attempt_charsets is tried in order; the first
    that decodes every key and value wins and is recorded on
    entity.charset. If none succeeds, a 400 is raised.
    """
    qs = entity.fp.read()
    decoded = None
    for charset in entity.attempt_charsets:
        try:
            candidate = {}
            for aparam in qs.split(b'&'):
                for pair in aparam.split(b';'):
                    if not pair:
                        continue
                    atoms = pair.split(b'=', 1)
                    if len(atoms) == 1:
                        atoms.append(b'')
                    key = unquote_plus(atoms[0]).decode(charset)
                    value = unquote_plus(atoms[1]).decode(charset)
                    # Duplicate keys accumulate into a list.
                    if key in candidate:
                        if not isinstance(candidate[key], list):
                            candidate[key] = [candidate[key]]
                        candidate[key].append(value)
                    else:
                        candidate[key] = value
        except UnicodeDecodeError:
            continue
        decoded = candidate
        entity.charset = charset
        break
    if decoded is None:
        raise cherrypy.HTTPError(
            400, 'The request entity could not be decoded. The following '
            'charsets were attempted: %s' % repr(entity.attempt_charsets))

    # Now that all values have been successfully parsed and decoded,
    # apply them to the entity.params dict.
    for key, value in decoded.items():
        if key in entity.params:
            current = entity.params[key]
            if not isinstance(current, list):
                current = [current]
                entity.params[key] = current
            current.append(value)
        else:
            entity.params[key] = value
def process_multipart(entity):
    """Read all multipart parts into entity.parts."""
    boundary = ''
    if 'boundary' in entity.content_type.params:
        # http://tools.ietf.org/html/rfc2046#section-5.1.1
        # "The grammar for parameters on the Content-type field is such that it
        # is often necessary to enclose the boundary parameter values in quotes
        # on the Content-type line"
        boundary = entity.content_type.params['boundary'].strip('"')
    if not re.match('^[ -~]{0,200}[!-~]$', boundary):
        raise ValueError('Invalid boundary in multipart form: %r' % (boundary,))
    delimiter = ('--' + boundary).encode('ascii')
    # Skip the preamble: consume lines until the first boundary marker.
    while True:
        line = entity.readline()
        if not line:
            return
        if line.strip() == delimiter:
            break
    # Parse parts until the closing delimiter exhausts the stream.
    while True:
        part = entity.part_class.from_fp(entity.fp, delimiter)
        entity.parts.append(part)
        part.process()
        if part.fp.done:
            break
def process_multipart_form_data(entity):
    """Read all multipart/form-data parts into entity.parts or entity.params.
    """
    process_multipart(entity)
    kept_parts = []
    for part in entity.parts:
        if part.name is None:
            # Anonymous parts are retained as raw parts.
            kept_parts.append(part)
            continue
        if part.filename is None:
            # A regular field collapses to its decoded string value.
            value = part.fullvalue()
        else:
            # A file upload keeps the whole Part so consumer code can
            # reach its .file and .filename attributes.
            value = part
        if part.name in entity.params:
            current = entity.params[part.name]
            if not isinstance(current, list):
                current = [current]
                entity.params[part.name] = current
            current.append(value)
        else:
            entity.params[part.name] = value
    entity.parts = kept_parts
def _old_process_multipart(entity):
    """The behavior of 3.2 and lower. Deprecated and will be changed in 3.3."""
    process_multipart(entity)
    params = entity.params
    for part in entity.parts:
        # Anonymous parts all collect under the generic 'parts' key.
        key = ntou('parts') if part.name is None else part.name
        # Plain fields collapse to a string; uploads keep the Part object
        # so .file and .filename stay reachable.
        value = part.fullvalue() if part.filename is None else part
        if key in params:
            if not isinstance(params[key], list):
                params[key] = [params[key]]
            params[key].append(value)
        else:
            params[key] = value
# -------------------------------- Entities --------------------------------- #
class Entity(object):
    """An HTTP request body, or MIME multipart body.
    This class collects information about the HTTP request entity. When a
    given entity is of MIME type "multipart", each part is parsed into its own
    Entity instance, and the set of parts stored in
    :attr:`entity.parts<cherrypy._cpreqbody.Entity.parts>`.
    Between the ``before_request_body`` and ``before_handler`` tools, CherryPy
    tries to process the request body (if any) by calling
    :func:`request.body.process<cherrypy._cpreqbody.RequestBody.process>`.
    This uses the ``content_type`` of the Entity to look up a suitable
    processor in
    :attr:`Entity.processors<cherrypy._cpreqbody.Entity.processors>`,
    a dict.
    If a matching processor cannot be found for the complete Content-Type,
    it tries again using the major type. For example, if a request with an
    entity of type "image/jpeg" arrives, but no processor can be found for
    that complete type, then one is sought for the major type "image". If a
    processor is still not found, then the
    :func:`default_proc<cherrypy._cpreqbody.Entity.default_proc>` method
    of the Entity is called (which does nothing by default; you can
    override this too).
    CherryPy includes processors for the "application/x-www-form-urlencoded"
    type, the "multipart/form-data" type, and the "multipart" major type.
    CherryPy 3.2 processes these types almost exactly as older versions.
    Parts are passed as arguments to the page handler using their
    ``Content-Disposition.name`` if given, otherwise in a generic "parts"
    argument. Each such part is either a string, or the
    :class:`Part<cherrypy._cpreqbody.Part>` itself if it's a file. (In this
    case it will have ``file`` and ``filename`` attributes, or possibly a
    ``value`` attribute). Each Part is itself a subclass of
    Entity, and has its own ``process`` method and ``processors`` dict.
    There is a separate processor for the "multipart" major type which is more
    flexible, and simply stores all multipart parts in
    :attr:`request.body.parts<cherrypy._cpreqbody.Entity.parts>`. You can
    enable it with::
        cherrypy.request.body.processors['multipart'] = \
            _cpreqbody.process_multipart
    in an ``on_start_resource`` tool.
    """
    # http://tools.ietf.org/html/rfc2046#section-4.1.2:
    # "The default character set, which must be assumed in the
    # absence of a charset parameter, is US-ASCII."
    # However, many browsers send data in utf-8 with no charset.
    attempt_charsets = ['utf-8']
    r"""A list of strings, each of which should be a known encoding.
    When the Content-Type of the request body warrants it, each of the given
    encodings will be tried in order. The first one to successfully decode the
    entity without raising an error is stored as
    :attr:`entity.charset<cherrypy._cpreqbody.Entity.charset>`. This defaults
    to ``['utf-8']`` (plus 'ISO-8859-1' for "text/\*" types, as required by
    `HTTP/1.1
    <http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7.1>`_),
    but ``['us-ascii', 'utf-8']`` for multipart parts.
    """
    charset = None
    """The successful decoding; see "attempt_charsets" above."""
    content_type = None
    """The value of the Content-Type request header.
    If the Entity is part of a multipart payload, this will be the Content-Type
    given in the MIME headers for this part.
    """
    default_content_type = 'application/x-www-form-urlencoded'
    """This defines a default ``Content-Type`` to use if no Content-Type header
    is given. The empty string is used for RequestBody, which results in the
    request body not being read or parsed at all. This is by design; a missing
    ``Content-Type`` header in the HTTP request entity is an error at best,
    and a security hole at worst. For multipart parts, however, the MIME spec
    declares that a part with no Content-Type defaults to "text/plain"
    (see :class:`Part<cherrypy._cpreqbody.Part>`).
    """
    filename = None
    """The ``Content-Disposition.filename`` header, if available."""
    fp = None
    """The readable socket file object."""
    headers = None
    """A dict of request/multipart header names and values.
    This is a copy of the ``request.headers`` for the ``request.body``;
    for multipart parts, it is the set of headers for that part.
    """
    length = None
    """The value of the ``Content-Length`` header, if provided."""
    name = None
    """The "name" parameter of the ``Content-Disposition`` header, if any."""
    params = None
    """
    If the request Content-Type is 'application/x-www-form-urlencoded' or
    multipart, this will be a dict of the params pulled from the entity
    body; that is, it will be the portion of request.params that come
    from the message body (sometimes called "POST params", although they
    can be sent with various HTTP method verbs). This value is set between
    the 'before_request_body' and 'before_handler' hooks (assuming that
    process_request_body is True)."""
    processors = {'application/x-www-form-urlencoded': process_urlencoded,
                  'multipart/form-data': process_multipart_form_data,
                  'multipart': process_multipart,
                  }
    """A dict of Content-Type names to processor methods."""
    parts = None
    """A list of Part instances if ``Content-Type`` is of major type
    "multipart"."""
    part_class = None
    """The class used for multipart parts.
    You can replace this with custom subclasses to alter the processing of
    multipart parts.
    """

    def __init__(self, fp, headers, params=None, parts=None):
        # Make an instance-specific copy of the class processors
        # so Tools, etc. can replace them per-request.
        self.processors = self.processors.copy()
        self.fp = fp
        self.headers = headers
        # Mutable containers are created per-instance, never shared
        # class-level defaults.
        if params is None:
            params = {}
        self.params = params
        if parts is None:
            parts = []
        self.parts = parts
        # Content-Type
        self.content_type = headers.elements('Content-Type')
        if self.content_type:
            self.content_type = self.content_type[0]
        else:
            self.content_type = httputil.HeaderElement.from_str(
                self.default_content_type)
        # Copy the class 'attempt_charsets', prepending any Content-Type
        # charset
        dec = self.content_type.params.get('charset', None)
        if dec:
            self.attempt_charsets = [dec] + [c for c in self.attempt_charsets
                                             if c != dec]
        else:
            self.attempt_charsets = self.attempt_charsets[:]
        # Length
        self.length = None
        clen = headers.get('Content-Length', None)
        # If Transfer-Encoding is 'chunked', ignore any Content-Length.
        if (
                clen is not None and
                'chunked' not in headers.get('Transfer-Encoding', '')
        ):
            try:
                self.length = int(clen)
            except ValueError:
                pass
        # Content-Disposition
        self.name = None
        self.filename = None
        disp = headers.elements('Content-Disposition')
        if disp:
            disp = disp[0]
            if 'name' in disp.params:
                self.name = disp.params['name']
                # Strip surrounding double quotes, if present.
                if self.name.startswith('"') and self.name.endswith('"'):
                    self.name = self.name[1:-1]
            if 'filename' in disp.params:
                self.filename = disp.params['filename']
                if (
                        self.filename.startswith('"') and
                        self.filename.endswith('"')
                ):
                    self.filename = self.filename[1:-1]
            if 'filename*' in disp.params:
                # @see https://tools.ietf.org/html/rfc5987
                encoding, lang, filename = disp.params['filename*'].split("'")
                self.filename = unquote(str(filename), encoding)

    # The read methods delegate to the wrapped fp (a SizedReader once
    # RequestBody.process has run).
    def read(self, size=None, fp_out=None):
        return self.fp.read(size, fp_out)

    def readline(self, size=None):
        return self.fp.readline(size)

    def readlines(self, sizehint=None):
        return self.fp.readlines(sizehint)

    def __iter__(self):
        # Iterating an Entity yields its lines, one at a time.
        return self

    def __next__(self):
        line = self.readline()
        if not line:
            raise StopIteration
        return line

    def next(self):
        # Python 2 iterator-protocol alias for __next__.
        return self.__next__()

    def read_into_file(self, fp_out=None):
        """Read the request body into fp_out (or make_file() if None).
        Return fp_out.
        """
        if fp_out is None:
            fp_out = self.make_file()
        self.read(fp_out=fp_out)
        return fp_out

    def make_file(self):
        """Return a file-like object into which the request body will be read.
        By default, this will return a TemporaryFile. Override as needed.
        See also :attr:`cherrypy._cpreqbody.Part.maxrambytes`."""
        return tempfile.TemporaryFile()

    def fullvalue(self):
        """Return this entity as a string, whether stored in a file or not."""
        # NOTE(review): .file and .value are assigned by the Part subclass
        # (or a processor); the base Entity never sets them itself --
        # confirm this is only called after processing.
        if self.file:
            # It was stored in a tempfile. Read it.
            self.file.seek(0)
            value = self.file.read()
            self.file.seek(0)
        else:
            value = self.value
        value = self.decode_entity(value)
        return value

    def decode_entity(self, value):
        """Return a given byte encoded value as a string"""
        # Try each candidate charset in order; the for/else raises 400
        # when none of them can decode the value.
        for charset in self.attempt_charsets:
            try:
                value = value.decode(charset)
            except UnicodeDecodeError:
                pass
            else:
                self.charset = charset
                return value
        else:
            raise cherrypy.HTTPError(
                400,
                'The request entity could not be decoded. The following '
                'charsets were attempted: %s' % repr(self.attempt_charsets)
            )

    def process(self):
        """Execute the best-match processor for the given media type."""
        proc = None
        ct = self.content_type.value
        try:
            proc = self.processors[ct]
        except KeyError:
            # Fall back from the full type (e.g. "image/jpeg") to the
            # major type (e.g. "image").
            toptype = ct.split('/', 1)[0]
            try:
                proc = self.processors[toptype]
            except KeyError:
                pass
        if proc is None:
            self.default_proc()
        else:
            proc(self)

    def default_proc(self):
        """Called if a more-specific processor is not found for the
        ``Content-Type``.
        """
        # Leave the fp alone for someone else to read. This works fine
        # for request.body, but the Part subclasses need to override this
        # so they can move on to the next part.
        pass
class Part(Entity):
    """A MIME part entity, part of a multipart entity."""
    # "The default character set, which must be assumed in the absence of a
    # charset parameter, is US-ASCII."
    attempt_charsets = ['us-ascii', 'utf-8']
    r"""A list of strings, each of which should be a known encoding.
    When the Content-Type of the request body warrants it, each of the given
    encodings will be tried in order. The first one to successfully decode the
    entity without raising an error is stored as
    :attr:`entity.charset<cherrypy._cpreqbody.Entity.charset>`. This defaults
    to ``['utf-8']`` (plus 'ISO-8859-1' for "text/\*" types, as required by
    `HTTP/1.1
    <http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7.1>`_),
    but ``['us-ascii', 'utf-8']`` for multipart parts.
    """
    boundary = None
    """The MIME multipart boundary."""
    default_content_type = 'text/plain'
    """This defines a default ``Content-Type`` to use if no Content-Type header
    is given. The empty string is used for RequestBody, which results in the
    request body not being read or parsed at all. This is by design; a missing
    ``Content-Type`` header in the HTTP request entity is an error at best,
    and a security hole at worst. For multipart parts, however (this class),
    the MIME spec declares that a part with no Content-Type defaults to
    "text/plain".
    """
    # This is the default in stdlib cgi. We may want to increase it.
    maxrambytes = 1000
    """The threshold of bytes after which point the ``Part`` will store
    its data in a file (generated by
    :func:`make_file<cherrypy._cprequest.Entity.make_file>`)
    instead of a string. Defaults to 1000, just like the :mod:`cgi`
    module in Python's standard library.
    """

    def __init__(self, fp, headers, boundary):
        Entity.__init__(self, fp, headers)
        self.boundary = boundary
        # Populated later by default_proc()/read_into_file().
        self.file = None
        self.value = None

    @classmethod
    def from_fp(cls, fp, boundary):
        # Alternate constructor: parse this part's MIME headers off fp,
        # then build the Part positioned at the start of its data.
        headers = cls.read_headers(fp)
        return cls(fp, headers, boundary)

    @classmethod
    def read_headers(cls, fp):
        headers = httputil.HeaderMap()
        while True:
            line = fp.readline()
            if not line:
                # No more data--illegal end of headers
                raise EOFError('Illegal end of headers.')
            if line == b'\r\n':
                # Normal end of headers
                break
            if not line.endswith(b'\r\n'):
                raise ValueError('MIME requires CRLF terminators: %r' % line)
            if line[0] in b' \t':
                # It's a continuation line.
                v = line.strip().decode('ISO-8859-1')
            else:
                k, v = line.split(b':', 1)
                k = k.strip().decode('ISO-8859-1')
                v = v.strip().decode('ISO-8859-1')
            existing = headers.get(k)
            if existing:
                # Repeated headers fold into one comma-separated value.
                v = ', '.join((existing, v))
            headers[k] = v
        return headers

    def read_lines_to_boundary(self, fp_out=None):
        """Read bytes from self.fp and return or write them to a file.
        If the 'fp_out' argument is None (the default), all bytes read are
        returned in a single byte string.
        If the 'fp_out' argument is not None, it must be a file-like
        object that supports the 'write' method; all bytes read will be
        written to the fp, and that fp is returned.
        """
        endmarker = self.boundary + b'--'
        # 'delim' holds back each line's trailing newline: the newline
        # just before a boundary belongs to the boundary, not the data,
        # so it is only emitted once the next line proves to be data.
        delim = b''
        prev_lf = True
        lines = []
        seen = 0
        while True:
            line = self.fp.readline(1 << 16)
            if not line:
                raise EOFError('Illegal end of multipart body.')
            # A boundary is only recognized at the start of a line
            # (i.e. right after a newline).
            if line.startswith(b'--') and prev_lf:
                strippedline = line.strip()
                if strippedline == self.boundary:
                    break
                if strippedline == endmarker:
                    self.fp.finish()
                    break
            line = delim + line
            if line.endswith(b'\r\n'):
                delim = b'\r\n'
                line = line[:-2]
                prev_lf = True
            elif line.endswith(b'\n'):
                delim = b'\n'
                line = line[:-1]
                prev_lf = True
            else:
                delim = b''
                prev_lf = False
            if fp_out is None:
                lines.append(line)
                seen += len(line)
                # Spill to a temp file once the in-memory size exceeds
                # maxrambytes.
                if seen > self.maxrambytes:
                    fp_out = self.make_file()
                    for line in lines:
                        fp_out.write(line)
            else:
                fp_out.write(line)
        if fp_out is None:
            result = b''.join(lines)
            return result
        else:
            fp_out.seek(0)
            return fp_out

    def default_proc(self):
        """Called if a more-specific processor is not found for the
        ``Content-Type``.
        """
        if self.filename:
            # Always read into a file if a .filename was given.
            self.file = self.read_into_file()
        else:
            result = self.read_lines_to_boundary()
            if isinstance(result, bytes):
                self.value = result
            else:
                # The data spilled past maxrambytes into a temp file.
                self.file = result

    def read_into_file(self, fp_out=None):
        """Read the request body into fp_out (or make_file() if None).
        Return fp_out.
        """
        if fp_out is None:
            fp_out = self.make_file()
        self.read_lines_to_boundary(fp_out=fp_out)
        return fp_out
# Wire the default part class onto Entity here, after Part is defined,
# to avoid a forward reference in the Entity class body.
Entity.part_class = Part

# Sentinel meaning "no length limit" in SizedReader.read().
inf = float('inf')
class SizedReader:
    """Wrap a socket file object, bounding reads by Content-Length (when
    known) and an optional ``maxbytes`` cap, with a small pushback buffer
    to support readline()."""

    def __init__(self, fp, length, maxbytes, bufsize=DEFAULT_BUFFER_SIZE,
                 has_trailers=False):
        # Wrap our fp in a buffer so peek() works
        self.fp = fp
        self.length = length
        self.maxbytes = maxbytes
        # Bytes read off the socket but pushed back by readline().
        self.buffer = b''
        self.bufsize = bufsize
        self.bytes_read = 0
        self.done = False
        self.has_trailers = has_trailers

    def read(self, size=None, fp_out=None):
        """Read bytes from the request body and return or write them to a file.
        A number of bytes less than or equal to the 'size' argument are read
        off the socket. The actual number of bytes read are tracked in
        self.bytes_read. The number may be smaller than 'size' when 1) the
        client sends fewer bytes, 2) the 'Content-Length' request header
        specifies fewer bytes than requested, or 3) the number of bytes read
        exceeds self.maxbytes (in which case, 413 is raised).
        If the 'fp_out' argument is None (the default), all bytes read are
        returned in a single byte string.
        If the 'fp_out' argument is not None, it must be a file-like
        object that supports the 'write' method; all bytes read will be
        written to the fp, and None is returned.
        """
        # Work out how many bytes remain to be read; 'inf' means read
        # until the client stops sending.
        if self.length is None:
            if size is None:
                remaining = inf
            else:
                remaining = size
        else:
            remaining = self.length - self.bytes_read
            if size and size < remaining:
                remaining = size
        if remaining == 0:
            self.finish()
            if fp_out is None:
                return b''
            else:
                return None
        chunks = []
        # Read bytes from the buffer.
        if self.buffer:
            if remaining is inf:
                data = self.buffer
                self.buffer = b''
            else:
                data = self.buffer[:remaining]
                self.buffer = self.buffer[remaining:]
            datalen = len(data)
            remaining -= datalen
            # Check lengths.
            self.bytes_read += datalen
            if self.maxbytes and self.bytes_read > self.maxbytes:
                raise cherrypy.HTTPError(413)
            # Store the data.
            if fp_out is None:
                chunks.append(data)
            else:
                fp_out.write(data)
        # Read bytes from the socket.
        while remaining > 0:
            chunksize = min(remaining, self.bufsize)
            try:
                data = self.fp.read(chunksize)
            except Exception:
                e = sys.exc_info()[1]
                if e.__class__.__name__ == 'MaxSizeExceeded':
                    # Post data is too big
                    raise cherrypy.HTTPError(
                        413, 'Maximum request length: %r' % e.args[1])
                else:
                    raise
            if not data:
                # Client stopped sending before 'remaining' was consumed.
                self.finish()
                break
            datalen = len(data)
            remaining -= datalen
            # Check lengths.
            self.bytes_read += datalen
            if self.maxbytes and self.bytes_read > self.maxbytes:
                raise cherrypy.HTTPError(413)
            # Store the data.
            if fp_out is None:
                chunks.append(data)
            else:
                fp_out.write(data)
        if fp_out is None:
            return b''.join(chunks)

    def readline(self, size=None):
        """Read a line from the request body and return it."""
        chunks = []
        while size is None or size > 0:
            chunksize = self.bufsize
            if size is not None and size < self.bufsize:
                chunksize = size
            data = self.read(chunksize)
            if not data:
                break
            pos = data.find(b'\n') + 1
            if pos:
                chunks.append(data[:pos])
                # Push back everything after the newline and undo its
                # effect on bytes_read so read() accounting stays correct.
                remainder = data[pos:]
                self.buffer += remainder
                self.bytes_read -= len(remainder)
                break
            else:
                chunks.append(data)
        return b''.join(chunks)

    def readlines(self, sizehint=None):
        """Read lines from the request body and return them."""
        if self.length is not None:
            if sizehint is None:
                sizehint = self.length - self.bytes_read
            else:
                sizehint = min(sizehint, self.length - self.bytes_read)
        lines = []
        seen = 0
        while True:
            line = self.readline()
            if not line:
                break
            lines.append(line)
            seen += len(line)
            # NOTE(review): when both sizehint and self.length are None,
            # 'seen >= sizehint' compares int to None and raises TypeError
            # on Python 3 -- confirm callers always pass a hint for
            # unsized bodies.
            if seen >= sizehint:
                break
        return lines

    def finish(self):
        # Mark the body fully consumed and, for chunked requests, collect
        # any trailer headers into self.trailers.
        self.done = True
        if self.has_trailers and hasattr(self.fp, 'read_trailer_lines'):
            self.trailers = {}
            try:
                for line in self.fp.read_trailer_lines():
                    if line[0] in b' \t':
                        # It's a continuation line.
                        v = line.strip()
                    else:
                        try:
                            k, v = line.split(b':', 1)
                        except ValueError:
                            raise ValueError('Illegal header line.')
                        k = k.strip().title()
                        v = v.strip()
                    if k in cheroot.server.comma_separated_headers:
                        existing = self.trailers.get(k)
                        if existing:
                            v = b', '.join((existing, v))
                    self.trailers[k] = v
            except Exception:
                e = sys.exc_info()[1]
                if e.__class__.__name__ == 'MaxSizeExceeded':
                    # Post data is too big
                    raise cherrypy.HTTPError(
                        413, 'Maximum request length: %r' % e.args[1])
                else:
                    raise
class RequestBody(Entity):
    """The entity of the HTTP request."""
    bufsize = 8 * 1024
    """The buffer size used when reading the socket."""
    # Don't parse the request body at all if the client didn't provide
    # a Content-Type header. See
    # https://github.com/cherrypy/cherrypy/issues/790
    default_content_type = ''
    """This defines a default ``Content-Type`` to use if no Content-Type header
    is given. The empty string is used for RequestBody, which results in the
    request body not being read or parsed at all. This is by design; a missing
    ``Content-Type`` header in the HTTP request entity is an error at best,
    and a security hole at worst. For multipart parts, however, the MIME spec
    declares that a part with no Content-Type defaults to "text/plain"
    (see :class:`Part<cherrypy._cpreqbody.Part>`).
    """
    maxbytes = None
    """Raise ``MaxSizeExceeded`` if more bytes than this are read from
    the socket.
    """

    def __init__(self, fp, headers, params=None, request_params=None):
        Entity.__init__(self, fp, headers, params)
        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7.1
        # When no explicit charset parameter is provided by the
        # sender, media subtypes of the "text" type are defined
        # to have a default charset value of "ISO-8859-1" when
        # received via HTTP.
        if self.content_type.value.startswith('text/'):
            # Append ISO-8859-1 unless some spelling of it is already
            # in the candidate list.
            for c in ('ISO-8859-1', 'iso-8859-1', 'Latin-1', 'latin-1'):
                if c in self.attempt_charsets:
                    break
            else:
                self.attempt_charsets.append('ISO-8859-1')
        # Temporary fix while deprecating passing .parts as .params.
        self.processors['multipart'] = _old_process_multipart
        if request_params is None:
            request_params = {}
        self.request_params = request_params

    def process(self):
        """Process the request entity based on its Content-Type."""
        # "The presence of a message-body in a request is signaled by the
        # inclusion of a Content-Length or Transfer-Encoding header field in
        # the request's message-headers."
        # It is possible to send a POST request with no body, for example;
        # however, app developers are responsible in that case to set
        # cherrypy.request.process_body to False so this method isn't called.
        h = cherrypy.serving.request.headers
        if 'Content-Length' not in h and 'Transfer-Encoding' not in h:
            raise cherrypy.HTTPError(411)
        # Replace the raw socket fp with a SizedReader that enforces
        # Content-Length/maxbytes and knows about chunked trailers.
        self.fp = SizedReader(self.fp, self.length,
                              self.maxbytes, bufsize=self.bufsize,
                              has_trailers='Trailer' in h)
        super(RequestBody, self).process()
        # Body params should also be a part of the request_params
        # add them in here.
        request_params = self.request_params
        for key, value in self.params.items():
            if key in request_params:
                if not isinstance(request_params[key], list):
                    request_params[key] = [request_params[key]]
                request_params[key].append(value)
            else:
                request_params[key] = value
| bsd-3-clause |
Kazade/NeHe-Website | google_appengine/lib/django-1.4/django/contrib/gis/gdal/datasource.py | 92 | 4724 | """
DataSource is a wrapper for the OGR Data Source object, which provides
an interface for reading vector geometry data from many different file
formats (including ESRI shapefiles).
When instantiating a DataSource object, use the filename of a
GDAL-supported data source. For example, a SHP file or a
TIGER/Line file from the government.
The ds_driver keyword is used internally when a ctypes pointer
is passed in directly.
Example:
ds = DataSource('/home/foo/bar.shp')
for layer in ds:
for feature in layer:
# Getting the geometry for the feature.
g = feature.geom
# Getting the 'description' field for the feature.
desc = feature['description']
# We can also increment through all of the fields
# attached to this feature.
for field in feature:
# Get the name of the field (e.g. 'description')
nm = field.name
# Get the type (integer) of the field, e.g. 0 => OFTInteger
t = field.type
# Returns the value the field; OFTIntegers return ints,
# OFTReal returns floats, all else returns string.
val = field.value
"""
# ctypes prerequisites.
from ctypes import byref
# The GDAL C library, OGR exceptions, and the Layer object.
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.driver import Driver
from django.contrib.gis.gdal.error import OGRException, OGRIndexError
from django.contrib.gis.gdal.layer import Layer
# Getting the ctypes prototypes for the DataSource.
from django.contrib.gis.gdal.prototypes import ds as capi
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_DS_* routines are relevant here.
class DataSource(GDALBase):
    "Wraps an OGR Data Source object."
    # NOTE: Python 2 code ('basestring', 'xrange' below).
    #### Python 'magic' routines ####

    def __init__(self, ds_input, ds_driver=False, write=False):
        # The write flag.
        if write:
            self._write = 1
        else:
            self._write = 0
        # Registering all the drivers, this needs to be done
        # _before_ we try to open up a data source.
        if not capi.get_driver_count():
            capi.register_all()
        if isinstance(ds_input, basestring):
            # The data source driver is a void pointer.
            ds_driver = Driver.ptr_type()
            try:
                # OGROpen will auto-detect the data source type.
                ds = capi.open_ds(ds_input, self._write, byref(ds_driver))
            except OGRException:
                # Making the error message more clear rather than something
                # like "Invalid pointer returned from OGROpen".
                raise OGRException('Could not open the datasource at "%s"' % ds_input)
        elif isinstance(ds_input, self.ptr_type) and isinstance(ds_driver, Driver.ptr_type):
            # A pre-opened ctypes pointer plus its driver pointer.
            ds = ds_input
        else:
            raise OGRException('Invalid data source input type: %s' % type(ds_input))
        if bool(ds):
            self.ptr = ds
            self.driver = Driver(ds_driver)
        else:
            # Raise an exception if the returned pointer is NULL
            raise OGRException('Invalid data source file "%s"' % ds_input)

    def __del__(self):
        "Destroys this DataStructure object."
        # Release the underlying OGR data source on garbage collection.
        if self._ptr: capi.destroy_ds(self._ptr)

    def __iter__(self):
        "Allows for iteration over the layers in a data source."
        for i in xrange(self.layer_count):
            yield self[i]

    def __getitem__(self, index):
        "Allows use of the index [] operator to get a layer at the index."
        # Layers may be looked up by name (string) or by position (int).
        if isinstance(index, basestring):
            l = capi.get_layer_by_name(self.ptr, index)
            if not l: raise OGRIndexError('invalid OGR Layer name given: "%s"' % index)
        elif isinstance(index, int):
            if index < 0 or index >= self.layer_count:
                raise OGRIndexError('index out of range')
            l = capi.get_layer(self._ptr, index)
        else:
            raise TypeError('Invalid index type: %s' % type(index))
        return Layer(l, self)

    def __len__(self):
        "Returns the number of layers within the data source."
        return self.layer_count

    def __str__(self):
        "Returns OGR GetName and Driver for the Data Source."
        return '%s (%s)' % (self.name, str(self.driver))

    @property
    def layer_count(self):
        "Returns the number of layers in the data source."
        return capi.get_layer_count(self._ptr)

    @property
    def name(self):
        "Returns the name of the data source."
        return capi.get_ds_name(self._ptr)
| bsd-3-clause |
squisher/llvmlite | setup.py | 3 | 3756 | try:
from setuptools import setup, Extension
from setuptools.command.build_py import build_py as build
from setuptools.command.build_ext import build_ext
from setuptools.command.install import install
except ImportError:
from distutils.core import setup, Extension
from distutils.command.build import build
from distutils.command.build_ext import build_ext
from distutils.command.install import install
from distutils.spawn import spawn
import os
import sys
# Read The Docs builds cannot compile the native library; bail out early.
if os.environ.get('READTHEDOCS', None) == 'True':
    sys.exit("setup.py disabled on readthedocs: called with %s"
             % (sys.argv,))

from llvmlite.utils import get_library_files
import versioneer

# Configure versioneer so the version string is derived from git tags.
versioneer.VCS = 'git'
versioneer.versionfile_source = 'llvmlite/_version.py'
versioneer.versionfile_build = 'llvmlite/_version.py'
versioneer.tag_prefix = 'v' # tags are like v1.2.0
versioneer.parentdir_prefix = 'llvmlite-' # dirname like 'myproject-1.2.0'

here_dir = os.path.dirname(__file__)

# Start from versioneer's command classes; the build/build_ext bases
# resolved here are subclassed below, falling back to the
# setuptools/distutils defaults when versioneer does not supply them.
cmdclass = versioneer.get_cmdclass()
build = cmdclass.get('build', build)
build_ext = cmdclass.get('build_ext', build_ext)
class LlvmliteBuild(build):
    """'build' command that always runs build_ext and keeps the output
    in the platform-specific build directory."""

    def finalize_options(self):
        build.finalize_options(self)
        # The build isn't platform-independent
        if self.build_lib == self.build_purelib:
            self.build_lib = self.build_platlib

    def get_sub_commands(self):
        # Force "build_ext" invocation.
        commands = build.get_sub_commands(self)
        for c in commands:
            if c == 'build_ext':
                return commands
        return ['build_ext'] + commands
class LlvmliteBuildExt(build_ext):
    """build_ext that also compiles the native library via ffi/build.py."""

    def run(self):
        build_ext.run(self)
        # Run the out-of-tree FFI build script with the current
        # interpreter so it targets the same Python.
        cmd = [sys.executable, os.path.join(here_dir, 'ffi', 'build.py')]
        spawn(cmd, dry_run=self.dry_run)
        # HACK: this makes sure the library file (which is large) is only
        # included in binary builds, not source builds.
        self.distribution.package_data = {
            "llvmlite.binding": get_library_files(),
        }
class LlvmliteInstall(install):
    """install command that registers the shared library as package data."""
    # Ensure install see the libllvmlite shared library
    # This seems to only be necessary on OSX.
    def run(self):
        self.distribution.package_data = {
            "llvmlite.binding": get_library_files(),
        }
        install.run(self)
# Register the custom commands alongside versioneer's.
cmdclass.update({'build': LlvmliteBuild,
                 'build_ext': LlvmliteBuildExt,
                 'install': LlvmliteInstall,
                 })

packages = ['llvmlite',
            'llvmlite.binding',
            'llvmlite.ir',
            'llvmlite.llvmpy',
            'llvmlite.tests',
            ]

install_requires = []
# 'enum' entered the stdlib in Python 3.4; older versions need the
# 'enum34' backport.
if sys.version_info < (3, 4):
    install_requires.append('enum34')

setup(name='llvmlite',
      description="lightweight wrapper around basic LLVM functionality",
      version=versioneer.get_version(),
      classifiers=[
          "Development Status :: 4 - Beta",
          "Intended Audience :: Developers",
          "Operating System :: OS Independent",
          "Programming Language :: Python",
          "Programming Language :: Python :: 2.6",
          "Programming Language :: Python :: 2.7",
          "Programming Language :: Python :: 3.3",
          "Programming Language :: Python :: 3.4",
          "Topic :: Software Development :: Code Generators",
          "Topic :: Software Development :: Compilers",
      ],
      # Include the separately-compiled shared library
      author="Continuum Analytics, Inc.",
      author_email="numba-users@continuum.io",
      url="http://llvmlite.pydata.org",
      download_url="https://github.com/numba/llvmlite",
      packages=packages,
      install_requires=install_requires,
      license="BSD",
      cmdclass=cmdclass,
      )
| bsd-2-clause |
mgoffin/osquery | tools/tests/utils.py | 1 | 5567 | #!/usr/bin/env python
# Copyright (c) 2014, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import os
import sys
import psutil
import time
import subprocess
import re
def red(msg):
    """Wrap *msg* in ANSI codes: black text on a red background."""
    return "\033[41m\033[1;30m " + str(msg) + " \033[0m"
def lightred(msg):
    """Wrap *msg* in ANSI codes: bright red foreground text."""
    return "\033[1;31m" + str(msg) + "\033[0m"
def yellow(msg):
    """Wrap *msg* in ANSI codes: black text on a yellow background."""
    return "\033[43m\033[1;30m " + str(msg) + " \033[0m"
def green(msg):
    """Wrap *msg* in ANSI codes: black text on a green background."""
    return "\033[42m\033[1;30m " + str(msg) + " \033[0m"
def blue(msg):
    """Wrap *msg* in ANSI codes: black text on a cyan background."""
    return "\033[46m\033[1;30m " + str(msg) + " \033[0m"
def read_config(path):
    """Parse the JSON file at *path* and return the resulting object."""
    with open(path, "r") as handle:
        return json.load(handle)
def write_config(data=None, path=None):
    """Serialize *data* as JSON to *path*.

    Args:
        data: the configuration object to serialize (defaults to {}).
        path: destination file; if None, it is taken from
            data["options"]["config_path"].
    """
    # Create a fresh dict per call: the previous mutable default ({}) was
    # shared between invocations and could leak state across callers.
    if data is None:
        data = {}
    if path is None:
        path = data["options"]["config_path"]
    with open(path, "w") as fh:
        fh.write(json.dumps(data))
def platform():
    """Return a normalized platform name derived from sys.platform.

    Collapses versioned identifiers such as 'linux2' or 'freebsd10' to
    'linux'/'freebsd'; anything else is returned unchanged.
    """
    name = sys.platform
    if name.startswith("linux"):
        return "linux"
    if name.startswith("freebsd"):
        return "freebsd"
    return name
def queries_from_config(config_path):
    """Extract scheduled queries from an osquery config file.

    Handles /* */ and // comments in the otherwise-JSON config, the
    legacy "scheduledQueries" list, the "schedule" map, and query packs
    referenced from "packs" (pack queries are prefixed with "pack_").

    Returns a dict mapping query name to SQL. Exits the process if the
    config cannot be parsed (status 1) or defines no queries (status 0).
    """
    config = {}
    # Strip C-style block comments and // line comments before parsing.
    # Raw string avoids the invalid \/ etc. escape sequences the old
    # pattern relied on.
    rmcomment = re.compile(r"/\*[*A-Za-z0-9\n\s.{}'/\\:]+\*/|//.*")
    try:
        with open(config_path, "r") as fh:
            content = rmcomment.sub('', fh.read())
        config = json.loads(content)
    except Exception as e:
        print("Cannot open/parse config: %s" % str(e))
        exit(1)
    queries = {}
    if "scheduledQueries" in config:
        for query in config["scheduledQueries"]:
            queries[query["name"]] = query["query"]
    if "schedule" in config:
        # .items() instead of .iteritems() for Python 2/3 compatibility.
        for name, details in config["schedule"].items():
            queries[name] = details["query"]
    if "packs" in config:
        for keys, values in config["packs"].items():
            with open(values) as fp:
                packcontent = rmcomment.sub('', fp.read())
            packqueries = json.loads(packcontent)
            for queryname, query in packqueries["queries"].items():
                queries["pack_" + queryname] = query["query"]
    if len(queries) == 0:
        print("Could not find a schedule/queries in config: %s" % config_path)
        exit(0)
    return queries
def queries_from_tables(path, restrict):
    """Construct select all queries from all tables."""
    # The caller may limit output via a comma-separated table-name list.
    wanted = [name.strip() for name in restrict.split(",")]
    found = []
    for base, _, files in os.walk(path):
        folder = os.path.basename(base)
        for spec in files:
            # Hidden files and the blacklist spec are not table specs.
            if spec.startswith('.') or spec == "blacklist":
                continue
            # Only generic specs plus the current platform's folder count.
            if folder not in ("specs", platform()):
                continue
            table = spec.split(".table", 1)[0]
            found.append("%s.%s" % (folder, table))
    if restrict:
        found = [entry for entry in found if entry.split(".")[1] in wanted]
    return dict(
        (entry, "SELECT * FROM %s;" % entry.split(".", 1)[1]) for entry in found
    )
def get_stats(p, interval=1):
    """Run psutil and downselect the information.

    *p* is a psutil.Process; cpu_percent blocks for *interval* seconds.
    """
    stats = {"utilization": p.cpu_percent(interval=interval)}
    # io_counters() is not available on macOS, so report None there.
    stats["counters"] = None if platform() == "darwin" else p.io_counters()
    stats["fds"] = p.num_fds()
    stats["cpu_times"] = p.cpu_times()
    stats["memory"] = p.memory_info_ex()
    return stats
def profile_cmd(cmd, proc=None, shell=False, timeout=0, count=1):
    """Run *cmd* (or poll an existing *proc*) and profile it with psutil.

    cmd:     command to launch when *proc* is None (list, or string if shell).
    proc:    an already-started subprocess.Popen to profile instead.
    timeout: seconds after which the process is killed (0 = no timeout).
    count:   accepted but unused here — presumably consumed by callers; TODO confirm.

    Returns a dict with average non-zero CPU utilization, wall duration,
    last-sampled memory/cpu/fd figures, and the process exit code.
    Raises Exception if no samples were ever collected.
    """
    start_time = time.time()
    if proc is None:
        proc = subprocess.Popen(cmd,
                                shell=shell,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
    p = psutil.Process(pid=proc.pid)
    delay = 0
    step = 0.5
    percents = []
    # Calculate the CPU utilization in intervals of 1 second.
    stats = {}
    # Poll until the process exits (or becomes a zombie awaiting reaping).
    while p.is_running() and p.status() != psutil.STATUS_ZOMBIE:
        try:
            current_stats = get_stats(p, step)
            # RSS of 0 means the process is effectively gone; stop sampling.
            if (current_stats["memory"].rss == 0):
                break
            stats = current_stats
            percents.append(stats["utilization"])
        except psutil.AccessDenied:
            break
        delay += step
        if timeout > 0 and delay >= timeout + 2:
            proc.kill()
            break
    # NOTE(review): the "- 2" (here and in the timeout check above) looks like
    # a fudge factor for sampling overhead — verify against callers.
    duration = time.time() - start_time - 2
    # Average only the non-zero samples so idle polls don't dilute the figure.
    utilization = [percent for percent in percents if percent != 0]
    if len(utilization) == 0:
        avg_utilization = 0
    else:
        avg_utilization = sum(utilization) / len(utilization)
    if len(stats.keys()) == 0:
        raise Exception("No stats recorded, perhaps binary returns -1?")
    return {
        "utilization": avg_utilization,
        "duration": duration,
        "memory": stats["memory"].rss,
        "user_time": stats["cpu_times"].user,
        "system_time": stats["cpu_times"].system,
        "cpu_time": stats["cpu_times"].user + stats["cpu_times"].system,
        "fds": stats["fds"],
        "exit": p.wait(),
    }
| bsd-3-clause |
sstrigger/Sick-Beard | sickbeard/metadata/xbmc_12plus.py | 8 | 14618 | # URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import generic
import datetime
from lib.tvdb_api import tvdb_api, tvdb_exceptions
import sickbeard
from sickbeard import logger, exceptions, helpers
from sickbeard.exceptions import ex
try:
import xml.etree.cElementTree as etree
except ImportError:
import elementtree.ElementTree as etree
class XBMC_12PlusMetadata(generic.GenericMetadata):
    """
    Metadata generation class for XBMC 12+.

    The following file structure is used:

    show_root/tvshow.nfo                    (show metadata)
    show_root/fanart.jpg                    (fanart)
    show_root/poster.jpg                    (poster)
    show_root/banner.jpg                    (banner)
    show_root/Season ##/filename.ext        (*)
    show_root/Season ##/filename.nfo        (episode metadata)
    show_root/Season ##/filename-thumb.jpg  (episode thumb)
    show_root/season##-poster.jpg           (season posters)
    show_root/season##-banner.jpg           (season banners)
    show_root/season-all-poster.jpg         (season all poster)
    show_root/season-all-banner.jpg         (season all banner)
    """

    def __init__(self,
                 show_metadata=False,
                 episode_metadata=False,
                 fanart=False,
                 poster=False,
                 banner=False,
                 episode_thumbnails=False,
                 season_posters=False,
                 season_banners=False,
                 season_all_poster=False,
                 season_all_banner=False):
        # All feature toggles are forwarded unchanged to the generic base.
        generic.GenericMetadata.__init__(self,
                                         show_metadata,
                                         episode_metadata,
                                         fanart,
                                         poster,
                                         banner,
                                         episode_thumbnails,
                                         season_posters,
                                         season_banners,
                                         season_all_poster,
                                         season_all_banner)

        self.name = 'XBMC 12+'

        # File names this provider writes (override the generic defaults).
        self.poster_name = "poster.jpg"
        self.season_all_poster_name = "season-all-poster.jpg"

        # web-ui metadata template
        self.eg_show_metadata = "tvshow.nfo"
        self.eg_episode_metadata = "Season##\\<i>filename</i>.nfo"
        self.eg_fanart = "fanart.jpg"
        self.eg_poster = "poster.jpg"
        self.eg_banner = "banner.jpg"
        self.eg_episode_thumbnails = "Season##\\<i>filename</i>-thumb.jpg"
        self.eg_season_posters = "season##-poster.jpg"
        self.eg_season_banners = "season##-banner.jpg"
        self.eg_season_all_poster = "season-all-poster.jpg"
        self.eg_season_all_banner = "season-all-banner.jpg"

    def _show_data(self, show_obj):
        """
        Creates an elementTree XML structure for an XBMC-style tvshow.nfo and
        returns the resulting data object.

        show_obj: a TVShow instance to create the NFO for

        Returns an etree.ElementTree, or False when TVDB's info for the show
        is incomplete.  Re-raises TVDB lookup/connectivity errors.
        """
        show_ID = show_obj.tvdbid

        tvdb_lang = show_obj.lang
        # There's gotta be a better way of doing this but we don't wanna
        # change the language value elsewhere
        ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy()

        if tvdb_lang and not tvdb_lang == 'en':
            ltvdb_api_parms['language'] = tvdb_lang

        t = tvdb_api.Tvdb(actors=True, **ltvdb_api_parms)

        tv_node = etree.Element("tvshow")

        try:
            myShow = t[int(show_ID)]
        except tvdb_exceptions.tvdb_shownotfound:
            logger.log(u"Unable to find show with id " + str(show_ID) + " on tvdb, skipping it", logger.ERROR)
            raise
        except tvdb_exceptions.tvdb_error:
            logger.log(u"TVDB is down, can't use its data to add this show", logger.ERROR)
            raise

        # check for title and id
        try:
            if myShow["seriesname"] is None or myShow["seriesname"] == "" or myShow["id"] is None or myShow["id"] == "":
                logger.log(u"Incomplete info for show with id " + str(show_ID) + " on tvdb, skipping it", logger.ERROR)
                return False
        except tvdb_exceptions.tvdb_attributenotfound:
            logger.log(u"Incomplete info for show with id " + str(show_ID) + " on tvdb, skipping it", logger.ERROR)
            return False

        # Each child element is always created; its text is only filled in
        # when TVDB supplied a value, so empty tags can appear in the NFO.
        title = etree.SubElement(tv_node, "title")
        if myShow["seriesname"] is not None:
            title.text = myShow["seriesname"]

        rating = etree.SubElement(tv_node, "rating")
        if myShow["rating"] is not None:
            rating.text = myShow["rating"]

        year = etree.SubElement(tv_node, "year")
        if myShow["firstaired"] is not None:
            try:
                # Derive the year from the first-aired date; swallow bad dates.
                year_text = str(datetime.datetime.strptime(myShow["firstaired"], '%Y-%m-%d').year)
                if year_text:
                    year.text = year_text
            except:
                pass

        plot = etree.SubElement(tv_node, "plot")
        if myShow["overview"] is not None:
            plot.text = myShow["overview"]

        # XBMC reads <episodeguide><url>; the flat <episodeguideurl> twin is
        # written as well — presumably for older scraper compatibility.
        episodeguide = etree.SubElement(tv_node, "episodeguide")
        episodeguideurl = etree.SubElement(episodeguide, "url")
        episodeguideurl2 = etree.SubElement(tv_node, "episodeguideurl")
        if myShow["id"] is not None:
            showurl = sickbeard.TVDB_BASE_URL + '/series/' + myShow["id"] + '/all/en.zip'
            episodeguideurl.text = showurl
            episodeguideurl2.text = showurl

        mpaa = etree.SubElement(tv_node, "mpaa")
        if myShow["contentrating"] is not None:
            mpaa.text = myShow["contentrating"]

        tvdbid = etree.SubElement(tv_node, "id")
        if myShow["id"] is not None:
            tvdbid.text = myShow["id"]

        genre = etree.SubElement(tv_node, "genre")
        if myShow["genre"] is not None:
            # TVDB sends genres pipe-separated; XBMC wants " / " separated.
            genre.text = " / ".join([x.strip() for x in myShow["genre"].split('|') if x and x.strip()])

        premiered = etree.SubElement(tv_node, "premiered")
        if myShow["firstaired"] is not None:
            premiered.text = myShow["firstaired"]

        studio = etree.SubElement(tv_node, "studio")
        if myShow["network"] is not None:
            studio.text = myShow["network"]

        # One <actor> element per cast member with a non-blank name.
        if myShow["_actors"] is not None:
            for actor in myShow["_actors"]:
                cur_actor_name_text = actor['name']
                if cur_actor_name_text is not None and cur_actor_name_text.strip():
                    cur_actor = etree.SubElement(tv_node, "actor")
                    cur_actor_name = etree.SubElement(cur_actor, "name")
                    cur_actor_name.text = cur_actor_name_text.strip()
                    cur_actor_role = etree.SubElement(cur_actor, "role")
                    cur_actor_role_text = actor['role']
                    if cur_actor_role_text is not None:
                        cur_actor_role.text = cur_actor_role_text
                    cur_actor_thumb = etree.SubElement(cur_actor, "thumb")
                    cur_actor_thumb_text = actor['image']
                    if cur_actor_thumb_text is not None:
                        cur_actor_thumb.text = cur_actor_thumb_text

        # Make it purdy
        helpers.indentXML(tv_node)

        data = etree.ElementTree(tv_node)

        return data

    def _ep_data(self, ep_obj):
        """
        Creates an elementTree XML structure for an XBMC-style episode.nfo and
        returns the resulting data object.

        show_obj: a TVEpisode instance to create the NFO for

        Multi-episode files get an <xbmcmultiepisode> root wrapping one
        <episodedetails> per episode.  Returns None when an episode is
        missing from TVDB or has no title.
        """
        eps_to_write = [ep_obj] + ep_obj.relatedEps

        tvdb_lang = ep_obj.show.lang

        # There's gotta be a better way of doing this but we don't wanna
        # change the language value elsewhere
        ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy()

        if tvdb_lang and not tvdb_lang == 'en':
            ltvdb_api_parms['language'] = tvdb_lang

        try:
            t = tvdb_api.Tvdb(actors=True, **ltvdb_api_parms)
            myShow = t[ep_obj.show.tvdbid]
        # NOTE(review): Python 2-only "except X, e" syntax — this module
        # cannot run under Python 3 as written.
        except tvdb_exceptions.tvdb_shownotfound, e:
            raise exceptions.ShowNotFoundException(e.message)
        except tvdb_exceptions.tvdb_error, e:
            logger.log(u"Unable to connect to TVDB while creating meta files - skipping - " + ex(e), logger.ERROR)
            return

        if len(eps_to_write) > 1:
            rootNode = etree.Element("xbmcmultiepisode")
        else:
            rootNode = etree.Element("episodedetails")

        # write an NFO containing info for all matching episodes
        for curEpToWrite in eps_to_write:

            try:
                myEp = myShow[curEpToWrite.season][curEpToWrite.episode]
            except (tvdb_exceptions.tvdb_episodenotfound, tvdb_exceptions.tvdb_seasonnotfound):
                logger.log(u"Unable to find episode " + str(curEpToWrite.season) + "x" + str(curEpToWrite.episode) + " on tvdb... has it been removed? Should I delete from db?")
                return None

            # date.fromordinal(1) is used throughout as the "unknown" airdate.
            if not myEp["firstaired"]:
                myEp["firstaired"] = str(datetime.date.fromordinal(1))

            if not myEp["episodename"]:
                logger.log(u"Not generating nfo because the ep has no title", logger.DEBUG)
                return None

            logger.log(u"Creating metadata for episode " + str(ep_obj.season) + "x" + str(ep_obj.episode), logger.DEBUG)

            if len(eps_to_write) > 1:
                episode = etree.SubElement(rootNode, "episodedetails")
            else:
                episode = rootNode

            title = etree.SubElement(episode, "title")
            if curEpToWrite.name is not None:
                title.text = curEpToWrite.name

            showtitle = etree.SubElement(episode, "showtitle")
            if curEpToWrite.show.name is not None:
                showtitle.text = curEpToWrite.show.name

            season = etree.SubElement(episode, "season")
            season.text = str(curEpToWrite.season)

            episodenum = etree.SubElement(episode, "episode")
            episodenum.text = str(curEpToWrite.episode)

            uniqueid = etree.SubElement(episode, "uniqueid")
            uniqueid.text = str(curEpToWrite.tvdbid)

            aired = etree.SubElement(episode, "aired")
            if curEpToWrite.airdate != datetime.date.fromordinal(1):
                aired.text = str(curEpToWrite.airdate)
            else:
                aired.text = ''

            plot = etree.SubElement(episode, "plot")
            if curEpToWrite.description is not None:
                plot.text = curEpToWrite.description

            runtime = etree.SubElement(episode, "runtime")
            # Season 0 (specials) gets no runtime tag content.
            if curEpToWrite.season != 0:
                if myShow["runtime"] is not None:
                    runtime.text = myShow["runtime"]

            # airsbefore_* drive XBMC's ordering of specials within seasons.
            displayseason = etree.SubElement(episode, "displayseason")
            if 'airsbefore_season' in myEp:
                displayseason_text = myEp['airsbefore_season']
                if displayseason_text is not None:
                    displayseason.text = displayseason_text

            displayepisode = etree.SubElement(episode, "displayepisode")
            if 'airsbefore_episode' in myEp:
                displayepisode_text = myEp['airsbefore_episode']
                if displayepisode_text is not None:
                    displayepisode.text = displayepisode_text

            thumb = etree.SubElement(episode, "thumb")
            thumb_text = myEp['filename']
            if thumb_text is not None:
                thumb.text = thumb_text

            watched = etree.SubElement(episode, "watched")
            watched.text = 'false'

            credits = etree.SubElement(episode, "credits")
            credits_text = myEp['writer']
            if credits_text is not None:
                credits.text = credits_text

            director = etree.SubElement(episode, "director")
            director_text = myEp['director']
            if director_text is not None:
                director.text = director_text

            rating = etree.SubElement(episode, "rating")
            rating_text = myEp['rating']
            if rating_text is not None:
                rating.text = rating_text

            # Guest stars arrive pipe-separated; one <actor> each.
            gueststar_text = myEp['gueststars']
            if gueststar_text is not None:
                for actor in (x.strip() for x in gueststar_text.split('|') if x and x.strip()):
                    cur_actor = etree.SubElement(episode, "actor")
                    cur_actor_name = etree.SubElement(cur_actor, "name")
                    cur_actor_name.text = actor

            if myShow['_actors'] is not None:
                for actor in myShow['_actors']:
                    cur_actor_name_text = actor['name']
                    if cur_actor_name_text is not None and cur_actor_name_text.strip():
                        cur_actor = etree.SubElement(episode, "actor")
                        cur_actor_name = etree.SubElement(cur_actor, "name")
                        cur_actor_name.text = cur_actor_name_text.strip()
                        cur_actor_role = etree.SubElement(cur_actor, "role")
                        cur_actor_role_text = actor['role']
                        if cur_actor_role_text is not None:
                            cur_actor_role.text = cur_actor_role_text
                        cur_actor_thumb = etree.SubElement(cur_actor, "thumb")
                        cur_actor_thumb_text = actor['image']
                        if cur_actor_thumb_text is not None:
                            cur_actor_thumb.text = cur_actor_thumb_text

        # Make it purdy
        helpers.indentXML(rootNode)

        data = etree.ElementTree(rootNode)

        return data
# present a standard "interface" from the module
# (the metadata framework discovers providers via this module-level name)
metadata_class = XBMC_12PlusMetadata
| gpl-3.0 |
vivekananda/fbeats | django/contrib/webdesign/templatetags/webdesign.py | 91 | 2167 | from django.contrib.webdesign.lorem_ipsum import words, paragraphs
from django import template
register = template.Library()
class LoremNode(template.Node):
    """Template node that emits "lorem ipsum" filler text when rendered."""

    def __init__(self, count, method, common):
        self.count = count
        self.method = method
        self.common = common

    def render(self, context):
        # A count that does not resolve to an integer falls back to 1.
        try:
            count = int(self.count.resolve(context))
        except (ValueError, TypeError):
            count = 1
        if self.method == 'w':
            return words(count, common=self.common)
        paras = paragraphs(count, common=self.common)
        if self.method == 'p':
            paras = ['<p>%s</p>' % p for p in paras]
        return u'\n\n'.join(paras)
@register.tag
def lorem(parser, token):
    """
    Creates random Latin text useful for providing test data in templates.

    Usage format::

        {% lorem [count] [method] [random] %}

    ``count`` is a number (or variable) containing the number of paragraphs or
    words to generate (default is 1).

    ``method`` is either ``w`` for words, ``p`` for HTML paragraphs, ``b`` for
    plain-text paragraph blocks (default is ``b``).

    ``random`` is the word ``random``, which if given, does not use the common
    paragraph (starting "Lorem ipsum dolor sit amet, consectetuer...").

    Examples:
        * ``{% lorem %}`` will output the common "lorem ipsum" paragraph
        * ``{% lorem 3 p %}`` will output the common "lorem ipsum" paragraph
          and two random paragraphs each wrapped in HTML ``<p>`` tags
        * ``{% lorem 2 w random %}`` will output two random latin words
    """
    bits = list(token.split_contents())
    tagname = bits[0]
    # Optional trailing "random" flag: skip the common opening paragraph.
    common = True
    if bits[-1] == 'random':
        common = False
        bits.pop()
    # Optional method flag (words / <p> paragraphs / plain blocks).
    method = bits.pop() if bits[-1] in ('w', 'p', 'b') else 'b'
    # Optional count, defaulting to a single unit.
    count = bits.pop() if len(bits) > 1 else '1'
    count = parser.compile_filter(count)
    # Anything left besides the tag name itself is a syntax error.
    if len(bits) != 1:
        raise template.TemplateSyntaxError("Incorrect format for %r tag" % tagname)
    return LoremNode(count, method, common)
| bsd-3-clause |
mikewiebe-ansible/ansible | lib/ansible/modules/network/fortios/fortios_firewall_address6_template.py | 13 | 13356 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
# Standard Ansible module metadata consumed by ansible-doc and the CI tooling.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_address6_template
short_description: Configure IPv6 address templates in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify firewall feature and address6_template category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
This attribute was present already in previous version in a deeper level.
It has been moved out to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
firewall_address6_template:
description:
- Configure IPv6 address templates.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
ip6:
description:
- IPv6 address prefix.
type: str
name:
description:
- IPv6 address template name.
required: true
type: str
subnet_segment:
description:
- IPv6 subnet segments.
type: list
suboptions:
bits:
description:
- Number of bits.
type: int
exclusive:
description:
- Enable/disable exclusive value.
type: str
choices:
- enable
- disable
id:
description:
- Subnet segment ID.
required: true
type: int
name:
description:
- Subnet segment name.
type: str
values:
description:
- Subnet segment values.
type: list
suboptions:
name:
description:
- Subnet segment value name.
required: true
type: str
value:
description:
- Subnet segment value.
type: str
subnet_segment_count:
description:
- Number of IPv6 subnet segments.
type: int
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure IPv6 address templates.
fortios_firewall_address6_template:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
firewall_address6_template:
ip6: "<your_own_value>"
name: "default_name_4"
subnet_segment:
-
bits: "6"
exclusive: "enable"
id: "8"
name: "default_name_9"
values:
-
name: "default_name_11"
value: "<your_own_value>"
subnet_segment_count: "13"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
    """Open a FortiOS API session (legacy fortiosapi mode) from *data*.

    Reads host/username/password/ssl_verify from the module parameters,
    enables API debug output, and selects HTTP vs HTTPS transport.
    """
    host = data['host']
    username = data['username']
    password = data['password']
    ssl_verify = data['ssl_verify']

    fos.debug('on')
    # HTTPS stays on unless the caller explicitly passed a falsy "https".
    https_mode = 'off' if ('https' in data and not data['https']) else 'on'
    fos.https(https_mode)

    fos.login(host, username, password, verify=ssl_verify)
def filter_firewall_address6_template_data(json):
    """Return only the API-recognized keys of *json* that carry a value.

    Unknown keys and keys explicitly set to None are dropped before the
    payload is sent to the device.
    """
    option_list = ['ip6', 'name', 'subnet_segment',
                   'subnet_segment_count']
    return dict((key, json[key]) for key in option_list
                if key in json and json[key] is not None)
def underscore_to_hyphen(data):
    """Recursively rewrite dict keys from foo_bar to foo-bar.

    Ansible argument names use underscores while the FortiOS API expects
    hyphens, so every dict key in the structure is translated.

    Fix: the original list branch did ``elem = underscore_to_hyphen(elem)``,
    which rebound only the loop variable and discarded the converted dict —
    dicts nested inside lists (e.g. subnet_segment entries) kept their
    underscores.  The converted element is now written back into the list.
    """
    if isinstance(data, list):
        for i, elem in enumerate(data):
            data[i] = underscore_to_hyphen(elem)
    elif isinstance(data, dict):
        new_data = {}
        for k, v in data.items():
            new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
        data = new_data
    # Scalars (str, int, None, ...) pass through unchanged.
    return data
def firewall_address6_template(data, fos):
    """Create/update or delete an address6-template object on the device.

    The desired state comes from the top-level 'state' parameter, falling
    back to the legacy per-section 'state' key for older playbooks.
    Returns the raw API response, or None for an unrecognized state.
    """
    vdom = data['vdom']
    state = True
    if 'state' in data and data['state']:
        state = data['state']
    elif 'state' in data['firewall_address6_template'] and data['firewall_address6_template']:
        state = data['firewall_address6_template']['state']

    section = data['firewall_address6_template']
    # Strip unset options and translate key names for the FortiOS API.
    filtered_data = underscore_to_hyphen(
        filter_firewall_address6_template_data(section))

    if state == "present":
        return fos.set('firewall',
                       'address6-template',
                       data=filtered_data,
                       vdom=vdom)
    if state == "absent":
        return fos.delete('firewall',
                          'address6-template',
                          mkey=filtered_data['name'],
                          vdom=vdom)
def is_successful_status(status):
    """True when the API reply reports success.

    A DELETE that returned HTTP 404 also counts: the object is already
    absent, which is the desired outcome.
    """
    if status['status'] == "success":
        return True
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_firewall(data, fos):
    """Dispatch to the resource handler and summarize the API outcome.

    Returns (is_error, changed, response).  NOTE(review): when the
    'firewall_address6_template' section is missing or empty, ``resp`` is
    never bound and this raises NameError — presumably unreachable given
    the module's argument spec; confirm before relying on it.
    """
    if data['firewall_address6_template']:
        resp = firewall_address6_template(data, fos)

    is_error = not is_successful_status(resp)
    changed = resp['status'] == "success"
    return is_error, changed, resp
def main():
    """Module entry point: build the argument spec, connect, apply, report."""
    # Argument spec mirrors the DOCUMENTATION block above; the nested
    # "options" dicts validate the firewall_address6_template payload.
    fields = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "state": {"required": False, "type": "str",
                  "choices": ["present", "absent"]},
        "firewall_address6_template": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "state": {"required": False, "type": "str",
                          "choices": ["present", "absent"]},
                "ip6": {"required": False, "type": "str"},
                "name": {"required": True, "type": "str"},
                "subnet_segment": {"required": False, "type": "list",
                                   "options": {
                                       "bits": {"required": False, "type": "int"},
                                       "exclusive": {"required": False, "type": "str",
                                                     "choices": ["enable", "disable"]},
                                       "id": {"required": True, "type": "int"},
                                       "name": {"required": False, "type": "str"},
                                       "values": {"required": False, "type": "list",
                                                  "options": {
                                                      "name": {"required": True, "type": "str"},
                                                      "value": {"required": False, "type": "str"}
                                                  }}
                                   }},
                "subnet_segment_count": {"required": False, "type": "int"}
            }
        }
    }

    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)

    # legacy_mode refers to using fortiosapi instead of HTTPAPI
    # (explicit host/username/password means a direct connection).
    legacy_mode = 'host' in module.params and module.params['host'] is not None and \
                  'username' in module.params and module.params['username'] is not None and \
                  'password' in module.params and module.params['password'] is not None

    if not legacy_mode:
        # HTTPAPI transport: reuse Ansible's persistent connection socket.
        if module._socket_path:
            connection = Connection(module._socket_path)
            fos = FortiOSHandler(connection)

            is_error, has_changed, result = fortios_firewall(module.params, fos)
        else:
            module.fail_json(**FAIL_SOCKET_MSG)
    else:
        # Legacy transport: import fortiosapi lazily so the module still
        # loads (and can report a clean error) when it is not installed.
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")

        fos = FortiOSAPI()

        login(module.params, fos)
        is_error, has_changed, result = fortios_firewall(module.params, fos)
        fos.logout()

    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)
| gpl-3.0 |
alqfahad/odoo | addons/stock_picking_wave/wizard/picking_to_wave.py | 382 | 1624 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class stock_picking_to_wave(osv.osv_memory):
    """Wizard that attaches the user-selected pickings to a picking wave.

    Fixes: a dataset/extraction artifact (`| agpl-3.0 |`) fused onto the
    final return statement made the line a syntax error — removed; and
    attach_pickings now tolerates being called without a context (the old
    code crashed with AttributeError on ``None.get``).
    """
    _name = 'stock.picking.to.wave'
    _description = 'Add pickings to a picking wave'
    _columns = {
        # The wave the selected pickings will be attached to.
        'wave_id': fields.many2one('stock.picking.wave', 'Picking Wave', required=True),
    }

    def attach_pickings(self, cr, uid, ids, context=None):
        """Write the chosen wave onto every picking in context['active_ids']."""
        if context is None:
            # The web client always supplies a context, but direct RPC calls
            # may not; default to an empty one instead of crashing.
            context = {}
        # use active_ids to add picking line to the selected wave
        wave_id = self.browse(cr, uid, ids, context=context)[0].wave_id.id
        picking_ids = context.get('active_ids', False)
        return self.pool.get('stock.picking').write(cr, uid, picking_ids, {'wave_id': wave_id})
paran0ids0ul/infernal-twin | build/reportlab/tests/test_paragraphs.py | 13 | 32361 | #Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
# tests some paragraph styles
__version__='''$Id$'''
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, outputfile, printLocation
setOutDir(__name__)
import unittest
from reportlab.platypus import Paragraph, SimpleDocTemplate, XBox, Indenter, XPreformatted, PageBreak, Spacer
from reportlab.lib.styles import ParagraphStyle
from reportlab.lib.units import inch
from reportlab.lib.abag import ABag
from reportlab.lib.colors import red, black, navy, white, green
from reportlab.lib.randomtext import randomText
from reportlab.lib.enums import TA_LEFT, TA_RIGHT, TA_CENTER, TA_JUSTIFY
from reportlab.rl_config import defaultPageSize, rtlSupport
from reportlab.pdfbase import ttfonts
from reportlab.pdfbase import pdfmetrics
from reportlab.lib.fonts import addMapping, tt2ps
(PAGE_WIDTH, PAGE_HEIGHT) = defaultPageSize
def myFirstPage(canvas, doc):
canvas.saveState()
canvas.setStrokeColor(red)
canvas.setLineWidth(5)
canvas.line(66,72,66,PAGE_HEIGHT-72)
canvas.setFont('Times-Bold',24)
canvas.drawString(108, PAGE_HEIGHT-54, "TESTING PARAGRAPH STYLES")
canvas.setFont('Times-Roman',12)
canvas.drawString(4 * inch, 0.75 * inch, "First Page")
canvas.restoreState()
def myLaterPages(canvas, doc):
canvas.saveState()
canvas.setStrokeColor(red)
canvas.setLineWidth(5)
canvas.line(66,72,66,PAGE_HEIGHT-72)
canvas.setFont('Times-Roman',12)
canvas.drawString(4 * inch, 0.75 * inch, "Page %d" % doc.page)
canvas.restoreState()
def getAFont():
'''register a font that supports most Unicode characters'''
I = []
font_name = 'DejaVuSans'
I.append([(font_name, 0, 0, font_name),
(font_name, 1, 0, font_name + '-Bold'),
(font_name, 0, 1, font_name + '-Oblique'),
(font_name, 1, 1, font_name + '-BoldOblique'),
])
font_name = 'FreeSerif'
I.append([(font_name, 0, 0, font_name),
(font_name, 1, 0, font_name + 'Bold'),
(font_name, 0, 1, font_name + 'Italic'),
(font_name, 1, 1, font_name + 'BoldItalic'),
])
for info in I:
n = 0
for font in info:
fontName = font[3]
try:
pdfmetrics.registerFont(ttfonts.TTFont(fontName,fontName + '.ttf'))
addMapping(*font)
n += 1
except:
pass
if n==4: return font[0]
raise ValueError('could not find suitable font')
class ParagraphTestCase(unittest.TestCase):
"Test Paragraph class (eyeball-test)."
def test0(self):
"""Test...
The story should contain...
Features to be visually confirmed by a human being are:
1. ...
2. ...
3. ...
"""
story = []
SA = story.append
#need a style
styNormal = ParagraphStyle('normal')
styGreen = ParagraphStyle('green',parent=styNormal,textColor=green)
styDots = ParagraphStyle('styDots',parent=styNormal,endDots='.')
styDots1 = ParagraphStyle('styDots1',parent=styNormal,endDots=ABag(text=' -',dy=2,textColor='red'))
styDotsR = ParagraphStyle('styDotsR',parent=styNormal,alignment=TA_RIGHT,endDots=' +')
styDotsC = ParagraphStyle('styDotsC',parent=styNormal,alignment=TA_CENTER,endDots=' *')
styDotsJ = ParagraphStyle('styDotsJ',parent=styNormal,alignment=TA_JUSTIFY,endDots=' =')
istyDots = ParagraphStyle('istyDots',parent=styNormal,firstLineIndent=12,leftIndent=6,endDots='.')
istyDots1 = ParagraphStyle('istyDots1',parent=styNormal,firstLineIndent=12,leftIndent=6,endDots=ABag(text=' -',dy=2,textColor='red'))
istyDotsR = ParagraphStyle('istyDotsR',parent=styNormal,firstLineIndent=12,leftIndent=6,alignment=TA_RIGHT,endDots=' +')
istyDotsC = ParagraphStyle('istyDotsC',parent=styNormal,firstLineIndent=12,leftIndent=6,alignment=TA_CENTER,endDots=' *')
istyDotsJ = ParagraphStyle('istyDotsJ',parent=styNormal,firstLineIndent=12,leftIndent=6,alignment=TA_JUSTIFY,endDots=' =')
styNormalCJK = ParagraphStyle('normal',wordWrap='CJK')
styDotsCJK = ParagraphStyle('styDots',parent=styNormalCJK,endDots='.')
styDots1CJK = ParagraphStyle('styDots1',parent=styNormalCJK,endDots=ABag(text=' -',dy=2,textColor='red'))
styDotsRCJK = ParagraphStyle('styDotsR',parent=styNormalCJK,alignment=TA_RIGHT,endDots=' +')
styDotsCCJK = ParagraphStyle('styDotsC',parent=styNormalCJK,alignment=TA_CENTER,endDots=' *')
styDotsJCJK = ParagraphStyle('styDotsJ',parent=styNormalCJK,alignment=TA_JUSTIFY,endDots=' =')
istyDotsCJK = ParagraphStyle('istyDots',parent=styNormalCJK,firstLineIndent=12,leftIndent=6,endDots='.')
istyDots1CJK = ParagraphStyle('istyDots1',parent=styNormalCJK,firstLineIndent=12,leftIndent=6,endDots=ABag(text=' -',dy=2,textColor='red'))
istyDotsRCJK = ParagraphStyle('istyDotsR',parent=styNormalCJK,firstLineIndent=12,leftIndent=6,alignment=TA_RIGHT,endDots=' +')
istyDotsCCJK = ParagraphStyle('istyDotsC',parent=styNormalCJK,firstLineIndent=12,leftIndent=6,alignment=TA_CENTER,endDots=' *')
istyDotsJCJK = ParagraphStyle('istyDotsJ',parent=styNormalCJK,firstLineIndent=12,leftIndent=6,alignment=TA_JUSTIFY,endDots=' =')
# some to test
stySpaced = ParagraphStyle('spaced',
parent=styNormal,
spaceBefore=12,
spaceAfter=12)
SA(Paragraph("This is a normal paragraph. "+ randomText(), styNormal))
SA(Paragraph("There follows a paragraph with only \"<br/>\"", styNormal))
SA(Paragraph("<br/>", styNormal))
SA(Paragraph("This has 12 points space before and after, set in the style. " + randomText(), stySpaced))
SA(Paragraph("This is normal. " + randomText(), styNormal))
SA(Paragraph("""<para spacebefore="12" spaceafter="12">
This has 12 points space before and after, set inline with
XML tag. It works too.""" + randomText() + "</para>",
styNormal))
SA(Paragraph("This is normal. " + randomText(), styNormal))
styBackground = ParagraphStyle('MyTitle',
fontName='Helvetica-Bold',
fontSize=24,
leading=28,
textColor=white,
backColor=navy)
SA(Paragraph("This is a title with a background. ", styBackground))
SA(Paragraph("""<para backcolor="pink">This got a background from the para tag</para>""", styNormal))
SA(Paragraph("""<para>\n\tThis has newlines and tabs on the front but inside the para tag</para>""", styNormal))
SA(Paragraph("""<para> This has spaces on the front but inside the para tag</para>""", styNormal))
SA(Paragraph("""\n\tThis has newlines and tabs on the front but no para tag""", styNormal))
SA(Paragraph(""" This has spaces on the front but no para tag""", styNormal))
SA(Paragraph("""This has <font color=blue backcolor=pink>blue text with pink background</font> here.""", styNormal))
SA(Paragraph("""<span color=blue backcolor=pink> Nothing but blue text with pink background. </span>""", styNormal))
SA(Paragraph("""This has <i>italic text</i> here.""", styNormal))
SA(Paragraph("""This has <b>bold text</b> here.""", styNormal))
SA(Paragraph("""This has <u>underlined text</u> here.""", styNormal))
SA(Paragraph("""This has <font color=blue><u>blue and <font color=red>red</font> underlined text</u></font> here.""", styNormal))
SA(Paragraph("""<u>green underlining</u>""", styGreen))
SA(Paragraph("""<u>green <font size="+4"><i>underlining</i></font></u>""", styGreen))
SA(Paragraph("""This has m<super>2</super> a superscript.""", styNormal))
SA(Paragraph("""This has m<sub>2</sub> a subscript. Like H<sub>2</sub>O!""", styNormal))
SA(Paragraph("""This has a font change to <font name=Helvetica>Helvetica</font>.""", styNormal))
#This one fails:
#SA(Paragraph("""This has a font change to <font name=Helvetica-Oblique>Helvetica-Oblique</font>.""", styNormal))
SA(Paragraph("""This has a font change to <font name=Helvetica><i>Helvetica in italics</i></font>.""", styNormal))
SA(Paragraph('''This one uses upper case tags and has set caseSensitive=0: Here comes <FONT FACE="Helvetica" SIZE="14pt">Helvetica 14</FONT> with <STRONG>strong</STRONG> <EM>emphasis</EM>.''', styNormal, caseSensitive=0))
SA(Paragraph('''The same as before, but has set not set caseSensitive, thus the tags are ignored: Here comes <FONT FACE="Helvetica" SIZE="14pt">Helvetica 14</FONT> with <STRONG>strong</STRONG> <EM>emphasis</EM>.''', styNormal))
SA(Paragraph('''This one uses fonts with size "14pt" and also uses the em and strong tags: Here comes <font face="Helvetica" size="14pt">Helvetica 14</font> with <Strong>strong</Strong> <em>emphasis</em>.''', styNormal, caseSensitive=0))
SA(Paragraph('''This uses a font size of 3cm: Here comes <font face="Courier" size="3cm">Courier 3cm</font> and normal again.''', styNormal, caseSensitive=0))
SA(Paragraph('''This is just a very long silly text to see if the <FONT face="Courier">caseSensitive</FONT> flag also works if the paragraph is <EM>very</EM> long. '''*20, styNormal, caseSensitive=0))
SA(Indenter("1cm"))
SA(Paragraph("<para><bullet bulletIndent='-0.7cm' bulletOffsetY='2'>1.1</bullet>sample bullet default anchor</para>", styNormal))
SA(Paragraph("<para><bullet bulletIndent='-0.7cm' bulletOffsetY='2'>1.22</bullet>sample bullet default anchor</para>", styNormal))
SA(Paragraph("<para><bullet bulletIndent='-0.7cm' bulletOffsetY='2' anchor='start'>1.1</bullet>sample bullet start align</para>", styNormal))
SA(Paragraph("<para><bullet bulletIndent='-0.7cm' bulletOffsetY='2' anchor='start'>1.22</bullet>sample bullet start align</para>", styNormal))
SA(Paragraph("<para><bullet bulletIndent='-0.7cm' bulletOffsetY='2' anchor='middle'>1.1</bullet>sample bullet middle align</para>", styNormal))
SA(Paragraph("<para><bullet bulletIndent='-0.7cm' bulletOffsetY='2' anchor='middle'>1.22</bullet>sample bullet middle align</para>", styNormal))
SA(Paragraph("<para><bullet bulletIndent='-0.7cm' bulletOffsetY='2' anchor='end'>1.1</bullet>sample bullet end align</para>", styNormal))
SA(Paragraph("<para><bullet bulletIndent='-0.7cm' bulletOffsetY='2' anchor='end'>1.22</bullet>sample bullet end align</para>", styNormal))
SA(Paragraph("<para><bullet bulletIndent='-0.7cm' bulletOffsetY='2' anchor='numeric'>1.1</bullet>sample bullet numeric align</para>", styNormal))
SA(Paragraph("<para><bullet bulletIndent='-0.7cm' bulletOffsetY='2' anchor='numeric'>1.22</bullet>sample bullet numeric align</para>", styNormal))
SA(Paragraph("<para><bullet bulletIndent='-0.7cm' bulletOffsetY='2' anchor='numeric'><span color='red'>1</span><span color='green'>.</span><span color='blue'>3</span></bullet>sample bullet numeric align</para>", styNormal))
SA(Paragraph("<para><bullet bulletIndent='-1cm' bulletOffsetY='2'><seq id='s0'/>)</bullet>Indented list bulletOffsetY=2. %s</para>" % randomText(), styNormal))
SA(Paragraph("<para><bullet bulletIndent='-1cm'><seq id='s0'/>)</bullet>Indented list. %s</para>" % randomText(), styNormal))
SA(Paragraph("<para><bullet bulletIndent='-1cm'><seq id='s0'/>)</bullet>Indented list. %s</para>" % randomText(), styNormal))
SA(Indenter("1cm"))
SA(XPreformatted("<para leftIndent='0.5cm' backcolor=pink><bullet bulletIndent='-1cm'><seq id='s1'/>)</bullet>Indented list.</para>", styNormal))
SA(XPreformatted("<para leftIndent='0.5cm' backcolor=palegreen><bullet bulletIndent='-1cm'><seq id='s1'/>)</bullet>Indented list.</para>", styNormal))
SA(Indenter("-1cm"))
SA(Paragraph("<para><bullet bulletIndent='-1cm'><seq id='s0'/>)</bullet>Indented list. %s</para>" % randomText(), styNormal))
SA(Indenter("-1cm"))
SA(Paragraph("<para>Indented list using seqChain/Format<seqChain order='s0 s1 s2 s3 s4'/><seqReset id='s0'/><seqFormat id='s0' value='1'/><seqFormat id='s1' value='a'/><seqFormat id='s2' value='i'/><seqFormat id='s3' value='A'/><seqFormat id='s4' value='I'/></para>", stySpaced))
SA(Indenter("1cm"))
SA(Paragraph("<para><bullet bulletIndent='-1cm'><seq id='s0'/>)</bullet>Indented list. %s</para>" % randomText(), styNormal))
SA(Paragraph("<para><bullet bulletIndent='-1cm'><seq id='s0'/>)</bullet>Indented list. %s</para>" % randomText(), styNormal))
SA(Paragraph("<para><bullet bulletIndent='-1cm'><seq id='s0'/>)</bullet>Indented list. %s</para>" % randomText(), styNormal))
SA(Indenter("1cm"))
SA(XPreformatted("<para backcolor=pink boffsety='-3'><bullet bulletIndent='-1cm'><seq id='s1'/>)</bullet>Indented list bulletOffsetY=-3.</para>", styNormal))
SA(XPreformatted("<para backcolor=pink><bullet bulletIndent='-1cm'><seq id='s1'/>)</bullet>Indented list.</para>", styNormal))
SA(Indenter("-1cm"))
SA(Paragraph("<para><bullet bulletIndent='-1cm'><seq id='s0'/>)</bullet>Indented list. %s</para>" % randomText(), styNormal))
SA(Indenter("1cm"))
SA(XPreformatted("<para backcolor=palegreen><bullet bulletIndent='-1cm'><seq id='s1'/>)</bullet>Indented list.</para>", styNormal))
SA(Indenter("1cm"))
SA(XPreformatted("<para><bullet bulletIndent='-1cm'><seq id='s2'/>)</bullet>Indented list. line1</para>", styNormal))
SA(XPreformatted("<para><bullet bulletIndent='-1cm'><seq id='s2'/>)</bullet>Indented list. line2</para>", styNormal))
SA(Indenter("-1cm"))
SA(XPreformatted("<para backcolor=palegreen><bullet bulletIndent='-1cm'><seq id='s1'/>)</bullet>Indented list.</para>", styNormal))
SA(Indenter("-1cm"))
SA(Indenter("-1cm"))
for i in range(2):
SA(PageBreak())
SA(Paragraph('''%s dotted paragraphs''' % (i and 'CJK' or 'Normal'), styNormal))
SA(Paragraph('''Simple paragraph with dots''', i and styDotsCJK or styDots))
SA(Paragraph('''Simple indented paragraph with dots''', i and istyDotsCJK or istyDots))
SA(Paragraph('''Simple centred paragraph with stars''', i and styDotsCCJK or styDotsC))
SA(Paragraph('''Simple centred indented paragraph with stars''', i and istyDotsCCJK or istyDotsC))
SA(Paragraph('''Simple right justified paragraph with pluses, but no pluses''', i and styDotsRCJK or styDotsR))
SA(Paragraph('''Simple right justified indented paragraph with pluses, but no pluses''', i and istyDotsRCJK or istyDotsR))
SA(Paragraph('''Simple justified paragraph with equals''', i and styDotsJCJK or styDotsJ))
SA(Paragraph('''Simple justified indented paragraph with equals''', i and istyDotsJCJK or istyDotsJ))
SA(Paragraph('''A longer simple paragraph with dots''', i and styDotsCJK or styDots))
SA(Paragraph('''A longer simple indented paragraph with dots''', i and istyDotsCJK or istyDots))
SA(Paragraph('A very much' +50*' longer'+' simple paragraph with dots', i and styDotsCJK or styDots))
SA(Paragraph('A very much' +50*' longer'+' simple indented paragraph with dots', i and istyDotsCJK or istyDots))
SA(Paragraph('A very much' +50*' longer'+' centred simple paragraph with stars', i and styDotsCCJK or styDotsC))
SA(Paragraph('A very much' +50*' longer'+' centred simple indented paragraph with stars', i and istyDotsCCJK or istyDotsC))
SA(Paragraph('A very much' +50*' longer'+' right justified simple paragraph with pluses, but no pluses', i and styDotsRCJK or styDotsR))
SA(Paragraph('A very much' +50*' longer'+' right justified simple indented paragraph with pluses, but no pluses', i and istyDotsRCJK or istyDotsR))
SA(Paragraph('A very much' +50*' longer'+' justified simple paragraph with equals', i and styDotsJCJK or styDotsJ))
SA(Paragraph('A very much' +50*' longer'+' justified simple indented paragraph with equals', i and istyDotsJCJK or istyDotsJ))
SA(Paragraph('''Simple paragraph with dashes that have a dy and a textColor.''', i and styDots1CJK or styDots1))
SA(Paragraph('''Simple indented paragraph with dashes that have a dy and a textColor.''', i and istyDots1CJK or istyDots1))
SA(Paragraph('''Complex <font color="green">paragraph</font> with dots''', i and styDotsCJK or styDots))
SA(Paragraph('''Complex <font color="green">indented paragraph</font> with dots''', i and istyDotsCJK or istyDots))
SA(Paragraph('''Complex centred <font color="green">paragraph</font> with stars''', i and styDotsCCJK or styDotsC))
SA(Paragraph('''Complex centred <font color="green">indented paragraph</font> with stars''', i and istyDotsCCJK or istyDotsC))
SA(Paragraph('''Complex right justfied <font color="green">paragraph</font> with pluses, but no pluses''', i and styDotsRCJK or styDotsR))
SA(Paragraph('''Complex right justfied <font color="green">indented paragraph</font> with pluses, but no pluses''', i and istyDotsRCJK or istyDotsR))
SA(Paragraph('''Complex justfied <font color="green">paragraph</font> with equals''', i and styDotsJCJK or styDotsJ))
SA(Paragraph('''Complex justfied <font color="green">indented paragraph</font> with equals''', i and istyDotsJCJK or istyDotsJ))
SA(Paragraph('''A longer complex <font color="green">paragraph</font> with dots''', i and styDotsCJK or styDots))
SA(Paragraph('''A longer complex <font color="green">indented paragraph</font> with dots''', i and istyDotsCJK or istyDots))
SA(Paragraph('A very much' +50*' longer'+' complex <font color="green">paragraph</font> with dots', i and styDotsCJK or styDots))
SA(Paragraph('A very much' +50*' longer'+' complex <font color="green">indented paragraph</font> with dots', i and istyDotsCJK or istyDots))
SA(Paragraph('''Complex <font color="green">paragraph</font> with dashes that have a dy and a textColor.''', i and styDots1CJK or styDots1))
SA(Paragraph('''Complex <font color="green">indented paragraph</font> with dashes that have a dy and a textColor.''', i and istyDots1CJK or istyDots1))
SA(Paragraph('A very much' +50*' longer'+' centred complex <font color="green">paragraph</font> with stars', i and styDotsCCJK or styDotsC))
SA(Paragraph('A very much' +50*' longer'+' centred complex <font color="green">indented paragraph</font> with stars', i and istyDotsCCJK or istyDotsC))
SA(Paragraph('A very much' +50*' longer'+' right justified <font color="green">complex</font> paragraph with pluses, but no pluses', i and styDotsRCJK or styDotsR))
SA(Paragraph('A very much' +50*' longer'+' right justified <font color="green">complex</font> indented paragraph with pluses, but no pluses', i and istyDotsRCJK or istyDotsR))
SA(Paragraph('A very much' +50*' longer'+' justified complex <font color="green">paragraph</font> with equals', i and styDotsJCJK or styDotsJ))
SA(Paragraph('A very much' +50*' longer'+' justified complex <font color="green">indented paragraph</font> with equals', i and istyDotsJCJK or istyDotsJ))
template = SimpleDocTemplate(outputfile('test_paragraphs.pdf'),
showBoundary=1)
template.build(story,
onFirstPage=myFirstPage, onLaterPages=myLaterPages)
if rtlSupport:
    def testBidi(self):
        """Bidirectional text: mixed Arabic/English paragraphs rendered with
        the wordWrap=None/'LTR'/'RTL' and left/right alignment combinations,
        written to test_paragraphs_bidi.pdf."""
        fontName = getAFont()
        # create styles based on the registered font
        stySTD = ParagraphStyle('STD', fontName = fontName)
        styRJ = ParagraphStyle('RJ', parent=stySTD, alignment=TA_RIGHT)
        styLTR = ParagraphStyle('LTR', parent=stySTD, wordWrap='LTR')
        styRTL = ParagraphStyle('RTL', parent = stySTD, alignment = TA_RIGHT,
                                wordWrap = 'RTL', spaceAfter = 12)
        # strings for testing Normal & LTR styles
        # (UTF-8 encoded byte strings; the \xd8..\xd9.. runs are Arabic script)
        ltrStrings = [# English followed by Arabic.
                      b'English followed by \xd8\xb9\xd8\xb1\xd8\xa8\xd9\x8a.',
                      # English with Arabic in the middle
                      b'English with \xd8\xb9\xd8\xb1\xd8\xa8\xd9\x8a in the middle.',
                      # English symbols (!@#$%^&*) Arabic
                      b'English symbols (!@#$%^&*) \xd8\xb9\xd8\xb1\xd8\xa8\xd9\x8a.',
                      # ((testing integers in LTR))
                      b'123 LTR 123 Integers 123.',
                      # ((testing decimals in LTR))
                      b'456.78 LTR 456.78 Decimals 456.78.',
                      # Long English text with RTL script in the middle, splitting over multiple lines
                      b'Long \xd8\xb7\xd9\x88\xd9\x8a\xd9\x84 English text'
                      b' \xd9\x86\xd8\xb5 \xd8\xb9\xd8\xb1\xd8\xa8\xd9\x8a with RTL script'
                      b' \xd9\x83\xd8\xaa\xd8\xa7\xd8\xa8\xd8\xa9 \xd9\x85\xd9\x86'
                      b' \xd8\xa7\xd9\x84\xd9\x8a\xd9\x85\xd9\x8a\xd9\x86 \xd8\xa5\xd9\x84\xd9\x89'
                      b' \xd8\xa7\xd9\x84\xd9\x8a\xd8\xb3\xd8\xa7\xd8\xb1 in the middle,'
                      b' \xd9\x81\xd9\x8a \xd8\xa7\xd9\x84\xd9\x88\xd8\xb3\xd8\xb7\xd8\x8c'
                      b' splitting \xd9\x85\xd9\x82\xd8\xb3\xd9\x85 over \xd8\xb9\xd9\x84\xd9\x89'
                      b' multiple lines \xd8\xb9\xd8\xaf\xd8\xa9 \xd8\xb3\xd8\xb7\xd9\x88\xd8\xb1.',
                      ]
        # strings for testing RTL
        rtlStrings = [# Arabic followed by English
                      b'\xd8\xb9\xd8\xb1\xd8\xa8\xd9\x8a \xd9\x85\xd8\xaa\xd8\xa8\xd9\x88\xd8\xb9'
                      b' \xd8\xa8\xd9\x80 English.',
                      # Arabic with English in the middle
                      b'\xd8\xb9\xd8\xb1\xd8\xa8\xd9\x8a \xd9\x85\xd8\xb9 English \xd9\x81\xd9\x8a'
                      b' \xd8\xa7\xd9\x84\xd9\x85\xd9\x86\xd8\xaa\xd8\xb5\xd9\x81.',
                      # Arabic symbols (!@##$%^&*) English
                      b'\xd8\xb1\xd9\x85\xd9\x88\xd8\xb2 \xd8\xb9\xd8\xb1\xd8\xa8\xd9\x8a\xd8\xa9'
                      b' (!@#$%^&*) English.',
                      # 123 from right to left 123 integer numbers 123. ((testing integers in RTL))
                      b'123 \xd9\x85\xd9\x86 \xd8\xa7\xd9\x84\xd9\x8a\xd9\x85\xd9\x8a\xd9\x86'
                      b' \xd8\xa5\xd9\x84\xd9\x89 \xd8\xa7\xd9\x84\xd9\x8a\xd8\xb3\xd8\xa7\xd8\xb1'
                      b' 123 \xd8\xa3\xd8\xb1\xd9\x82\xd8\xa7\xd9\x85'
                      b' \xd8\xb5\xd8\xad\xd9\x8a\xd8\xad\xd8\xa9 123.',
                      # 456.78 from right to left 456.78 decimal numbers 456.78. ((testing decimals in RTL))
                      b'456.78 \xd9\x85\xd9\x86 \xd8\xa7\xd9\x84\xd9\x8a\xd9\x85\xd9\x8a\xd9\x86'
                      b' \xd8\xa5\xd9\x84\xd9\x89 \xd8\xa7\xd9\x84\xd9\x8a\xd8\xb3\xd8\xa7\xd8\xb1'
                      b' 456.78 \xd8\xa3\xd8\xb1\xd9\x82\xd8\xa7\xd9\x85'
                      b' \xd8\xb9\xd8\xb4\xd8\xb1\xd9\x8a\xd8\xa9 456.78.',
                      # Long Arabic text with LTR text in the middle, splitting over multiple lines
                      b'\xd9\x86\xd8\xb5 \xd8\xb9\xd8\xb1\xd8\xa8\xd9\x8a \xd8\xb7\xd9\x88\xd9\x8a\xd9\x84'
                      b' Long Arabic text \xd9\x85\xd8\xb9 with \xd9\x83\xd8\xaa\xd8\xa7\xd8\xa8\xd8\xa9'
                      b' \xd9\x85\xd9\x86 \xd8\xa7\xd9\x84\xd9\x8a\xd8\xb3\xd8\xa7\xd8\xb1'
                      b' \xd8\xa5\xd9\x84\xd9\x89 \xd8\xa7\xd9\x84\xd9\x8a\xd9\x85\xd9\x8a\xd9\x86'
                      b' LTR script \xd9\x81\xd9\x8a \xd8\xa7\xd9\x84\xd9\x88\xd8\xb3\xd8\xb7\xd8\x8c'
                      b' in the middle, \xd9\x85\xd9\x82\xd8\xb3\xd9\x85 splitted'
                      b' \xd8\xb9\xd9\x84\xd9\x89 over \xd8\xb9\xd8\xaf\xd8\xa9'
                      b' \xd8\xb3\xd8\xb7\xd9\x88\xd8\xb1 multiple lines.'
                      ]
        assert len(ltrStrings) == len(rtlStrings)
        n = len(ltrStrings)
        # create a store to be printed
        story = []
        story.append(Paragraph("<b><i>Following pairs of left justified texts have style.wordWrap=None & 'LTR'.</i></b><br/>",stySTD))
        # write every LTR string and its corresponding RTL string to be matched.
        for i in xrange(n):
            story.append(Paragraph(ltrStrings[i], stySTD))
            story.append(Paragraph(ltrStrings[i], styLTR))
        story.append(Paragraph("<br/><b><i>Following pairs of right justfied texts have style.wordWrap=None & 'RTL'.</i></b><br/>",stySTD))
        for i in xrange(n):
            story.append(Paragraph(rtlStrings[i], styRJ))
            story.append(Paragraph(rtlStrings[i], styRTL))
        story.append(Paragraph("<b><i><br/>Following texts have style.wordWrap='RTL'</i></b>",stySTD))
        # a few additional scripts for testing.
        story.append(
            Paragraph(b'\xd9\x87\xd8\xb0\xd9\x87 \xd9\x81\xd9\x82\xd8\xb1\xd8\xa9'
                      b' \xd8\xb9\xd8\xa7\xd8\xaf\xd9\x8a\xd8\xa9. ', styRTL))
        story.append(
            Paragraph(b'\xd9\x87\xd8\xb0\xd9\x87 \xd8\xa7\xd9\x84\xd9\x81\xd9\x82\xd8\xb1\xd8\xa9'
                      b' \xd9\x84\xd8\xaf\xd9\x8a\xd9\x87\xd8\xa7 12'
                      b' \xd9\x86\xd9\x82\xd8\xb7\xd8\xa9 \xd9\x82\xd8\xa8\xd9\x84\xd9\x87\xd8\xa7'
                      b' \xd9\x88\xd8\xa8\xd8\xb9\xd8\xaf\xd9\x87\xd8\xa7. ', styRTL))
        story.append(
            Paragraph(b'<para spacebefore="12" spaceafter="12">'
                      b'\xd9\x87\xd8\xb0\xd9\x87 \xd8\xa7\xd9\x84\xd9\x81\xd9\x82\xd8\xb1\xd8\xa9'
                      b' \xd9\x84\xd8\xaf\xd9\x8a\xd9\x87\xd8\xa7 12 \xd9\x86\xd9\x82\xd8\xb7\xd8\xa9'
                      b' \xd9\x82\xd8\xa8\xd9\x84\xd9\x87\xd8\xa7'
                      b' \xd9\x88\xd8\xa8\xd8\xb9\xd8\xaf\xd9\x87\xd8\xa7\xd8\x8c'
                      b' \xd9\x85\xd8\xad\xd8\xaf\xd8\xaf\xd8\xa9 \xd8\xa8\xd9\x80 XML.'
                      b' \xd8\xa5\xd9\x86\xd9\x87\xd8\xa7 \xd8\xaa\xd8\xb9\xd9\x85\xd9\x84'
                      b' \xd8\xa3\xd9\x8a\xd8\xb6\xd8\xa7! \xd9\x80.'
                      b'</para>',
                      styRTL))
        # TODO: add more RTL scripts to the test (Farsi, Hebrew, etc.)
        template = SimpleDocTemplate(outputfile('test_paragraphs_bidi.pdf'))
        template.build(story)
def testRTLBullets(self):
    """Hebrew (RTL) paragraphs: plain text, intra-paragraph markup, and
    <bullet>-prefixed list items at three nesting depths, written to
    test_rtl_bullets.pdf."""
    try:
        # NOTE(review): side-effect-only import -- presumably pulls in extra
        # fonts/resources when mwlib is installed; optional, so failure is ignored.
        import mwlib.ext
    except ImportError:
        pass
    font_name = getAFont()
    doc = SimpleDocTemplate(outputfile('test_rtl_bullets.pdf'),showBoundary=True)
    p_style = ParagraphStyle('default')
    p_style.leftIndent = 0
    p_style.rightIndent = 0
    # one style per list nesting level, plus the plain paragraph style
    list_styles=[ParagraphStyle('list%d' % n) for n in range(3)]
    all_styles = list_styles[:]
    all_styles.append(p_style)
    direction='rtl'
    for s in all_styles:
        s.fontSize = 15
        s.leading = s.fontSize*1.2
        s.fontName = font_name
        if direction=='rtl':
            s.wordWrap = 'RTL'
            s.alignment = TA_RIGHT
        else:
            s.alignment = TA_JUSTIFY
    # deeper levels get progressively larger indents (right side for RTL)
    indent_amount = 20
    for list_lvl, list_style in enumerate(list_styles):
        list_lvl += 1
        list_style.bulletIndent = indent_amount*(list_lvl-1)
        if direction=='rtl':
            list_style.rightIndent = indent_amount*list_lvl
        else:
            list_style.leftIndent = indent_amount*list_lvl
    elements =[]
    # Hebrew sample text: [0] plain bytes, [1] bytes with <b>/<i> markup,
    # [2] unicode with a leading <bullet> element
    TEXTS=[
            b'\xd7\xa9\xd7\xa8 \xd7\x94\xd7\x91\xd7\x99\xd7\x98\xd7\x97\xd7\x95\xd7\x9f, \xd7\x94\xd7\x95\xd7\x90 \xd7\x94\xd7\xa9\xd7\xa8 \xd7\x94\xd7\x90\xd7\x97\xd7\xa8\xd7\x90\xd7\x99 \xd7\xa2\xd7\x9c \xd7\x9e\xd7\xa9\xd7\xa8\xd7\x93 \xd7\x96\xd7\x94. \xd7\xaa\xd7\xa4\xd7\xa7\xd7\x99\xd7\x93 \xd7\x96\xd7\x94 \xd7\xa0\xd7\x97\xd7\xa9\xd7\x91 \xd7\x9c\xd7\x90\xd7\x97\xd7\x93 \xd7\x94\xd7\xaa\xd7\xa4\xd7\xa7\xd7\x99\xd7\x93\xd7\x99\xd7\x9d \xd7\x94\xd7\x91\xd7\x9b\xd7\x99\xd7\xa8\xd7\x99\xd7\x9d \xd7\x91\xd7\x9e\xd7\x9e\xd7\xa9\xd7\x9c\xd7\x94. \xd7\x9c\xd7\xa9\xd7\xa8 \xd7\x94\xd7\x91\xd7\x99\xd7\x98\xd7\x97\xd7\x95\xd7\x9f \xd7\x9e\xd7\xaa\xd7\x9e\xd7\xa0\xd7\x94 \xd7\x9c\xd7\xa8\xd7\x95\xd7\x91 \xd7\x92\xd7\x9d \xd7\xa1\xd7\x92\xd7\x9f \xd7\xa9\xd7\xa8.',
            b'\xd7\xa9\xd7\xa8 \xd7\x94\xd7\x91\xd7\x99\xd7\x98\xd7\x97\xd7\x95\xd7\x9f, <b>\xd7\x94\xd7\x95\xd7\x90 \xd7\x94\xd7\xa9\xd7\xa8 \xd7\x94\xd7\x90\xd7\x97\xd7\xa8\xd7\x90\xd7\x99 \xd7\xa2\xd7\x9c \xd7\x9e\xd7\xa9\xd7\xa8\xd7\x93 \xd7\x96\xd7\x94.</b> \xd7\xaa\xd7\xa4\xd7\xa7\xd7\x99\xd7\x93 \xd7\x96\xd7\x94 <i>\xd7\xa0\xd7\x97\xd7\xa9\xd7\x91 \xd7\x9c\xd7\x90\xd7\x97\xd7\x93</i> \xd7\x94\xd7\xaa\xd7\xa4\xd7\xa7\xd7\x99\xd7\x93\xd7\x99\xd7\x9d <b><i>\xd7\x94\xd7\x91\xd7\x9b\xd7\x99\xd7\xa8\xd7\x99\xd7\x9d \xd7\x91\xd7\x9e\xd7\x9e\xd7\xa9\xd7\x9c\xd7\x94</i></b>. \xd7\x9c\xd7\xa9\xd7\xa8 \xd7\x94\xd7\x91\xd7\x99\xd7\x98\xd7\x97\xd7\x95\xd7\x9f \xd7\x9e\xd7\xaa\xd7\x9e\xd7\xa0\xd7\x94 \xd7\x9c\xd7\xa8\xd7\x95\xd7\x91 \xd7\x92\xd7\x9d \xd7\xa1\xd7\x92\xd7\x9f \xd7\xa9\xd7\xa8.',
            u'<bullet>\u2022</bullet>\u05e9\u05e8 \u05d4\u05d1\u05d9\u05d8\u05d7\u05d5\u05df, <b>\u05d4\u05d5\u05d0 \u05d4\u05e9\u05e8 \u05d4\u05d0\u05d7\u05e8\u05d0\u05d9 \u05e2\u05dc \u05de\u05e9\u05e8\u05d3 \u05d6\u05d4.</b> \u05ea\u05e4\u05e7\u05d9\u05d3 \u05d6\u05d4 <i>\u05e0\u05d7\u05e9\u05d1 \u05dc\u05d0\u05d7\u05d3</i> \u05d4\u05ea\u05e4\u05e7\u05d9\u05d3\u05d9\u05dd <b><i>\u05d4\u05d1\u05db\u05d9\u05e8\u05d9\u05dd \u05d1\u05de\u05de\u05e9\u05dc\u05d4</i></b>. \u05dc\u05e9\u05e8\u05d4\u05d1\u05d9\u05d8\u05d7\u05d5\u05df \u05de\u05ea\u05de\u05e0\u05d4 \u05dc\u05e8\u05d5\u05d1 \u05d2\u05dd \u05e1\u05d2\u05df \u05e9\u05e8.',
            ]
    # simple text in a paragraph
    # working with patch from Hosam Aly
    p = Paragraph(TEXTS[0], p_style)
    elements.append(p)
    elements.append(Spacer(0, 40))
    # uses intra paragraph markup -> style text
    p = Paragraph(TEXTS[1], p_style)
    elements.append(p)
    elements.append(Spacer(0, 40))
    # list item (just a paragraph with a leading <bullet> element
    for list_style in list_styles:
        p = Paragraph(TEXTS[2], list_style)
        elements.append(p)
    doc.build(elements)
def testParsing(self):
    """Improperly nested intra-paragraph markup must raise ValueError."""
    base_font = getAFont()
    bold_italic_font = tt2ps(base_font, 1, 1)
    plain_style = ParagraphStyle('STD', fontName=base_font)
    bi_style = ParagraphStyle('BI', fontName=bold_italic_font)
    # <b><i>...</b></i> closes the tags in the wrong order in both cases
    bad_markup = (
        ('aaaa <b><i>bibibi</b></i> ccccc', plain_style),
        ('AAAA <b><i>BIBIBI</b></i> CCCCC', bi_style),
        )
    for text, style in bad_markup:
        self.assertRaises(ValueError, Paragraph, text, style)
def makeSuite():
    """Build the unittest suite covering this module's test case."""
    suite = makeSuiteForClasses(ParagraphTestCase)
    return suite
#noruntests
if __name__ == "__main__":
    # Run the suite directly and report where the output PDFs were written.
    unittest.TextTestRunner().run(makeSuite())
    printLocation()
| gpl-3.0 |
moijes12/oh-mainline | vendor/packages/irc/scripts/servermap.py | 14 | 4670 | #! /usr/bin/env python
#
# Example program using irc.client.
#
# Copyright (C) 1999-2002 Joel Rosdahl
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Joel Rosdahl <joel@rosdahl.net>
#
# servermap connects to an IRC server and finds out what other IRC
# servers there are in the net and prints a tree-like map of their
# interconnections.
#
# Example:
#
# % ./servermap irc.dal.net somenickname
# Connecting to server...
# Getting links...
#
# 26 servers (18 leaves and 8 hubs)
#
# splitrock.tx.us.dal.net
# `-vader.ny.us.dal.net
# |-twisted.ma.us.dal.net
# |-sodre.nj.us.dal.net
# |-glass.oh.us.dal.net
# |-distant.ny.us.dal.net
# | |-algo.se.eu.dal.net
# | | |-borg.se.eu.dal.net
# | | | `-ced.se.eu.dal.net
# | | |-viking.no.eu.dal.net
# | | |-inco.fr.eu.dal.net
# | | |-paranoia.se.eu.dal.net
# | | |-gaston.se.eu.dal.net
# | | | `-powertech.no.eu.dal.net
# | | `-algo-u.se.eu.dal.net
# | |-philly.pa.us.dal.net
# | |-liberty.nj.us.dal.net
# | `-jade.va.us.dal.net
# `-journey.ca.us.dal.net
# |-ion.va.us.dal.net
# |-dragons.ca.us.dal.net
# |-toronto.on.ca.dal.net
# | `-netropolis-r.uk.eu.dal.net
# | |-traced.de.eu.dal.net
# | `-lineone.uk.eu.dal.net
# `-omega.ca.us.dal.net
import irc.client
import sys
def on_connect(connection, event):
    """Once welcomed by the server, ask it for its LINKS list."""
    out = sys.stdout
    out.write("\nGetting links...")
    out.flush()
    connection.links()
def on_passwdmismatch(connection, event):
    """The server wants a password we cannot supply: report and abort."""
    message = "Password required."
    print(message)
    sys.exit(1)
def on_links(connection, event):
    """Accumulate one LINKS reply (server, hub, description) into `links`."""
    global links
    args = event.arguments
    links.append((args[0], args[1], args[2]))
def on_endoflinks(connection, event):
    # All LINKS replies are in: build the hub -> children map, print the
    # leaf/hub statistics and the tree, then QUIT (handled by on_disconnect).
    global links
    print("\n")
    m = {}
    # m maps a hub server name to the list of servers linked directly behind it
    for (to_node, from_node, desc) in links:
        if from_node != to_node:
            m[from_node] = m.get(from_node, []) + [to_node]
    if connection.get_server_name() in m:
        if len(m[connection.get_server_name()]) == 1:
            # our entry point has a single link, so it is not itself a hub
            hubs = len(m) - 1
        else:
            hubs = len(m)
    else:
        hubs = 0
    print("%d servers (%d leaves and %d hubs)\n" % (len(links), len(links)-hubs, hubs))
    print_tree(0, [], connection.get_server_name(), m)
    connection.quit("Using irc.client.py")
def on_disconnect(connection, event):
    # The link is closed (after our QUIT): terminate the script cleanly.
    sys.exit(0)
def indent_string(level, active_levels, last):
    """Return the ASCII-art prefix for a tree node at depth `level`.

    active_levels lists the depths that still have pending siblings and
    therefore need a '| ' rail; `last` selects the '`-' corner instead of
    the '|-' tee for the final child.

    Fix: build the prefix with a list + ''.join instead of repeated string
    concatenation (which is quadratic in `level`). Output is unchanged.
    """
    if level == 0:
        return ""
    parts = ["| " if i in active_levels else "  " for i in range(level - 1)]
    parts.append("`-" if last else "|-")
    return "".join(parts)
def print_tree(level, active_levels, root, map, last=0):
    """Recursively print the server tree rooted at `root` to stdout.

    map   -- dict mapping a hub name to the servers linked behind it
             (parameter name kept for caller compatibility, even though it
             shadows the builtin).
    active_levels -- depths that still have siblings pending, forwarded to
             indent_string so the '|' rails line up.

    Fix: the local variable `list` shadowed the builtin `list`; renamed to
    `children`. Behavior is unchanged.
    """
    sys.stdout.write(indent_string(level, active_levels, last)
                     + root + "\n")
    if root in map:
        children = map[root]
        # all but the last child keep this depth "active" (a '|' rail)
        for child in children[:-1]:
            print_tree(level + 1, active_levels[:] + [level], child, map)
        print_tree(level + 1, active_levels[:], children[-1], map, 1)
def main():
    """Parse `<server[:port]> <nickname>` from argv, connect, install the
    LINKS handlers, and hand control to the IRC event loop."""
    global links
    if len(sys.argv) != 3:
        print("Usage: servermap <server[:port]> <nickname>")
        sys.exit(1)
    links = []  # filled by on_links, consumed by on_endoflinks
    s = sys.argv[1].split(":", 1)
    server = s[0]
    if len(s) == 2:
        try:
            port = int(s[1])
        except ValueError:
            print("Error: Erroneous port.")
            sys.exit(1)
    else:
        port = 6667  # default IRC port
    nickname = sys.argv[2]
    client = irc.client.IRC()
    sys.stdout.write("Connecting to server...")
    sys.stdout.flush()
    try:
        c = client.server().connect(server, port, nickname)
    except irc.client.ServerConnectionError as x:
        print(x)
        sys.exit(1)
    # wire up the event handlers defined above
    c.add_global_handler("welcome", on_connect)
    c.add_global_handler("passwdmismatch", on_passwdmismatch)
    c.add_global_handler("links", on_links)
    c.add_global_handler("endoflinks", on_endoflinks)
    c.add_global_handler("disconnect", on_disconnect)
    # runs until on_disconnect calls sys.exit
    client.process_forever()
# Allow use both as an importable module and as a command-line script.
if __name__ == '__main__':
    main()
| agpl-3.0 |
Befera/portfolio | node_modules/node-gyp/gyp/pylib/gyp/common_test.py | 2542 | 1970 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the common.py file."""
import gyp.common
import unittest
import sys
class TestTopologicallySorted(unittest.TestCase):
  """Tests for gyp.common.TopologicallySorted."""

  def test_Valid(self):
    """Test that sorting works on a valid graph with one possible order."""
    graph = {
        'a': ['b', 'c'],
        'b': [],
        'c': ['d'],
        'd': ['b'],
    }
    edge_of = lambda node: tuple(graph[node])
    expected_order = ['a', 'c', 'd', 'b']
    self.assertEqual(
        gyp.common.TopologicallySorted(graph.keys(), edge_of),
        expected_order)

  def test_Cycle(self):
    """Test that an exception is thrown on a cyclic graph."""
    graph = {
        'a': ['b'],
        'b': ['c'],
        'c': ['d'],
        'd': ['a'],
    }
    edge_of = lambda node: tuple(graph[node])
    self.assertRaises(
        gyp.common.CycleError, gyp.common.TopologicallySorted,
        graph.keys(), edge_of)
class TestGetFlavor(unittest.TestCase):
  """Test that gyp.common.GetFlavor works as intended"""
  # saved so tearDown can restore the patched sys.platform
  original_platform = ''

  def setUp(self):
    self.original_platform = sys.platform

  def tearDown(self):
    sys.platform = self.original_platform

  def assertFlavor(self, expected, argument, param):
    """Assert GetFlavor(param) == expected while sys.platform == argument."""
    sys.platform = argument
    self.assertEqual(expected, gyp.common.GetFlavor(param))

  def test_platform_default(self):
    # Flavor is derived from sys.platform when no 'flavor' param is given.
    # (Fix: dropped the stray trailing semicolons from the original.)
    self.assertFlavor('freebsd', 'freebsd9' , {})
    self.assertFlavor('freebsd', 'freebsd10', {})
    self.assertFlavor('openbsd', 'openbsd5' , {})
    self.assertFlavor('solaris', 'sunos5'   , {})
    self.assertFlavor('solaris', 'sunos'    , {})
    self.assertFlavor('linux'  , 'linux2'   , {})
    self.assertFlavor('linux'  , 'linux3'   , {})

  def test_param(self):
    # An explicit 'flavor' parameter overrides sys.platform entirely.
    self.assertFlavor('foobar', 'linux2' , {'flavor': 'foobar'})
if __name__ == '__main__':
  # Run this module's tests when executed directly.
  unittest.main()
| mit |
callmealien/wazimap_zambia | census/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/mbcssm.py | 1783 | 19590 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .constants import eStart, eError, eItsMe
# BIG5

# Byte -> character-class table: one entry per byte value 0x00-0xff
# (classFactor below says there are 5 classes).
BIG5_cls = (
    1,1,1,1,1,1,1,1,  # 00 - 07    #allow 0x00 as legal value
    1,1,1,1,1,1,0,0,  # 08 - 0f
    1,1,1,1,1,1,1,1,  # 10 - 17
    1,1,1,0,1,1,1,1,  # 18 - 1f
    1,1,1,1,1,1,1,1,  # 20 - 27
    1,1,1,1,1,1,1,1,  # 28 - 2f
    1,1,1,1,1,1,1,1,  # 30 - 37
    1,1,1,1,1,1,1,1,  # 38 - 3f
    2,2,2,2,2,2,2,2,  # 40 - 47
    2,2,2,2,2,2,2,2,  # 48 - 4f
    2,2,2,2,2,2,2,2,  # 50 - 57
    2,2,2,2,2,2,2,2,  # 58 - 5f
    2,2,2,2,2,2,2,2,  # 60 - 67
    2,2,2,2,2,2,2,2,  # 68 - 6f
    2,2,2,2,2,2,2,2,  # 70 - 77
    2,2,2,2,2,2,2,1,  # 78 - 7f
    4,4,4,4,4,4,4,4,  # 80 - 87
    4,4,4,4,4,4,4,4,  # 88 - 8f
    4,4,4,4,4,4,4,4,  # 90 - 97
    4,4,4,4,4,4,4,4,  # 98 - 9f
    4,3,3,3,3,3,3,3,  # a0 - a7
    3,3,3,3,3,3,3,3,  # a8 - af
    3,3,3,3,3,3,3,3,  # b0 - b7
    3,3,3,3,3,3,3,3,  # b8 - bf
    3,3,3,3,3,3,3,3,  # c0 - c7
    3,3,3,3,3,3,3,3,  # c8 - cf
    3,3,3,3,3,3,3,3,  # d0 - d7
    3,3,3,3,3,3,3,3,  # d8 - df
    3,3,3,3,3,3,3,3,  # e0 - e7
    3,3,3,3,3,3,3,3,  # e8 - ef
    3,3,3,3,3,3,3,3,  # f0 - f7
    3,3,3,3,3,3,3,0   # f8 - ff
)

# State-transition table, flattened: row = current state, column = the
# character class (5 per row) of the incoming byte.
BIG5_st = (
    eError,eStart,eStart,     3,eError,eError,eError,eError,#00-07
    eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,#08-0f
    eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart#10-17
)

# Byte length of a character that begins with a byte of each class.
Big5CharLenTable = (0, 1, 1, 2, 0)

# Bundle consumed by the coding state machine.
Big5SMModel = {'classTable': BIG5_cls,
               'classFactor': 5,
               'stateTable': BIG5_st,
               'charLenTable': Big5CharLenTable,
               'name': 'Big5'}
# CP949

# Byte -> character-class table for CP949 (16 entries per row; 10 classes,
# per classFactor below).
CP949_cls  = (
    1,1,1,1,1,1,1,1, 1,1,1,1,1,1,0,0,  # 00 - 0f
    1,1,1,1,1,1,1,1, 1,1,1,0,1,1,1,1,  # 10 - 1f
    1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,  # 20 - 2f
    1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,  # 30 - 3f
    1,4,4,4,4,4,4,4, 4,4,4,4,4,4,4,4,  # 40 - 4f
    4,4,5,5,5,5,5,5, 5,5,5,1,1,1,1,1,  # 50 - 5f
    1,5,5,5,5,5,5,5, 5,5,5,5,5,5,5,5,  # 60 - 6f
    5,5,5,5,5,5,5,5, 5,5,5,1,1,1,1,1,  # 70 - 7f
    0,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6,  # 80 - 8f
    6,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6,  # 90 - 9f
    6,7,7,7,7,7,7,7, 7,7,7,7,7,8,8,8,  # a0 - af
    7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,  # b0 - bf
    7,7,7,7,7,7,9,2, 2,3,2,2,2,2,2,2,  # c0 - cf
    2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2,  # d0 - df
    2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2,  # e0 - ef
    2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,0,  # f0 - ff
    )

# State-transition table: 10 columns (one per class) per previous state.
CP949_st = (
#cls=       0      1      2      3      4      5      6      7      8      9  # previous state =
    eError,eStart, 3,eError,eStart,eStart, 4, 5,eError, 6, # eStart
    eError,eError,eError,eError,eError,eError,eError,eError,eError,eError, # eError
    eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe, # eItsMe
    eError,eError,eStart,eStart,eError,eError,eError,eStart,eStart,eStart, # 3
    eError,eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart, # 4
    eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart, # 5
    eError,eStart,eStart,eStart,eStart,eError,eError,eStart,eStart,eStart, # 6
)

# Byte length of a character starting with a byte of each class.
CP949CharLenTable = (0, 1, 2, 0, 1, 1, 2, 2, 0, 2)

# Bundle consumed by the coding state machine.
CP949SMModel = {'classTable': CP949_cls,
                'classFactor': 10,
                'stateTable': CP949_st,
                'charLenTable': CP949CharLenTable,
                'name': 'CP949'}
# EUC-JP
# EUC-JP (Japanese) coding state machine.
EUCJP_cls = (
4,4,4,4,4,4,4,4, # 00 - 07
4,4,4,4,4,4,5,5, # 08 - 0f
4,4,4,4,4,4,4,4, # 10 - 17
4,4,4,5,4,4,4,4, # 18 - 1f
4,4,4,4,4,4,4,4, # 20 - 27
4,4,4,4,4,4,4,4, # 28 - 2f
4,4,4,4,4,4,4,4, # 30 - 37
4,4,4,4,4,4,4,4, # 38 - 3f
4,4,4,4,4,4,4,4, # 40 - 47
4,4,4,4,4,4,4,4, # 48 - 4f
4,4,4,4,4,4,4,4, # 50 - 57
4,4,4,4,4,4,4,4, # 58 - 5f
4,4,4,4,4,4,4,4, # 60 - 67
4,4,4,4,4,4,4,4, # 68 - 6f
4,4,4,4,4,4,4,4, # 70 - 77
4,4,4,4,4,4,4,4, # 78 - 7f
5,5,5,5,5,5,5,5, # 80 - 87
5,5,5,5,5,5,1,3, # 88 - 8f
5,5,5,5,5,5,5,5, # 90 - 97
5,5,5,5,5,5,5,5, # 98 - 9f
5,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,0,5 # f8 - ff
)
EUCJP_st = (
3, 4, 3, 5,eStart,eError,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eStart,eError,eStart,eError,eError,eError,#10-17
eError,eError,eStart,eError,eError,eError, 3,eError,#18-1f
3,eError,eError,eError,eStart,eStart,eStart,eStart#20-27
)
# Class 3 (0x8f lead byte) opens a 3-byte JIS X 0212 sequence.
EUCJPCharLenTable = (2, 2, 2, 3, 1, 0)
EUCJPSMModel = {'classTable': EUCJP_cls,
'classFactor': 6,
'stateTable': EUCJP_st,
'charLenTable': EUCJPCharLenTable,
'name': 'EUC-JP'}
# EUC-KR
# EUC-KR (Korean) coding state machine.
EUCKR_cls = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
1,1,1,1,1,1,1,1, # 40 - 47
1,1,1,1,1,1,1,1, # 48 - 4f
1,1,1,1,1,1,1,1, # 50 - 57
1,1,1,1,1,1,1,1, # 58 - 5f
1,1,1,1,1,1,1,1, # 60 - 67
1,1,1,1,1,1,1,1, # 68 - 6f
1,1,1,1,1,1,1,1, # 70 - 77
1,1,1,1,1,1,1,1, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,3,3,3, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,3,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,0 # f8 - ff
)
EUCKR_st = (
eError,eStart, 3,eError,eError,eError,eError,eError,#00-07
eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,eStart,eStart #08-0f
)
EUCKRCharLenTable = (0, 1, 2, 0)
EUCKRSMModel = {'classTable': EUCKR_cls,
'classFactor': 4,
'stateTable': EUCKR_st,
'charLenTable': EUCKRCharLenTable,
'name': 'EUC-KR'}
# EUC-TW
# EUC-TW (Traditional Chinese, CNS 11643) coding state machine.
EUCTW_cls = (
2,2,2,2,2,2,2,2, # 00 - 07
2,2,2,2,2,2,0,0, # 08 - 0f
2,2,2,2,2,2,2,2, # 10 - 17
2,2,2,0,2,2,2,2, # 18 - 1f
2,2,2,2,2,2,2,2, # 20 - 27
2,2,2,2,2,2,2,2, # 28 - 2f
2,2,2,2,2,2,2,2, # 30 - 37
2,2,2,2,2,2,2,2, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,2, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,6,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,3,4,4,4,4,4,4, # a0 - a7
5,5,1,1,1,1,1,1, # a8 - af
1,1,1,1,1,1,1,1, # b0 - b7
1,1,1,1,1,1,1,1, # b8 - bf
1,1,3,1,3,3,3,3, # c0 - c7
3,3,3,3,3,3,3,3, # c8 - cf
3,3,3,3,3,3,3,3, # d0 - d7
3,3,3,3,3,3,3,3, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,3,3,3, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,3,3,0 # f8 - ff
)
EUCTW_st = (
eError,eError,eStart, 3, 3, 3, 4,eError,#00-07
eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eStart,eError,#10-17
eStart,eStart,eStart,eError,eError,eError,eError,eError,#18-1f
5,eError,eError,eError,eStart,eError,eStart,eStart,#20-27
eStart,eError,eStart,eStart,eStart,eStart,eStart,eStart #28-2f
)
# Class 6 (0x8e lead byte) opens a 4-byte plane-shifted sequence (... 3 more).
EUCTWCharLenTable = (0, 0, 1, 2, 2, 2, 3)
EUCTWSMModel = {'classTable': EUCTW_cls,
'classFactor': 7,
'stateTable': EUCTW_st,
'charLenTable': EUCTWCharLenTable,
'name': 'x-euc-tw'}
# GB2312
# GB2312 / GB18030 (Simplified Chinese) coding state machine.
GB2312_cls = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
3,3,3,3,3,3,3,3, # 30 - 37
3,3,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,4, # 78 - 7f
5,6,6,6,6,6,6,6, # 80 - 87
6,6,6,6,6,6,6,6, # 88 - 8f
6,6,6,6,6,6,6,6, # 90 - 97
6,6,6,6,6,6,6,6, # 98 - 9f
6,6,6,6,6,6,6,6, # a0 - a7
6,6,6,6,6,6,6,6, # a8 - af
6,6,6,6,6,6,6,6, # b0 - b7
6,6,6,6,6,6,6,6, # b8 - bf
6,6,6,6,6,6,6,6, # c0 - c7
6,6,6,6,6,6,6,6, # c8 - cf
6,6,6,6,6,6,6,6, # d0 - d7
6,6,6,6,6,6,6,6, # d8 - df
6,6,6,6,6,6,6,6, # e0 - e7
6,6,6,6,6,6,6,6, # e8 - ef
6,6,6,6,6,6,6,6, # f0 - f7
6,6,6,6,6,6,6,0 # f8 - ff
)
GB2312_st = (
eError,eStart,eStart,eStart,eStart,eStart, 3,eError,#00-07
eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,eStart,#10-17
4,eError,eStart,eStart,eError,eError,eError,eError,#18-1f
eError,eError, 5,eError,eError,eError,eItsMe,eError,#20-27
eError,eError,eStart,eStart,eStart,eStart,eStart,eStart #28-2f
)
# To be accurate, the length of class 6 can be either 2 or 4.
# But it is not necessary to discriminate between the two since
# it is used for frequency analysis only, and we are validating
# each code range there as well. So it is safe to set it to be
# 2 here.
GB2312CharLenTable = (0, 1, 1, 1, 1, 1, 2)
GB2312SMModel = {'classTable': GB2312_cls,
'classFactor': 7,
'stateTable': GB2312_st,
'charLenTable': GB2312CharLenTable,
'name': 'GB2312'}
# Shift_JIS
# Shift_JIS (Japanese) coding state machine.
SJIS_cls = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,1, # 78 - 7f
3,3,3,3,3,2,2,3, # 80 - 87
3,3,3,3,3,3,3,3, # 88 - 8f
3,3,3,3,3,3,3,3, # 90 - 97
3,3,3,3,3,3,3,3, # 98 - 9f
#0xa0 is illegal in sjis encoding, but some pages do
#contain such a byte. We need to be more error-forgiving here.
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,4,4,4, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,0,0,0) # f8 - ff
SJIS_st = (
eError,eStart,eStart, 3,eError,eError,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eError,eError,eStart,eStart,eStart,eStart #10-17
)
SJISCharLenTable = (0, 1, 1, 2, 0, 0)
SJISSMModel = {'classTable': SJIS_cls,
'classFactor': 6,
'stateTable': SJIS_st,
'charLenTable': SJISCharLenTable,
'name': 'Shift_JIS'}
# UCS2-BE
# UTF-16 big-endian coding state machine (detects BOM and byte-order cues).
UCS2BE_cls = (
0,0,0,0,0,0,0,0, # 00 - 07
0,0,1,0,0,2,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,3,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,3,3,3,3,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,0,0,0,0,0,0,0, # a0 - a7
0,0,0,0,0,0,0,0, # a8 - af
0,0,0,0,0,0,0,0, # b0 - b7
0,0,0,0,0,0,0,0, # b8 - bf
0,0,0,0,0,0,0,0, # c0 - c7
0,0,0,0,0,0,0,0, # c8 - cf
0,0,0,0,0,0,0,0, # d0 - d7
0,0,0,0,0,0,0,0, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,4,5 # f8 - ff
)
UCS2BE_st = (
5, 7, 7,eError, 4, 3,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe, 6, 6, 6, 6,eError,eError,#10-17
6, 6, 6, 6, 6,eItsMe, 6, 6,#18-1f
6, 6, 6, 6, 5, 7, 7,eError,#20-27
5, 8, 6, 6,eError, 6, 6, 6,#28-2f
6, 6, 6, 6,eError,eError,eStart,eStart #30-37
)
UCS2BECharLenTable = (2, 2, 2, 0, 2, 2)
UCS2BESMModel = {'classTable': UCS2BE_cls,
'classFactor': 6,
'stateTable': UCS2BE_st,
'charLenTable': UCS2BECharLenTable,
'name': 'UTF-16BE'}
# UCS2-LE
# UTF-16 little-endian coding state machine (same class table as UCS2-BE,
# mirrored state transitions).
UCS2LE_cls = (
0,0,0,0,0,0,0,0, # 00 - 07
0,0,1,0,0,2,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,3,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,3,3,3,3,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,0,0,0,0,0,0,0, # a0 - a7
0,0,0,0,0,0,0,0, # a8 - af
0,0,0,0,0,0,0,0, # b0 - b7
0,0,0,0,0,0,0,0, # b8 - bf
0,0,0,0,0,0,0,0, # c0 - c7
0,0,0,0,0,0,0,0, # c8 - cf
0,0,0,0,0,0,0,0, # d0 - d7
0,0,0,0,0,0,0,0, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,4,5 # f8 - ff
)
UCS2LE_st = (
6, 6, 7, 6, 4, 3,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe, 5, 5, 5,eError,eItsMe,eError,#10-17
5, 5, 5,eError, 5,eError, 6, 6,#18-1f
7, 6, 8, 8, 5, 5, 5,eError,#20-27
5, 5, 5,eError,eError,eError, 5, 5,#28-2f
5, 5, 5,eError, 5,eError,eStart,eStart #30-37
)
UCS2LECharLenTable = (2, 2, 2, 2, 2, 2)
UCS2LESMModel = {'classTable': UCS2LE_cls,
'classFactor': 6,
'stateTable': UCS2LE_st,
'charLenTable': UCS2LECharLenTable,
'name': 'UTF-16LE'}
# UTF-8
# UTF-8 coding state machine; classes 6-15 are the distinct lead-byte
# ranges, so the machine can validate continuation-byte counts exactly.
UTF8_cls = (
1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as a legal value
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
1,1,1,1,1,1,1,1, # 40 - 47
1,1,1,1,1,1,1,1, # 48 - 4f
1,1,1,1,1,1,1,1, # 50 - 57
1,1,1,1,1,1,1,1, # 58 - 5f
1,1,1,1,1,1,1,1, # 60 - 67
1,1,1,1,1,1,1,1, # 68 - 6f
1,1,1,1,1,1,1,1, # 70 - 77
1,1,1,1,1,1,1,1, # 78 - 7f
2,2,2,2,3,3,3,3, # 80 - 87
4,4,4,4,4,4,4,4, # 88 - 8f
4,4,4,4,4,4,4,4, # 90 - 97
4,4,4,4,4,4,4,4, # 98 - 9f
5,5,5,5,5,5,5,5, # a0 - a7
5,5,5,5,5,5,5,5, # a8 - af
5,5,5,5,5,5,5,5, # b0 - b7
5,5,5,5,5,5,5,5, # b8 - bf
0,0,6,6,6,6,6,6, # c0 - c7
6,6,6,6,6,6,6,6, # c8 - cf
6,6,6,6,6,6,6,6, # d0 - d7
6,6,6,6,6,6,6,6, # d8 - df
7,8,8,8,8,8,8,8, # e0 - e7
8,8,8,8,8,9,8,8, # e8 - ef
10,11,11,11,11,11,11,11, # f0 - f7
12,13,13,13,14,15,0,0 # f8 - ff
)
UTF8_st = (
eError,eStart,eError,eError,eError,eError, 12, 10,#00-07
9, 11, 8, 7, 6, 5, 4, 3,#08-0f
eError,eError,eError,eError,eError,eError,eError,eError,#10-17
eError,eError,eError,eError,eError,eError,eError,eError,#18-1f
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,#20-27
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,#28-2f
eError,eError, 5, 5, 5, 5,eError,eError,#30-37
eError,eError,eError,eError,eError,eError,eError,eError,#38-3f
eError,eError,eError, 5, 5, 5,eError,eError,#40-47
eError,eError,eError,eError,eError,eError,eError,eError,#48-4f
eError,eError, 7, 7, 7, 7,eError,eError,#50-57
eError,eError,eError,eError,eError,eError,eError,eError,#58-5f
eError,eError,eError,eError, 7, 7,eError,eError,#60-67
eError,eError,eError,eError,eError,eError,eError,eError,#68-6f
eError,eError, 9, 9, 9, 9,eError,eError,#70-77
eError,eError,eError,eError,eError,eError,eError,eError,#78-7f
eError,eError,eError,eError,eError, 9,eError,eError,#80-87
eError,eError,eError,eError,eError,eError,eError,eError,#88-8f
eError,eError, 12, 12, 12, 12,eError,eError,#90-97
eError,eError,eError,eError,eError,eError,eError,eError,#98-9f
eError,eError,eError,eError,eError, 12,eError,eError,#a0-a7
eError,eError,eError,eError,eError,eError,eError,eError,#a8-af
eError,eError, 12, 12, 12,eError,eError,eError,#b0-b7
eError,eError,eError,eError,eError,eError,eError,eError,#b8-bf
eError,eError,eStart,eStart,eStart,eStart,eError,eError,#c0-c7
eError,eError,eError,eError,eError,eError,eError,eError #c8-cf
)
UTF8CharLenTable = (0, 1, 0, 0, 0, 0, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6)
UTF8SMModel = {'classTable': UTF8_cls,
'classFactor': 16,
'stateTable': UTF8_st,
'charLenTable': UTF8CharLenTable,
'name': 'UTF-8'}
| mit |
niijv/webvita | wsgi/webvita/blogposts.py | 2 | 5828 | # -*-coding: utf-8 -*-
from webvita import app, db
from flask import request, session, redirect, url_for, abort,\
render_template, flash
from flask.ext.sqlalchemy import SQLAlchemy
from models import Blogpost, Tag, User
from datetime import datetime
from helpers import is_blogpost_unique, is_blogpost_short_unique,\
delete_unused_tags
import markdown
@app.route('/blog/<blogpost_short_title>')
def show_blogpost(blogpost_short_title):
    """Render one blogpost looked up by its URL-safe short title (404 if absent)."""
    post = Blogpost.query.filter_by(short_title=blogpost_short_title).first_or_404()
    return render_template('show_blogpost.html', blogpost=post)
@app.route('/blog/create')
def create_blogpost():
    """Show the blogpost authoring form; logged-in users only (401 otherwise)."""
    if not session.get('logged_in'):
        abort(401)
    return render_template('create_blogpost.html')
@app.route('/blog/add', methods=['GET', 'POST'])
def add_blogpost():
    """Persist a new blogpost built from the submitted form fields.

    Rejects duplicate titles/short titles with a flash message and sends the
    author back to the creation form; on success redirects to the blog index.
    """
    if not session.get('logged_in'):
        abort(401)

    title = unicode(request.form['title'])
    if not is_blogpost_unique(title):
        flash('Title already exists. Please choose a different title for your blogpost.')
        # TODO: send previous data
        return redirect(url_for('create_blogpost'))

    # Short title doubles as the URL slug, so spaces become dashes.
    short_title = unicode(request.form['short_title']).replace(' ', '-')
    if not is_blogpost_short_unique(short_title):
        flash('Short title already exists. Please choose a different short title for your blogpost.')
        # TODO: send previous data
        return redirect(url_for('create_blogpost'))

    subtitle = unicode(request.form['subtitle'])
    source_markdown = unicode(request.form['text'])
    rendered_html = markdown.markdown(source_markdown, ['codehilite'])

    # Tags arrive as one comma-separated field; blanks are dropped.
    tag_names = [piece.strip()
                 for piece in unicode(request.form['tags']).split(',')
                 if piece.strip()]

    author = User.query.filter_by(username=session['user']).first_or_404()
    post = Blogpost(author, title, subtitle, short_title, source_markdown,
                    rendered_html, tag_names, hidden=False)
    db.session.add(post)
    db.session.commit()
    flash('New blogpost has been added.')
    return redirect(url_for('show_blog'))
@app.route('/blog/edit')
def edit_blogpost_list():
    """List every blogpost, newest first, so an admin can pick one to edit."""
    if not session.get('logged_in'):
        abort(401)
    posts = Blogpost.query.order_by('posted desc').all()
    return render_template('edit_blogpost_list.html', blogposts=posts)
@app.route('/blog/edit/<blogpost_short_title>')
def edit_blogpost(blogpost_short_title):
    """Show the edit form for one blogpost, pre-filled with its current data."""
    if not session.get('logged_in'):
        abort(401)
    post = Blogpost.query.filter_by(short_title=blogpost_short_title).first_or_404()
    # The form expects tags as a single comma-separated string.
    tag_csv = ', '.join(tag.name for tag in post.tags)
    return render_template('edit_blogpost.html',
                           blogpost=post,
                           tags=tag_csv)
@app.route('/blog/update/<blogpost_short_title>', methods=['GET', 'POST'])
def update_blogpost(blogpost_short_title):
    """Apply submitted form edits to an existing blogpost.

    Validates that a changed title / short title is still unique, re-renders
    the markdown body, replaces the tag set, stamps the edit time, and
    redirects to the updated post.  On a uniqueness conflict the user is sent
    back to the edit form with a flash message.

    BUG FIX: the conflict redirects previously called
    ``url_for('edit_blogpost', blogpost_title=...)``, but the
    ``edit_blogpost`` endpoint's only URL argument is
    ``blogpost_short_title`` -- ``url_for`` therefore raised a
    ``werkzeug.routing.BuildError`` instead of redirecting.  Both redirects
    now pass the correct argument.
    """
    if not session.get('logged_in'):
        abort(401)
    old_bp = Blogpost.query.filter_by(short_title=blogpost_short_title)\
        .first_or_404()

    title = unicode(request.form['title'])
    # A title conflict only matters when the title actually changed.
    if title != old_bp.title and not is_blogpost_unique(title):
        flash('Title already exists. Please choose a different title' + \
              ' for your blogpost.')
        # TODO: send previous data
        return redirect(url_for('edit_blogpost',
                                blogpost_short_title=blogpost_short_title))

    short_title = unicode(request.form['short_title']).replace(' ', '-')
    if short_title != blogpost_short_title and not \
            is_blogpost_short_unique(short_title):
        flash('Short title already exists. Please choose a different ' + \
              'short title for your blogpost.')
        # TODO: send previous data
        return redirect(url_for('edit_blogpost',
                                blogpost_short_title=blogpost_short_title))

    subtitle = unicode(request.form['subtitle'])
    text_markdown = unicode(request.form['text'])
    text_html = markdown.markdown(text_markdown, ['codehilite'])

    # Remember the pre-edit tags so orphans can be cleaned up afterwards.
    old_tags = list(old_bp.tags)
    blogpost_tags = [t.strip()
                     for t in unicode(request.form['tags']).split(',')
                     if t.strip()]

    old_bp.title = title
    old_bp.subtitle = subtitle
    old_bp.short_title = short_title
    old_bp.text_markdown = text_markdown
    old_bp.text_html = text_html
    old_bp.update_tags(blogpost_tags)
    old_bp.edited = datetime.utcnow()
    db.session.commit()
    delete_unused_tags(old_tags)
    flash('Blogpost has been updated.')
    return redirect(url_for('show_blogpost',
                            blogpost_short_title=short_title))
@app.route('/blog/delete/<blogpost_short_title>')
def delete_blogpost(blogpost_short_title):
    """Delete one blogpost, then garbage-collect tags it alone was using."""
    if not session.get('logged_in'):
        abort(401)
    post = Blogpost.query.filter_by(short_title=blogpost_short_title).first_or_404()
    # Snapshot the tags before deletion so orphans can be removed afterwards.
    orphan_candidates = list(post.tags)
    db.session.delete(post)
    db.session.commit()
    delete_unused_tags(orphan_candidates)
    flash('Blogpost has been deleted.')
    return render_template('dashboard.html')
@app.route('/blog/tag/<tag_name>')
def show_tag(tag_name):
    """List all blogposts carrying the given tag, newest first."""
    tagged_posts = (Blogpost.query.order_by('posted desc')
                    .filter(Blogpost.tags.any(name=tag_name))
                    .all())
    return render_template('show_tag_blogposts.html',
                           tag_name=tag_name,
                           blogposts=tagged_posts)
| mit |
lupyuen/RaspberryPiImage | home/pi/GrovePi/Software/Python/others/temboo/Library/KhanAcademy/Exercises/GetExercise.py | 5 | 2915 | # -*- coding: utf-8 -*-
###############################################################################
#
# GetExercise
# Retrieves the specified exercise.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetExercise(Choreography):
    """Choreo wrapper for the Khan Academy "GetExercise" endpoint."""

    def __init__(self, temboo_session):
        """Bind this Choreo to *temboo_session*, which must hold valid Temboo credentials."""
        super(GetExercise, self).__init__(
            temboo_session, '/Library/KhanAcademy/Exercises/GetExercise')

    def new_input_set(self):
        """Return an empty input set for this Choreo."""
        return GetExerciseInputSet()

    def _make_result_set(self, result, path):
        """Wrap a raw *result* in the Choreo-specific result set."""
        return GetExerciseResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        """Build the async-execution handle for this Choreo."""
        return GetExerciseChoreographyExecution(session, exec_id, path)
class GetExerciseInputSet(InputSet):
    """Input parameters for the GetExercise Choreo."""

    def set_ExerciseName(self, value):
        """Set ExerciseName: (required, string) the exercise to retrieve (e.g. logarithms_1)."""
        super(GetExerciseInputSet, self)._set_input('ExerciseName', value)
class GetExerciseResultSet(ResultSet):
    """Results returned by a GetExercise Choreo execution."""

    def getJSONFromString(self, str):
        # NOTE(review): the parameter shadows the builtin `str`; the name is
        # part of the generated public API, so it is kept for compatibility.
        return json.loads(str)

    def get_Response(self):
        """Return the "Response" output: (json) the raw response from Khan Academy."""
        return self._output.get('Response', None)
class GetExerciseChoreographyExecution(ChoreographyExecution):
    """Execution handle for an asynchronously-run GetExercise Choreo."""

    def _make_result_set(self, response, path):
        """Wrap the raw *response* in the Choreo-specific result set."""
        return GetExerciseResultSet(response, path)
| apache-2.0 |
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/pandas/io/tests/parser/quoting.py | 7 | 5796 | # -*- coding: utf-8 -*-
"""
Tests that quoting specifications are properly handled
during parsing for all of the parsers defined in parsers.py
"""
import csv
import pandas.util.testing as tm
from pandas import DataFrame
from pandas.compat import PY3, StringIO, u
class QuotingTests(object):
    """Mixin of quoting-behaviour tests.

    The concrete parser test class that mixes this in must provide
    ``self.read_csv`` (C or Python engine); every test drives parsing
    through that hook.
    """

    def test_bad_quote_char(self):
        """Invalid ``quotechar`` arguments must raise TypeError with a clear message."""
        data = '1,2,3'

        # Python 2.x: "...must be an 1-character..."
        # Python 3.x: "...must be a 1-character..."
        msg = '"quotechar" must be a(n)? 1-character string'
        tm.assertRaisesRegexp(TypeError, msg, self.read_csv,
                              StringIO(data), quotechar='foo')

        msg = 'quotechar must be set if quoting enabled'
        tm.assertRaisesRegexp(TypeError, msg, self.read_csv,
                              StringIO(data), quotechar=None,
                              quoting=csv.QUOTE_MINIMAL)

        msg = '"quotechar" must be string, not int'
        tm.assertRaisesRegexp(TypeError, msg, self.read_csv,
                              StringIO(data), quotechar=2)

    def test_bad_quoting(self):
        """``quoting`` must be an integer in the csv module's valid range."""
        data = '1,2,3'

        msg = '"quoting" must be an integer'
        tm.assertRaisesRegexp(TypeError, msg, self.read_csv,
                              StringIO(data), quoting='foo')

        # quoting must in the range [0, 3]
        msg = 'bad "quoting" value'
        tm.assertRaisesRegexp(TypeError, msg, self.read_csv,
                              StringIO(data), quoting=5)

    def test_quote_char_basic(self):
        """A standard double-quote quotechar strips quotes from fields."""
        data = 'a,b,c\n1,2,"cat"'
        expected = DataFrame([[1, 2, 'cat']],
                             columns=['a', 'b', 'c'])

        result = self.read_csv(StringIO(data), quotechar='"')
        tm.assert_frame_equal(result, expected)

    def test_quote_char_various(self):
        """Any single character can serve as the quotechar."""
        data = 'a,b,c\n1,2,"cat"'
        expected = DataFrame([[1, 2, 'cat']],
                             columns=['a', 'b', 'c'])
        quote_chars = ['~', '*', '%', '$', '@', 'P']

        for quote_char in quote_chars:
            new_data = data.replace('"', quote_char)
            result = self.read_csv(StringIO(new_data), quotechar=quote_char)
            tm.assert_frame_equal(result, expected)

    def test_null_quote_char(self):
        """quotechar=None/'' is only allowed when quoting is QUOTE_NONE."""
        data = 'a,b,c\n1,2,3'

        # sanity checks
        msg = 'quotechar must be set if quoting enabled'

        tm.assertRaisesRegexp(TypeError, msg, self.read_csv,
                              StringIO(data), quotechar=None,
                              quoting=csv.QUOTE_MINIMAL)

        tm.assertRaisesRegexp(TypeError, msg, self.read_csv,
                              StringIO(data), quotechar='',
                              quoting=csv.QUOTE_MINIMAL)

        # no errors should be raised if quoting is None
        expected = DataFrame([[1, 2, 3]],
                             columns=['a', 'b', 'c'])

        result = self.read_csv(StringIO(data), quotechar=None,
                               quoting=csv.QUOTE_NONE)
        tm.assert_frame_equal(result, expected)

        result = self.read_csv(StringIO(data), quotechar='',
                               quoting=csv.QUOTE_NONE)
        tm.assert_frame_equal(result, expected)

    def test_quoting_various(self):
        """Each csv.QUOTE_* constant behaves correctly on the reader side."""
        data = '1,2,"foo"'
        cols = ['a', 'b', 'c']

        # QUOTE_MINIMAL and QUOTE_ALL apply only to
        # the CSV writer, so they should have no
        # special effect for the CSV reader
        expected = DataFrame([[1, 2, 'foo']], columns=cols)

        # test default (afterwards, arguments are all explicit)
        result = self.read_csv(StringIO(data), names=cols)
        tm.assert_frame_equal(result, expected)

        result = self.read_csv(StringIO(data), quotechar='"',
                               quoting=csv.QUOTE_MINIMAL, names=cols)
        tm.assert_frame_equal(result, expected)

        result = self.read_csv(StringIO(data), quotechar='"',
                               quoting=csv.QUOTE_ALL, names=cols)
        tm.assert_frame_equal(result, expected)

        # QUOTE_NONE tells the reader to do no special handling
        # of quote characters and leave them alone
        expected = DataFrame([[1, 2, '"foo"']], columns=cols)
        result = self.read_csv(StringIO(data), quotechar='"',
                               quoting=csv.QUOTE_NONE, names=cols)
        tm.assert_frame_equal(result, expected)

        # QUOTE_NONNUMERIC tells the reader to cast
        # all non-quoted fields to float
        expected = DataFrame([[1.0, 2.0, 'foo']], columns=cols)
        result = self.read_csv(StringIO(data), quotechar='"',
                               quoting=csv.QUOTE_NONNUMERIC,
                               names=cols)
        tm.assert_frame_equal(result, expected)

    def test_double_quote(self):
        """``doublequote`` toggles whether "" inside a quoted field collapses to "."""
        data = 'a,b\n3,"4 "" 5"'

        expected = DataFrame([[3, '4 " 5']],
                             columns=['a', 'b'])
        result = self.read_csv(StringIO(data), quotechar='"',
                               doublequote=True)
        tm.assert_frame_equal(result, expected)

        expected = DataFrame([[3, '4 " 5"']],
                             columns=['a', 'b'])
        result = self.read_csv(StringIO(data), quotechar='"',
                               doublequote=False)
        tm.assert_frame_equal(result, expected)

    def test_quotechar_unicode(self):
        """Unicode quotechars are accepted (Python 3 only for non-ASCII)."""
        # See gh-14477
        data = 'a\n1'
        expected = DataFrame({'a': [1]})

        result = self.read_csv(StringIO(data), quotechar=u('"'))
        tm.assert_frame_equal(result, expected)

        # Compared to Python 3.x, Python 2.x does not handle unicode well.
        if PY3:
            result = self.read_csv(StringIO(data), quotechar=u('\u0394'))
            tm.assert_frame_equal(result, expected)
jellyshen/shadowsocks | tests/test.py | 1016 | 5029 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import sys
import os
import signal
import select
import time
import argparse
from subprocess import Popen, PIPE
# Interpreter used to launch client/server; replaced with `coverage run`
# below when --with-coverage is given.
python = ['python']

default_url = 'http://localhost/'

parser = argparse.ArgumentParser(description='test Shadowsocks')
parser.add_argument('-c', '--client-conf', type=str, default=None)
parser.add_argument('-s', '--server-conf', type=str, default=None)
parser.add_argument('-a', '--client-args', type=str, default=None)
parser.add_argument('-b', '--server-args', type=str, default=None)
parser.add_argument('--with-coverage', action='store_true', default=None)
parser.add_argument('--should-fail', action='store_true', default=None)
parser.add_argument('--tcp-only', action='store_true', default=None)
parser.add_argument('--url', type=str, default=default_url)
parser.add_argument('--dns', type=str, default='8.8.8.8')

config = parser.parse_args()

if config.with_coverage:
    python = ['coverage', 'run', '-p', '-a']

client_args = python + ['shadowsocks/local.py', '-v']
server_args = python + ['shadowsocks/server.py', '-v']

# The server falls back to the client's conf/args when no server-specific
# ones are supplied.
if config.client_conf:
    client_args.extend(['-c', config.client_conf])
    if config.server_conf:
        server_args.extend(['-c', config.server_conf])
    else:
        server_args.extend(['-c', config.client_conf])
if config.client_args:
    client_args.extend(config.client_args.split())
    if config.server_args:
        server_args.extend(config.server_args.split())
    else:
        server_args.extend(config.client_args.split())
if config.url == default_url:
    server_args.extend(['--forbidden-ip', ''])

# p1 = server, p2 = local client, p3 = curl probe, p4 = dig probe.
p1 = Popen(server_args, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
p2 = Popen(client_args, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
p3 = None
p4 = None
p3_fin = False
p4_fin = False

# 1 shadowsocks started
# 2 curl started
# 3 curl finished
# 4 dig started
# 5 dig finished
stage = 1
try:
    local_ready = False
    server_ready = False
    # Multiplex stdout/stderr of all running children; new probe processes
    # are appended to fdset as the stages advance.
    fdset = [p1.stdout, p2.stdout, p1.stderr, p2.stderr]
    while True:
        r, w, e = select.select(fdset, [], fdset)
        if e:
            break

        for fd in r:
            line = fd.readline()
            if not line:
                # EOF on a probe's stdout marks that probe as finished.
                if stage == 2 and fd == p3.stdout:
                    stage = 3
                if stage == 4 and fd == p4.stdout:
                    stage = 5
            if bytes != str:
                line = str(line, 'utf8')
            sys.stderr.write(line)
            if line.find('starting local') >= 0:
                local_ready = True
            if line.find('starting server') >= 0:
                server_ready = True

        if stage == 1:
            # Give client/server a moment, then probe TCP through SOCKS5.
            time.sleep(2)

            p3 = Popen(['curl', config.url, '-v', '-L',
                        '--socks5-hostname', '127.0.0.1:1081',
                        '-m', '15', '--connect-timeout', '10'],
                       stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
            if p3 is not None:
                fdset.append(p3.stdout)
                fdset.append(p3.stderr)
                stage = 2
            else:
                sys.exit(1)

        if stage == 3 and p3 is not None:
            fdset.remove(p3.stdout)
            fdset.remove(p3.stderr)
            r = p3.wait()
            # --should-fail inverts the expectation on the probe's exit code.
            if config.should_fail:
                if r == 0:
                    sys.exit(1)
            else:
                if r != 0:
                    sys.exit(1)
            if config.tcp_only:
                break
            # TCP probe passed; now probe UDP relaying with a socksified dig.
            p4 = Popen(['socksify', 'dig', '@%s' % config.dns,
                        'www.google.com'],
                       stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
            if p4 is not None:
                fdset.append(p4.stdout)
                fdset.append(p4.stderr)
                stage = 4
            else:
                sys.exit(1)

        if stage == 5:
            r = p4.wait()
            if config.should_fail:
                if r == 0:
                    sys.exit(1)
                print('test passed (expecting failure)')
            else:
                if r != 0:
                    sys.exit(1)
                print('test passed')
            break
finally:
    # Always shut the client and server down cleanly.
    for p in [p1, p2]:
        try:
            os.kill(p.pid, signal.SIGINT)
            os.waitpid(p.pid, 0)
        except OSError:
            pass
kristopher-h/vim-lsp | tools/create_portable_pyls.py | 2 | 4830 | #!/usr/bin/env python3
# Copyright 2017 Kristopher Heijari
#
# This file is part of vim-liq.
#
# vim-liq is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# vim-liq is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with vim-liq. If not, see <http://www.gnu.org/licenses/>.
"""Install the palantir python lsp server. """
import argparse
import logging
import os
import shutil
import subprocess
import sys
import tempfile
try:
from urllib import urlretrieve as http_download
except ImportError:
from urllib.request import urlretrieve as http_download
import zipfile
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
URL = "https://github.com/palantir/python-language-server/archive/0.21.2.zip"
UNZIPPED_NAME = "python-language-server-0.21.2"
ZIP_NAME = "python_lsp.zip"
INSTALL_DIR_NAME = "python_lsp_server"
DEFAULT_TARGET_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "../plugin/servers/python/")
BACKPORTS_INIT = "__path__ = __import__('pkgutil').extend_path(__path__, __name__)\n"
PYLS_MAIN = """#!/usr/bin/env python
import glob
import os
import re
import sys
f_path = os.path.dirname(os.path.abspath(__file__))
if sys.version_info[0] >= 3:
sitepack = glob.glob(os.path.join(f_path, "lib/python3.[0-9]/site-packages"))[0]
else:
sitepack = os.path.join(f_path, "lib/python2.7/site-packages")
sys.path.insert(0, sitepack)
from pyls.__main__ import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
"""
def install(dest_dir, zipapp=False):
    """Install python lsp server from palantir.

    Downloads the pinned release, pip-installs it (with extras) for both
    python 2.7 and python 3 into a staging directory, writes a __main__.py
    entry point and a backports namespace shim, and finally either bundles
    everything as a zipapp or copies the tree to <dest_dir>/pyls.

    :param dest_dir: directory the server is installed into
    :param zipapp: when True, build a single ``pyls.pyz`` archive in
        ``dest_dir`` instead of a directory tree
    """
    tempdir = tempfile.mkdtemp()
    log.debug("Created temporary directory %s", tempdir)
    try:
        install_dir = os.path.join(tempdir, INSTALL_DIR_NAME)
        zip_path = os.path.join(tempdir, ZIP_NAME)
        log.debug("Downloading %s", URL)
        http_download(URL, filename=zip_path)
        with zipfile.ZipFile(zip_path, "r") as unzipit:
            log.debug("Unzipping %s to %s", zip_path, tempdir)
            unzipit.extractall(path=tempdir)
        extras = "[rope,yapf,mccabe,pyflakes,pycodestyle,pydocstyle]"
        # install for py2
        subprocess.check_call(
            ["pip2.7", "install", "--no-compile", "--prefix", install_dir, "--ignore-installed",
             "--upgrade", os.path.join(tempdir, UNZIPPED_NAME) + extras])
        # install for py3
        subprocess.check_call(
            ["pip3", "install", "--no-compile", "--prefix", install_dir, "--ignore-installed",
             "--upgrade", os.path.join(tempdir, UNZIPPED_NAME) + extras])
        # We need to create this init file since the import for configparser for python2
        # otherwise fails. Since the pth file in site-packages is not read. Note that adding the
        # path with "import site; site.addsite(...) does not seem to work either (guessing it is
        # due to the zipapp bundling).
        backports_init = os.path.join(install_dir,
                                      "lib/python2.7/site-packages/backports/__init__.py")
        pyls_main = os.path.join(install_dir, "__main__.py")
        with open(backports_init, "w") as file_:
            file_.write(BACKPORTS_INIT)
        with open(pyls_main, "w") as file_:
            file_.write(PYLS_MAIN)
        if zipapp:
            # BUG FIX: the output name was the bare identifier ``pylz.pyz``
            # (undefined -> NameError); it must be the literal "pyls.pyz".
            subprocess.check_call(
                ["python3", "-m", "zipapp", "-o", os.path.join(dest_dir, "pyls.pyz"),
                 "-p", "/usr/bin/env python", install_dir])
        else:
            pyls_dir = os.path.join(dest_dir, "pyls")
            # copytree refuses to overwrite, so clear any previous install.
            if os.path.exists(pyls_dir):
                shutil.rmtree(pyls_dir)
            shutil.copytree(install_dir, pyls_dir)
    finally:
        # Always delete tempdir after finishing
        shutil.rmtree(tempdir)
def main():
    """Parse command-line options, wire up console logging, run install()."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--zipapp", action="store_true", help="Create a zipapp")
    arg_parser.add_argument("--target", help="Target directory.", default=DEFAULT_TARGET_DIR)
    options = arg_parser.parse_args()
    # Route debug output to stdout with timestamps.
    console = logging.StreamHandler(stream=sys.stdout)
    console.setFormatter(
        logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    log.setLevel(logging.DEBUG)
    log.addHandler(console)
    install(options.target, options.zipapp)
# Run the installer only when executed as a script.
if __name__ == "__main__":
    main()
| gpl-3.0 |
vishnuprasadb/ResumeParser.py | code.py | 1 | 33299 | class ResumeXmlHandler(xml.sax.ContentHandler):
def __init__(self):
self.node = ''
self.params = {}
self.error = None
def startElement(self, name, attrs):
self.node = name
def endElement(self, name):
self.node = ''
    def characters(self,content):
        """SAX text callback: clean and store text for the current element.

        Text is routed by ``self.node`` into ``self.params`` (keys are
        lower-cased element names).  Summary text is accumulated across
        chunks; all other fields are overwritten per chunk.
        NOTE(review): SAX may deliver one element's text in several
        ``characters`` calls, so non-Summary fields keep only the last
        chunk -- confirm this is acceptable for the inputs seen.
        """
        if self.node == 'Summary':
            # Strip common (and commonly misspelled, e.g. 'Work Sumamry')
            # section headings before accumulating the summary text.
            for dirty in ['Work Sumamry','WORK SUMMARY','Summary','Professional Summary','PROFESSIONAL','Professional:','PROFESSIONAL:','PROFESSIONAL :','PROFESSIONAL SUMMARY:','Professional summary','SUMMARY','PROFESSIONAL SUMMARY','SUMMARY:','EXECUTIVE SUMMARY']:
                if dirty in content:
                    content=content.replace(dirty,'')
            # Append (Python 2 dict.has_key) so multi-chunk summaries concatenate.
            if self.params.has_key(self.node.lower()):
                self.params[self.node.lower()] += _my_unescape(content)
            else:
                self.params[self.node.lower()] = _my_unescape(content)
        else:
            content=content.strip()
            if self.node == 'ResumeFileName':
                self.params['resumefilename'] = _my_unescape(content)
            elif self.node == 'FirstName':
                self.params[self.node.lower()] = _my_unescape(content)
            elif self.node == 'LastName':
                self.params[self.node.lower()] = _my_unescape(content)
            elif self.node == 'Email':
                # Drop label prefixes such as "E-mail- foo@bar" -- keep the
                # part after the first matching separator.
                for dirty in ['E-mail-','-',':-',':',' ']:
                    if dirty in content:
                        content=content.split(dirty)[1]
                self.params[self.node.lower()] = _my_unescape(content.strip())
            elif self.node == 'Phone' or self.node == 'Mobile' or self.node == 'FormattedPhone' or self.node =='FormattedMobile':
                # Normalize to the last 10 digits; silently drop shorter values.
                content=content.replace(' ','')
                if len(content)>10:
                    self.params[self.node.lower()] = _my_unescape(content)[-10:]
                elif len(content)<10:
                    pass
                else:
                    self.params[self.node.lower()] = _my_unescape(content)
            elif self.node == 'Address':
                self.params[self.node.lower()] = _my_unescape(content)
            elif self.node == 'City':
                self.params[self.node.lower()] = _my_unescape(content)
            elif self.node == 'State':
                self.params[self.node.lower()] = _my_unescape(content)
            elif self.node == 'ZipCode':
                self.params['pincode'] = _my_unescape(content)
            elif self.node == 'Skill':
                # Flatten the skill blob into a comma list, drop generic
                # headings, and prefix each skill with 's_'.
                skill_list = _my_unescape(content).replace('\n', ',').replace('\r', '').split(',')
                labels = ['language', 'frameworks', 'tools', 'operating systems','training','analysis','module','user interface','programming','trouble shooting','capability','specifications','platforms','modules','cleanliness','assembly']
                self.params['claimed_skills'] = ''
                for skill in skill_list:
                    skill = skill.strip()
                    if skill.lower() not in labels:
                        self.params['claimed_skills'] += 's_%s,'%skill
            # candidate profile fields
            elif self.node == 'Gender':
                # Map long gender names to the single-letter model codes.
                gender_dict={'Female':'F',
                             'Male':'M',
                             'Other':'O'
                             }
                gender=_my_unescape(content)
                if gender in gender_dict.keys():
                    self.params[self.node.lower()] = gender_dict[gender]
            elif self.node =='DateOfBirth':
                self.params[self.node.lower()] = _parse_date(_my_unescape(content))
            elif self.node == 'LicenseNo':
                self.params[self.node.lower()] = _my_unescape(content)
            elif self.node == 'Nationality':
                self.params[self.node.lower()] = _my_unescape(content)
#End of ResumeXmlHandler()
def _parse_date(data):
if data.count('/') ==2:
DOB = data.split('/')
date_of_birth = '%s-%s-%s'%(DOB[2],DOB[1],DOB[0])
else:
date_of_birth =''
return date_of_birth
class ProfileFieldsSanitizer(file):
    """Validate parsed-resume field values against per-field rules.

    Each rule dict has a 'type' ('str'/'int'/'float'), a 'regex' the
    stringified value must match, and either 'maxlen' (string rules) or
    'maxval'/'minval' (numeric rules).  Failures are collected in
    ``invalid_fields`` and dumped to a CSV report by
    ``validate_all_fields``.

    NOTE(review): subclassing the Python 2 builtin ``file`` looks
    accidental -- ``file.__init__`` is never called and no file methods
    are used; kept as-is so the class interface is unchanged.
    """

    def __init__(self, file):
        # Mapping of field name -> offending value for each failed field.
        self.invalid_fields = {}
        # Path of the CSV error report (removed again if nothing failed).
        self.filename = file

    _NAMEVALIDATOR = {'type': 'str', 'regex': '.*', 'maxlen': 150}
    _EMAILVALIDATOR = {
        'type': 'str',
        'regex': '(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)',
        'maxlen': 30,
    }
    _PHONEVALIDATOR = {'type': 'str', 'regex': '^\+?1?\d{9,15}$', 'maxlen': 15}
    _ADDRVALIDATOR = {'type': 'str', 'regex': '.*', 'maxlen': 128}
    _CITYVALIDATOR = {'type': 'str', 'regex': '.*', 'maxlen': 64}
    _STATEVALIDATOR = {'type': 'str', 'regex': '.*', 'maxlen': 64}
    _PINVALIDATOR = {'type': 'int', 'regex': '\d{5,6}', 'maxval': 999999, 'minval': 0}
    _CLAIMEDSKILLVALIDATOR = {'type': 'str', 'regex': '.*', 'maxlen': 8192}
    _GENDERVALIDATOR = {'type': 'str', 'regex': '[FMO]', 'maxlen': 1}
    _DOBVALIDATOR = {
        'type': 'str',
        'regex': '\d{4}\-(0?[1-9]|1[012])\-(0?[1-9]|[12][0-9]|3[01])',
        'maxlen': 10,
    }
    _LICENSEVALIDATOR = {'type': 'str', 'regex': '.*', 'maxlen': 512}
    _SUMMARYVALIDATOR = {'type': 'str', 'regex': '.*', 'maxlen': 4096}
    _CTCVALIDATOR = {'type': 'float', 'regex': '.*', 'maxval': 9999999999.99, 'minval': 0}
    _NPVALIDATOR = {'type': 'int', 'regex': '.*', 'maxval': 12, 'minval': -1}
    _DEGREEVALIDATOR = {'type': 'str', 'regex': '.*', 'maxlen': 16}
    _BRANCHVALIDATOR = {'type': 'str', 'regex': '.*', 'maxlen': 32}
    _COLLEGEVALIDATOR = {'type': 'str', 'regex': '.*', 'maxlen': 128}
    _UNIVERSITYVALIDATOR = {'type': 'str', 'regex': '.*', 'maxlen': 128}
    # BUG FIX: maxval/minval were swapped (maxval 1947, minval 2100), which
    # would reject every graduation year once range checks actually run.
    _DEGREEYEARVALIDATOR = {'type': 'int', 'regex': '.*', 'maxval': 2100, 'minval': 1947}
    _PERFORMANCEVALIDATOR = {'type': 'str', 'regex': '.*', 'maxlen': 128}
    _EMPLOYERVALIDATOR = {'type': 'str', 'regex': '.*', 'maxlen': 64}
    _ROLEVALIDATOR = {'type': 'str', 'regex': '.*', 'maxlen': 1024}
    _JOBLOCATIONVALIDATOR = {'type': 'str', 'regex': '.*', 'maxlen': 64}
    _JOBDATEVALIDATOR = {
        'type': 'str',
        'regex': '\d{4}\-(0?[1-9]|1[012])\-(0?[1-9]|[12][0-9]|3[01])',
        'maxlen': 10,
    }
    _JOBPERIODVALIDATOR = {'type': 'str', 'regex': '.*', 'maxlen': 64}
    _JDVALIDATOR = {'type': 'str', 'regex': '.*', 'maxlen': 8192}

    # Field name -> rule.  Both camelCase and lowercase spellings are kept
    # because field names arrive from different sources.
    _validators = {
        'firstname': _NAMEVALIDATOR,
        'lastname': _NAMEVALIDATOR,
        'email': _EMAILVALIDATOR,
        'phone': _PHONEVALIDATOR,
        'address': _ADDRVALIDATOR,
        'address1': _ADDRVALIDATOR,
        'address2': _ADDRVALIDATOR,
        'city': _CITYVALIDATOR,
        'state': _STATEVALIDATOR,
        'pincode': _PINVALIDATOR,
        'claimed_skills': _CLAIMEDSKILLVALIDATOR,
        'gender': _GENDERVALIDATOR,
        'dateofbirth': _DOBVALIDATOR,
        'birthdate': _DOBVALIDATOR,
        'licenseno': _LICENSEVALIDATOR,
        'dl_number': _LICENSEVALIDATOR,
        'summary': _SUMMARYVALIDATOR,
        'annualCtc': _CTCVALIDATOR,
        'annualctc': _CTCVALIDATOR,
        'expectedCtc': _CTCVALIDATOR,
        'expectedctc': _CTCVALIDATOR,
        'curSalary': _CTCVALIDATOR,
        'expSalary': _CTCVALIDATOR,
        'noticePeriod': _NPVALIDATOR,
        'degree': _DEGREEVALIDATOR,
        'branch': _BRANCHVALIDATOR,
        'college': _COLLEGEVALIDATOR,
        'university': _UNIVERSITYVALIDATOR,
        'year': _DEGREEYEARVALIDATOR,
        'joiningYear': _DEGREEYEARVALIDATOR,
        'graduationYear': _DEGREEYEARVALIDATOR,
        'performance': _PERFORMANCEVALIDATOR,
        'aggregate': _PERFORMANCEVALIDATOR,
        'employer': _EMPLOYERVALIDATOR,
        'jobprofile': _ROLEVALIDATOR,
        'joblocation': _JOBLOCATIONVALIDATOR,
        'startdate': _JOBDATEVALIDATOR,
        'enddate': _JOBDATEVALIDATOR,
        'jobperiod': _JOBPERIODVALIDATOR,
        'jobdescritption': _JDVALIDATOR,
    }

    def _validate(self, validator, key, value):
        """Check one value against *validator*.

        Records the offending value in ``self.invalid_fields`` on failure
        and returns 'invalid'; returns 'valid' otherwise.
        """
        vtype = validator['type']
        number = None
        if vtype == 'str':
            try:
                value = str(value)
            except:
                self.invalid_fields[key] = value
                return 'invalid'
        if vtype == 'int':
            try:
                number = int(value)
                value = str(value)
            except:
                self.invalid_fields[key] = value
                return 'invalid'
        if vtype == 'float':
            try:
                number = float(value)
                value = str(value)
            except:
                self.invalid_fields[key] = value
                return 'invalid'
        # The regex is always matched against the stringified value.
        if not re.match(validator['regex'], value):
            self.invalid_fields[key] = value
            return 'invalid'
        # BUG FIX: the original compared ``type(value)`` with the string
        # 'string' (always False) and used ``['int' or 'float']`` (which is
        # just ['int']), so the length/range checks below never executed.
        # Dispatch on the declared rule type instead.
        if vtype == 'str':
            if validator['maxlen'] < len(value):
                self.invalid_fields[key] = value
                return 'invalid'
        else:
            if validator['maxval'] < number:
                self.invalid_fields[key] = value
                return 'invalid'
            if validator['minval'] > number:
                self.invalid_fields[key] = value
                return 'invalid'
        return 'valid'

    def validate(self, name, value):
        """Return False only when *name* has a rule and *value* fails it."""
        if name in self._validators:
            if self._validate(self._validators[name], name, value) == 'invalid':
                return False
        return True

    def validate_all_fields(self, dict_in):
        """Validate every entry of *dict_in* in place.

        Failing values (including those inside one level of nested dicts)
        are replaced with None.  If anything failed, a CSV report is
        written to ``self.filename``; otherwise that file is removed.
        """
        for k in dict_in.keys():
            value = dict_in[k]
            if type(value) is not dict:
                if not self.validate(k, value):
                    dict_in[k] = None
            else:
                for i in value:
                    if not self.validate(i, value[i]):
                        dict_in[k][i] = None
        if self.invalid_fields:
            f = open(self.filename, 'w')
            f.write('Field,Value\r\n')
            for field in self.invalid_fields:
                try:
                    f.write('%s,%s\r\n' % (field, self.invalid_fields[field]))
                except:
                    # Value could not be formatted (e.g. bad encoding);
                    # record at least the field name.
                    f.write('%s\r\n' % (field))
            f.close()
        else:
            os.remove(self.filename)
# End of ProfileFieldsSanitizer()
#Resume parser using RChilli API
def _resume_parser(content_type,resume_data,err_resp=''):
    """Parse a base64-encoded resume through the RChilli shell wrapper.

    :param content_type: MIME type of the uploaded resume
    :param resume_data: base64 payload (text after '#' is discarded)
    :param err_resp: XML error fragment to append to on failure
    :returns: (dict of sanitized candidate fields, error-XML string)
    """
    logr.info("Incoming request to parser resume using RChilli API")
    errors = ''
    valid_content_types = {'application/pdf': 'pdf', 'application/msword': 'doc' , 'application/vnd.openxmlformats-officedocument.wordprocessingml.document': 'docx', 'application/vnd.openxmlformats-officedocument.wordprocessingml.template':'dotx', 'application/vnd.ms-word.document.macroEnabled.12':'docm', 'application/vnd.ms-word.template.macroEnabled.12':'dotm'}
    if content_type not in valid_content_types.keys() :
        logr.info("Invalid content type-%s"%content_type)
        errors += 'Invalid value (%s) for the field %s'%(content_type,"Content Type")
        err_resp += '<errors>\r\n'
        err_resp += _add_xml_field('errorStr',errors)
        err_resp += '</errors>\r\n'
        return ({},err_resp)
    else:
        # PID keeps concurrent uploads from clobbering each other's files.
        pid=os.getpid()
        filename = 'resume_%s.%s'%(pid,valid_content_types[content_type])
        data = resume_data.split('#')[0]
        decoded_data= b64decode(data)
        f = open('/tmp/%s'%filename,'w')
        f.write(decoded_data)
        f.close()
        # Dictionary for storing candidate info
        # Key : model fields
        # Value: the data
        cand_res_data={}
        cand_res_data['filename']=filename
        # External wrapper invokes the RChilli parser; it writes its XML
        # response to /content/candidates/resume_responses/.
        retval = call(['/var/floLearning/RChilliParser/parser.sh',filename])
        if retval != 0:
            errors += "Error while parsing %s"%filename
            err_resp += _add_xml_field("errorStr",errors)
            return (cand_res_data,err_resp)
        output_fn= '/content/candidates/resume_responses/xml_resp_%s.txt'%filename
        with open(output_fn,'r') as f_name:
            xmldata = f_name.read()
        xmlhandler = ResumeXmlHandler()
        # Pre-create the per-process error report the sanitizer writes to.
        error_file = 'errors_%s.txt'%pid
        retval = call(['touch','/content/candidates/resume_responses/errors/%s'%error_file])
        sanitizer = ProfileFieldsSanitizer('/content/candidates/resume_responses/errors/%s'%error_file)
        try:
            xml.sax.parseString(xmldata,xmlhandler)
        except Exception as e:
            errors += 'Error while parsing (SAX) %s\r\n'%filename
            err_resp += _add_xml_field('errorStr',errors)
            return (cand_res_data,err_resp)
        cand_resume_data = xmlhandler
        # Invalid fields are nulled in place; failures land in error_file.
        sanitizer.validate_all_fields(cand_resume_data.params)
        cand_resume_data.params['error_file'] = error_file
        cand_resume_data.params['filename'] = filename
        cand_resume_data.params['rchillie_resp_file']='xml_resp_%s.txt'%filename
        logr.info(cand_resume_data.params)
        return (cand_resume_data.params,err_resp)
def _update_parsed_candidate_profile_helper(candidate_resume_data,candidate):
    """
    Use case: This helper is used to update the candidate profile using the dictionary which contains the keys as model object fields and values as the respective data, got from the resume parser.
    input: dictionary of resume parsed info
    output: saving the data and returning the response of respective fields which are updated
    """
    logr.info("Incoming data to update the candidate profile from resume parser output")
    # Missing keys default to 0 (falsy) so the per-field guards below skip them.
    addr = candidate_resume_data.get('address',0)
    city = candidate_resume_data.get('city',0)
    state = candidate_resume_data.get('state',0)
    pincode = candidate_resume_data.get('pincode',0)
    claimed_skills= candidate_resume_data.get('claimed_skills',0)
    gender = candidate_resume_data.get('gender',0)
    dateofbirth= candidate_resume_data.get('dateofbirth',0)
    licenseno = candidate_resume_data.get('licenseno',0)
    summary = candidate_resume_data.get('summary',0)
    nationality = candidate_resume_data.get('nationality',0)
    # Dirty flags avoid needless .save() calls when nothing changed.
    candidate_dirty = addr_dirty = False
    cand_addr, ignore = CurrentAddress.objects.get_or_create(candidate_id = candidate.id)
    if addr:
        addr_dirty = True
        cand_addr.address1 = addr
    if city:
        addr_dirty = True
        cand_addr.city= city
    if state:
        addr_dirty = True
        cand_addr.state= state
    if pincode:
        addr_dirty = True
        cand_addr.pin_code= pincode
    if claimed_skills:
        candidate_dirty = True
        candidate.claimed_skills= claimed_skills
    if candidate_dirty:
        candidate.save()
    if addr_dirty:
        cand_addr.save()
    # Saving candidate profile
    candidate_profile = Profile.objects.get(candidate_id=candidate.id)
    profile_dirty = False
    if gender:
        profile_dirty = True
        candidate_profile.gender = gender
    if dateofbirth:
        profile_dirty = True
        candidate_profile.birthdate = dateofbirth
    if licenseno:
        profile_dirty = True
        candidate_profile.dl_number = licenseno
    if nationality:
        profile_dirty = True
        candidate_profile.nationality = nationality
    if summary:
        profile_dirty = True
        candidate_profile.summary = summary
    if profile_dirty:
        candidate_profile.save()
def _response_builder_from_parsed_resume_data(candidate,candidate_resume_data,resp):
    """
    builds the response from the parsed resume info dictionary

    Appends a <newProfile> XML fragment (mixing candidate model fields with
    parsed-resume values) to *resp* and returns the result.  Fields the
    parser did not supply are emitted as empty/self-closing elements.
    """
    # Indentation depth (in tabs) for the fields inside <newProfile>.
    indent =3
    resp += '\t\t<newProfile>\r\n'
    resp += _add_xml_field("id", candidate.id,indent)
    resp += _add_xml_field("email", candidate.user.email,indent)
    resp += _add_xml_field("firstName", candidate.user.first_name,indent)
    resp += _add_xml_field("middleName", candidate.middle_name,indent)
    resp += _add_xml_field("lastName", candidate.user.last_name,indent)
    resp += _add_xml_field("mobileNumber", candidate.phone_number,indent)
    # Missing parser fields default to 0 (falsy) -> empty elements below.
    addr = candidate_resume_data.get('address',0)
    city = candidate_resume_data.get('city',0)
    state = candidate_resume_data.get('state',0)
    pincode = candidate_resume_data.get('pincode',0)
    claimed_skills= candidate_resume_data.get('claimed_skills',0)
    gender = candidate_resume_data.get('gender',0)
    dateofbirth= candidate_resume_data.get('dateofbirth',0)
    licenseno = candidate_resume_data.get('licenseno',0)
    summary = candidate_resume_data.get('summary',0)
    nationality = candidate_resume_data.get('nationality',0)
    # Tab string matching *indent*, used for hand-built elements.
    indent_majority = ''
    for i in range(indent):
        indent_majority += '\t'
    # Mock the current address field from the parsed resume dictionary
    resp += indent_majority + '<currentAddress>\r\n'
    if addr:
        resp += _add_xml_field("address1",addr, indent+1)
    else:
        resp += _add_xml_field("address1","", indent+1)
    resp += _add_xml_field("address2","", indent+1)
    if city:
        resp += _add_xml_field("city", city, indent+1)
    else:
        resp += _add_xml_field("city", '', indent+1)
    if state:
        resp += _add_xml_field("state", state, indent+1)
    else:
        resp += _add_xml_field("state", '', indent+1)
    if pincode:
        resp += _add_xml_field("pinCode", pincode, indent+1)
    else:
        resp += _add_xml_field("pinCode", '', indent+1)
    resp += indent_majority + '</currentAddress>\r\n'
    # Permenant Address fields are always blank in the newProfile fields after the resume parser
    # (note: "Permenant" misspelling matches the model/tag names used elsewhere).
    resp += indent_majority +'<permenantAddress>\r\n'
    addr_fields = _get_required_object_fields("address")
    (response, flag) = _add_fields(addr_fields, PermenantAddress(), indent+1, True)
    resp += response
    resp += indent_majority +'</permenantAddress>\r\n'
    # Build candidate profile response
    if gender:
        resp += _add_xml_field("gender", gender, indent)
    else:
        resp += indent_majority + '<gender/>\r\n'
    if dateofbirth:
        resp += _add_xml_field("birthdate", dateofbirth, indent)
    else:
        resp += indent_majority + '<birthdate/>\r\n'
    resp += indent_majority + '<dl_type/>\r\n'
    if licenseno:
        resp += _add_xml_field("dl_number", licenseno, indent)
    else:
        resp += indent_majority + '<dl_number/>\r\n'
    if nationality:
        resp += _add_xml_field("nationality", nationality, indent)
    else:
        resp += indent_majority + '<nationality/>\r\n'
    # The remaining profile fields are never parsed from a resume, so they
    # are always emitted empty.  NOTE(review): <dl_expiry/> appears twice.
    resp += indent_majority + '<dl_expiry/>\r\n'
    resp += indent_majority + '<height/>\r\n'
    resp += indent_majority + '<weight/>\r\n'
    resp += indent_majority + '<dl_expiry/>\r\n'
    resp += indent_majority + '<dl_registered_state/>\r\n'
    resp += indent_majority + '<interested_jobs/>\r\n'
    resp += indent_majority + '<dreamJob/>\r\n'
    resp += indent_majority + '<personalityStrengths/>\r\n'
    resp += indent_majority + '<personalityWeaknesses/>\r\n'
    resp += indent_majority + '<totalExperience/>\r\n'
    resp += indent_majority + '<annualCtc/>\r\n'
    resp += indent_majority + '<curSalary/>\r\n'
    resp += indent_majority + '<curSalaryFreq/>\r\n'
    resp += indent_majority + '<curSalaryCurrency/>\r\n'
    resp += indent_majority + '<expectedRaise/>\r\n'
    resp += indent_majority + '<expSalary/>\r\n'
    resp += indent_majority + '<expSalaryFreq/>\r\n'
    resp += indent_majority + '<expSalaryCurrency/>\r\n'
    resp += indent_majority + '<noticePeriod/>\r\n'
    resp += indent_majority + '<relocation/>\r\n'
    resp += indent_majority + '<linkedInProfile/>\r\n'
    resp += indent_majority + '<gitHubProfile/>\r\n'
    resp += indent_majority + '<queryHomeProfile/>\r\n'
    resp += _add_xml_field('status','Looking for job',indent)
    resp += indent_majority + '<reference/>\r\n'
    if summary:
        resp += _add_xml_field("summary",summary, indent)
    else:
        resp += indent_majority +'<summary/>\r\n'
    resp += indent_majority + '<Degree/>\r\n'
    resp += indent_majority + '<WorkExperience/>\r\n'
    resp += indent_majority + '<uniqueId/>\r\n'
    resp += _add_xml_field("ownTwoWheeler", False, indent)
    resp += '\t\t</newProfile>\r\n'
    return resp
#end of _response_builder_from_parser_resume_data
def _inline_response_builder(resp,request,candidate,cand_resume_data,res_fn):
    """Fill missing candidate fields from the parsed resume and build the
    <candidate> XML fragment (existing profile + new parsed profile).

    NOTE(review): ``int(candidate.phone_number)`` raises ValueError if the
    stored number is an empty string -- confirm the model guarantees a
    numeric default.  Also note the caller does ``resp += _inline_response_
    builder(resp, ...)``, which duplicates any prior content of *resp*;
    it is only safe because *resp* is empty at that call site.
    """
    cid=candidate.id
    # Rename the RChilli response/error files to include the candidate id.
    _change_filename(cand_resume_data,cid)
    if not int(candidate.phone_number):
        # Take the first parsed phone-like field that has a value.
        for phone_field in ['phone','mobile','formattedphone','formattedmobile']:
            if cand_resume_data.get(phone_field,0):
                candidate.phone_number=cand_resume_data[phone_field]
    if not candidate.user.last_name:
        if cand_resume_data.get('lastname',0):
            candidate.user.last_name=cand_resume_data['lastname']
    candidate.save()
    resp +='<resume>\r\n'
    resp += _add_xml_field("filename", res_fn)
    resp += '\t<candidate>\r\n'
    existing_resp =_fetch_profile_helper(request,cid)
    if not existing_resp:
        # No stored profile: persist the parsed data and emit an empty tag.
        resp += '\t\t<existingProfile/>\r\n'
        _update_parsed_candidate_profile_helper(cand_resume_data,candidate)
    else:
        resp += existing_resp
    response = _response_builder_from_parsed_resume_data(candidate,cand_resume_data,'')
    resp += response
    resp += '\t</candidate>\r\n'
    return resp
def _change_filename(cand_resume_data,cid):
    """Rename the RChilli response and error files to candidate-id names.

    Updates 'rchillie_resp_file' and 'error_file' in *cand_resume_data* in
    place and moves the files on disk via ``mv``.  The [:15]/[:6] slices
    keep the 'xml_resp_resume' / 'errors' prefixes of the original names.
    """
    org_filename=cand_resume_data['rchillie_resp_file']
    mod_filename=cand_resume_data['rchillie_resp_file']=cand_resume_data['rchillie_resp_file'][:15]+'_%s.txt'%cid
    retval = call(['mv','/content/candidates/resume_responses/%s'%org_filename,'/content/candidates/resume_responses/%s'%mod_filename])
    if retval !=0:
        logr.info("resume response file is not getting saved")
    org_error_file = cand_resume_data['error_file']
    mod_error_file = cand_resume_data['error_file'] = org_error_file[:6]+'_%s.txt'%cid
    error_file_path = '/content/candidates/resume_responses/errors/%s'%org_error_file
    # The error file only exists when the sanitizer found invalid fields.
    if os.path.exists(error_file_path):
        retval = call(['mv',error_file_path,'/content/candidates/resume_responses/errors/%s'%mod_error_file])
        if retval !=0:
            logr.info("resume error file is not getting saved")
#End of _change_filename()
# Upload resume helper function
def _upload_resume(request, candidate, is_tmp_stored = False):
    """Handle a base64 data-URI resume upload for *candidate*.

    Validates size and content type, optionally runs the resume parser for
    job seekers (at most once per year), stores the decoded file, and
    returns an XML response fragment.

    :param is_tmp_stored: when True, only save the file under /tmp and skip
        parsing/profile updates
    """
    logr.info("Incoming request to upload resume; user: %s" % request.user.email)
    resp = ''
    errors = ''
    # Reject bodies over ~2 MB before reading them.
    cont_len = request.META['CONTENT_LENGTH']
    if (int(cont_len) > 2048575):
        errors += 'Invalid value (%s) for the field %s'%(cont_len,"Content-Length header")
        resp += '<errors>\r\n'
        resp += _add_xml_field('errorStr',errors)
        resp += '</errors>\r\n'
        return resp
    # Read the request body
    header, data = request.body.split(',', 1)
    header = header[5:] # Header starts with "data:", so skip first 5 bytes
    cont_type, encoding = header.split(';', 1)
    valid_content_types = {'application/pdf': 'pdf', 'application/msword': 'doc' , 'application/vnd.openxmlformats-officedocument.wordprocessingml.document': 'docx', 'application/vnd.openxmlformats-officedocument.wordprocessingml.template':'dotx', 'application/vnd.ms-word.document.macroEnabled.12':'docm', 'application/vnd.ms-word.template.macroEnabled.12':'dotm'}
    if cont_type not in valid_content_types.keys():
        errors += 'Invalid Content Type'
        resp += _add_xml_field('errors',errors)
        return resp
    else:
        # Fabricate our own resume file name - to avoid conflicts such as two people having file name as "resume.doc"
        res_fn = 'resume_%s.%s' % (candidate.id, valid_content_types[cont_type])
        if is_tmp_stored:
            path = '/tmp/%s' % res_fn
            try:
                with open(path, 'wb+') as destination:
                    destination.write(b64decode(data.split('#')[0]))
                resp += _add_xml_field("success", res_fn)
                return resp
            except Exception as e:
                logr.info('Exception: %s' % str(e))
                errors += 'Error while saving the resume'
                resp +='<errors>\r\n'
                resp += _add_xml_field('errorStr',errors)
                resp +='</errors>\r\n'
                return resp
        else:
            path = '/content/candidates/resume/%s' % res_fn
            # NOTE(review): resumeUrl is computed but never used below.
            resumeUrl = "./images/content/s3content/candidates/resume/%s" % res_fn
            if _isJobSeeker(request.user):
                # Most recent parse result for this candidate, if any.
                parsed_data_set = ParsedResumeData.objects.filter(candidate_id = candidate.id).order_by('-create_time')
                if len(parsed_data_set) > 0:
                    parsed_data = parsed_data_set[0]
                else:
                    parsed_data = None
                year_ago = timezone.now() - timedelta(days = 365)
                if not parsed_data:
                    # Never parsed before: run the parser now.
                    (cand_resume_data,error_resp) = _resume_parser(cont_type,data,'')
                    if error_resp != '':
                        resp +='<resume>\r\n'
                        resp += error_resp
                        resp +='</resume>\r\n'
                        return resp
                    else:
                        resp += _inline_response_builder(resp,request,candidate,cand_resume_data,res_fn)
                elif parsed_data.create_time < year_ago:
                    # Last parse is stale (>1 year): re-parse.
                    (cand_resume_data,error_resp) = _resume_parser(cont_type,data,'')
                    if error_resp != '':
                        resp +='<resume>\r\n'
                        resp += error_resp
                        resp +='</resume>\r\n'
                        return resp
                    else:
                        resp += _inline_response_builder(resp,request,candidate,cand_resume_data,res_fn)
                else:
                    # Parsed within the last year: refuse the re-upload.
                    resp +='<errors>\r\n'
                    resp += _add_xml_field('errorStr','Resume upload allowed only once per year')
                    resp += _add_xml_field('filename','%s'%candidate.resume_filename)
                    resp +='</errors>\r\n'
                    return resp
            # If there's an existing resume URL set in candidate object, delete the old file
            if candidate.resume_filename:
                old_filename=candidate.resume_filename
                old_path = '/content/candidates/resume/%s' % old_filename
                try:
                    os.remove(old_path)
                except Exception:
                    pass
            try:
                with open(path, 'wb+') as destination:
                    destination.write(b64decode(data.split('#')[0]))
                candidate.resume_filename = res_fn
                candidate.save()
                resp += _add_xml_field("success", res_fn)
                # Job-seeker responses were opened with <resume> inside
                # _inline_response_builder; close the element here.
                if _isJobSeeker(request.user):
                    resp +='</resume>\r\n'
                return resp
            except Exception as e:
                logr.info('Exception: %s' % str(e))
                errors += 'Error while saving file'
                resp += _add_xml_field('errors',errors)
                if _isJobSeeker(request.user):
                    resp +='</resume>\r\n'
                return resp
#End of _upload_resume()
def uploadResume(request):
    """Django view: accept a POSTed resume for the logged-in candidate."""
    logr.info("Incoming request to upload resume; user: %s" % request.user.email)
    if request.method != 'POST':
        return HttpResponse(INVALID_METHOD, content_type="text/xml")
    candidate = Candidate.objects.get(user_id = request.user.id)
    # Prepend the XML declaration to whatever fragment the helper builds.
    body = '<?xml version="1.0" encoding="UTF-8"?>\r\n' + _upload_resume(request,candidate)
    return HttpResponse(body, content_type="text/xml")
#End of uploadResume
| mit |
aristanetworks/neutron | neutron/api/v2/attributes.py | 5 | 33476 | # Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import netaddr
from oslo_log import log as logging
from neutron.common import constants
from neutron.common import exceptions as n_exc
from neutron.openstack.common import uuidutils
LOG = logging.getLogger(__name__)
# Sentinel distinguishing "attribute not supplied" from an explicit None.
ATTR_NOT_SPECIFIED = object()
# Defining a constant to avoid repeating string literal in several modules
SHARED = 'shared'
# Used by range check to indicate no limit for a bound.
UNLIMITED = None
# TODO(watanabe.isao): A fix like in neutron/db/models_v2.py needs to be
# done in other db modules, to reuse the following constants.
# Common definitions for maximum string field length
NAME_MAX_LEN = 255
TENANT_ID_MAX_LEN = 255
DESCRIPTION_MAX_LEN = 255
DEVICE_ID_MAX_LEN = 255
DEVICE_OWNER_MAX_LEN = 255
def _verify_dict_keys(expected_keys, target_dict, strict=True):
"""Allows to verify keys in a dictionary.
:param expected_keys: A list of keys expected to be present.
:param target_dict: The dictionary which should be verified.
:param strict: Specifies whether additional keys are allowed to be present.
:return: True, if keys in the dictionary correspond to the specification.
"""
if not isinstance(target_dict, dict):
msg = (_("Invalid input. '%(target_dict)s' must be a dictionary "
"with keys: %(expected_keys)s") %
{'target_dict': target_dict, 'expected_keys': expected_keys})
LOG.debug(msg)
return msg
expected_keys = set(expected_keys)
provided_keys = set(target_dict.keys())
predicate = expected_keys.__eq__ if strict else expected_keys.issubset
if not predicate(provided_keys):
msg = (_("Validation of dictionary's keys failed. "
"Expected keys: %(expected_keys)s "
"Provided keys: %(provided_keys)s") %
{'expected_keys': expected_keys,
'provided_keys': provided_keys})
LOG.debug(msg)
return msg
def is_attr_set(attribute):
    """Return True when *attribute* carries a real (user-supplied) value."""
    if attribute is None:
        return False
    return attribute is not ATTR_NOT_SPECIFIED
def _validate_values(data, valid_values=None):
if data not in valid_values:
msg = (_("'%(data)s' is not in %(valid_values)s") %
{'data': data, 'valid_values': valid_values})
LOG.debug(msg)
return msg
def _validate_not_empty_string_or_none(data, max_len=None):
if data is not None:
return _validate_not_empty_string(data, max_len=max_len)
def _validate_not_empty_string(data, max_len=None):
    """Return an error message unless *data* is a non-blank string."""
    err = _validate_string(data, max_len=max_len)
    if err:
        return err
    if data.strip():
        return None
    msg = _("'%s' Blank strings are not permitted") % data
    LOG.debug(msg)
    return msg
def _validate_string_or_none(data, max_len=None):
if data is not None:
return _validate_string(data, max_len=max_len)
def _validate_string(data, max_len=None):
    """Return an error message unless *data* is a string within *max_len*.

    Note: ``basestring`` keeps this Python 2 compatible (str and unicode).
    """
    if not isinstance(data, basestring):
        msg = _("'%s' is not a valid string") % data
        LOG.debug(msg)
        return msg
    if max_len is None:
        return None
    if len(data) <= max_len:
        return None
    msg = (_("'%(data)s' exceeds maximum length of %(max_len)s") %
           {'data': data, 'max_len': max_len})
    LOG.debug(msg)
    return msg
def _validate_boolean(data, valid_values=None):
    """Return an error message if *data* cannot be coerced to a boolean."""
    try:
        convert_to_boolean(data)
        return None
    except n_exc.InvalidInput:
        msg = _("'%s' is not a valid boolean value") % data
        LOG.debug(msg)
        return msg
def _validate_range(data, valid_values=None):
    """Check that integer value is within a range provided.

    Test is inclusive. Allows either limit to be ignored, to allow
    checking ranges where only the lower or upper limit matter.
    It is expected that the limits provided are valid integers or
    the value None.
    """
    low, high = valid_values[0], valid_values[1]
    try:
        data = int(data)
    except (ValueError, TypeError):
        msg = _("'%s' is not an integer") % data
        LOG.debug(msg)
        return msg
    if low is not UNLIMITED and data < low:
        msg = _("'%(data)s' is too small - must be at least "
                "'%(limit)d'") % {'data': data, 'limit': low}
        LOG.debug(msg)
        return msg
    if high is not UNLIMITED and data > high:
        msg = _("'%(data)s' is too large - must be no larger than "
                "'%(limit)d'") % {'data': data, 'limit': high}
        LOG.debug(msg)
        return msg
def _validate_no_whitespace(data):
"""Validates that input has no whitespace."""
if re.search(r'\s', data):
msg = _("'%s' contains whitespace") % data
LOG.debug(msg)
raise n_exc.InvalidInput(error_message=msg)
return data
def _validate_mac_address(data, valid_values=None):
    """Return an error message unless *data* is a well-formed MAC address."""
    try:
        is_valid = netaddr.valid_mac(_validate_no_whitespace(data))
    except Exception:
        # TODO(arosen): The code in this file should be refactored
        # so it catches the correct exceptions. _validate_no_whitespace
        # raises AttributeError if data is None.
        is_valid = False
    if is_valid:
        return None
    msg = _("'%s' is not a valid MAC address") % data
    LOG.debug(msg)
    return msg
def _validate_mac_address_or_none(data, valid_values=None):
if data is None:
return
return _validate_mac_address(data, valid_values)
def _validate_ip_address(data, valid_values=None):
    """Return None if *data* is a valid IP address, else an error message."""
    try:
        netaddr.IPAddress(_validate_no_whitespace(data))
        # The followings are quick checks for IPv6 (has ':') and
        # IPv4. (has 3 periods like 'xx.xx.xx.xx')
        # NOTE(yamamoto): netaddr uses libraries provided by the underlying
        # platform to convert addresses. For example, inet_aton(3).
        # Some platforms, including NetBSD and OS X, have inet_aton
        # implementation which accepts more varying forms of addresses than
        # we want to accept here. The following check is to reject such
        # addresses. For Example:
        #   >>> netaddr.IPAddress('1' * 59)
        #   IPAddress('199.28.113.199')
        #   >>> netaddr.IPAddress(str(int('1' * 59) & 0xffffffff))
        #   IPAddress('199.28.113.199')
        #   >>>
        if ':' not in data and data.count('.') != 3:
            raise ValueError()
    except Exception:
        # Any failure above (whitespace, netaddr parse, shape check)
        # collapses into the same "not a valid IP address" message.
        msg = _("'%s' is not a valid IP address") % data
        LOG.debug(msg)
        return msg
def _validate_ip_pools(data, valid_values=None):
"""Validate that start and end IP addresses are present.
In addition to this the IP addresses will also be validated
"""
if not isinstance(data, list):
msg = _("Invalid data format for IP pool: '%s'") % data
LOG.debug(msg)
return msg
expected_keys = ['start', 'end']
for ip_pool in data:
msg = _verify_dict_keys(expected_keys, ip_pool)
if msg:
return msg
for k in expected_keys:
msg = _validate_ip_address(ip_pool[k])
if msg:
return msg
def _validate_fixed_ips(data, valid_values=None):
    """Validate a list of fixed-IP dicts for a port.

    Each entry may carry an 'ip_address' (must be a valid, non-duplicate
    IP address) and/or a 'subnet_id' (must be a UUID; duplicate
    subnet_ids are legitimate).  Returns an error message on failure,
    None on success.
    """
    if not isinstance(data, list):
        msg = _("Invalid data format for fixed IP: '%s'") % data
        LOG.debug(msg)
        return msg
    ips = []
    for fixed_ip in data:
        if not isinstance(fixed_ip, dict):
            msg = _("Invalid data format for fixed IP: '%s'") % fixed_ip
            LOG.debug(msg)
            return msg
        if 'ip_address' in fixed_ip:
            # Ensure that duplicate entries are not set - just checking IP
            # suffices. Duplicate subnet_id's are legitimate.
            fixed_ip_address = fixed_ip['ip_address']
            if fixed_ip_address in ips:
                msg = _("Duplicate IP address '%s'") % fixed_ip_address
                LOG.debug(msg)
                # Bug fix: this message was previously logged but never
                # returned, so duplicate addresses passed validation.
                return msg
            msg = _validate_ip_address(fixed_ip_address)
            if msg:
                return msg
            ips.append(fixed_ip_address)
        if 'subnet_id' in fixed_ip:
            msg = _validate_uuid(fixed_ip['subnet_id'])
            if msg:
                return msg
def _validate_ip_or_hostname(host):
    """Return an error message unless *host* is a valid IP or hostname."""
    ip_err = _validate_ip_address(host)
    if ip_err is None:
        return None
    name_err = _validate_hostname(host)
    if name_err is None:
        return None
    msg = _("%(host)s is not a valid IP or hostname. Details: "
            "%(ip_err)s, %(name_err)s") % {'ip_err': ip_err, 'host': host,
                                           'name_err': name_err}
    LOG.debug(msg)
    return msg
def _validate_nameservers(data, valid_values=None):
    """Validate an iterable of DNS nameservers (IPs or hostnames).

    Rejects non-iterable input, invalid entries and duplicates.
    Returns an error message on failure, None on success.
    """
    if not hasattr(data, '__iter__'):
        msg = _("Invalid data format for nameserver: '%s'") % data
        LOG.debug(msg)
        return msg
    seen = []
    for host in data:
        # This may be an IP or a hostname
        err = _validate_ip_or_hostname(host)
        if err:
            msg = _("'%(host)s' is not a valid nameserver. %(msg)s") % {
                'host': host, 'msg': err}
            LOG.debug(msg)
            return msg
        if host in seen:
            msg = _("Duplicate nameserver '%s'") % host
            LOG.debug(msg)
            return msg
        seen.append(host)
def _validate_hostroutes(data, valid_values=None):
    """Validate a list of {'destination': cidr, 'nexthop': ip} routes.

    Rejects non-list input, malformed entries and duplicates.
    Returns an error message on failure, None on success.
    """
    if not isinstance(data, list):
        msg = _("Invalid data format for hostroute: '%s'") % data
        LOG.debug(msg)
        return msg
    expected_keys = ['destination', 'nexthop']
    seen = []
    for route in data:
        # `or` short-circuits, so the subnet/nexthop checks only run
        # once the required keys are known to be present.
        err = (_verify_dict_keys(expected_keys, route) or
               _validate_subnet(route['destination']) or
               _validate_ip_address(route['nexthop']))
        if err:
            return err
        if route in seen:
            msg = _("Duplicate hostroute '%s'") % route
            LOG.debug(msg)
            return msg
        seen.append(route)
def _validate_ip_address_or_none(data, valid_values=None):
if data is None:
return None
return _validate_ip_address(data, valid_values)
def _validate_subnet(data, valid_values=None):
    """Return an error message unless *data* is a valid CIDR subnet.

    A parseable address without an explicit '/' prefix is rejected and
    the canonical cidr form is suggested in the message.  Returns None
    on success.
    """
    try:
        net = netaddr.IPNetwork(_validate_no_whitespace(data))
        if '/' in data:
            return None
        msg = _("'%(data)s' isn't a recognized IP subnet cidr,"
                " '%(cidr)s' is recommended") % {"data": data,
                                                "cidr": net.cidr}
    except Exception:
        msg = _("'%s' is not a valid IP subnet") % data
    LOG.debug(msg)
    return msg
def _validate_subnet_list(data, valid_values=None):
    """Validate a list of unique CIDR subnets."""
    if not isinstance(data, list):
        msg = _("'%s' is not a list") % data
        LOG.debug(msg)
        return msg
    if len(data) != len(set(data)):
        msg = _("Duplicate items in the list: '%s'") % ', '.join(data)
        LOG.debug(msg)
        return msg
    for subnet in data:
        err = _validate_subnet(subnet)
        if err:
            return err
def _validate_subnet_or_none(data, valid_values=None):
if data is None:
return
return _validate_subnet(data, valid_values)
def _validate_hostname(data):
# NOTE: An individual name regex instead of an entire FQDN was used
# because its easier to make correct. Feel free to replace with a
# full regex solution. The logic should validate that the hostname
# matches RFC 1123 (section 2.1) and RFC 952.
hostname_pattern = "[a-zA-Z0-9-]{1,63}$"
try:
# Trailing periods are allowed to indicate that a name is fully
# qualified per RFC 1034 (page 7).
trimmed = data if data[-1] != '.' else data[:-1]
if len(trimmed) > 255:
raise TypeError(
_("'%s' exceeds the 255 character hostname limit") % trimmed)
names = trimmed.split('.')
for name in names:
if not name:
raise TypeError(_("Encountered an empty component."))
if name[-1] == '-' or name[0] == '-':
raise TypeError(
_("Name '%s' must not start or end with a hyphen.") % name)
if not re.match(hostname_pattern, name):
raise TypeError(
_("Name '%s' must be 1-63 characters long, each of "
"which can only be alphanumeric or a hyphen.") % name)
# RFC 1123 hints that a TLD can't be all numeric. last is a TLD if
# it's an FQDN.
if len(names) > 1 and re.match("^[0-9]+$", names[-1]):
raise TypeError(_("TLD '%s' must not be all numeric") % names[-1])
except TypeError as e:
msg = _("'%(data)s' is not a valid hostname. Reason: %(reason)s") % {
'data': data, 'reason': e.message}
LOG.debug(msg)
return msg
def _validate_regex(data, valid_values=None):
try:
if re.match(valid_values, data):
return
except TypeError:
pass
msg = _("'%s' is not a valid input") % data
LOG.debug(msg)
return msg
def _validate_regex_or_none(data, valid_values=None):
if data is None:
return
return _validate_regex(data, valid_values)
def _validate_uuid(data, valid_values=None):
    """Return an error message unless *data* looks like a UUID."""
    if uuidutils.is_uuid_like(data):
        return None
    msg = _("'%s' is not a valid UUID") % data
    LOG.debug(msg)
    return msg
def _validate_uuid_or_none(data, valid_values=None):
if data is not None:
return _validate_uuid(data)
def _validate_uuid_list(data, valid_values=None):
    """Validate a list of unique UUIDs."""
    if not isinstance(data, list):
        msg = _("'%s' is not a list") % data
        LOG.debug(msg)
        return msg
    for item in data:
        err = _validate_uuid(item)
        if err:
            return err
    if len(data) != len(set(data)):
        msg = _("Duplicate items in the list: '%s'") % ', '.join(data)
        LOG.debug(msg)
        return msg
def _validate_dict_item(key, key_validator, data):
    """Convert (if requested) and validate a single dict entry.

    *key_validator* is the spec dict for *key*: a 'convert_to' callable
    is applied in place, and the first 'type:*' entry selects which
    validator to run.  Returns the validator's error message, or None.
    """
    # Find conversion function, if any, and apply it
    convert = key_validator.get('convert_to')
    if convert:
        data[key] = convert(data.get(key))
    # Find validator function
    # TODO(salv-orlando): Structure of dict attributes should be improved
    # to avoid iterating over items
    val_func = val_params = None
    for (spec_name, spec_params) in key_validator.iteritems():
        if not spec_name.startswith('type:'):
            continue
        # ask forgiveness, not permission
        try:
            val_func = validators[spec_name]
        except KeyError:
            msg = _("Validator '%s' does not exist.") % spec_name
            LOG.debug(msg)
            return msg
        val_params = spec_params
        break
    # Process validation
    if val_func:
        return val_func(data.get(key), val_params)
def _validate_dict(data, key_specs=None):
if not isinstance(data, dict):
msg = _("'%s' is not a dictionary") % data
LOG.debug(msg)
return msg
# Do not perform any further validation, if no constraints are supplied
if not key_specs:
return
# Check whether all required keys are present
required_keys = [key for key, spec in key_specs.iteritems()
if spec.get('required')]
if required_keys:
msg = _verify_dict_keys(required_keys, data, False)
if msg:
return msg
# Perform validation and conversion of all values
# according to the specifications.
for key, key_validator in [(k, v) for k, v in key_specs.iteritems()
if k in data]:
msg = _validate_dict_item(key, key_validator, data)
if msg:
return msg
def _validate_dict_or_none(data, key_specs=None):
if data is not None:
return _validate_dict(data, key_specs)
def _validate_dict_or_empty(data, key_specs=None):
if data != {}:
return _validate_dict(data, key_specs)
def _validate_dict_or_nodata(data, key_specs=None):
if data:
return _validate_dict(data, key_specs)
def _validate_non_negative(data, valid_values=None):
try:
data = int(data)
except (ValueError, TypeError):
msg = _("'%s' is not an integer") % data
LOG.debug(msg)
return msg
if data < 0:
msg = _("'%s' should be non-negative") % data
LOG.debug(msg)
return msg
def convert_to_boolean(data):
    """Convert a string/bool/int *data* to bool.

    Accepts 'true'/'false'/'1'/'0' (case-insensitive), bools, and the
    ints 0/1; raises InvalidInput for anything else.
    """
    if isinstance(data, basestring):
        lowered = data.lower()
        if lowered in ("true", "1"):
            return True
        if lowered in ("false", "0"):
            return False
    elif isinstance(data, bool):
        return data
    elif isinstance(data, int) and data in (0, 1):
        return bool(data)
    msg = _("'%s' cannot be converted to boolean") % data
    raise n_exc.InvalidInput(error_message=msg)
def convert_to_boolean_if_not_none(data):
    """Convert to bool, passing None through unchanged."""
    return convert_to_boolean(data) if data is not None else None
def convert_to_int(data):
    """Convert *data* to int, raising InvalidInput when not possible."""
    try:
        return int(data)
    except (ValueError, TypeError):
        msg = _("'%s' is not a integer") % data
        raise n_exc.InvalidInput(error_message=msg)
def convert_to_int_if_not_none(data):
    """Convert to int, passing None through unchanged."""
    return convert_to_int(data) if data is not None else data
def convert_kvp_str_to_list(data):
    """Convert a value of the form 'key=value' to ['key', 'value'].

    Only the first '=' splits; both pieces are stripped of surrounding
    whitespace.  The value part may be empty, the key may not.

    :raises: n_exc.InvalidInput if any of the strings are malformed
             (e.g. do not contain a key).
    """
    parts = [piece.strip() for piece in data.split('=', 1)]
    if len(parts) == 2 and parts[0]:
        return parts
    msg = _("'%s' is not of the form <key>=[value]") % data
    raise n_exc.InvalidInput(error_message=msg)
def convert_kvp_list_to_dict(kvp_list):
    """Convert a list of 'key=value' strings to a dict of key -> values.

    Values for a key that appears more than once are merged into a
    single list (duplicates removed, order not preserved) — repeated
    keys do NOT raise.

    :raises: n_exc.InvalidInput if any of the strings are malformed
             (e.g. do not contain a key).
    """
    if kvp_list == ['True']:
        # No values were provided (i.e. '--flag-name')
        return {}
    kvp_map = {}
    for kvp_str in kvp_list:
        key, value = convert_kvp_str_to_list(kvp_str)
        kvp_map.setdefault(key, set())
        kvp_map[key].add(value)
    return dict((x, list(y)) for x, y in kvp_map.iteritems())
def convert_none_to_empty_list(value):
    """Return [] for None; any other value is passed through."""
    if value is None:
        return []
    return value
def convert_none_to_empty_dict(value):
    """Return {} for None; any other value is passed through."""
    if value is None:
        return {}
    return value
def convert_to_list(data):
    """Wrap *data* in a list.

    None becomes [], objects with __iter__ are materialized with
    list(), and anything else is wrapped as a single-element list.
    """
    if data is None:
        return []
    if hasattr(data, '__iter__'):
        return list(data)
    return [data]
# Regular-expression building blocks used by the API validators below.
HEX_ELEM = '[0-9A-Fa-f]'
# Canonical 8-4-4-4-12 hex-digit UUID layout.
UUID_PATTERN = '-'.join([HEX_ELEM + '{8}', HEX_ELEM + '{4}',
                         HEX_ELEM + '{4}', HEX_ELEM + '{4}',
                         HEX_ELEM + '{12}'])
# Note: In order to ensure that the MAC address is unicast the first byte
# must be even.
MAC_PATTERN = "^%s[aceACE02468](:%s{2}){5}$" % (HEX_ELEM, HEX_ELEM)
# Dictionary that maintains a list of validation functions.
# Keys are the 'type:*' strings referenced by the 'validate' specs in
# RESOURCE_ATTRIBUTE_MAP; _validate_dict_item looks validators up here.
validators = {'type:dict': _validate_dict,
              'type:dict_or_none': _validate_dict_or_none,
              'type:dict_or_empty': _validate_dict_or_empty,
              'type:dict_or_nodata': _validate_dict_or_nodata,
              'type:fixed_ips': _validate_fixed_ips,
              'type:hostroutes': _validate_hostroutes,
              'type:ip_address': _validate_ip_address,
              'type:ip_address_or_none': _validate_ip_address_or_none,
              'type:ip_pools': _validate_ip_pools,
              'type:mac_address': _validate_mac_address,
              'type:mac_address_or_none': _validate_mac_address_or_none,
              'type:nameservers': _validate_nameservers,
              'type:non_negative': _validate_non_negative,
              'type:range': _validate_range,
              'type:regex': _validate_regex,
              'type:regex_or_none': _validate_regex_or_none,
              'type:string': _validate_string,
              'type:string_or_none': _validate_string_or_none,
              'type:not_empty_string': _validate_not_empty_string,
              'type:not_empty_string_or_none':
              _validate_not_empty_string_or_none,
              'type:subnet': _validate_subnet,
              'type:subnet_list': _validate_subnet_list,
              'type:subnet_or_none': _validate_subnet_or_none,
              'type:uuid': _validate_uuid,
              'type:uuid_or_none': _validate_uuid_or_none,
              'type:uuid_list': _validate_uuid_list,
              'type:values': _validate_values,
              'type:boolean': _validate_boolean}
# Define constants for base resource name.
# Plural collection names are derived by appending 's'.
NETWORK = 'network'
NETWORKS = '%ss' % NETWORK
PORT = 'port'
PORTS = '%ss' % PORT
SUBNET = 'subnet'
SUBNETS = '%ss' % SUBNET
SUBNETPOOL = 'subnetpool'
SUBNETPOOLS = '%ss' % SUBNETPOOL
# Note: a default of ATTR_NOT_SPECIFIED indicates that an
# attribute is not required, but will be generated by the plugin
# if it is not specified. Particularly, a value of ATTR_NOT_SPECIFIED
# is different from an attribute that has been specified with a value of
# None. For example, if 'gateway_ip' is omitted in a request to
# create a subnet, the plugin will receive ATTR_NOT_SPECIFIED
# and the default gateway_ip will be generated.
# However, if gateway_ip is specified as None, this means that
# the subnet does not have a gateway IP.
# The following is a short reference for understanding attribute info:
# default: default value of the attribute (if missing, the attribute
#    becomes mandatory).
# allow_post: the attribute can be used on POST requests.
# allow_put: the attribute can be used on PUT requests.
# validate: specifies rules for validating data in the attribute.
# convert_to: transformation to apply to the value before it is returned
# is_visible: the attribute is returned in GET responses.
# required_by_policy: the attribute is required by the policy engine and
# should therefore be filled by the API layer even if not present in
# request body.
# enforce_policy: the attribute is actively part of the policy enforcing
# mechanism, ie: there might be rules which refer to this attribute.
# Per-resource attribute specifications; see the comment block above for
# the meaning of each spec flag.  'validate' entries name keys of the
# `validators` dict defined earlier in this module.
RESOURCE_ATTRIBUTE_MAP = {
    NETWORKS: {
        'id': {'allow_post': False, 'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True,
               'primary_key': True},
        'name': {'allow_post': True, 'allow_put': True,
                 'validate': {'type:string': NAME_MAX_LEN},
                 'default': '', 'is_visible': True},
        'subnets': {'allow_post': False, 'allow_put': False,
                    'default': [],
                    'is_visible': True},
        'admin_state_up': {'allow_post': True, 'allow_put': True,
                           'default': True,
                           'convert_to': convert_to_boolean,
                           'is_visible': True},
        'status': {'allow_post': False, 'allow_put': False,
                   'is_visible': True},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'validate': {'type:string': TENANT_ID_MAX_LEN},
                      'required_by_policy': True,
                      'is_visible': True},
        SHARED: {'allow_post': True,
                 'allow_put': True,
                 'default': False,
                 'convert_to': convert_to_boolean,
                 'is_visible': True,
                 'required_by_policy': True,
                 'enforce_policy': True},
    },
    # Ports reference a network and may carry fixed IPs.
    PORTS: {
        'id': {'allow_post': False, 'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True,
               'primary_key': True},
        'name': {'allow_post': True, 'allow_put': True, 'default': '',
                 'validate': {'type:string': NAME_MAX_LEN},
                 'is_visible': True},
        'network_id': {'allow_post': True, 'allow_put': False,
                       'required_by_policy': True,
                       'validate': {'type:uuid': None},
                       'is_visible': True},
        'admin_state_up': {'allow_post': True, 'allow_put': True,
                           'default': True,
                           'convert_to': convert_to_boolean,
                           'is_visible': True},
        'mac_address': {'allow_post': True, 'allow_put': True,
                        'default': ATTR_NOT_SPECIFIED,
                        'validate': {'type:mac_address': None},
                        'enforce_policy': True,
                        'is_visible': True},
        'fixed_ips': {'allow_post': True, 'allow_put': True,
                      'default': ATTR_NOT_SPECIFIED,
                      'convert_list_to': convert_kvp_list_to_dict,
                      'validate': {'type:fixed_ips': None},
                      'enforce_policy': True,
                      'is_visible': True},
        'device_id': {'allow_post': True, 'allow_put': True,
                      'validate': {'type:string': DEVICE_ID_MAX_LEN},
                      'default': '',
                      'is_visible': True},
        'device_owner': {'allow_post': True, 'allow_put': True,
                         'validate': {'type:string': DEVICE_OWNER_MAX_LEN},
                         'default': '',
                         'is_visible': True},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'validate': {'type:string': TENANT_ID_MAX_LEN},
                      'required_by_policy': True,
                      'is_visible': True},
        'status': {'allow_post': False, 'allow_put': False,
                   'is_visible': True},
    },
    # Subnets belong to a network and may come from a subnet pool.
    SUBNETS: {
        'id': {'allow_post': False, 'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True,
               'primary_key': True},
        'name': {'allow_post': True, 'allow_put': True, 'default': '',
                 'validate': {'type:string': NAME_MAX_LEN},
                 'is_visible': True},
        'ip_version': {'allow_post': True, 'allow_put': False,
                       'convert_to': convert_to_int,
                       'validate': {'type:values': [4, 6]},
                       'is_visible': True},
        'network_id': {'allow_post': True, 'allow_put': False,
                       'required_by_policy': True,
                       'validate': {'type:uuid': None},
                       'is_visible': True},
        'subnetpool_id': {'allow_post': True,
                          'allow_put': False,
                          'default': ATTR_NOT_SPECIFIED,
                          'required_by_policy': False,
                          'validate': {'type:uuid_or_none': None},
                          'is_visible': True},
        'prefixlen': {'allow_post': True,
                      'allow_put': False,
                      'validate': {'type:non_negative': None},
                      'convert_to': convert_to_int,
                      'default': ATTR_NOT_SPECIFIED,
                      'required_by_policy': False,
                      'is_visible': False},
        'cidr': {'allow_post': True,
                 'allow_put': False,
                 'default': ATTR_NOT_SPECIFIED,
                 'validate': {'type:subnet_or_none': None},
                 'required_by_policy': False,
                 'is_visible': True},
        'gateway_ip': {'allow_post': True, 'allow_put': True,
                       'default': ATTR_NOT_SPECIFIED,
                       'validate': {'type:ip_address_or_none': None},
                       'is_visible': True},
        'allocation_pools': {'allow_post': True, 'allow_put': True,
                             'default': ATTR_NOT_SPECIFIED,
                             'validate': {'type:ip_pools': None},
                             'is_visible': True},
        'dns_nameservers': {'allow_post': True, 'allow_put': True,
                            'convert_to': convert_none_to_empty_list,
                            'default': ATTR_NOT_SPECIFIED,
                            'validate': {'type:nameservers': None},
                            'is_visible': True},
        'host_routes': {'allow_post': True, 'allow_put': True,
                        'convert_to': convert_none_to_empty_list,
                        'default': ATTR_NOT_SPECIFIED,
                        'validate': {'type:hostroutes': None},
                        'is_visible': True},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'validate': {'type:string': TENANT_ID_MAX_LEN},
                      'required_by_policy': True,
                      'is_visible': True},
        'enable_dhcp': {'allow_post': True, 'allow_put': True,
                        'default': True,
                        'convert_to': convert_to_boolean,
                        'is_visible': True},
        'ipv6_ra_mode': {'allow_post': True, 'allow_put': False,
                         'default': ATTR_NOT_SPECIFIED,
                         'validate': {'type:values': constants.IPV6_MODES},
                         'is_visible': True},
        'ipv6_address_mode': {'allow_post': True, 'allow_put': False,
                              'default': ATTR_NOT_SPECIFIED,
                              'validate': {'type:values':
                                           constants.IPV6_MODES},
                              'is_visible': True},
        SHARED: {'allow_post': False,
                 'allow_put': False,
                 'default': False,
                 'convert_to': convert_to_boolean,
                 'is_visible': False,
                 'required_by_policy': True,
                 'enforce_policy': True},
    },
    # Subnet pools hold CIDR prefixes from which subnets are carved.
    SUBNETPOOLS: {
        'id': {'allow_post': False,
               'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True,
               'primary_key': True},
        'name': {'allow_post': True,
                 'allow_put': True,
                 'validate': {'type:not_empty_string': None},
                 'is_visible': True},
        'tenant_id': {'allow_post': True,
                      'allow_put': False,
                      'validate': {'type:string': None},
                      'required_by_policy': True,
                      'is_visible': True},
        'prefixes': {'allow_post': True,
                     'allow_put': True,
                     'validate': {'type:subnet_list': None},
                     'is_visible': True},
        'default_quota': {'allow_post': True,
                          'allow_put': True,
                          'validate': {'type:non_negative': None},
                          'convert_to': convert_to_int,
                          'default': ATTR_NOT_SPECIFIED,
                          'is_visible': True},
        'ip_version': {'allow_post': False,
                       'allow_put': False,
                       'is_visible': True},
        'default_prefixlen': {'allow_post': True,
                              'allow_put': True,
                              'validate': {'type:non_negative': None},
                              'convert_to': convert_to_int,
                              'default': ATTR_NOT_SPECIFIED,
                              'is_visible': True},
        'min_prefixlen': {'allow_post': True,
                          'allow_put': True,
                          'default': ATTR_NOT_SPECIFIED,
                          'validate': {'type:non_negative': None},
                          'convert_to': convert_to_int,
                          'is_visible': True},
        'max_prefixlen': {'allow_post': True,
                          'allow_put': True,
                          'default': ATTR_NOT_SPECIFIED,
                          'validate': {'type:non_negative': None},
                          'convert_to': convert_to_int,
                          'is_visible': True},
        SHARED: {'allow_post': True,
                 'allow_put': False,
                 'default': False,
                 'convert_to': convert_to_boolean,
                 'is_visible': True,
                 'required_by_policy': True,
                 'enforce_policy': True},
    }
}
# Identify the attribute used by a resource to reference another resource
RESOURCE_FOREIGN_KEYS = {
    NETWORKS: 'network_id'
}
# Map plural collection names to their singular member names; used when
# (de)serializing list-valued attributes and sub-resources.
PLURALS = {NETWORKS: NETWORK,
           PORTS: PORT,
           SUBNETS: SUBNET,
           SUBNETPOOLS: SUBNETPOOL,
           'dns_nameservers': 'dns_nameserver',
           'host_routes': 'host_route',
           'allocation_pools': 'allocation_pool',
           'fixed_ips': 'fixed_ip',
           'extensions': 'extension'}
| apache-2.0 |
clawpack/clawpack-4.x | book/chap23/acoustics/1drad/setplot.py | 2 | 2339 |
"""
Set up the plot figures, axes, and items to be done for each frame.
This module is imported by the plotting routines and then the
function setplot is called to set the plot parameters.
"""
#--------------------------
def setplot(plotdata):
#--------------------------
    """
    Specify what is to be plotted at each frame.
    Input:  plotdata, an instance of pyclaw.plotters.data.ClawPlotData.
    Output: a modified version of plotdata.
    """
    plotdata.clearfigures()  # clear any old figures,axes,items data

    def _add_line_figure(name, figno, plot_var):
        # The two q-component figures were copy-paste duplicates; one
        # helper builds a figure/axes/1d-item triple with shared style.
        plotfigure = plotdata.new_plotfigure(name=name, figno=figno)
        plotaxes = plotfigure.new_plotaxes()
        plotaxes.xlimits = 'auto'
        plotaxes.ylimits = 'auto'
        plotaxes.title = name
        plotitem = plotaxes.new_plotitem(plot_type='1d')
        plotitem.plot_var = plot_var
        plotitem.plotstyle = '-o'
        plotitem.color = 'b'
        plotitem.show = True       # show on plot?

    # Figures for q[0] and q[1]
    _add_line_figure('q[0]', 0, 0)
    _add_line_figure('q[1]', 1, 1)

    # Parameters used only when creating html and/or latex hardcopy
    # e.g., via pyclaw.plotters.frametools.printframes:
    plotdata.printfigs = True                # print figures
    plotdata.print_format = 'png'            # file format
    plotdata.print_framenos = 'all'          # list of frames to print
    plotdata.print_fignos = 'all'            # list of figures to print
    plotdata.html = True                     # create html files of plots?
    plotdata.html_homelink = '../README.html'   # pointer for top of index
    plotdata.latex = True                    # create latex file of plots?
    plotdata.latex_figsperline = 2           # layout of plots
    plotdata.latex_framesperline = 1         # layout of plots
    plotdata.latex_makepdf = False           # also run pdflatex?
    return plotdata
| bsd-3-clause |
grembo/buildbot | master/buildbot/db/masters.py | 11 | 3882 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
import sqlalchemy as sa
from twisted.internet import reactor
from buildbot.db import base
from buildbot.util import epoch2datetime
class MasterDict(dict):
    """Plain dict subclass used for rows returned from the masters
    table, kept distinct so callers can type-check results."""
    pass
class MastersConnectorComponent(base.DBConnectorComponent):
    """DB connector component for the buildmaster `masters` table.

    All public methods run their work on the database thread pool and
    return Deferreds.
    """
    # data-API field name -> database column name
    data2db = {"masterid": "id", "link": "id"}
    def findMasterId(self, name, _reactor=reactor):
        # Return (via Deferred) the id of the master named `name`,
        # inserting a new, initially-inactive row if none exists.
        # `_reactor` is injectable so tests can control the clock used
        # for last_active.
        tbl = self.db.model.masters
        return self.findSomethingId(
            tbl=tbl,
            whereclause=(tbl.c.name == name),
            insert_values=dict(
                name=name,
                name_hash=self.hashColumns(name),
                active=0, # initially inactive
                last_active=_reactor.seconds()
            ))
    def setMasterState(self, masterid, active, _reactor=reactor):
        # Mark a master active/inactive.  Returns (via Deferred) True
        # iff the stored state actually changed.  Deactivating a master
        # also removes its scheduler_masters links.
        def thd(conn):
            tbl = self.db.model.masters
            whereclause = (tbl.c.id == masterid)
            # get the old state
            r = conn.execute(sa.select([tbl.c.active],
                                       whereclause=whereclause))
            rows = r.fetchall()
            r.close()
            if not rows:
                return False # can't change a row that doesn't exist..
            was_active = bool(rows[0].active)
            if not active:
                # if we're marking inactive, then delete any links to this
                # master
                sch_mst_tbl = self.db.model.scheduler_masters
                q = sch_mst_tbl.delete(
                    whereclause=(sch_mst_tbl.c.masterid == masterid))
                conn.execute(q)
            # set the state (unconditionally, just to be safe)
            q = tbl.update(whereclause=whereclause)
            q = q.values(active=1 if active else 0)
            if active:
                q = q.values(last_active=_reactor.seconds())
            conn.execute(q)
            # return True if there was a change in state
            return was_active != bool(active)
        return self.db.pool.do(thd)
    def getMaster(self, masterid):
        # Fetch one master row as a MasterDict, or None if not found.
        def thd(conn):
            tbl = self.db.model.masters
            res = conn.execute(tbl.select(
                whereclause=(tbl.c.id == masterid)))
            row = res.fetchone()
            rv = None
            if row:
                rv = self._masterdictFromRow(row)
            res.close()
            return rv
        return self.db.pool.do(thd)
    def getMasters(self):
        # Fetch all master rows as a list of MasterDicts.
        def thd(conn):
            tbl = self.db.model.masters
            return [
                self._masterdictFromRow(row)
                for row in conn.execute(tbl.select()).fetchall()]
        return self.db.pool.do(thd)
    def setAllMastersActiveLongTimeAgo(self, _reactor=reactor):
        # Mark every master active with last_active at the epoch (0),
        # i.e. "a long time ago".
        def thd(conn):
            tbl = self.db.model.masters
            q = tbl.update().values(active=1, last_active=0)
            conn.execute(q)
        return self.db.pool.do(thd)
    def _masterdictFromRow(self, row):
        # Convert a SQLAlchemy row to the external MasterDict shape;
        # last_active is exposed as a datetime via epoch2datetime.
        return MasterDict(id=row.id, name=row.name,
                          active=bool(row.active),
                          last_active=epoch2datetime(row.last_active))
| gpl-2.0 |
oesteban/dipy | dipy/align/tests/test_sumsqdiff.py | 5 | 25515 | import numpy as np
from dipy.align import floating
from numpy.testing import (assert_equal,
assert_almost_equal,
assert_array_almost_equal,
assert_allclose)
import dipy.align.sumsqdiff as ssd
def iterate_residual_field_ssd_2d(delta_field, sigmasq_field, grad, target,
                                  lambda_param, dfield):
    r"""One Gauss-Seidel sweep of the SSD residual linear system in 2D.

    This implementation is for testing purposes only. The problem
    with Gauss-Seidel iterations is that it depends on the order
    in which we iterate over the variables, so it is necessary to
    replicate the implementation under test.

    Parameters
    ----------
    delta_field : (R, C) array — intensity difference F - G
    sigmasq_field : (R, C) array or None — per-pixel variance; None is
        treated as variance 1, +inf selects pure neighbor averaging
    grad : (R, C, 2) array — gradient of the moving image
    target : (R, C, 2) array or None — right-hand side; when None it is
        computed as delta_field * grad
    lambda_param : float — regularization weight
    dfield : (R, C, 2) array — displacement field, updated in place
    """
    nrows, ncols = delta_field.shape
    if target is None:
        b = np.zeros_like(grad)
        b[..., 0] = delta_field * grad[..., 0]
        b[..., 1] = delta_field * grad[..., 1]
    else:
        b = target
    y = np.zeros(2)
    for r in range(nrows):
        for c in range(ncols):
            sigmasq = sigmasq_field[r, c] if sigmasq_field is not None else 1
            # Accumulate already-updated neighbors; this must happen
            # inside the loops because earlier pixels of dfield were
            # modified by this very sweep (Gauss-Seidel).
            nn = 0
            y[:] = 0
            for (dRow, dCol) in [(-1, 0), (0, 1), (1, 0), (0, -1)]:
                dr = r + dRow
                dc = c + dCol
                if 0 <= dr < nrows and 0 <= dc < ncols:
                    nn += 1
                    y += dfield[dr, dc]
            if np.isinf(sigmasq):
                # Infinite variance: the data term vanishes, keep the
                # average of the neighbors.
                dfield[r, c] = y / nn
            else:
                tau = sigmasq * lambda_param * nn
                # Removed: unused local `delta` and a pointless
                # preallocated A = np.ndarray((2,2)) that was rebound
                # on every iteration anyway.
                A = np.outer(grad[r, c], grad[r, c]) + tau * np.eye(2)
                det = np.linalg.det(A)
                if(det < 1e-9):
                    nrm2 = np.sum(grad[r, c]**2)
                    if(nrm2 < 1e-9):
                        dfield[r, c, :] = 0
                    else:
                        dfield[r, c] = b[r, c] / nrm2
                else:
                    y = b[r, c] + sigmasq * lambda_param * y
                    dfield[r, c] = np.linalg.solve(A, y)
def iterate_residual_field_ssd_3d(delta_field, sigmasq_field, grad, target,
                                  lambda_param, dfield):
    r"""One Gauss-Seidel sweep of the SSD residual linear system in 3D.

    This implementation is for testing purposes only. The problem
    with Gauss-Seidel iterations is that it depends on the order
    in which we iterate over the variables, so it is necessary to
    replicate the implementation under test.

    Parameters mirror iterate_residual_field_ssd_2d with an extra
    leading slice axis; dfield has shape (S, R, C, 3) and is updated
    in place.
    """
    nslices, nrows, ncols = delta_field.shape
    if target is None:
        b = np.zeros_like(grad)
        for i in range(3):
            b[..., i] = delta_field * grad[..., i]
    else:
        b = target
    y = np.ndarray((3,))
    neighborhood = ((-1, 0, 0), (0, -1, 0), (0, 0, 1),
                    (0, 1, 0), (0, 0, -1), (1, 0, 0))
    for s in range(nslices):
        for r in range(nrows):
            for c in range(ncols):
                g = grad[s, r, c]
                sigmasq = sigmasq_field[s, r, c] if sigmasq_field is not None else 1
                # Accumulate already-updated neighbors (Gauss-Seidel).
                nn = 0
                y[:] = 0
                for dSlice, dRow, dCol in neighborhood:
                    ds = s + dSlice
                    dr = r + dRow
                    dc = c + dCol
                    if (0 <= ds < nslices and 0 <= dr < nrows and
                            0 <= dc < ncols):
                        nn += 1
                        y += dfield[ds, dr, dc]
                if(np.isinf(sigmasq)):
                    dfield[s, r, c] = y / nn
                elif(sigmasq < 1e-9):
                    nrm2 = np.sum(g**2)
                    if(nrm2 < 1e-9):
                        dfield[s, r, c, :] = 0
                    else:
                        dfield[s, r, c, :] = b[s, r, c] / nrm2
                else:
                    tau = sigmasq * lambda_param * nn
                    y = b[s, r, c] + sigmasq * lambda_param * y
                    G = np.outer(g, g) + tau * np.eye(3)
                    # Fix: np.linalg.linalg was a private alias removed
                    # in NumPy 2.0; use the public exception name.
                    try:
                        dfield[s, r, c] = np.linalg.solve(G, y)
                    except np.linalg.LinAlgError:
                        nrm2 = np.sum(g**2)
                        if(nrm2 < 1e-9):
                            dfield[s, r, c, :] = 0
                        else:
                            dfield[s, r, c] = b[s, r, c] / nrm2
def test_compute_residual_displacement_field_ssd_2d():
    """Validate ssd.compute_residual_displacement_field_ssd_2d and the
    Gauss-Seidel iteration against the Python reference implementation
    above.

    NOTE: the values below depend on the exact order of the np.random
    calls (seeded); do not reorder statements.
    """
    #Select arbitrary images' shape (same shape for both images)
    sh = (20, 10)
    #Select arbitrary centers
    c_f = np.asarray(sh)/2
    c_g = c_f + 0.5
    #Compute the identity vector field I(x) = x in R^2
    x_0 = np.asarray(range(sh[0]))
    x_1 = np.asarray(range(sh[1]))
    X = np.ndarray(sh + (2,), dtype = np.float64)
    O = np.ones(sh)
    X[...,0]= x_0[:, None] * O
    X[...,1]= x_1[None, :] * O
    #Compute the gradient fields of F and G
    np.random.seed(5512751)
    grad_F = X - c_f
    grad_G = X - c_g
    Fnoise = np.random.ranf(np.size(grad_F)).reshape(grad_F.shape) * grad_F.max() * 0.1
    Fnoise = Fnoise.astype(floating)
    grad_F += Fnoise
    Gnoise = np.random.ranf(np.size(grad_G)).reshape(grad_G.shape) * grad_G.max() * 0.1
    Gnoise = Gnoise.astype(floating)
    grad_G += Gnoise
    #The squared norm of grad_G
    sq_norm_grad_G = np.sum(grad_G**2,-1)
    #Compute F and G
    F = 0.5*np.sum(grad_F**2,-1)
    G = 0.5*sq_norm_grad_G
    Fnoise = np.random.ranf(np.size(F)).reshape(F.shape) * F.max() * 0.1
    Fnoise = Fnoise.astype(floating)
    F += Fnoise
    Gnoise = np.random.ranf(np.size(G)).reshape(G.shape) * G.max() * 0.1
    Gnoise = Gnoise.astype(floating)
    G += Gnoise
    delta_field = np.array(F - G, dtype = floating)
    sigma_field = np.random.randn(delta_field.size).reshape(delta_field.shape)
    sigma_field = sigma_field.astype(floating)
    #Select some pixels to force sigma_field = infinite
    inf_sigma = np.random.randint(0, 2, sh[0]*sh[1])
    inf_sigma = inf_sigma.reshape(sh)
    sigma_field[inf_sigma == 1] = np.inf
    #Select an initial displacement field
    d = np.random.randn(grad_G.size).reshape(grad_G.shape).astype(floating)
    #d = np.zeros_like(grad_G, dtype=floating)
    lambda_param = 1.5
    #Implementation under test
    iut = ssd.compute_residual_displacement_field_ssd_2d
    #In the first iteration we test the case target=None
    #In the second iteration, target is not None
    target = None
    rtol = 1e-9
    atol = 1e-4
    for it in range(2):
        # Sum of differences with the neighbors
        s = np.zeros_like(d, dtype = np.float64)
        s[:,:-1] += d[:,:-1] - d[:,1:]#right
        s[:,1:] += d[:,1:] - d[:,:-1]#left
        s[:-1,:] += d[:-1,:] - d[1:,:]#down
        s[1:,:] += d[1:,:] - d[:-1,:]#up
        s *= lambda_param
        # Dot product of displacement and gradient
        dp = d[...,0]*grad_G[...,0] + \
             d[...,1]*grad_G[...,1]
        dp = dp.astype(np.float64)
        # Compute expected residual
        expected = None
        if target is None:
            expected = np.zeros_like(grad_G)
            expected[...,0] = delta_field*grad_G[...,0]
            expected[...,1] = delta_field*grad_G[...,1]
        else:
            expected = target.copy().astype(np.float64)
        # Expected residuals when sigma != infinte
        expected[inf_sigma==0,0] -= grad_G[inf_sigma==0, 0] * dp[inf_sigma==0] + \
                                    sigma_field[inf_sigma==0] * s[inf_sigma==0, 0]
        expected[inf_sigma==0,1] -= grad_G[inf_sigma==0, 1] * dp[inf_sigma==0] + \
                                    sigma_field[inf_sigma==0] * s[inf_sigma==0, 1]
        # Expected residuals when sigma == infinte
        expected[inf_sigma==1] = -1.0 * s[inf_sigma==1]
        # Test residual field computation starting with residual = None
        actual = iut(delta_field, sigma_field, grad_G.astype(floating),
                     target, lambda_param, d, None)
        assert_allclose(actual, expected, rtol = rtol, atol = atol)
        actual = np.ndarray(actual.shape, dtype=floating) #destroy previous result
        # Test residual field computation starting with residual is not None
        iut(delta_field, sigma_field, grad_G.astype(floating),
            target, lambda_param, d, actual)
        assert_allclose(actual, expected, rtol = rtol, atol = atol)
        # Set target for next iteration
        target = actual
    # Test Gauss-Seidel step with residual=None and residual=target
    for residual in [None, target]:
        expected = d.copy()
        iterate_residual_field_ssd_2d(delta_field, sigma_field,
            grad_G.astype(floating), residual, lambda_param, expected)
        actual = d.copy()
        ssd.iterate_residual_displacement_field_ssd_2d(delta_field,
            sigma_field, grad_G.astype(floating), residual, lambda_param, actual)
        assert_allclose(actual, expected, rtol = rtol, atol = atol)
def test_compute_residual_displacement_field_ssd_3d():
    """Validate the 3D SSD residual field against a direct numpy computation.

    Builds two noisy quadratic images F and G on a (20, 15, 10) grid, computes
    the expected residual of the linearized SSD system with numpy, and compares
    it with ``ssd.compute_residual_displacement_field_ssd_3d``. Finally checks
    one Gauss-Seidel iteration against the pure-python reference
    ``iterate_residual_field_ssd_3d``.
    """
    #Select arbitrary images' shape (same shape for both images)
    sh = (20, 15, 10)
    #Select arbitrary centers
    c_f = np.asarray(sh)/2
    c_g = c_f + 0.5
    #Compute the identity vector field I(x) = x in R^2
    x_0 = np.asarray(range(sh[0]))
    x_1 = np.asarray(range(sh[1]))
    x_2 = np.asarray(range(sh[2]))
    X = np.ndarray(sh + (3,), dtype = np.float64)
    O = np.ones(sh)
    X[...,0]= x_0[:, None, None] * O
    X[...,1]= x_1[None, :, None] * O
    X[...,2]= x_2[None, None, :] * O
    #Compute the gradient fields of F and G
    # Fixed seed so the added noise (and hence the test) is deterministic
    np.random.seed(9223102)
    grad_F = X - c_f
    grad_G = X - c_g
    Fnoise = np.random.ranf(np.size(grad_F)).reshape(grad_F.shape) * grad_F.max() * 0.1
    Fnoise = Fnoise.astype(floating)
    grad_F += Fnoise
    Gnoise = np.random.ranf(np.size(grad_G)).reshape(grad_G.shape) * grad_G.max() * 0.1
    Gnoise = Gnoise.astype(floating)
    grad_G += Gnoise
    #The squared norm of grad_G
    sq_norm_grad_G = np.sum(grad_G**2,-1)
    #Compute F and G
    F = 0.5*np.sum(grad_F**2,-1)
    G = 0.5*sq_norm_grad_G
    Fnoise = np.random.ranf(np.size(F)).reshape(F.shape) * F.max() * 0.1
    Fnoise = Fnoise.astype(floating)
    F += Fnoise
    Gnoise = np.random.ranf(np.size(G)).reshape(G.shape) * G.max() * 0.1
    Gnoise = Gnoise.astype(floating)
    G += Gnoise
    delta_field = np.array(F - G, dtype = floating)
    sigma_field = np.random.randn(delta_field.size).reshape(delta_field.shape)
    sigma_field = sigma_field.astype(floating)
    #Select some pixels to force sigma_field = infinite
    inf_sigma = np.random.randint(0, 2, sh[0]*sh[1]*sh[2])
    inf_sigma = inf_sigma.reshape(sh)
    sigma_field[inf_sigma == 1] = np.inf
    #Select an initial displacement field
    d = np.random.randn(grad_G.size).reshape(grad_G.shape).astype(floating)
    #d = np.zeros_like(grad_G, dtype=floating)
    lambda_param = 1.5
    #Implementation under test
    iut = ssd.compute_residual_displacement_field_ssd_3d
    #In the first iteration we test the case target=None
    #In the second iteration, target is not None
    target = None
    rtol = 1e-9
    atol = 1e-4
    for it in range(2):
        # Sum of differences with the neighbors (6-neighborhood in 3D)
        s = np.zeros_like(d, dtype = np.float64)
        s[:,:,:-1] += d[:,:,:-1] - d[:,:,1:]#right
        s[:,:,1:] += d[:,:,1:] - d[:,:,:-1]#left
        s[:,:-1,:] += d[:,:-1,:] - d[:,1:,:]#down
        s[:,1:,:] += d[:,1:,:] - d[:,:-1,:]#up
        s[:-1,:,:] += d[:-1,:,:] - d[1:,:,:]#below
        s[1:,:,:] += d[1:,:,:] - d[:-1,:,:]#above
        s *= lambda_param
        # Dot product of displacement and gradient
        dp = d[...,0]*grad_G[...,0] + \
             d[...,1]*grad_G[...,1] + \
             d[...,2]*grad_G[...,2]
        # Compute expected residual
        expected = None
        if target is None:
            expected = np.zeros_like(grad_G)
            for i in range(3):
                expected[...,i] = delta_field*grad_G[...,i]
        else:
            expected = target.copy().astype(np.float64)
        # Expected residuals when sigma != infinte
        for i in range(3):
            expected[inf_sigma==0,i] -= grad_G[inf_sigma==0, i] * dp[inf_sigma==0] + \
                                        sigma_field[inf_sigma==0] * s[inf_sigma==0, i]
        # Expected residuals when sigma == infinte
        expected[inf_sigma==1] = -1.0 * s[inf_sigma==1]
        # Test residual field computation starting with residual = None
        actual = iut(delta_field, sigma_field, grad_G.astype(floating),
                     target, lambda_param, d, None)
        assert_allclose(actual, expected, rtol = rtol, atol = atol)
        actual = np.ndarray(actual.shape, dtype=floating) #destroy previous result
        # Test residual field computation starting with residual is not None
        iut(delta_field, sigma_field, grad_G.astype(floating),
            target, lambda_param, d, actual)
        assert_allclose(actual, expected, rtol = rtol, atol = atol)
        # Set target for next iteration
        target = actual
    # Test Gauss-Seidel step with residual=None and residual=target
    for residual in [None, target]:
        expected = d.copy()
        iterate_residual_field_ssd_3d(delta_field, sigma_field,
            grad_G.astype(floating), residual, lambda_param, expected)
        actual = d.copy()
        ssd.iterate_residual_displacement_field_ssd_3d(delta_field,
            sigma_field, grad_G.astype(floating), residual, lambda_param, actual)
        # the numpy linear solver may differ from our custom implementation
        # we need to increase the tolerance a bit
        assert_allclose(actual, expected, rtol = rtol, atol = atol*5)
def test_solve_2d_symmetric_positive_definite():
    """Compare the 2x2 SPD solver against numpy.linalg.solve.

    Matrices are given in compact form [a, b, c] representing
    [[a, b], [b, c]].
    """
    # Arbitrary right-hand sides spanning several orders of magnitude
    rhs_list = [
        np.array([1.1, 2.2]),
        np.array([1e-2, 3e-3]),
        np.array([1e2, 1e3]),
        np.array([1e-5, 1e5]),
    ]
    # Arbitrary SPD matrices: identity, small determinant, large determinant
    compact_matrices = [
        np.array([1.0, 0.0, 1.0]),
        np.array([1e-3, 1e-4, 1e-3]),
        np.array([1e6, 1e4, 1e6]),
    ]
    actual = np.zeros(2, dtype=np.float64)
    for compact in compact_matrices:
        full = np.array([[compact[0], compact[1]], [compact[1], compact[2]]])
        det = np.linalg.det(full)
        for rhs in rhs_list:
            expected = np.linalg.solve(full, rhs)
            ssd.solve_2d_symmetric_positive_definite(compact, rhs, det, actual)
            assert_allclose(expected, actual, rtol=1e-9, atol=1e-9)
def test_solve_3d_symmetric_positive_definite():
    """Compare the 3x3 solver for matrices of the form g*g^T + tau*I
    against numpy.linalg.solve; tau == 0 must be reported as singular."""
    # Arbitrary right-hand sides spanning several orders of magnitude
    rhs_list = [
        np.array([1.1, 2.2, 3.3]),
        np.array([1e-2, 3e-3, 2e-2]),
        np.array([1e2, 1e3, 5e-2]),
        np.array([1e-5, 1e5, 1.0]),
    ]
    # Arbitrary regularization weights
    tau_values = [0.0, 1.0, 1e-4, 1e5]
    # Arbitrary rank-one generators: zero, canonical basis, and others
    generators = [
        np.array([0.0, 0.0, 0.0]),
        np.array([1.0, 0.0, 0.0]),
        np.array([0.0, 1.0, 0.0]),
        np.array([0.0, 0.0, 1.0]),
        np.array([1.0, 0.5, 0.0]),
        np.array([0.0, 0.2, 0.1]),
        np.array([0.3, 0.0, 0.9]),
    ]
    actual = np.zeros(3)
    for gen in generators:
        outer = gen[:, None] * gen[None, :]
        for tau in tau_values:
            full = outer + tau * np.eye(3)
            for rhs in rhs_list:
                is_singular = ssd.solve_3d_symmetric_positive_definite(gen, rhs, tau, actual)
                if tau == 0.0:
                    # g*g^T is at most rank one, so without regularization
                    # the system is singular
                    assert_equal(is_singular, 1)
                else:
                    expected = np.linalg.solve(full, rhs)
                    assert_allclose(expected, actual, rtol=1e-9, atol=1e-9)
def test_compute_energy_ssd_2d():
    """Compare compute_energy_ssd_2d against a direct numpy evaluation."""
    shape = (32, 32)
    # Arbitrary centers for the two quadratic images
    center_f = np.asarray(shape) / 2
    center_g = center_f + 0.5
    # Identity vector field I(x) = x in R^2
    rows = np.asarray(range(shape[0]))
    cols = np.asarray(range(shape[1]))
    identity = np.ndarray(shape + (2,), dtype=np.float64)
    ones = np.ones(shape)
    identity[..., 0] = rows[:, None] * ones
    identity[..., 1] = cols[None, :] * ones
    # Gradients of F(x) = 0.5*||x - c_f||^2 and G(x) = 0.5*||x - c_g||^2
    grad_F = identity - center_f
    grad_G = identity - center_g
    F = 0.5 * np.sum(grad_F ** 2, -1)
    G = 0.5 * np.sum(grad_G ** 2, -1)
    # Note: this should include the energy corresponding to the
    # regularization term, but it is discarded in ANTS (they just
    # consider the data term, which is not the objective function
    # being optimized). This test case should be updated after
    # further investigation
    expected = ((F - G) ** 2).sum()
    actual = ssd.compute_energy_ssd_2d(np.array(F - G, dtype=floating))
    assert_almost_equal(expected, actual)
def test_compute_energy_ssd_3d():
    """Compare compute_energy_ssd_3d against a direct numpy evaluation."""
    shape = (32, 32, 32)
    # Arbitrary centers for the two quadratic images
    center_f = np.asarray(shape) / 2
    center_g = center_f + 0.5
    # Identity vector field I(x) = x in R^3
    axis_0 = np.asarray(range(shape[0]))
    axis_1 = np.asarray(range(shape[1]))
    axis_2 = np.asarray(range(shape[2]))
    identity = np.ndarray(shape + (3,), dtype=np.float64)
    ones = np.ones(shape)
    identity[..., 0] = axis_0[:, None, None] * ones
    identity[..., 1] = axis_1[None, :, None] * ones
    identity[..., 2] = axis_2[None, None, :] * ones
    # Gradients of F(x) = 0.5*||x - c_f||^2 and G(x) = 0.5*||x - c_g||^2
    grad_F = identity - center_f
    grad_G = identity - center_g
    F = 0.5 * np.sum(grad_F ** 2, -1)
    G = 0.5 * np.sum(grad_G ** 2, -1)
    # Note: this should include the energy corresponding to the
    # regularization term, but it is discarded in ANTS (they just
    # consider the data term, which is not the objective function
    # being optimized). This test case should be updated after
    # further investigating
    expected = ((F - G) ** 2).sum()
    actual = ssd.compute_energy_ssd_3d(np.array(F - G, dtype=floating))
    assert_almost_equal(expected, actual)
def test_compute_ssd_demons_step_2d():
    r"""
    Compares the output of the demons step in 2d against an analytical
    step. The fixed image is given by $F(x) = \frac{1}{2}||x - c_f||^2$, the
    moving image is given by $G(x) = \frac{1}{2}||x - c_g||^2$,
    $x, c_f, c_g \in R^{2}$
    References
    ----------
    [Vercauteren09] Vercauteren, T., Pennec, X., Perchant, A., & Ayache, N.
                    (2009). Diffeomorphic demons: efficient non-parametric
                    image registration. NeuroImage, 45(1 Suppl), S61-72.
                    doi:10.1016/j.neuroimage.2008.10.040
    """
    #Select arbitrary images' shape (same shape for both images)
    sh = (20, 10)
    #Select arbitrary centers
    c_f = np.asarray(sh)/2
    c_g = c_f + 0.5
    #Compute the identity vector field I(x) = x in R^2
    x_0 = np.asarray(range(sh[0]))
    x_1 = np.asarray(range(sh[1]))
    X = np.ndarray(sh + (2,), dtype = np.float64)
    O = np.ones(sh)
    X[...,0]= x_0[:, None] * O
    X[...,1]= x_1[None, :] * O
    #Compute the gradient fields of F and G
    # Fixed seed so the added noise (and hence the test) is deterministic
    np.random.seed(1137271)
    grad_F = X - c_f
    grad_G = X - c_g
    Fnoise = np.random.ranf(np.size(grad_F)).reshape(grad_F.shape) * grad_F.max() * 0.1
    Fnoise = Fnoise.astype(floating)
    grad_F += Fnoise
    Gnoise = np.random.ranf(np.size(grad_G)).reshape(grad_G.shape) * grad_G.max() * 0.1
    Gnoise = Gnoise.astype(floating)
    grad_G += Gnoise
    #The squared norm of grad_G to be used later
    sq_norm_grad_G = np.sum(grad_G**2,-1)
    #Compute F and G
    F = 0.5*np.sum(grad_F**2,-1)
    G = 0.5*sq_norm_grad_G
    Fnoise = np.random.ranf(np.size(F)).reshape(F.shape) * F.max() * 0.1
    Fnoise = Fnoise.astype(floating)
    F += Fnoise
    Gnoise = np.random.ranf(np.size(G)).reshape(G.shape) * G.max() * 0.1
    Gnoise = Gnoise.astype(floating)
    G += Gnoise
    # Note the sign convention: delta_field = G - F (moving minus static)
    delta_field = np.array(G - F, dtype = floating)
    #Select some pixels to force gradient = 0 and F=G
    random_labels = np.random.randint(0, 2, sh[0]*sh[1])
    random_labels = random_labels.reshape(sh)
    F[random_labels == 0] = G[random_labels == 0]
    delta_field[random_labels == 0] = 0
    grad_G[random_labels == 0, ...] = 0
    sq_norm_grad_G[random_labels == 0, ...] = 0
    #Set arbitrary values for $\sigma_i$ (eq. 4 in [Vercauteren09])
    #The original Demons algorithm used simply |F(x) - G(x)| as an
    #estimator, so let's use it as well
    sigma_i_sq = (F - G)**2
    #Now select arbitrary parameters for $\sigma_x$ (eq 4 in [Vercauteren09])
    for sigma_x_sq in [0.01, 1.5, 4.2]:
        #Directly compute the demons step according to eq. 4 in [Vercauteren09]
        num = (sigma_x_sq * (F - G))[random_labels == 1]
        den = (sigma_x_sq * sq_norm_grad_G + sigma_i_sq)[random_labels == 1]
        expected = (-1 * np.array(grad_G)) #This is $J^{P}$ in eq. 4 [Vercauteren09]
        expected[random_labels == 1, 0] *= num / den
        expected[random_labels == 1, 1] *= num / den
        # Zero-gradient pixels must yield a zero step
        expected[random_labels == 0, ...] = 0
        #Now compute it using the implementation under test
        actual = np.empty_like(expected, dtype=floating)
        ssd.compute_ssd_demons_step_2d(delta_field,
                                       np.array(grad_G, dtype=floating),
                                       sigma_x_sq,
                                       actual)
        assert_array_almost_equal(actual, expected)
def test_compute_ssd_demons_step_3d():
    r"""
    Compares the output of the demons step in 3d against an analytical
    step. The fixed image is given by $F(x) = \frac{1}{2}||x - c_f||^2$, the
    moving image is given by $G(x) = \frac{1}{2}||x - c_g||^2$,
    $x, c_f, c_g \in R^{3}$
    References
    ----------
    [Vercauteren09] Vercauteren, T., Pennec, X., Perchant, A., & Ayache, N.
                    (2009). Diffeomorphic demons: efficient non-parametric
                    image registration. NeuroImage, 45(1 Suppl), S61-72.
                    doi:10.1016/j.neuroimage.2008.10.040
    """
    #Select arbitrary images' shape (same shape for both images)
    sh = (20, 15, 10)
    #Select arbitrary centers
    c_f = np.asarray(sh)/2
    c_g = c_f + 0.5
    #Compute the identity vector field I(x) = x in R^2
    x_0 = np.asarray(range(sh[0]))
    x_1 = np.asarray(range(sh[1]))
    x_2 = np.asarray(range(sh[2]))
    X = np.ndarray(sh + (3,), dtype = np.float64)
    O = np.ones(sh)
    X[...,0]= x_0[:, None, None] * O
    X[...,1]= x_1[None, :, None] * O
    X[...,2]= x_2[None, None, :] * O
    #Compute the gradient fields of F and G
    # Fixed seed so the added noise (and hence the test) is deterministic
    np.random.seed(1137271)
    grad_F = X - c_f
    grad_G = X - c_g
    Fnoise = np.random.ranf(np.size(grad_F)).reshape(grad_F.shape) * grad_F.max() * 0.1
    Fnoise = Fnoise.astype(floating)
    grad_F += Fnoise
    Gnoise = np.random.ranf(np.size(grad_G)).reshape(grad_G.shape) * grad_G.max() * 0.1
    Gnoise = Gnoise.astype(floating)
    grad_G += Gnoise
    #The squared norm of grad_G to be used later
    sq_norm_grad_G = np.sum(grad_G**2,-1)
    #Compute F and G
    F = 0.5*np.sum(grad_F**2,-1)
    G = 0.5*sq_norm_grad_G
    Fnoise = np.random.ranf(np.size(F)).reshape(F.shape) * F.max() * 0.1
    Fnoise = Fnoise.astype(floating)
    F += Fnoise
    Gnoise = np.random.ranf(np.size(G)).reshape(G.shape) * G.max() * 0.1
    Gnoise = Gnoise.astype(floating)
    G += Gnoise
    # Note the sign convention: delta_field = G - F (moving minus static)
    delta_field = np.array(G - F, dtype = floating)
    #Select some pixels to force gradient = 0 and F=G
    random_labels = np.random.randint(0, 2, sh[0]*sh[1]*sh[2])
    random_labels = random_labels.reshape(sh)
    F[random_labels == 0] = G[random_labels == 0]
    delta_field[random_labels == 0] = 0
    grad_G[random_labels == 0, ...] = 0
    sq_norm_grad_G[random_labels == 0, ...] = 0
    #Set arbitrary values for $\sigma_i$ (eq. 4 in [Vercauteren09])
    #The original Demons algorithm used simply |F(x) - G(x)| as an
    #estimator, so let's use it as well
    sigma_i_sq = (F - G)**2
    #Now select arbitrary parameters for $\sigma_x$ (eq 4 in [Vercauteren09])
    for sigma_x_sq in [0.01, 1.5, 4.2]:
        #Directly compute the demons step according to eq. 4 in [Vercauteren09]
        num = (sigma_x_sq * (F - G))[random_labels == 1]
        den = (sigma_x_sq * sq_norm_grad_G + sigma_i_sq)[random_labels == 1]
        expected = (-1 * np.array(grad_G)) #This is $J^{P}$ in eq. 4 [Vercauteren09]
        expected[random_labels == 1, 0] *= num / den
        expected[random_labels == 1, 1] *= num / den
        expected[random_labels == 1, 2] *= num / den
        # Zero-gradient pixels must yield a zero step
        expected[random_labels == 0, ...] = 0
        #Now compute it using the implementation under test
        actual = np.empty_like(expected, dtype=floating)
        ssd.compute_ssd_demons_step_3d(delta_field,
                                       np.array(grad_G, dtype = floating),
                                       sigma_x_sq,
                                       actual)
        assert_array_almost_equal(actual, expected)
if __name__ == '__main__':
    # Manual test runner: run every test defined in this module.
    # Previously test_solve_2d/3d_symmetric_positive_definite were defined
    # but never invoked here; they are now included for consistency.
    test_compute_residual_displacement_field_ssd_2d()
    test_compute_residual_displacement_field_ssd_3d()
    test_solve_2d_symmetric_positive_definite()
    test_solve_3d_symmetric_positive_definite()
    test_compute_energy_ssd_2d()
    test_compute_energy_ssd_3d()
    test_compute_ssd_demons_step_2d()
    test_compute_ssd_demons_step_3d()
| bsd-3-clause |
zentralopensource/zentral | zentral/conf/config.py | 1 | 9328 | import base64
import itertools
import json
import logging
import os
import re
import time
from .buckets import get_bucket_client
from .params import get_param_client
from .secrets import get_secret_client
logger = logging.getLogger("zentral.conf.config")
class Proxy:
    """Marker base class for lazily resolved configuration values.

    BaseConfig._to_python checks isinstance(value, Proxy) to decide whether
    a stored value must be resolved (via .get()) on access.
    """
    pass
class EnvProxy(Proxy):
    """Proxy resolving to the current value of an environment variable."""
    def __init__(self, name):
        self._var_name = name
    def get(self):
        # Raises KeyError if the variable is not set, like the original.
        return os.environ[self._var_name]
class ResolverMethodProxy(Proxy):
    """Proxy that delegates resolution to one of the Resolver methods.

    Supported proxy types: file, param, secret, bucket_file.
    """
    def __init__(self, resolver, proxy_type, key):
        method_map = {
            "file": resolver.get_file_content,
            "param": resolver.get_parameter_value,
            "secret": resolver.get_secret_value,
            "bucket_file": resolver.get_bucket_file,
        }
        if proxy_type not in method_map:
            raise ValueError("Unknown proxy type %s", proxy_type)
        self._method = method_map[proxy_type]
        self._key = key
    def get(self):
        return self._method(self._key)
class JSONDecodeFilter(Proxy):
    """Proxy filter that JSON-decodes the value produced by another proxy."""
    def __init__(self, child_proxy):
        self._upstream = child_proxy
    def get(self):
        raw = self._upstream.get()
        return json.loads(raw)
class Base64DecodeFilter(Proxy):
    """Proxy filter that base64-decodes the value produced by another proxy."""
    def __init__(self, child_proxy):
        self._upstream = child_proxy
    def get(self):
        encoded = self._upstream.get()
        return base64.b64decode(encoded)
class ElementFilter(Proxy):
    """Proxy filter that extracts one element from the resolved value.

    The key is coerced to int when possible, so the filter works on both
    sequences (integer index) and mappings (string key).
    """
    def __init__(self, key, child_proxy):
        self._upstream = child_proxy
        try:
            self._key = int(key)
        except ValueError:
            self._key = key
    def get(self):
        container = self._upstream.get()
        return container[self._key]
class Resolver:
    """Resolves external config values (files, secrets, params, bucket files)
    with an in-memory cache.

    Cache entries are (expiry, value) tuples; expiry is None for entries that
    never expire (files, bucket files) and a timestamp for TTL-bound entries
    (secrets and parameters, 600 s). Backend clients are created lazily on
    first use.
    """
    def __init__(self):
        self._cache = {}
        self._bucket_client = None
        self._param_client = None
        self._secret_client = None
    def _get_or_create_cached_value(self, key, getter, ttl=None):
        """Return the cached value for key, calling getter() on miss/expiry."""
        # happy path
        try:
            expiry, value = self._cache[key]
        except KeyError:
            pass
        else:
            if expiry is None or time.time() < expiry:
                logger.debug("Key %s from cache", key)
                return value
            logger.debug("Cache for key %s has expired", key)
        # get value
        value = getter()
        if ttl:
            expiry = time.time() + ttl
        else:
            expiry = None
        self._cache[key] = (expiry, value)
        logger.debug("Set cache for key %s", key)
        return value
    def get_file_content(self, filepath):
        """Return the text content of a local file (cached forever)."""
        cache_key = ("FILE", filepath)
        def getter():
            with open(filepath, "r") as f:
                return f.read()
        return self._get_or_create_cached_value(cache_key, getter)
    def get_secret_value(self, name):
        """Return a secret from the secret backend (cached 600 s)."""
        cache_key = ("SECRET", name)
        if not self._secret_client:
            self._secret_client = get_secret_client()
        def getter():
            return self._secret_client.get(name)
        return self._get_or_create_cached_value(cache_key, getter, ttl=600)
    def get_bucket_file(self, key):
        """Download a bucket object to a temp file, return its path (cached)."""
        cache_key = ("BUCKET_FILE", key)
        if not self._bucket_client:
            self._bucket_client = get_bucket_client()
        def getter():
            return self._bucket_client.download_to_tmpfile(key)
        return self._get_or_create_cached_value(cache_key, getter)
    def get_parameter_value(self, key):
        """Return a parameter from the param backend (cached 600 s)."""
        cache_key = ("PARAM", key)
        if not self._param_client:
            self._param_client = get_param_client()
        def getter():
            return self._param_client.get(key)
        return self._get_or_create_cached_value(cache_key, getter, ttl=600)
class BaseConfig:
    """Common machinery for lazily-resolved config collections.

    Subclasses (ConfigList, ConfigDict) set self._collection. String values
    matching PROXY_VAR_RE — e.g. "{{ secret:NAME | jsondecode }}" — are
    stored as Proxy objects and resolved on access via _to_python.
    """
    # Matches "{{ <type>:<key> [| filter]... }}" where <type> is one of
    # bucket_file/env/file/param/secret and filters are
    # jsondecode/base64decode/element:<key>.
    PROXY_VAR_RE = re.compile(
        r"^\{\{\s*"
        r"(?P<type>bucket_file|env|file|param|secret)\:(?P<key>[^\}\|]+)"
        r"(?P<filters>(\s*\|\s*(jsondecode|base64decode|element:[a-zA-Z_\-/0-9]+))*)"
        r"\s*\}\}$"
    )
    # Maps config paths (tuples) to specialized ConfigDict/ConfigList
    # subclasses used when instantiating nested collections.
    custom_classes = {}
    def __init__(self, path=None, resolver=None):
        self._path = path or ()
        if not resolver:
            resolver = Resolver()
        self._resolver = resolver
    def _make_proxy(self, key, match):
        """Build the Proxy chain (source + filters) for a matched template."""
        proxy_type = match.group("type")
        # NOTE: intentionally shadows the `key` parameter with the key
        # extracted from the template string.
        key = match.group("key").strip()
        if proxy_type == "env":
            proxy = EnvProxy(key)
        else:
            proxy = ResolverMethodProxy(self._resolver, proxy_type, key)
        filters = [f for f in [rf.strip() for rf in match.group("filters").split("|")] if f]
        for filter_name in filters:
            if filter_name == "jsondecode":
                proxy = JSONDecodeFilter(proxy)
            elif filter_name == "base64decode":
                proxy = Base64DecodeFilter(proxy)
            elif filter_name.startswith("element:"):
                key = filter_name.split(":", 1)[-1]
                proxy = ElementFilter(key, proxy)
            else:
                raise ValueError("Unknown filter %s", filter_name)
        return proxy
    def _from_python(self, key, value):
        """Convert a raw value into its stored form (nested config or proxy)."""
        new_path = self._path + (key,)
        # NOTE(review): nested collections are built without passing
        # self._resolver, so each child creates its own Resolver/cache —
        # confirm whether that is intentional.
        if isinstance(value, dict):
            value = self.custom_classes.get(new_path, ConfigDict)(value, new_path)
        elif isinstance(value, list):
            value = self.custom_classes.get(new_path, ConfigList)(value, new_path)
        elif isinstance(value, str):
            match = self.PROXY_VAR_RE.match(value)
            if match:
                value = self._make_proxy(key, match)
        return value
    def _to_python(self, value):
        """Resolve a stored value: proxies are evaluated, others pass through."""
        if isinstance(value, Proxy):
            return value.get()
        else:
            return value
    def __len__(self):
        return len(self._collection)
    def __delitem__(self, key):
        del self._collection[key]
    def __setitem__(self, key, value):
        self._collection[key] = self._from_python(key, value)
    def pop(self, key, default=None):
        value = self._collection.pop(key, default)
        if isinstance(value, Proxy):
            value = value.get()
        return value
class ConfigList(BaseConfig):
    """Sequence-like configuration node; items are resolved lazily on access."""
    def __init__(self, config_l, path=None, resolver=None):
        super().__init__(path=path, resolver=resolver)
        self._collection = []
        # The list index (as a string) becomes the child's path component.
        for key, value in enumerate(config_l):
            self._collection.append(self._from_python(str(key), value))
    def __getitem__(self, key):
        value = self._collection[key]
        if isinstance(key, slice):
            # Render "start:stop:step" (empty for None) for the debug log.
            slice_repr = ":".join(str("" if i is None else i) for i in (key.start, key.stop, key.step))
            logger.debug("Get /%s[%s] config key", "/".join(self._path), slice_repr)
            return [self._to_python(item) for item in value]
        else:
            logger.debug("Get /%s[%s] config key", "/".join(self._path), key)
            return self._to_python(value)
    def __iter__(self):
        for element in self._collection:
            yield self._to_python(element)
    def serialize(self):
        """Return a plain list with proxies resolved and children serialized."""
        s = []
        for v in self:
            if isinstance(v, BaseConfig):
                v = v.serialize()
            s.append(v)
        return s
class ConfigDict(BaseConfig):
    """Mapping-like configuration node.

    Values are stored in their proxied form (see BaseConfig._from_python)
    and resolved lazily on access (BaseConfig._to_python).
    """
    def __init__(self, config_d, path=None, resolver=None):
        super().__init__(path=path, resolver=resolver)
        self._collection = {}
        for key, value in config_d.items():
            self._collection[key] = self._from_python(key, value)
    def __getitem__(self, key):
        logger.debug("Get /%s config key", "/".join(self._path + (key,)))
        value = self._collection[key]
        return self._to_python(value)
    def get(self, key, default=None):
        try:
            value = self[key]
        except KeyError:
            value = self._to_python(default)
        return value
    def __iter__(self):
        yield from self._collection
    def keys(self):
        return self._collection.keys()
    def values(self):
        for value in self._collection.values():
            yield self._to_python(value)
    def items(self):
        for key, value in self._collection.items():
            yield key, self._to_python(value)
    def clear(self):
        return self._collection.clear()
    def setdefault(self, key, default=None):
        return self._collection.setdefault(key, self._from_python(key, default))
    def pop(self, key, default=None):
        value = self._collection.pop(key, default)
        return self._to_python(value)
    def popitem(self):
        key, value = self._collection.popitem()
        return key, self._to_python(value)
    def copy(self):
        return ConfigDict(self._collection.copy(), path=self._path, resolver=self._resolver)
    def update(self, *args, **kwargs):
        """Mirror dict.update(): accept mappings or iterables of (key, value)
        pairs as positional arguments, plus keyword arguments."""
        chain = []
        for arg in args:
            if isinstance(arg, dict):
                iterator = arg.items()
            else:
                iterator = arg
            chain = itertools.chain(chain, iterator)
        if kwargs:
            chain = itertools.chain(chain, kwargs.items())
        # BUG FIX: this loop previously iterated `iterator` (only the last
        # positional argument) instead of the accumulated `chain`, silently
        # dropping earlier args and all kwargs, and raising NameError when
        # called with keyword arguments only.
        for key, value in chain:
            self._collection[key] = self._from_python(key, value)
    def serialize(self):
        """Return a plain dict with proxies resolved and children serialized."""
        s = {}
        for k, v in self.items():
            if isinstance(v, BaseConfig):
                v = v.serialize()
            s[k] = v
        return s
| apache-2.0 |
andmos/ansible | lib/ansible/modules/network/f5/bigip_dns_nameserver.py | 14 | 14792 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_dns_nameserver
short_description: Manage LTM DNS nameservers on a BIG-IP
description:
- Manages LTM DNS nameservers on a BIG-IP. These nameservers form part of what is
known as DNS Express on a BIG-IP. This module does not configure GTM related
functionality, nor does it configure system-level name servers that affect the
base system's ability to resolve DNS names.
version_added: 2.8
options:
name:
description:
- Specifies the name of the nameserver.
required: True
address:
description:
- Specifies the IP address on which the DNS nameserver (client) or back-end DNS
authoritative server (DNS Express server) listens for DNS messages.
- When creating a new nameserver, if this value is not specified, the default
is C(127.0.0.1).
service_port:
description:
- Specifies the service port on which the DNS nameserver (client) or back-end DNS
authoritative server (DNS Express server) listens for DNS messages.
- When creating a new nameserver, if this value is not specified, the default
is C(53).
route_domain:
description:
- Specifies the local route domain that the DNS nameserver (client) or back-end
DNS authoritative server (DNS Express server) uses for outbound traffic.
- When creating a new nameserver, if this value is not specified, the default
is C(0).
tsig_key:
description:
- Specifies the TSIG key the system uses to communicate with this DNS nameserver
(client) or back-end DNS authoritative server (DNS Express server) for AXFR zone
transfers.
- If the nameserver is a client, then the system uses this TSIG key to verify the
request and sign the response.
- If this nameserver is a DNS Express server, then this TSIG key must match the
TSIG key for the zone on the back-end DNS authoritative server.
state:
description:
- When C(present), ensures that the resource exists.
- When C(absent), ensures the resource is removed.
default: present
choices:
- present
- absent
partition:
description:
- Device partition to manage resources on.
default: Common
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Create a nameserver
bigip_dns_nameserver:
name: foo
address: 10.10.10.10
service_port: 53
state: present
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
address:
description: Address which the nameserver listens for DNS messages.
returned: changed
type: str
sample: 127.0.0.1
service_port:
description: Service port on which the nameserver listens for DNS messages.
returned: changed
type: int
sample: 53
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
from library.module_utils.network.f5.common import transform_name
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
from ansible.module_utils.network.f5.common import transform_name
class Parameters(AnsibleF5Parameters):
    """Parameter definitions shared by the module- and API-side views."""
    # Maps iControl REST attribute names to module argument names.
    api_map = {
        'routeDomain': 'route_domain',
        'port': 'service_port',
        'tsigKey': 'tsig_key'
    }
    # Attributes sent to the device API on create/update.
    api_attributes = [
        'address',
        'routeDomain',
        'port',
        'tsigKey'
    ]
    # Attributes reported back to Ansible in the module result.
    returnables = [
        'address',
        'service_port',
        'route_domain',
        'tsig_key',
    ]
    # Attributes compared between want/have to detect changes.
    updatables = [
        'address',
        'service_port',
        'route_domain',
        'tsig_key',
    ]
class ApiParameters(Parameters):
    """Parameters as read back from the device API (no transformation needed)."""
    pass
class ModuleParameters(Parameters):
    """Parameters as supplied by the user; normalizes names and types."""
    @property
    def tsig_key(self):
        # Preserve None/'' as-is ('' is used to signal key removal in the
        # Difference class); otherwise fully-qualify with the partition.
        if self._values['tsig_key'] in [None, '']:
            return self._values['tsig_key']
        return fq_name(self.partition, self._values['tsig_key'])
    @property
    def route_domain(self):
        # Fully-qualify the route domain with the partition.
        if self._values['route_domain'] is None:
            return None
        return fq_name(self.partition, self._values['route_domain'])
    @property
    def service_port(self):
        # Coerce to int; non-numeric input is a user error.
        if self._values['service_port'] is None:
            return None
        try:
            return int(self._values['service_port'])
        except ValueError:
            # Reserving the right to add well-known ports
            raise F5ModuleError(
                "The 'service_port' must be in numeric form."
            )
class Changes(Parameters):
    """Holds the computed set of changes for reporting/applying."""
    def to_return(self):
        """Return a dict of the returnable attributes.

        NOTE(review): the broad `except Exception: pass` silently returns a
        partial (possibly empty) result on any attribute error — this mirrors
        the common F5 module boilerplate but can hide bugs.
        """
        result = {}
        try:
            for returnable in self.returnables:
                result[returnable] = getattr(self, returnable)
            result = self._filter_params(result)
        except Exception:
            pass
        return result
class UsableChanges(Changes):
    """Changes in the form sent to the device API."""
    pass
class ReportableChanges(Changes):
    """Changes in the form reported back to the Ansible user."""
    pass
class Difference(object):
    """Computes per-parameter differences between desired (want) and
    current (have) state; returns None when a parameter is unchanged."""
    def __init__(self, want, have=None):
        self.want = want
        self.have = have
    def compare(self, param):
        # Use a dedicated property when one exists for this parameter,
        # otherwise fall back to plain inequality.
        try:
            result = getattr(self, param)
            return result
        except AttributeError:
            return self.__default(param)
    def __default(self, param):
        # Generic comparison: report the wanted value when it differs, or
        # when the current state does not expose the attribute at all.
        attr1 = getattr(self.want, param)
        try:
            attr2 = getattr(self.have, param)
            if attr1 != attr2:
                return attr1
        except AttributeError:
            return attr1
    @property
    def tsig_key(self):
        # '' means "remove the key": treat it as no-op when no key is set.
        if self.want.tsig_key is None:
            return None
        if self.have.tsig_key is None and self.want.tsig_key == '':
            return None
        if self.want.tsig_key != self.have.tsig_key:
            return self.want.tsig_key
class ModuleManager(object):
    """Drives the module workflow: diff desired vs. current state and
    create/update/delete the LTM DNS nameserver over iControl REST."""
    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = kwargs.get('client', None)
        self.want = ModuleParameters(params=self.module.params)
        self.have = ApiParameters()
        self.changes = UsableChanges()
    def _set_changed_options(self):
        # On create: every user-supplied returnable counts as a change.
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = UsableChanges(params=changed)
    def _update_changed_options(self):
        # On update: diff each updatable; returns True when anything differs.
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                if isinstance(change, dict):
                    changed.update(change)
                else:
                    changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False
    def should_update(self):
        result = self._update_changed_options()
        if result:
            return True
        return False
    def exec_module(self):
        """Entry point: apply the desired state and build the result dict."""
        changed = False
        result = dict()
        state = self.want.state
        if state == "present":
            changed = self.present()
        elif state == "absent":
            changed = self.absent()
        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        return result
    def _announce_deprecations(self, result):
        # Forward any collected deprecation warnings to Ansible.
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            self.client.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )
    def present(self):
        if self.exists():
            return self.update()
        else:
            return self.create()
    def exists(self):
        """Return True if the nameserver resource exists on the device."""
        uri = "https://{0}:{1}/mgmt/tm/ltm/dns/nameserver/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError:
            return False
        if resp.status == 404 or 'code' in response and response['code'] == 404:
            return False
        return True
    def update(self):
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        if self.module.check_mode:
            return True
        self.update_on_device()
        return True
    def remove(self):
        if self.module.check_mode:
            return True
        self.remove_from_device()
        if self.exists():
            raise F5ModuleError("Failed to delete the resource.")
        return True
    def create(self):
        # Apply documented defaults before computing the change set.
        if self.want.address is None:
            self.want.update({'address': '127.0.0.1'})
        if self.want.service_port is None:
            self.want.update({'service_port': '53'})
        if self.want.route_domain is None:
            self.want.update({'route_domain': '/Common/0'})
        self._set_changed_options()
        if self.module.check_mode:
            return True
        self.create_on_device()
        return True
    def create_on_device(self):
        """POST the new resource; raise F5ModuleError on API failure."""
        params = self.changes.api_params()
        params['name'] = self.want.name
        params['partition'] = self.want.partition
        uri = "https://{0}:{1}/mgmt/tm/ltm/dns/nameserver/".format(
            self.client.provider['server'],
            self.client.provider['server_port']
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 403]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
    def update_on_device(self):
        """PATCH the changed attributes; raise F5ModuleError on API failure."""
        params = self.changes.api_params()
        uri = "https://{0}:{1}/mgmt/tm/ltm/dns/nameserver/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.patch(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
    def absent(self):
        if self.exists():
            return self.remove()
        return False
    def remove_from_device(self):
        """DELETE the resource; raise F5ModuleError on API failure."""
        uri = "https://{0}:{1}/mgmt/tm/ltm/dns/nameserver/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        response = self.client.api.delete(uri)
        if response.status == 200:
            return True
        raise F5ModuleError(response.content)
    def read_current_from_device(self):
        """GET the current resource state and wrap it in ApiParameters."""
        uri = "https://{0}:{1}/mgmt/tm/ltm/dns/nameserver/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return ApiParameters(params=response)
class ArgumentSpec(object):
    """Builds the AnsibleModule argument specification for this module."""
    def __init__(self):
        self.supports_check_mode = True
        argument_spec = dict(
            name=dict(required=True),
            address=dict(),
            service_port=dict(),
            route_domain=dict(),
            tsig_key=dict(),
            state=dict(
                default='present',
                choices=['present', 'absent']
            ),
            partition=dict(
                default='Common',
                # Allow the partition to come from the environment as well.
                fallback=(env_fallback, ['F5_PARTITION'])
            )
        )
        # Merge the shared F5 connection arguments with the module-specific
        # ones (module-specific entries win on key collision).
        self.argument_spec = {}
        self.argument_spec.update(f5_argument_spec)
        self.argument_spec.update(argument_spec)
def main():
    """Module entry point: build the module, run the manager, exit."""
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
    )
    client = F5RestClient(**module.params)
    try:
        mm = ModuleManager(module=module, client=client)
        results = mm.exec_module()
        # Release any REST auth tokens before exiting, success or failure.
        cleanup_tokens(client)
        exit_json(module, results, client)
    except F5ModuleError as ex:
        cleanup_tokens(client)
        fail_json(module, ex, client)
if __name__ == '__main__':
    main()
runningwolf666/you-get | src/you_get/extractors/kugou.py | 19 | 1978 | #!/usr/bin/env python
__all__ = ['kugou_download']
from ..common import *
from json import loads
from base64 import b64decode
import re
import hashlib
def kugou_download(url, output_dir=".", merge=True, info_only=False):
    """Dispatch a kugou URL to the appropriate downloader.

    5sing.kugou.com pages embed a base64-encoded "ticket" JSON that holds the
    media URL and song name; any other URL is treated as a www.kugou.com
    playlist/album page.
    """
    # Idiomatic membership test instead of str.find(...) != -1.
    if "5sing" in url.lower():
        # for 5sing.kugou.com
        html = get_html(url)
        ticket = r1(r'"ticket":\s*"(.*)"', html)
        j = loads(str(b64decode(ticket), encoding="utf-8"))
        url = j['file']
        title = j['songName']
        songtype, ext, size = url_info(url)
        print_info(site_info, title, songtype, size)
        if not info_only:
            download_urls([url], title, ext, size, output_dir, merge=merge)
    else:
        # for the www.kugou.com/
        return kugou_download_playlist(url, output_dir=output_dir, merge=merge, info_only=info_only)
        # raise NotImplementedError(url)
def kugou_download_by_hash(title, hash_val, output_dir='.', merge=True, info_only=False):
    """Resolve a kugou file hash to its media URL via the tracker and download it."""
    # Sample album page: http://www.kugou.com/yy/album/single/536957.html
    # Tracker key is md5(hash + "kgcloud") (constant recovered from the swf);
    # cmd=4 requests mp3, cmd=3 would request m4a.
    tracker_key = hashlib.new('md5', (hash_val + "kgcloud").encode("utf-8")).hexdigest()
    html = get_html("http://trackercdn.kugou.com/i/?pid=6&key=%s&acceptMp3=1&cmd=4&hash=%s" % (tracker_key, hash_val))
    info = loads(html)
    url = info['url']
    songtype, ext, size = url_info(url)
    print_info(site_info, title, songtype, size)
    if not info_only:
        download_urls([url], title, ext, size, output_dir, merge=merge)
def kugou_download_playlist(url, output_dir='.', merge=True, info_only=False):
    """Download every track listed on a kugou album/playlist page."""
    html = get_html(url)
    # Raw string literal: the original non-raw pattern relied on '\w' and '\|'
    # not being recognized escapes, which raises SyntaxWarning (and eventually
    # an error) on modern Python.
    pattern = re.compile(r'title="(.*?)".* data="(\w*)\|.*?"')
    pairs = pattern.findall(html)
    for title, hash_val in pairs:
        kugou_download_by_hash(title, hash_val, output_dir, merge, info_only)
# Standard you-get extractor module hooks consumed by the common dispatcher.
site_info = "kugou.com"
download = kugou_download
# download_playlist = playlist_not_supported("kugou")
download_playlist=kugou_download_playlist
| mit |
mdsafwan/Deal-My-Stuff | Lib/site-packages/django/template/response.py | 45 | 8917 | import warnings
from django.http import HttpResponse
from django.template import Context, RequestContext, Template, loader
from django.template.backends.django import Template as BackendTemplate
from django.template.context import _current_app_undefined
from django.utils import six
from django.utils.deprecation import RemovedInDjango110Warning
class ContentNotRenderedError(Exception):
    """Raised when response content is used before render() has been called."""
class SimpleTemplateResponse(HttpResponse):
    """An HttpResponse that lazily renders a template with a context.

    The body is only produced when render() is called (or content is first
    assigned), which lets middleware replace template_name or context_data
    before rendering happens.
    """
    # Attributes that describe *how* to render rather than the rendered
    # result; __getstate__ strips them so pickles carry only final content.
    rendering_attrs = ['template_name', 'context_data', '_post_render_callbacks']
    def __init__(self, template, context=None, content_type=None, status=None,
                 charset=None, using=None):
        if isinstance(template, Template):
            warnings.warn(
                "{}'s template argument cannot be a django.template.Template "
                "anymore. It may be a backend-specific template like those "
                "created by get_template().".format(self.__class__.__name__),
                RemovedInDjango110Warning, stacklevel=2)
            template = BackendTemplate(template)
        # It would seem obvious to call these next two members 'template' and
        # 'context', but those names are reserved as part of the test Client
        # API. To avoid the name collision, we use different names.
        self.template_name = template
        self.context_data = context
        self.using = using
        self._post_render_callbacks = []
        # _request stores the current request object in subclasses that know
        # about requests, like TemplateResponse. It's defined in the base class
        # to minimize code duplication.
        # It's called self._request because self.request gets overwritten by
        # django.test.client.Client. Unlike template_name and context_data,
        # _request should not be considered part of the public API.
        self._request = None
        # content argument doesn't make sense here because it will be replaced
        # with rendered template so we always pass empty string in order to
        # prevent errors and provide shorter signature.
        super(SimpleTemplateResponse, self).__init__('', content_type, status, charset)
        # _is_rendered tracks whether the template and context has been baked
        # into a final response.
        # Super __init__ doesn't know any better than to set self.content to
        # the empty string we just gave it, which wrongly sets _is_rendered
        # True, so we initialize it to False after the call to super __init__.
        self._is_rendered = False
    def __getstate__(self):
        """Pickling support function.
        Ensures that the object can't be pickled before it has been
        rendered, and that the pickled state only includes rendered
        data, not the data used to construct the response.
        """
        obj_dict = self.__dict__.copy()
        if not self._is_rendered:
            raise ContentNotRenderedError('The response content must be '
                                          'rendered before it can be pickled.')
        for attr in self.rendering_attrs:
            if attr in obj_dict:
                del obj_dict[attr]
        return obj_dict
    def resolve_template(self, template):
        "Accepts a template object, path-to-template or list of paths"
        if isinstance(template, (list, tuple)):
            return loader.select_template(template, using=self.using)
        elif isinstance(template, six.string_types):
            return loader.get_template(template, using=self.using)
        else:
            return template
    def _resolve_template(self, template):
        # This wrapper deprecates returning a django.template.Template in
        # subclasses that override resolve_template. It can be removed in
        # Django 1.10.
        new_template = self.resolve_template(template)
        if isinstance(new_template, Template):
            warnings.warn(
                "{}.resolve_template() must return a backend-specific "
                "template like those created by get_template(), not a "
                "{}.".format(
                    self.__class__.__name__, new_template.__class__.__name__),
                RemovedInDjango110Warning, stacklevel=2)
            new_template = BackendTemplate(new_template)
        return new_template
    def resolve_context(self, context):
        """Hook for subclasses; expected to return a dict (see _resolve_context)."""
        return context
    def _resolve_context(self, context):
        # This wrapper deprecates returning a Context or a RequestContext in
        # subclasses that override resolve_context. It can be removed in
        # Django 1.10. If returning a Context or a RequestContext works by
        # accident, it won't be an issue per se, but it won't be officially
        # supported either.
        new_context = self.resolve_context(context)
        if isinstance(new_context, RequestContext) and self._request is None:
            self._request = new_context.request
        if isinstance(new_context, Context):
            warnings.warn(
                "{}.resolve_context() must return a dict, not a {}.".format(
                    self.__class__.__name__, new_context.__class__.__name__),
                RemovedInDjango110Warning, stacklevel=2)
            # It would be tempting to do new_context = new_context.flatten()
            # here but that would cause template context processors to run for
            # TemplateResponse(request, template, Context({})), which would be
            # backwards-incompatible. As a consequence another deprecation
            # warning will be raised when rendering the template. There isn't
            # much we can do about that.
        return new_context
    @property
    def rendered_content(self):
        """Returns the freshly rendered content for the template and context
        described by the TemplateResponse.
        This *does not* set the final content of the response. To set the
        response content, you must either call render(), or set the
        content explicitly using the value of this property.
        """
        template = self._resolve_template(self.template_name)
        context = self._resolve_context(self.context_data)
        content = template.render(context, self._request)
        return content
    def add_post_render_callback(self, callback):
        """Adds a new post-rendering callback.
        If the response has already been rendered,
        invoke the callback immediately.
        """
        if self._is_rendered:
            callback(self)
        else:
            self._post_render_callbacks.append(callback)
    def render(self):
        """Renders (thereby finalizing) the content of the response.
        If the content has already been rendered, this is a no-op.
        Returns the baked response instance.
        """
        retval = self
        if not self._is_rendered:
            self.content = self.rendered_content
            # A callback may return a replacement response, which then becomes
            # the value handed to subsequent callbacks and to the caller.
            for post_callback in self._post_render_callbacks:
                newretval = post_callback(retval)
                if newretval is not None:
                    retval = newretval
        return retval
    @property
    def is_rendered(self):
        """True once render() (or a content assignment) has finalized the body."""
        return self._is_rendered
    def __iter__(self):
        # Iterating an unrendered response would yield the placeholder empty
        # body, so refuse instead.
        if not self._is_rendered:
            raise ContentNotRenderedError('The response content must be '
                                          'rendered before it can be iterated over.')
        return super(SimpleTemplateResponse, self).__iter__()
    @property
    def content(self):
        if not self._is_rendered:
            raise ContentNotRenderedError('The response content must be '
                                          'rendered before it can be accessed.')
        return super(SimpleTemplateResponse, self).content
    @content.setter
    def content(self, value):
        """Sets the content for the response
        """
        # Assigning content (directly or via render()) marks the response as
        # rendered.
        HttpResponse.content.fset(self, value)
        self._is_rendered = True
class TemplateResponse(SimpleTemplateResponse):
    """SimpleTemplateResponse that also carries the HttpRequest used to render."""
    rendering_attrs = SimpleTemplateResponse.rendering_attrs + ['_request', '_current_app']
    def __init__(self, request, template, context=None, content_type=None,
                 status=None, current_app=_current_app_undefined, charset=None,
                 using=None):
        # As a convenience we'll allow callers to provide current_app without
        # having to avoid needing to create the RequestContext directly
        if current_app is not _current_app_undefined:
            warnings.warn(
                "The current_app argument of TemplateResponse is deprecated. "
                "Set the current_app attribute of its request instead.",
                RemovedInDjango110Warning, stacklevel=2)
            request.current_app = current_app
        super(TemplateResponse, self).__init__(
            template, context, content_type, status, charset, using)
        self._request = request
| apache-2.0 |
sanuj/shogun | examples/undocumented/python_modular/kernel_top_modular.py | 26 | 2583 | #!/usr/bin/env python
from tools.load import LoadMatrix
from numpy import where
# Shared test fixtures: DNA sequences and labels, loaded once at import time.
lm=LoadMatrix()
traindat = lm.load_dna('../data/fm_train_dna.dat')
testdat = lm.load_dna('../data/fm_test_dna.dat')
label_traindat = lm.load_labels('../data/label_train_dna.dat')
# Split the training sequences by class label so one HMM can be trained per class.
fm_hmm_pos=[traindat[i] for i in where([label_traindat==1])[1] ]
fm_hmm_neg=[traindat[i] for i in where([label_traindat==-1])[1] ]
# Each entry: [train, test, labels, pseudocount, order, gap, reverse, poly-kernel args].
parameter_list = [[traindat,testdat,label_traindat,1e-1,1,0,False,[1, False, True]], \
	[traindat,testdat,label_traindat,1e-1,1,0,False,[1, False, True] ]]
def kernel_top_modular(fm_train_dna=traindat, fm_test_dna=testdat, label_train_dna=label_traindat,
                       pseudo=1e-1, order=1, gap=0, reverse=False, kargs=[1, False, True]):
    """Train per-class HMMs on DNA data and build a polynomial kernel on TOP features.

    Returns (train_kernel_matrix, test_kernel_matrix, kernel).
    """
    from modshogun import StringCharFeatures, StringWordFeatures, TOPFeatures, DNA
    from modshogun import PolyKernel
    from modshogun import HMM, BW_NORMAL
    num_states = 1   # toy HMM with a single state
    num_symbols = 4  # 4 observation symbols -> DNA alphabet
    def word_features(strings):
        # Convert raw DNA strings into order-k string word features.
        chars = StringCharFeatures(strings, DNA)
        words = StringWordFeatures(chars.get_alphabet())
        words.obtain_from_char(chars, order-1, order, gap, reverse)
        return words
    # Train one HMM per class with Baum-Welch.
    pos = HMM(word_features(fm_hmm_pos), num_states, num_symbols, pseudo)
    pos.baum_welch_viterbi_train(BW_NORMAL)
    neg = HMM(word_features(fm_hmm_neg), num_states, num_symbols, pseudo)
    neg.baum_welch_viterbi_train(BW_NORMAL)
    # Word features for the kernel itself.
    wordfeats_train = word_features(fm_train_dna)
    wordfeats_test = word_features(fm_test_dna)
    # Kernel matrix on the training data.
    pos.set_observations(wordfeats_train)
    neg.set_observations(wordfeats_train)
    feats_train = TOPFeatures(10, pos, neg, False, False)
    kernel = PolyKernel(feats_train, feats_train, *kargs)
    km_train = kernel.get_kernel_matrix()
    # Kernel matrix on the test data; clones keep the trained HMMs intact.
    pos_clone = HMM(pos)
    neg_clone = HMM(neg)
    pos_clone.set_observations(wordfeats_test)
    neg_clone.set_observations(wordfeats_test)
    feats_test = TOPFeatures(10, pos_clone, neg_clone, False, False)
    kernel.init(feats_train, feats_test)
    km_test = kernel.get_kernel_matrix()
    return km_train, km_test, kernel
if __name__=='__main__':
    print("TOP Kernel")
    kernel_top_modular(*parameter_list[0])
| gpl-3.0 |
bchess/pushmanager | tests/test_servlet_delayrequest.py | 5 | 2707 | from contextlib import nested
import mock
from core import db
from core.util import get_servlet_urlspec
from servlets.delayrequest import DelayRequestServlet
import testing as T
import types
class DelayRequestServletTest(T.TestCase, T.ServletTestMixin):
    """Tests for DelayRequestServlet's post-DB-completion email notifications."""
    @T.class_setup_teardown
    def mock_servlet_env(self):
        # Patch pushmanager settings and the authenticated user for the
        # duration of every test in this class.
        self.results = []
        with nested(
            mock.patch.dict(db.Settings, T.MockedSettings),
            mock.patch.object(
                DelayRequestServlet,
                "get_current_user",
                return_value="testuser"
            )
        ):
            yield
    def get_handlers(self):
        # Route table used by ServletTestMixin to build the test application.
        return [get_servlet_urlspec(DelayRequestServlet)]
    def call_on_db_complete(self, req):
        """Run DelayRequestServlet.on_db_complete on a mocked servlet with *req* as the fetched row."""
        mocked_self = mock.Mock()
        mocked_self.current_user = 'fake_pushmaster'
        mocked_self.check_db_results = mock.Mock(return_value=None)
        # Bind the real (unbound, Python 2) method onto the mock so its logic runs.
        mocked_self.on_db_complete = types.MethodType(DelayRequestServlet.on_db_complete.im_func, mocked_self)
        def first():
            return req
        mreq = mock.Mock()
        mreq.first = first
        mocked_self.on_db_complete('success', [mock.ANY, mock.ANY, mreq])
    @mock.patch('core.mail.MailQueue.enqueue_user_email')
    def test_no_watched_mailqueue_on_db_complete(self, mailq):
        # Without watchers only the requester is emailed, and no watcher list
        # appears in the subject or body.
        req = {
            'user': 'testuser',
            'watchers': None,
            'repo': 'repo',
            'branch': 'branch',
            'title': 'title',
            'state': 'delayed',
        }
        self.call_on_db_complete(req)
        no_watcher_call_args = mailq.call_args_list[0][0]
        T.assert_equal(['testuser'], no_watcher_call_args[0])
        T.assert_in('Request for testuser', no_watcher_call_args[1])
        T.assert_in('testuser - title', no_watcher_call_args[1])
        T.assert_in('[push] testuser - title', no_watcher_call_args[2])
    @mock.patch('core.mail.MailQueue.enqueue_user_email')
    def test_watched_mailqueue_on_db_complete(self, mailq):
        # Watchers are appended to the recipient list and shown in parentheses
        # after the requester's name in subject and body.
        req = {
            'user': 'testuser',
            'watchers': 'testuser1,testuser2',
            'repo': 'repo',
            'branch': 'branch',
            'title': 'title',
            'state': 'delayed',
        }
        self.call_on_db_complete(req)
        watched_call_args = mailq.call_args_list[0][0]
        T.assert_equal(['testuser', 'testuser1', 'testuser2'], watched_call_args[0])
        T.assert_in('Request for testuser (testuser1,testuser2)', watched_call_args[1])
        T.assert_in('testuser (testuser1,testuser2) - title', watched_call_args[1])
        T.assert_in('[push] testuser (testuser1,testuser2) - title', watched_call_args[2])
if __name__ == '__main__':
    T.run()
| apache-2.0 |
l10n-tw/systemd | test/create-sys-script.py | 14 | 5080 | #!/usr/bin/env python3
# SPDX-License-Identifier: LGPL-2.1-or-later
OUTFILE_HEADER = """#!/usr/bin/env python3
# SPDX-License-Identifier: LGPL-2.1-or-later
#
# create-sys-script.py
#
# © 2017 Canonical Ltd.
# Author: Dan Streetman <dan.streetman@canonical.com>
"""
# Use this only to (re-)create the test/sys-script.py script,
# after adding or modifying anything in the test/sys/ directory
import os, sys
import stat
import tempfile
import filecmp
import subprocess
OUTFILE_MODE = 0o775
OUTFILE_FUNCS = r"""
import os, sys
import shutil
def d(path, mode):
os.mkdir(path, mode)
def l(path, src):
os.symlink(src, path)
def f(path, mode, contents):
with open(path, "wb") as f:
f.write(contents)
os.chmod(path, mode)
"""
OUTFILE_MAIN = """
if len(sys.argv) < 2:
exit("Usage: {} <target dir>".format(sys.argv[0]))
if not os.path.isdir(sys.argv[1]):
exit("Target dir {} not found".format(sys.argv[1]))
os.chdir(sys.argv[1])
if os.path.exists('sys'):
shutil.rmtree('sys')
"""
def handle_dir(outfile, path):
    """Emit a d(path, mode) call recreating the directory at *path*."""
    mode = os.lstat(path).st_mode & 0o777
    outfile.write(f"d('{path}', {mode:#o})\n")
def handle_link(outfile, path):
    """Emit an l(path, target) call recreating the symlink at *path*."""
    target = os.readlink(path)
    outfile.write(f"l('{path}', '{target}')\n")
def escape_single_quotes(b):
    """Return the repr-escaped text of bytes *b* with single quotes always escaped."""
    # Strip the leading b' (or b") and the trailing quote from the bytes repr.
    body = repr(b)[2:-1]
    # repr only escapes ' by itself when the data contains both quote kinds;
    # otherwise escape every single quote ourselves.
    if '"' not in body:
        body = body.replace("'", r"\'")
    return body
def handle_file(outfile, path):
    """Emit an f(path, mode, contents) call recreating the regular file at *path*."""
    mode = os.lstat(path).st_mode & 0o777
    with open(path, "rb") as fh:
        data = fh.read()
    # Multi-line payloads are written as a triple-quoted bytes literal so the
    # generated script stays readable; anything else uses plain repr().
    if data.count(b"\n") > 1:
        body = "\n".join(escape_single_quotes(line) for line in data.split(b"\n"))
        literal = f"b'''{body}'''"
    else:
        literal = repr(data)
    outfile.write(f"f('{path}', {mode:#o}, {literal})\n")
def process_sysdir(outfile):
    """Walk ./sys and emit a recreation statement for every dir, link and file."""
    for (dirpath, dirnames, filenames) in os.walk('sys'):
        handle_dir(outfile, dirpath)
        # os.walk does not descend into symlinked directories, but they still
        # appear in dirnames and must be recorded as links.
        for name in dirnames:
            entry = os.path.join(dirpath, name)
            if stat.S_ISLNK(os.lstat(entry).st_mode):
                handle_link(outfile, entry)
        for name in filenames:
            entry = os.path.join(dirpath, name)
            mode = os.lstat(entry).st_mode
            if stat.S_ISLNK(mode):
                handle_link(outfile, entry)
            elif stat.S_ISREG(mode):
                handle_file(outfile, entry)
def verify_dir(tmpd, path_a):
    """Check that *path_a* was recreated under *tmpd* as a directory with matching permissions."""
    recreated = os.path.join(tmpd, path_a)
    mode_orig = os.lstat(path_a).st_mode
    mode_copy = os.lstat(recreated).st_mode
    if not stat.S_ISDIR(mode_copy):
        raise Exception("Not directory")
    if (mode_orig & 0o777) != (mode_copy & 0o777):
        raise Exception("Permissions mismatch")
def verify_link(tmpd, path_a):
    """Check that *path_a* was recreated under *tmpd* as a symlink with the same target."""
    recreated = os.path.join(tmpd, path_a)
    if not stat.S_ISLNK(os.lstat(recreated).st_mode):
        raise Exception("Not symlink")
    if os.readlink(path_a) != os.readlink(recreated):
        raise Exception("Symlink dest mismatch")
def verify_file(tmpd, path_a):
    """Check that *path_a* was recreated under *tmpd* as a regular file with identical mode and contents."""
    recreated = os.path.join(tmpd, path_a)
    mode_orig = os.lstat(path_a).st_mode
    mode_copy = os.lstat(recreated).st_mode
    if not stat.S_ISREG(mode_copy):
        raise Exception("Not file")
    if (mode_orig & 0o777) != (mode_copy & 0o777):
        raise Exception("Permissions mismatch")
    if not filecmp.cmp(path_a, recreated, shallow=False):
        raise Exception("File contents mismatch")
def verify_script(tmpd):
    """Re-verify every entry under ./sys against its recreated copy below *tmpd*.

    Exits the interpreter when ./sys is empty or missing, and re-raises (after
    printing the offending path to stderr) on the first mismatch.
    """
    found = False  # renamed from 'any', which shadowed the builtin
    for (dirpath, dirnames, filenames) in os.walk("sys"):
        found = True
        try:
            path = dirpath
            verify_dir(tmpd, path)
            for d in dirnames:
                path = os.path.join(dirpath, d)
                if stat.S_ISLNK(os.lstat(path).st_mode):
                    verify_link(tmpd, path)
            for f in filenames:
                path = os.path.join(dirpath, f)
                mode = os.lstat(path).st_mode
                if stat.S_ISLNK(mode):
                    verify_link(tmpd, path)
                elif stat.S_ISREG(mode):
                    verify_file(tmpd, path)
        except Exception:
            print(f'FAIL on "{path}"', file=sys.stderr)
            raise
    if not found:
        exit('Nothing found!')
if __name__ == "__main__":
    if len(sys.argv) < 2:
        exit('Usage: create-sys-script.py /path/to/test/')
    # The generated script is written next to this script.
    outfile = os.path.abspath(os.path.dirname(sys.argv[0]) + '/sys-script.py')
    print(f'Creating {outfile} using contents of {sys.argv[1]}/sys')
    os.chdir(sys.argv[1])
    with open(outfile, "w") as f:
        os.chmod(outfile, OUTFILE_MODE)
        f.write(OUTFILE_HEADER.replace(os.path.basename(sys.argv[0]),
                                       os.path.basename(outfile)))
        f.write(OUTFILE_FUNCS)
        f.write(OUTFILE_MAIN)
        process_sysdir(f)
    # Round-trip check: run the freshly generated script into a temp dir and
    # verify the recreated tree matches ./sys exactly.
    with tempfile.TemporaryDirectory() as tmpd:
        print(f'Recreating sys/ using {outfile} at {tmpd}')
        subprocess.check_call([outfile, tmpd])
        verify_script(tmpd)
    print(f'Verification successful, {outfile} is correct')
| gpl-2.0 |
erikhvatum/RisWidget | ris_widget/qwidgets/layer_table.py | 1 | 21314 | # The MIT License (MIT)
#
# Copyright (c) 2015 WUSTL ZPLAB
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Authors: Erik Hvatum <ice.rikh@gmail.com>
from PyQt5 import Qt
from ..image import Image
from ..layer import Layer
from ..layer_stack import LayerList
from ..qdelegates.dropdown_list_delegate import DropdownListDelegate
from ..qdelegates.slider_delegate import SliderDelegate
from ..qdelegates.color_delegate import ColorDelegate
from ..qdelegates.checkbox_delegate import CheckboxDelegate
from ..qdelegates.special_selection_highlight_delegate import SpecialSelectionHighlightDelegate
from ..shared_resources import CHOICES_QITEMDATA_ROLE, FREEIMAGE
from .. import om
@om.item_view_shortcuts.with_selected_rows_deletion_shortcut
class LayerTableView(Qt.QTableView):
    """Table view over a LayerTableModel with per-column editor delegates.

    Supports drag & drop and a context menu for resetting defaultable layer
    properties to their default values.
    """
    # model_changed = Qt.pyqtSignal(object)
    # selection_model_changed = Qt.pyqtSignal(object)
    def __init__(self, layer_table_model, parent=None):
        """Wire headers, delegates, selection mode and drag/drop for *layer_table_model*."""
        super().__init__(parent)
        self.layer_table_model = layer_table_model
        self.horizontalHeader().setSectionResizeMode(Qt.QHeaderView.Interactive)
        self.horizontalHeader().setHighlightSections(False)
        self.horizontalHeader().setSectionsClickable(False)
        self.horizontalHeader().setStretchLastSection(True)
        self.verticalHeader().setHighlightSections(False)
        self.verticalHeader().setSectionsClickable(False)
        self.setTextElideMode(Qt.Qt.ElideMiddle)
        # Checkbox delegate is shared by both boolean columns.
        self.checkbox_delegate = CheckboxDelegate(parent=self)
        self.setItemDelegateForColumn(layer_table_model.property_columns['visible'], self.checkbox_delegate)
        self.setItemDelegateForColumn(layer_table_model.property_columns['auto_min_max_enabled'], self.checkbox_delegate)
        self.blend_function_delegate = DropdownListDelegate(self)
        self.setItemDelegateForColumn(layer_table_model.property_columns['blend_function'], self.blend_function_delegate)
        self.tint_delegate = ColorDelegate(self)
        self.setItemDelegateForColumn(layer_table_model.property_columns['tint'], self.tint_delegate)
        self.opacity_delegate = SliderDelegate(0.0, 1.0, self)
        self.setItemDelegateForColumn(layer_table_model.property_columns['opacity'], self.opacity_delegate)
        # Read-only image metadata columns get a special selection highlight.
        self.dead_cell_special_selection_highlight_delegate = SpecialSelectionHighlightDelegate(self)
        for pn in ('image.dtype', 'image.type', 'image.size', 'image.name'):
            self.setItemDelegateForColumn(layer_table_model.property_columns[pn], self.dead_cell_special_selection_highlight_delegate)
        self.setSelectionBehavior(Qt.QAbstractItemView.SelectRows)
        self.setSelectionMode(Qt.QAbstractItemView.ExtendedSelection)
        self.setModel(layer_table_model)
        self.setDragEnabled(True)
        self.setAcceptDrops(True)
        self.setDragDropMode(Qt.QAbstractItemView.DragDrop)
        self.setDropIndicatorShown(True)
        self.setDefaultDropAction(Qt.Qt.LinkAction)
        self.horizontalHeader().resizeSections(Qt.QHeaderView.ResizeToContents)
        # The text 'blend_function' is shorter than 'difference (advanced)', particularly with proportional fonts,
        # so we make it 50% wider to be safe
        col = layer_table_model.property_columns['blend_function']
        self.horizontalHeader().resizeSection(col, self.horizontalHeader().sectionSize(col) * 1.5)
        # The text 'image.size' is typically somewhat shorter than '2160x2560', so we widen that column
        # by an arbitrary fudge factor...
        col = layer_table_model.property_columns['image.size']
        self.horizontalHeader().resizeSection(col, self.horizontalHeader().sectionSize(col) * 1.5)
        # Making the opacity column exactly 100 pixels wide gives 1:1 mapping between horizontal
        # position within the column and opacity slider integer % values
        col = layer_table_model.property_columns['opacity']
        self.horizontalHeader().resizeSection(col, 100)
    def contextMenuEvent(self, event):
        """Offer 'Reset to default value' when right-clicking the focused cell of a defaultable property."""
        focused_midx = self.selectionModel().currentIndex()
        if not focused_midx.isValid():
            return
        # Only act when the click lands on the currently focused cell.
        row = self.rowAt(event.pos().y())
        col = self.columnAt(event.pos().x())
        if row != focused_midx.row() or col != focused_midx.column():
            return
        try:
            pname = self.layer_table_model.property_names[col]
        except IndexError:
            return
        # Only properties rendered via the defaultable-property getter can be reset.
        try:
            psdg = self.layer_table_model._special_data_getters[pname]
        except KeyError:
            return
        if psdg != self.layer_table_model._getd__defaultable_property:
            return
        # The table displays layers in reverse order, so view row r maps to
        # layers[-(r+1)].
        try:
            layer = self.layer_table_model.layer_stack.layers[-(row+1)]
        except IndexError:
            return
        try:
            p = getattr(type(layer), pname)
        except AttributeError:
            return
        # Nothing to reset when the property already holds its default.
        if p.is_default(layer):
            return
        menu = Qt.QMenu(self)
        reset_to_default_action = Qt.QAction('Reset to default value', menu)
        def on_reset_action():
            # Deleting the property descriptor value reverts it to the default.
            p.__delete__(layer)
        reset_to_default_action.triggered.connect(on_reset_action)
        menu.addAction(reset_to_default_action)
        menu.exec(event.globalPos())
    # def setModel(self, model):
    #     super().setModel(model)
    #     self.model_changed.emit(self)
    # def setSelectionModel(self, selection_model):
    #     super().setSelectionModel(selection_model)
    #     self.selection_model_changed.emit(self)
class InvertingProxyModel(Qt.QSortFilterProxyModel):
    """Sort proxy that presents its source model's rows in reverse order.

    A true index-reversing QAbstractProxyModel would be more efficient for
    very long lists, but a layer stack is always small, so a descending sort
    on the row index is simpler and fast enough.
    """
    def __init__(self, parent=None):
        super().__init__(parent)
        # A descending sort on column 0, combined with lessThan below,
        # flips the table upside-down.
        self.sort(0, Qt.Qt.DescendingOrder)
    def lessThan(self, lhs, rhs):
        # Compare by row number so "descending" means reversed source order.
        return lhs.row() < rhs.row()
class LayerTableDragDropBehavior(om.signaling_list.DragDropModelBehavior):
    """Drag & drop behavior for the layer table: accepts rows from another
    LayerTableModel, layer-list JSON text, image files and QImages."""
    def _fix_row_for_inversion(self, row):
        # Drop rows arrive in the inverted (upside-down) view's coordinates;
        # map them back to signaling-list positions. Both the "no row" marker
        # (-1) and a drop past the end map to position 0.
        if row == -1:
            return 0
        if row == len(self.signaling_list):
            return 0
        return row + 1
    def canDropMimeData(self, mime_data, drop_action, row, column, parent):
        return super().canDropMimeData(mime_data, drop_action, self._fix_row_for_inversion(row), column, parent)
    def dropMimeData(self, mime_data, drop_action, row, column, parent):
        return super().dropMimeData(mime_data, drop_action, self._fix_row_for_inversion(row), column, parent)
    def can_drop_rows(self, src_model, src_rows, dst_row, dst_column, dst_parent):
        # Row moves are only accepted from another layer table.
        return isinstance(src_model, LayerTableModel)
    def can_drop_text(self, txt, dst_row, dst_column, dst_parent):
        # Text is droppable only when it parses as a non-empty layer list.
        return bool(LayerList.from_json(txt))
    def handle_dropped_qimage(self, qimage, name, dst_row, dst_column, dst_parent):
        """Insert a dropped QImage as a new layer; return True on success."""
        image = Image.from_qimage(qimage=qimage, name=name)
        if image is not None:
            layer = Layer(image=image)
            self.layer_stack.get_layers()[dst_row:dst_row] = [layer]
            return True
        return False
    def handle_dropped_files(self, fpaths, dst_row, dst_column, dst_parent):
        """Insert dropped files: .json/.jsn as serialized layer lists, anything
        else as an image read via FreeImage."""
        freeimage = FREEIMAGE(show_messagebox_on_error=True, error_messagebox_owner=None)
        if freeimage is None:
            return False
        layers = LayerList()
        for fpath in fpaths:
            if fpath.suffix in ('.json', '.jsn'):
                with fpath.open('r') as f:
                    in_layers = LayerList.from_json(f.read())
                if in_layers:
                    layers.extend(in_layers)
            else:
                fpath_str = str(fpath)
                layers.append(Layer(Image(freeimage.read(fpath_str), name=fpath_str)))
        self.layer_stack.get_layers()[dst_row:dst_row] = layers
        return True
    def handle_dropped_text(self, txt, dst_row, dst_column, dst_parent):
        # Insert layers deserialized from dropped JSON text, if any.
        dropped_layers = LayerList.from_json(txt)
        if dropped_layers:
            self.layer_stack.get_layers()[dst_row:dst_row] = dropped_layers
    def mimeData(self, midxs):
        """Augment the default mime data with a JSON serialization of the layer list."""
        mime_data = super().mimeData(midxs)
        mime_data.setText(self.layer_stack.get_layers().to_json())
        return mime_data
class LayerTableModel(LayerTableDragDropBehavior, om.signaling_list.RecursivePropertyTableModel):
# LayerTableModel accesses PROPERTIES strictly via self.PROPERTIES and never via LayerTableModel.PROPERTIES,
# meaning that subclasses may safely add or remove columns by overridding PROPERTIES. For example, adding a column for
# a sublcassed Images having an "image_quality" property:
#
# class LayerStackTableModel_ImageQuality(LayerTableModel):
# PROPERTIES = ImageStackTableModel.PROPERTIES + ('image.image_quality',)
#
# And that's it, provided image_quality is always a plain string and should not be editable. Making it editable
# would require adding an entry to self._special_flag_getters. Alternative .flags may be overridden to activate the
# Qt.Qt.ItemIsEditable flag, as in this example:
#
# class LayerStackTableModel_ImageQuality(LayerTableModel):
# PROPERTIES = ImageStackTableModel.PROPERTIES + ('image.image_quality',)
# def flags(self, midx):
# if midx.column() == self.property_columns['image.image_quality']:
# return Qt.Qt.ItemIsEnabled | Qt.Qt.ItemIsSelectable | Qt.Qt.ItemNeverHasChildren | Qt.Qt.ItemIsEditable
# return super().flags(midx)
PROPERTIES = [
'visible',
'blend_function',
'auto_min_max_enabled',
'tint',
'opacity',
# 'getcolor_expression',
# 'transform_section',
# 'name',
'histogram_min',
'histogram_max',
'image.dtype',
'image.type',
'image.size',
'image.name'
]
def __init__(
self,
layer_stack,
blend_function_choice_to_value_mapping_pairs=None,
parent=None
):
super().__init__(
property_names=self.PROPERTIES,
signaling_list=None if layer_stack is None else layer_stack.layers,
parent=parent)
self.layer_stack = layer_stack
layer_stack.layers_replaced.connect(self._on_layers_replaced)
layer_stack.examine_layer_mode_action.toggled.connect(self._on_examine_layer_mode_toggled)
layer_stack.layer_focus_changed.connect(self._on_layer_focus_changed)
self._focused_row = -1
if blend_function_choice_to_value_mapping_pairs is None:
blend_function_choice_to_value_mapping_pairs = [
('screen', 'screen'),
('src-over (normal)', 'src-over')]
else:
blend_function_choice_to_value_mapping_pairs = list(blend_function_choice_to_value_mapping_pairs)
# Tack less commonly used / advanced blend function names onto list of dropdown choices without duplicating
# entries for values that have verbose choice names
adv_blend_functions = set(Layer.BLEND_FUNCTIONS.keys())
adv_blend_functions -= set(v for c, v in blend_function_choice_to_value_mapping_pairs)
blend_function_choice_to_value_mapping_pairs += [(v + ' (advanced)', v) for v in sorted(adv_blend_functions)]
self.blend_function_choices = tuple(c for c, v in blend_function_choice_to_value_mapping_pairs)
self.blend_function_choice_to_value = dict(blend_function_choice_to_value_mapping_pairs)
self.blend_function_value_to_choice = {v:c for c, v in blend_function_choice_to_value_mapping_pairs}
assert \
len(self.blend_function_choices) == \
len(self.blend_function_choice_to_value) == \
len(self.blend_function_value_to_choice),\
'Duplicate or unmatched (value, the 2nd pair component, does not appear in LayerClass.BLEND_FUNCTIONS) '\
'entry in blend_function_choice_to_value_mapping_pairs.'
self._special_data_getters = {
'visible' : self._getd_visible,
'auto_min_max_enabled' : self._getd_auto_min_max_enabled,
'tint' : self._getd_tint,
'blend_function' : self._getd_blend_function,
'getcolor_expression' : self._getd__defaultable_property,
'transform_section' : self._getd__defaultable_property,
'histogram_min' : self._getd__defaultable_property,
'histogram_max' : self._getd__defaultable_property,
'image.size' : self._getd_image_size,
'image.dtype' : self._getd_image_dtype}
self._special_flag_getters = {
'visible' : self._getf__always_checkable,
'auto_min_max_enabled' : self._getf__always_checkable,
'image.dtype' : self._getf__never_editable,
'image.type' : self._getf__never_editable,
'image.size' : self._getf__never_editable}
self._special_data_setters = {
'visible' : self._setd_visible,
'auto_min_max_enabled' : self._setd__checkable,
'blend_function' : self._setd_blend_function}
# flags #
def _getf_default(self, midx):
return super().flags(midx)
def _getf__always_checkable(self, midx):
return self._getf_default(midx) & ~Qt.Qt.ItemIsEditable | Qt.Qt.ItemIsUserCheckable
def _getf__never_editable(self, midx):
return super().flags(midx) & ~Qt.Qt.ItemIsEditable
def flags(self, midx):
if midx.isValid():
return self._special_flag_getters.get(self.property_names[midx.column()], self._getf_default)(midx)
else:
return self._getf_default(midx)
# data #
def _getd__default(self, midx, role):
return super().data(midx, role)
def _getd__defaultable_property(self, midx, role):
if role == Qt.Qt.FontRole and midx.isValid():
try:
pname = self.property_names[midx.column()]
element = self._signaling_list[midx.row()]
except IndexError:
return
try:
p = getattr(type(element), pname)
except AttributeError:
return
if p.is_default(element):
f = Qt.QFont()
f.setItalic(True)
return Qt.QVariant(f)
return self._getd__default(midx, role)
def _getd_visible(self, midx, role):
if role == Qt.Qt.CheckStateRole:
is_checked = self.get_cell(midx.row(), midx.column())
if self.layer_stack.examine_layer_mode_action.isChecked():
if self._focused_row == midx.row():
if is_checked:
r = Qt.Qt.Checked
else:
r = Qt.Qt.PartiallyChecked
else:
r = Qt.Qt.Unchecked
else:
if is_checked:
r = Qt.Qt.Checked
else:
r = Qt.Qt.Unchecked
return Qt.QVariant(r)
def _getd_auto_min_max_enabled(self, midx, role):
if role == Qt.Qt.CheckStateRole:
if self.get_cell(midx.row(), midx.column()):
r = Qt.Qt.Checked
else:
r = Qt.Qt.Unchecked
return Qt.QVariant(r)
    def _getd_tint(self, midx, role):
        # DecorationRole: a color swatch built from the layer's tint; assumes
        # tint is an iterable of 0..1 floats in QColor channel order -- TODO confirm.
        if role == Qt.Qt.DecorationRole:
            return Qt.QVariant(Qt.QColor(*(int(c*255) for c in self.signaling_list[midx.row()].tint)))
        # DisplayRole: the raw tint value itself; other roles yield None implicitly.
        elif role == Qt.Qt.DisplayRole:
            return Qt.QVariant(self.signaling_list[midx.row()].tint)
    def _getd_blend_function(self, midx, role):
        """Data getter for the blend-function combo column: choice list for the
        delegate, or the display text for the current value."""
        # Custom role queried by the combo-box delegate for its item list.
        if role == CHOICES_QITEMDATA_ROLE:
            return Qt.QVariant(self.blend_function_choices)
        elif role == Qt.Qt.DisplayRole:
            v = self.signaling_list[midx.row()].blend_function
            try:
                c = self.blend_function_value_to_choice[v]
                return Qt.QVariant(c)
            except KeyError:
                # Unmapped value: log it and fall through (returns None).
                Qt.qDebug('No choice for blend function "{}".'.format(v))
    def _getd_image_size(self, midx, role):
        """DisplayRole: render the image size as "WxH"; every other role defers
        to the default getter.  DisplayRole with no image loaded (cell is None)
        yields None -- note the else pairs with the role test, not the None test."""
        if role == Qt.Qt.DisplayRole:
            sz = self.get_cell(midx.row(), midx.column())
            if sz is not None:
                # sz behaves like a QSize (width()/height()) -- TODO confirm upstream.
                return Qt.QVariant('{}x{}'.format(sz.width(), sz.height()))
        else:
            return self._getd__default(midx, role)
    def _getd_image_dtype(self, midx, role):
        """DisplayRole: the image's dtype rendered as a string; other roles defer
        to the default getter.  DisplayRole with no image yields None."""
        if role == Qt.Qt.DisplayRole:
            image = self.signaling_list[midx.row()].image
            if image is not None:
                return Qt.QVariant(str(image.dtype))
        else:
            return self._getd__default(midx, role)
def data(self, midx, role=Qt.Qt.DisplayRole):
if midx.isValid():
d = self._special_data_getters.get(self.property_names[midx.column()], self._getd__default)(midx, role)
if isinstance(d, Qt.QVariant):
return d
else:
return self._getd__default(midx, role)
# setData #
def _setd__checkable(self, midx, value, role):
if role == Qt.Qt.CheckStateRole:
if isinstance(value, Qt.QVariant):
value = value.value()
return self.set_cell(midx.row(), midx.column(), value)
return False
    def _setd_visible(self, midx, value, role):
        """Setter for the "visible" checkbox column.  See the in-line note for
        the one special case in "examine layer" mode."""
        if role == Qt.Qt.CheckStateRole:
            if isinstance(value, Qt.QVariant):
                value = value.value()
            if value == Qt.Qt.Checked and self.layer_stack.examine_layer_mode_action.isChecked() and self._focused_row != midx.row():
                # checkbox_delegate is telling us that, as a result of being hit, we should check a visibility checkbox
                # that is shown as partially checked. However, it is shown as partially checked because it is actually
                # checked, but the effect of its checkedness is being suppressed because we are in "examine layer" mode
                # and the layer containing the visibility checkbox in question is not the current layer in the layer
                # table. It is nominally checked, and so toggling it actually means unchecking it. This is the only
                # instance where an override causes something checked to appear partially checked, rather than causing
                # something unchecked to appear partially checked. And, so, in this one instance, we must special case
                # *setting* of an overridable checkbox property.
                value = Qt.Qt.Unchecked
            return self.set_cell(midx.row(), midx.column(), value)
        return False
def _setd_blend_function(self, midx, c, role):
if role == Qt.Qt.EditRole:
if isinstance(c, Qt.QVariant):
c = c.value()
try:
v = self.blend_function_choice_to_value[c]
self.signaling_list[midx.row()].blend_function = v
return True
except KeyError:
Qt.qDebug('No blend function for choice "{}".'.format(c))
return False
def setData(self, midx, value, role=Qt.Qt.EditRole):
if midx.isValid():
return self._special_data_setters.get(self.property_names[midx.column()], super().setData)(midx, value, role)
return False
    def _on_layers_replaced(self, layer_stack, old_layers, layers):
        # The layer stack swapped in a whole new layer list; rebind the model to it.
        self.signaling_list = layers
def _refresh_column(self, column):
if self.signaling_list is not None:
self.dataChanged.emit(self.createIndex(0, column), self.createIndex(len(self.signaling_list)-1, column))
    def _on_examine_layer_mode_toggled(self):
        # Examine-layer mode changes how "visible" checkboxes render; repaint the column.
        self._refresh_column(self.property_columns['visible'])
    def _on_layer_focus_changed(self, layer_stack, old_layer, layer):
        # Focus moved to another layer; recompute the cached focused row and refresh.
        self._handle_layer_focus_change()
def _handle_layer_focus_change(self):
self._focused_row = self.layer_stack.focused_layer_idx
self._on_examine_layer_mode_toggled() | mit |
rosmo/ansible | lib/ansible/modules/cloud/docker/docker_login.py | 12 | 10470 | #!/usr/bin/python
#
# (c) 2016 Olaf Kilian <olaf.kilian@symanex.com>
# Chris Houseknecht, <house@redhat.com>
# James Tanner, <jtanner@redhat.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: docker_login
short_description: Log into a Docker registry.
version_added: "2.0"
description:
- Provides functionality similar to the "docker login" command.
- Authenticate with a docker registry and add the credentials to your local Docker config file. Adding the
credentials to the config files allows future connections to the registry using tools such as Ansible's Docker
modules, the Docker CLI and Docker SDK for Python without needing to provide credentials.
- Running in check mode will perform the authentication without updating the config file.
options:
registry_url:
required: False
description:
- The registry URL.
type: str
default: "https://index.docker.io/v1/"
aliases:
- registry
- url
username:
description:
- The username for the registry account
type: str
required: yes
password:
description:
- The plaintext password for the registry account
type: str
required: yes
email:
required: False
description:
- "The email address for the registry account."
type: str
reauthorize:
description:
- Refresh existing authentication found in the configuration file.
type: bool
default: no
aliases:
- reauth
config_path:
description:
- Custom path to the Docker CLI configuration file.
type: path
default: ~/.docker/config.json
aliases:
- dockercfg_path
state:
version_added: '2.3'
description:
- This controls the current state of the user. C(present) will login in a user, C(absent) will log them out.
- To logout you only need the registry server, which defaults to DockerHub.
- Before 2.1 you could ONLY log in.
- Docker does not support 'logout' with a custom config file.
type: str
default: 'present'
choices: ['present', 'absent']
extends_documentation_fragment:
- docker
- docker.docker_py_1_documentation
requirements:
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
- "Docker API >= 1.20"
- "Only to be able to logout, that is for I(state) = C(absent): the C(docker) command line utility"
author:
- Olaf Kilian (@olsaki) <olaf.kilian@symanex.com>
- Chris Houseknecht (@chouseknecht)
'''
EXAMPLES = '''
- name: Log into DockerHub
docker_login:
username: docker
password: rekcod
- name: Log into private registry and force re-authorization
docker_login:
registry: your.private.registry.io
username: yourself
password: secrets3
reauthorize: yes
- name: Log into DockerHub using a custom config file
docker_login:
username: docker
password: rekcod
config_path: /tmp/.mydockercfg
- name: Log out of DockerHub
docker_login:
state: absent
'''
RETURN = '''
login_results:
description: Results from the login.
returned: when state='present'
type: dict
sample: {
"email": "testuer@yahoo.com",
"serveraddress": "localhost:5000",
"username": "testuser"
}
'''
import base64
import json
import os
import re
from ansible.module_utils._text import to_bytes, to_text
from ansible.module_utils.docker.common import AnsibleDockerClient, DEFAULT_DOCKER_REGISTRY, DockerBaseClass, EMAIL_REGEX
class LoginManager(DockerBaseClass):
    """Log in to or out of a Docker registry and keep the Docker CLI
    configuration file in sync with the resulting credentials."""

    def __init__(self, client, results):
        """
        :param client: AnsibleDockerClient wired to the module's parameters.
        :param results: the module result dict; updated in place.
        """
        super(LoginManager, self).__init__()

        self.client = client
        self.results = results
        parameters = self.client.module.params
        self.check_mode = self.client.check_mode

        self.registry_url = parameters.get('registry_url')
        self.username = parameters.get('username')
        self.password = parameters.get('password')
        self.email = parameters.get('email')
        self.reauthorize = parameters.get('reauthorize')
        self.config_path = parameters.get('config_path')

        # state=present -> log in; anything else (absent) -> log out.
        if parameters['state'] == 'present':
            self.login()
        else:
            self.logout()

    def fail(self, msg):
        # Delegate failure reporting to the client (calls module.fail_json).
        self.client.fail(msg)

    def login(self):
        '''
        Log into the registry with provided username/password. On success update the config
        file with the new authorization.

        :return: None
        '''

        if self.email and not re.match(EMAIL_REGEX, self.email):
            self.fail("Parameter error: the email address appears to be incorrect. Expecting it to match "
                      "/%s/" % (EMAIL_REGEX))

        self.results['actions'].append("Logged into %s" % (self.registry_url))
        self.log("Log into %s with username %s" % (self.registry_url, self.username))
        try:
            response = self.client.login(
                self.username,
                password=self.password,
                email=self.email,
                registry=self.registry_url,
                reauth=self.reauthorize,
                dockercfg_path=self.config_path
            )
        except Exception as exc:
            self.fail("Logging into %s for user %s failed - %s" % (self.registry_url, self.username, str(exc)))

        # If user is already logged in, then response contains password for user
        # This returns correct password if user is logged in and wrong password is given.
        if 'password' in response:
            del response['password']
        self.results['login_result'] = response

        if not self.check_mode:
            self.update_config_file()

    def logout(self):
        '''
        Log out of the registry. On success update the config file.
        TODO: port to API once docker.py supports this.

        :return: None
        '''

        cmd = [self.client.module.get_bin_path('docker', True), "logout", self.registry_url]
        # TODO: docker does not support config file in logout, restore this when they do
        # if self.config_path and self.config_file_exists(self.config_path):
        #     cmd.extend(["--config", self.config_path])

        (rc, out, err) = self.client.module.run_command(cmd)
        if rc != 0:
            self.fail("Could not log out: %s" % err)

    def config_file_exists(self, path):
        # Existence probe with debug logging for both outcomes.
        if os.path.exists(path):
            self.log("Configuration file %s exists" % (path))
            return True
        self.log("Configuration file %s not found." % (path))
        return False

    def create_config_file(self, path):
        '''
        Create a config file with a JSON blob containing an auths key.

        :return: None
        '''

        self.log("Creating docker config file %s" % (path))
        config_path_dir = os.path.dirname(path)
        if not os.path.exists(config_path_dir):
            try:
                os.makedirs(config_path_dir)
            except Exception as exc:
                self.fail("Error: failed to create %s - %s" % (config_path_dir, str(exc)))
        self.write_config(path, dict(auths=dict()))

    def write_config(self, path, config):
        # Fix: use a context manager so the file handle is closed and flushed;
        # the original passed open(path, "w") directly to json.dump and leaked it.
        try:
            with open(path, "w") as config_file:
                json.dump(config, config_file, indent=5, sort_keys=True)
        except Exception as exc:
            self.fail("Error: failed to write config to %s - %s" % (path, str(exc)))

    def update_config_file(self):
        '''
        If the authorization not stored in the config file or reauthorize is True,
        update the config file with the new authorization.

        :return: None
        '''

        path = self.config_path
        if not self.config_file_exists(path):
            self.create_config_file(path)

        try:
            # read the existing config (with-statement closes the handle promptly;
            # only malformed JSON is tolerated -- open() errors still propagate,
            # matching the original exception scope)
            with open(path, "r") as config_file:
                config = json.load(config_file)
        except ValueError:
            self.log("Error reading config from %s" % (path))
            config = dict()

        if not config.get('auths'):
            self.log("Adding auths dict to config.")
            config['auths'] = dict()

        if not config['auths'].get(self.registry_url):
            self.log("Adding registry_url %s to auths." % (self.registry_url))
            config['auths'][self.registry_url] = dict()

        # Docker stores credentials as base64("username:password").
        b64auth = base64.b64encode(
            to_bytes(self.username) + b':' + to_bytes(self.password)
        )
        auth = to_text(b64auth)

        encoded_credentials = dict(
            auth=auth,
            email=self.email
        )

        if config['auths'][self.registry_url] != encoded_credentials or self.reauthorize:
            # Update the config file with the new authorization
            config['auths'][self.registry_url] = encoded_credentials
            self.log("Updating config file %s with new authorization for %s" % (path, self.registry_url))
            self.results['actions'].append("Updated config file %s with new authorization for %s" % (
                path, self.registry_url))
            self.results['changed'] = True
            self.write_config(path, config)
def main():
    """Module entry point: declare the argument spec, build the Docker client,
    run the login/logout via LoginManager, and exit with the results."""
    argument_spec = dict(
        registry_url=dict(type='str', default=DEFAULT_DOCKER_REGISTRY, aliases=['registry', 'url']),
        username=dict(type='str'),
        password=dict(type='str', no_log=True),
        email=dict(type='str'),
        reauthorize=dict(type='bool', default=False, aliases=['reauth']),
        state=dict(type='str', default='present', choices=['present', 'absent']),
        config_path=dict(type='path', default='~/.docker/config.json', aliases=['dockercfg_path']),
    )

    docker_client = AnsibleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        # Username/password are only required when logging in.
        required_if=[('state', 'present', ['username', 'password'])],
        min_docker_api_version='1.20',
    )

    outcome = dict(changed=False, actions=[], login_result={})
    LoginManager(docker_client, outcome)

    # 'actions' is an internal audit trail; drop it from the reported results.
    outcome.pop('actions', None)
    docker_client.module.exit_json(**outcome)


if __name__ == '__main__':
    main()
| gpl-3.0 |
chrismeyersfsu/ansible | test/units/module_utils/test_facts.py | 24 | 24159 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
# for testing
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import Mock, patch
from ansible.module_utils import facts
class BaseTestFactsPlatform(unittest.TestCase):
    """Verify that the automagic in Hardware.__new__ selects the right subclass."""
    # Fix: the docstring above used to sit *after* the class attributes, which
    # made it a discarded string expression instead of the class __doc__.

    platform_id = 'Generic'
    fact_class = facts.Hardware

    @patch('platform.system')
    def test_new(self, mock_platform):
        """Constructing the fact class with a mocked platform.system() must
        yield an instance tagged with our platform_id."""
        mock_platform.return_value = self.platform_id
        inst = self.fact_class(module=Mock(), load_on_init=False)
        self.assertIsInstance(inst, self.fact_class)
        self.assertEqual(inst.platform, self.platform_id)

    def test_subclass(self):
        """Directly instantiating a concrete subclass keeps its platform tag."""
        # 'Generic' will try to map to platform.system() that we are not mocking here
        if self.platform_id == 'Generic':
            return
        inst = self.fact_class(module=Mock(), load_on_init=False)
        self.assertIsInstance(inst, self.fact_class)
        self.assertEqual(inst.platform, self.platform_id)
class TestLinuxFactsPlatform(BaseTestFactsPlatform):
    # Linux hardware facts selection.
    platform_id = 'Linux'
    fact_class = facts.LinuxHardware
class TestSunOSHardware(BaseTestFactsPlatform):
    # SunOS hardware facts selection.
    platform_id = 'SunOS'
    fact_class = facts.SunOSHardware
class TestOpenBSDHardware(BaseTestFactsPlatform):
    # OpenBSD hardware facts selection.
    platform_id = 'OpenBSD'
    fact_class = facts.OpenBSDHardware
class TestFreeBSDHardware(BaseTestFactsPlatform):
    # FreeBSD hardware facts selection.
    platform_id = 'FreeBSD'
    fact_class = facts.FreeBSDHardware
class TestDragonFlyHardware(BaseTestFactsPlatform):
    # DragonFly BSD hardware facts selection.
    platform_id = 'DragonFly'
    fact_class = facts.DragonFlyHardware
class TestNetBSDHardware(BaseTestFactsPlatform):
    # NetBSD hardware facts selection.
    platform_id = 'NetBSD'
    fact_class = facts.NetBSDHardware
class TestAIXHardware(BaseTestFactsPlatform):
    # AIX hardware facts selection (the facts class is simply named AIX).
    platform_id = 'AIX'
    fact_class = facts.AIX
class TestHPUXHardware(BaseTestFactsPlatform):
    # HP-UX hardware facts selection (the facts class is simply named HPUX).
    platform_id = 'HP-UX'
    fact_class = facts.HPUX
class TestDarwinHardware(BaseTestFactsPlatform):
    # macOS hardware facts selection (the facts class is simply named Darwin).
    platform_id = 'Darwin'
    fact_class = facts.Darwin
class TestGenericNetwork(BaseTestFactsPlatform):
    # Generic (base) network facts selection.
    platform_id = 'Generic'
    fact_class = facts.Network
class TestLinuxNetwork(BaseTestFactsPlatform):
    # NOTE(review): platform_id/fact_class duplicate TestGenericNetwork above;
    # given the class name, 'Linux' / facts.LinuxNetwork looks intended -- confirm.
    platform_id = 'Generic'
    fact_class = facts.Network
class TestGenericBsdIfconfigNetwork(BaseTestFactsPlatform):
    # Generic BSD ifconfig-based network facts selection.
    platform_id = 'Generic_BSD_Ifconfig'
    fact_class = facts.GenericBsdIfconfigNetwork
class TestHPUXNetwork(BaseTestFactsPlatform):
    # HP-UX network facts selection.
    platform_id = 'HP-UX'
    fact_class = facts.HPUXNetwork
class TestDarwinNetwork(BaseTestFactsPlatform):
    # macOS network facts selection.
    platform_id = 'Darwin'
    fact_class = facts.DarwinNetwork
class TestFreeBSDNetwork(BaseTestFactsPlatform):
    # FreeBSD network facts selection.
    platform_id = 'FreeBSD'
    fact_class = facts.FreeBSDNetwork
class TestDragonFlyNetwork(BaseTestFactsPlatform):
    # DragonFly BSD network facts selection.
    platform_id = 'DragonFly'
    fact_class = facts.DragonFlyNetwork
class TestAIXNetwork(BaseTestFactsPlatform):
    # AIX network facts selection.
    platform_id = 'AIX'
    fact_class = facts.AIXNetwork
class TestOpenBSDNetwork(BaseTestFactsPlatform):
    # OpenBSD network facts selection.
    platform_id = 'OpenBSD'
    fact_class = facts.OpenBSDNetwork
class TestSunOSNetwork(BaseTestFactsPlatform):
    # SunOS network facts selection.
    platform_id = 'SunOS'
    fact_class = facts.SunOSNetwork
class TestLinuxVirtual(BaseTestFactsPlatform):
    # Linux virtualization facts selection.
    platform_id = 'Linux'
    fact_class = facts.LinuxVirtual
class TestFreeBSDVirtual(BaseTestFactsPlatform):
    platform_id = 'FreeBSD'
    # NOTE(review): fact_class is the *Network* class despite the Virtual name;
    # facts.FreeBSDVirtual may have been intended -- confirm.
    fact_class = facts.FreeBSDNetwork
class TestDragonFlyVirtual(BaseTestFactsPlatform):
    platform_id = 'DragonFly'
    # NOTE(review): fact_class is the *Network* class despite the Virtual name;
    # a DragonFly virtualization facts class may have been intended -- confirm.
    fact_class = facts.DragonFlyNetwork
class TestOpenBSDVirtual(BaseTestFactsPlatform):
    # OpenBSD virtualization facts selection.
    platform_id = 'OpenBSD'
    fact_class = facts.OpenBSDVirtual
class TestHPUXVirtual(BaseTestFactsPlatform):
    # HP-UX virtualization facts selection.
    platform_id = 'HP-UX'
    fact_class = facts.HPUXVirtual
class TestSunOSVirtual(BaseTestFactsPlatform):
    # SunOS virtualization facts selection.
    platform_id = 'SunOS'
    fact_class = facts.SunOSVirtual
LSBLK_OUTPUT = b"""
/dev/sda
/dev/sda1 32caaec3-ef40-4691-a3b6-438c3f9bc1c0
/dev/sda2 66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK
/dev/mapper/fedora_dhcp129--186-swap eae6059d-2fbe-4d1c-920d-a80bbeb1ac6d
/dev/mapper/fedora_dhcp129--186-root d34cf5e3-3449-4a6c-8179-a1feb2bca6ce
/dev/mapper/fedora_dhcp129--186-home 2d3e4853-fa69-4ccf-8a6a-77b05ab0a42d
/dev/sr0
/dev/loop0 0f031512-ab15-497d-9abd-3a512b4a9390
/dev/loop1 7c1b0f30-cf34-459f-9a70-2612f82b870a
/dev/loop9 0f031512-ab15-497d-9abd-3a512b4a9390
/dev/loop9 7c1b4444-cf34-459f-9a70-2612f82b870a
/dev/mapper/docker-253:1-1050967-pool
/dev/loop2
/dev/mapper/docker-253:1-1050967-pool
"""
LSBLK_OUTPUT_2 = b"""
/dev/sda
/dev/sda1 32caaec3-ef40-4691-a3b6-438c3f9bc1c0
/dev/sda2 66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK
/dev/mapper/fedora_dhcp129--186-swap eae6059d-2fbe-4d1c-920d-a80bbeb1ac6d
/dev/mapper/fedora_dhcp129--186-root d34cf5e3-3449-4a6c-8179-a1feb2bca6ce
/dev/mapper/fedora_dhcp129--186-home 2d3e4853-fa69-4ccf-8a6a-77b05ab0a42d
/dev/mapper/an-example-mapper with a space in the name 84639acb-013f-4d2f-9392-526a572b4373
/dev/sr0
/dev/loop0 0f031512-ab15-497d-9abd-3a512b4a9390
"""
LSBLK_UUIDS = {'/dev/sda1': '66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK'}
MTAB = """
sysfs /sys sysfs rw,seclabel,nosuid,nodev,noexec,relatime 0 0
proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
devtmpfs /dev devtmpfs rw,seclabel,nosuid,size=8044400k,nr_inodes=2011100,mode=755 0 0
securityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0
tmpfs /dev/shm tmpfs rw,seclabel,nosuid,nodev 0 0
devpts /dev/pts devpts rw,seclabel,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0
tmpfs /run tmpfs rw,seclabel,nosuid,nodev,mode=755 0 0
tmpfs /sys/fs/cgroup tmpfs ro,seclabel,nosuid,nodev,noexec,mode=755 0 0
cgroup /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd 0 0
pstore /sys/fs/pstore pstore rw,seclabel,nosuid,nodev,noexec,relatime 0 0
cgroup /sys/fs/cgroup/devices cgroup rw,nosuid,nodev,noexec,relatime,devices 0 0
cgroup /sys/fs/cgroup/freezer cgroup rw,nosuid,nodev,noexec,relatime,freezer 0 0
cgroup /sys/fs/cgroup/memory cgroup rw,nosuid,nodev,noexec,relatime,memory 0 0
cgroup /sys/fs/cgroup/pids cgroup rw,nosuid,nodev,noexec,relatime,pids 0 0
cgroup /sys/fs/cgroup/blkio cgroup rw,nosuid,nodev,noexec,relatime,blkio 0 0
cgroup /sys/fs/cgroup/cpuset cgroup rw,nosuid,nodev,noexec,relatime,cpuset 0 0
cgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,nosuid,nodev,noexec,relatime,cpu,cpuacct 0 0
cgroup /sys/fs/cgroup/hugetlb cgroup rw,nosuid,nodev,noexec,relatime,hugetlb 0 0
cgroup /sys/fs/cgroup/perf_event cgroup rw,nosuid,nodev,noexec,relatime,perf_event 0 0
cgroup /sys/fs/cgroup/net_cls,net_prio cgroup rw,nosuid,nodev,noexec,relatime,net_cls,net_prio 0 0
configfs /sys/kernel/config configfs rw,relatime 0 0
/dev/mapper/fedora_dhcp129--186-root / ext4 rw,seclabel,relatime,data=ordered 0 0
selinuxfs /sys/fs/selinux selinuxfs rw,relatime 0 0
systemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=24,pgrp=1,timeout=0,minproto=5,maxproto=5,direct 0 0
debugfs /sys/kernel/debug debugfs rw,seclabel,relatime 0 0
hugetlbfs /dev/hugepages hugetlbfs rw,seclabel,relatime 0 0
tmpfs /tmp tmpfs rw,seclabel 0 0
mqueue /dev/mqueue mqueue rw,seclabel,relatime 0 0
/dev/loop0 /var/lib/machines btrfs rw,seclabel,relatime,space_cache,subvolid=5,subvol=/ 0 0
/dev/sda1 /boot ext4 rw,seclabel,relatime,data=ordered 0 0
/dev/mapper/fedora_dhcp129--186-home /home ext4 rw,seclabel,relatime,data=ordered 0 0
tmpfs /run/user/1000 tmpfs rw,seclabel,nosuid,nodev,relatime,size=1611044k,mode=700,uid=1000,gid=1000 0 0
gvfsd-fuse /run/user/1000/gvfs fuse.gvfsd-fuse rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
fusectl /sys/fs/fuse/connections fusectl rw,relatime 0 0
grimlock.g.a: /home/adrian/sshfs-grimlock fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
grimlock.g.a:test_path/path_with'single_quotes /home/adrian/sshfs-grimlock-single-quote fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
grimlock.g.a:path_with'single_quotes /home/adrian/sshfs-grimlock-single-quote-2 fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
grimlock.g.a:/mnt/data/foto's /home/adrian/fotos fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
"""
MTAB_ENTRIES = \
[
['sysfs',
'/sys',
'sysfs',
'rw,seclabel,nosuid,nodev,noexec,relatime',
'0',
'0'],
['proc', '/proc', 'proc', 'rw,nosuid,nodev,noexec,relatime', '0', '0'],
['devtmpfs',
'/dev',
'devtmpfs',
'rw,seclabel,nosuid,size=8044400k,nr_inodes=2011100,mode=755',
'0',
'0'],
['securityfs',
'/sys/kernel/security',
'securityfs',
'rw,nosuid,nodev,noexec,relatime',
'0',
'0'],
['tmpfs', '/dev/shm', 'tmpfs', 'rw,seclabel,nosuid,nodev', '0', '0'],
['devpts',
'/dev/pts',
'devpts',
'rw,seclabel,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000',
'0',
'0'],
['tmpfs', '/run', 'tmpfs', 'rw,seclabel,nosuid,nodev,mode=755', '0', '0'],
['tmpfs',
'/sys/fs/cgroup',
'tmpfs',
'ro,seclabel,nosuid,nodev,noexec,mode=755',
'0',
'0'],
['cgroup',
'/sys/fs/cgroup/systemd',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd',
'0',
'0'],
['pstore',
'/sys/fs/pstore',
'pstore',
'rw,seclabel,nosuid,nodev,noexec,relatime',
'0',
'0'],
['cgroup',
'/sys/fs/cgroup/devices',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,devices',
'0',
'0'],
['cgroup',
'/sys/fs/cgroup/freezer',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,freezer',
'0',
'0'],
['cgroup',
'/sys/fs/cgroup/memory',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,memory',
'0',
'0'],
['cgroup',
'/sys/fs/cgroup/pids',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,pids',
'0',
'0'],
['cgroup',
'/sys/fs/cgroup/blkio',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,blkio',
'0',
'0'],
['cgroup',
'/sys/fs/cgroup/cpuset',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,cpuset',
'0',
'0'],
['cgroup',
'/sys/fs/cgroup/cpu,cpuacct',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,cpu,cpuacct',
'0',
'0'],
['cgroup',
'/sys/fs/cgroup/hugetlb',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,hugetlb',
'0',
'0'],
['cgroup',
'/sys/fs/cgroup/perf_event',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,perf_event',
'0',
'0'],
['cgroup',
'/sys/fs/cgroup/net_cls,net_prio',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,net_cls,net_prio',
'0',
'0'],
['configfs', '/sys/kernel/config', 'configfs', 'rw,relatime', '0', '0'],
['/dev/mapper/fedora_dhcp129--186-root',
'/',
'ext4',
'rw,seclabel,relatime,data=ordered',
'0',
'0'],
['selinuxfs', '/sys/fs/selinux', 'selinuxfs', 'rw,relatime', '0', '0'],
['systemd-1',
'/proc/sys/fs/binfmt_misc',
'autofs',
'rw,relatime,fd=24,pgrp=1,timeout=0,minproto=5,maxproto=5,direct',
'0',
'0'],
['debugfs', '/sys/kernel/debug', 'debugfs', 'rw,seclabel,relatime', '0', '0'],
['hugetlbfs',
'/dev/hugepages',
'hugetlbfs',
'rw,seclabel,relatime',
'0',
'0'],
['tmpfs', '/tmp', 'tmpfs', 'rw,seclabel', '0', '0'],
['mqueue', '/dev/mqueue', 'mqueue', 'rw,seclabel,relatime', '0', '0'],
['/dev/loop0',
'/var/lib/machines',
'btrfs',
'rw,seclabel,relatime,space_cache,subvolid=5,subvol=/',
'0',
'0'],
['/dev/sda1', '/boot', 'ext4', 'rw,seclabel,relatime,data=ordered', '0', '0'],
# A 'none' fstype
['/dev/sdz3', '/not/a/real/device', 'none', 'rw,seclabel,relatime,data=ordered', '0', '0'],
# lets assume this is a bindmount
['/dev/sdz4', '/not/a/real/bind_mount', 'ext4', 'rw,seclabel,relatime,data=ordered', '0', '0'],
['/dev/mapper/fedora_dhcp129--186-home',
'/home',
'ext4',
'rw,seclabel,relatime,data=ordered',
'0',
'0'],
['tmpfs',
'/run/user/1000',
'tmpfs',
'rw,seclabel,nosuid,nodev,relatime,size=1611044k,mode=700,uid=1000,gid=1000',
'0',
'0'],
['gvfsd-fuse',
'/run/user/1000/gvfs',
'fuse.gvfsd-fuse',
'rw,nosuid,nodev,relatime,user_id=1000,group_id=1000',
'0',
'0'],
['fusectl', '/sys/fs/fuse/connections', 'fusectl', 'rw,relatime', '0', '0']]
BIND_MOUNTS = ['/not/a/real/bind_mount']
FINDMNT_OUTPUT = u"""
/sys sysfs sysfs rw,nosuid,nodev,noexec,relatime,seclabel
/proc proc proc rw,nosuid,nodev,noexec,relatime
/dev devtmpfs devtmpfs rw,nosuid,seclabel,size=8044400k,nr_inodes=2011100,mode=755
/sys/kernel/security securityfs securityfs rw,nosuid,nodev,noexec,relatime
/dev/shm tmpfs tmpfs rw,nosuid,nodev,seclabel
/dev/pts devpts devpts rw,nosuid,noexec,relatime,seclabel,gid=5,mode=620,ptmxmode=000
/run tmpfs tmpfs rw,nosuid,nodev,seclabel,mode=755
/sys/fs/cgroup tmpfs tmpfs ro,nosuid,nodev,noexec,seclabel,mode=755
/sys/fs/cgroup/systemd cgroup cgroup rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd
/sys/fs/pstore pstore pstore rw,nosuid,nodev,noexec,relatime,seclabel
/sys/fs/cgroup/devices cgroup cgroup rw,nosuid,nodev,noexec,relatime,devices
/sys/fs/cgroup/freezer cgroup cgroup rw,nosuid,nodev,noexec,relatime,freezer
/sys/fs/cgroup/memory cgroup cgroup rw,nosuid,nodev,noexec,relatime,memory
/sys/fs/cgroup/pids cgroup cgroup rw,nosuid,nodev,noexec,relatime,pids
/sys/fs/cgroup/blkio cgroup cgroup rw,nosuid,nodev,noexec,relatime,blkio
/sys/fs/cgroup/cpuset cgroup cgroup rw,nosuid,nodev,noexec,relatime,cpuset
/sys/fs/cgroup/cpu,cpuacct cgroup cgroup rw,nosuid,nodev,noexec,relatime,cpu,cpuacct
/sys/fs/cgroup/hugetlb cgroup cgroup rw,nosuid,nodev,noexec,relatime,hugetlb
/sys/fs/cgroup/perf_event cgroup cgroup rw,nosuid,nodev,noexec,relatime,perf_event
/sys/fs/cgroup/net_cls,net_prio cgroup cgroup rw,nosuid,nodev,noexec,relatime,net_cls,net_prio
/sys/kernel/config configfs configfs rw,relatime
/ /dev/mapper/fedora_dhcp129--186-root ext4 rw,relatime,seclabel,data=ordered
/sys/fs/selinux selinuxfs selinuxfs rw,relatime
/proc/sys/fs/binfmt_misc systemd-1 autofs rw,relatime,fd=24,pgrp=1,timeout=0,minproto=5,maxproto=5,direct
/sys/kernel/debug debugfs debugfs rw,relatime,seclabel
/dev/hugepages hugetlbfs hugetlbfs rw,relatime,seclabel
/tmp tmpfs tmpfs rw,seclabel
/dev/mqueue mqueue mqueue rw,relatime,seclabel
/var/lib/machines /dev/loop0 btrfs rw,relatime,seclabel,space_cache,subvolid=5,subvol=/
/boot /dev/sda1 ext4 rw,relatime,seclabel,data=ordered
/home /dev/mapper/fedora_dhcp129--186-home ext4 rw,relatime,seclabel,data=ordered
/run/user/1000 tmpfs tmpfs rw,nosuid,nodev,relatime,seclabel,size=1611044k,mode=700,uid=1000,gid=1000
/run/user/1000/gvfs gvfsd-fuse fuse.gvfsd-fuse rw,nosuid,nodev,relatime,user_id=1000,group_id=1000
/sys/fs/fuse/connections fusectl fusectl rw,relatime
/not/a/real/bind_mount /dev/sdz4[/some/other/path] ext4 rw,relatime,seclabel,data=ordered
/home/adrian/sshfs-grimlock grimlock.g.a: fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000
/home/adrian/sshfs-grimlock-single-quote grimlock.g.a:test_path/path_with'single_quotes
fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000
/home/adrian/sshfs-grimlock-single-quote-2 grimlock.g.a:path_with'single_quotes fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000
/home/adrian/fotos grimlock.g.a:/mnt/data/foto's fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000
"""
class TestFactsLinuxHardwareGetMountFacts(unittest.TestCase):
    """Exercise LinuxHardware mount/bind-mount/lsblk fact collection against
    canned command output (MTAB_ENTRIES, FINDMNT_OUTPUT, LSBLK_OUTPUT*).

    Fix: all deprecated ``assertEquals`` aliases replaced with ``assertEqual``
    (the aliases are removed in modern Python)."""

    # FIXME: mock.patch instead
    def setUp(self):
        # The @timeout tracebacks if there isn't a GATHER_TIMEOUT is None (the default until get_all_facts sets it via global)
        facts.GATHER_TIMEOUT = 10

    def tearDown(self):
        facts.GATHER_TIMEOUT = None

    # The Hardware subclasses freakout if instaniated directly, so
    # mock platform.system and inst Hardware() so we get a LinuxHardware()
    # we can test.
    @patch('ansible.module_utils.facts.LinuxHardware._mtab_entries', return_value=MTAB_ENTRIES)
    @patch('ansible.module_utils.facts.LinuxHardware._find_bind_mounts', return_value=BIND_MOUNTS)
    @patch('ansible.module_utils.facts.LinuxHardware._lsblk_uuid', return_value=LSBLK_UUIDS)
    def test_get_mount_facts(self,
                             mock_lsblk_uuid,
                             mock_find_bind_mounts,
                             mock_mtab_entries):
        """get_mount_facts populates a non-empty 'mounts' list of dicts."""
        module = Mock()
        # Returns a LinuxHardware-ish
        lh = facts.LinuxHardware(module=module, load_on_init=False)

        # Nothing returned, just self.facts modified as a side effect
        lh.get_mount_facts()
        self.assertIsInstance(lh.facts, dict)
        self.assertIn('mounts', lh.facts)
        self.assertIsInstance(lh.facts['mounts'], list)
        self.assertIsInstance(lh.facts['mounts'][0], dict)

    @patch('ansible.module_utils.facts.get_file_content', return_value=MTAB)
    def test_get_mtab_entries(self, mock_get_file_content):
        """_mtab_entries parses every MTAB line into a field list."""
        module = Mock()
        lh = facts.LinuxHardware(module=module, load_on_init=False)
        mtab_entries = lh._mtab_entries()
        self.assertIsInstance(mtab_entries, list)
        self.assertIsInstance(mtab_entries[0], list)
        self.assertEqual(len(mtab_entries), 38)

    @patch('ansible.module_utils.facts.LinuxHardware._run_findmnt', return_value=(0, FINDMNT_OUTPUT, ''))
    def test_find_bind_mounts(self, mock_run_findmnt):
        """The single bind mount in FINDMNT_OUTPUT is detected."""
        module = Mock()
        lh = facts.LinuxHardware(module=module, load_on_init=False)
        bind_mounts = lh._find_bind_mounts()

        # If bind_mounts becomes another seq type, feel free to change
        self.assertIsInstance(bind_mounts, set)
        self.assertEqual(len(bind_mounts), 1)
        self.assertIn('/not/a/real/bind_mount', bind_mounts)

    @patch('ansible.module_utils.facts.LinuxHardware._run_findmnt', return_value=(37, '', ''))
    def test_find_bind_mounts_non_zero(self, mock_run_findmnt):
        """A failing findmnt yields an empty result instead of an error."""
        module = Mock()
        lh = facts.LinuxHardware(module=module, load_on_init=False)
        bind_mounts = lh._find_bind_mounts()
        self.assertIsInstance(bind_mounts, set)
        self.assertEqual(len(bind_mounts), 0)

    def test_find_bind_mounts_no_findmnts(self):
        """A missing findmnt binary yields an empty result instead of an error."""
        module = Mock()
        module.get_bin_path = Mock(return_value=None)
        lh = facts.LinuxHardware(module=module, load_on_init=False)
        bind_mounts = lh._find_bind_mounts()
        self.assertIsInstance(bind_mounts, set)
        self.assertEqual(len(bind_mounts), 0)

    @patch('ansible.module_utils.facts.LinuxHardware._run_lsblk', return_value=(0, LSBLK_OUTPUT, ''))
    def test_lsblk_uuid(self, mock_run_lsblk):
        """_lsblk_uuid maps device paths (bytes) to their UUIDs (bytes)."""
        module = Mock()
        lh = facts.LinuxHardware(module=module, load_on_init=False)
        lsblk_uuids = lh._lsblk_uuid()
        self.assertIsInstance(lsblk_uuids, dict)
        self.assertIn(b'/dev/loop9', lsblk_uuids)
        self.assertIn(b'/dev/sda1', lsblk_uuids)
        self.assertEqual(lsblk_uuids[b'/dev/sda1'], b'32caaec3-ef40-4691-a3b6-438c3f9bc1c0')

    @patch('ansible.module_utils.facts.LinuxHardware._run_lsblk', return_value=(37, LSBLK_OUTPUT, ''))
    def test_lsblk_uuid_non_zero(self, mock_run_lsblk):
        """A failing lsblk yields an empty mapping."""
        module = Mock()
        lh = facts.LinuxHardware(module=module, load_on_init=False)
        lsblk_uuids = lh._lsblk_uuid()
        self.assertIsInstance(lsblk_uuids, dict)
        self.assertEqual(len(lsblk_uuids), 0)

    def test_lsblk_uuid_no_lsblk(self):
        """A missing lsblk binary yields an empty mapping."""
        module = Mock()
        module.get_bin_path = Mock(return_value=None)
        lh = facts.LinuxHardware(module=module, load_on_init=False)
        lsblk_uuids = lh._lsblk_uuid()
        self.assertIsInstance(lsblk_uuids, dict)
        self.assertEqual(len(lsblk_uuids), 0)

    @patch('ansible.module_utils.facts.LinuxHardware._run_lsblk', return_value=(0, LSBLK_OUTPUT_2, ''))
    def test_lsblk_uuid_dev_with_space_in_name(self, mock_run_lsblk):
        """Device names containing spaces are parsed correctly."""
        module = Mock()
        lh = facts.LinuxHardware(module=module, load_on_init=False)
        lsblk_uuids = lh._lsblk_uuid()
        self.assertIsInstance(lsblk_uuids, dict)
        self.assertIn(b'/dev/loop0', lsblk_uuids)
        self.assertIn(b'/dev/sda1', lsblk_uuids)
        self.assertEqual(lsblk_uuids[b'/dev/mapper/an-example-mapper with a space in the name'], b'84639acb-013f-4d2f-9392-526a572b4373')
        self.assertEqual(lsblk_uuids[b'/dev/sda1'], b'32caaec3-ef40-4691-a3b6-438c3f9bc1c0')
| gpl-3.0 |
sanjuro/RCJK | vendor/gdata/blogger/service.py | 264 | 5309 | #!/usr/bin/python
#
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes to interact with the Blogger server."""
__author__ = 'api.jscudder (Jeffrey Scudder)'
import gdata.service
import gdata.blogger
class BloggerService(gdata.service.GDataService):
  """GData client for the Blogger service.

  Provides convenience methods for retrieving blog, post and comment feeds
  and for creating, updating and deleting posts and comments.  Each method
  accepts either an id (from which the feed URI is derived) or an explicit
  uri; when both are given, the id takes precedence.
  """

  def __init__(self, email=None, password=None, source=None,
               server='www.blogger.com', **kwargs):
    """Creates a client for the Blogger service.

    Args:
      email: string (optional) The user's email address, used for
          authentication.
      password: string (optional) The user's password.
      source: string (optional) The name of the user's application.
      server: string (optional) The name of the server to which a connection
          will be opened. Default value: 'www.blogger.com'.
      **kwargs: The other parameters to pass to gdata.service.GDataService
          constructor.
    """
    gdata.service.GDataService.__init__(
        self, email=email, password=password, service='blogger', source=source,
        server=server, **kwargs)

  def GetBlogFeed(self, uri=None):
    """Retrieve a list of the blogs to which the current user may manage."""
    if not uri:
      uri = '/feeds/default/blogs'
    return self.Get(uri, converter=gdata.blogger.BlogFeedFromString)

  def GetBlogCommentFeed(self, blog_id=None, uri=None):
    """Retrieve a list of the comments for this blog."""
    # blog_id overrides any uri passed in.
    # NOTE(review): if both blog_id and uri are None, Get(None, ...) is
    # called - confirm callers always supply one of the two.
    if blog_id:
      uri = '/feeds/%s/comments/default' % blog_id
    return self.Get(uri, converter=gdata.blogger.CommentFeedFromString)

  def GetBlogPostFeed(self, blog_id=None, uri=None):
    """Retrieve the feed of posts for a blog, by id or explicit uri."""
    if blog_id:
      uri = '/feeds/%s/posts/default' % blog_id
    return self.Get(uri, converter=gdata.blogger.BlogPostFeedFromString)

  def GetPostCommentFeed(self, blog_id=None, post_id=None, uri=None):
    """Retrieve a list of the comments for this particular blog post."""
    if blog_id and post_id:
      uri = '/feeds/%s/%s/comments/default' % (blog_id, post_id)
    return self.Get(uri, converter=gdata.blogger.CommentFeedFromString)

  def AddPost(self, entry, blog_id=None, uri=None):
    """Publish a new post entry to the blog identified by blog_id or uri."""
    if blog_id:
      uri = '/feeds/%s/posts/default' % blog_id
    return self.Post(entry, uri,
                     converter=gdata.blogger.BlogPostEntryFromString)

  def UpdatePost(self, entry, uri=None):
    """Save changes to an existing post; uri defaults to the entry's edit link."""
    if not uri:
      uri = entry.GetEditLink().href
    return self.Put(entry, uri,
                    converter=gdata.blogger.BlogPostEntryFromString)

  def DeletePost(self, entry=None, uri=None):
    """Delete a post, located by explicit uri or by the entry's edit link."""
    if not uri:
      uri = entry.GetEditLink().href
    return self.Delete(uri)

  def AddComment(self, comment_entry, blog_id=None, post_id=None, uri=None):
    """Adds a new comment to the specified blog post."""
    if blog_id and post_id:
      uri = '/feeds/%s/%s/comments/default' % (blog_id, post_id)
    return self.Post(comment_entry, uri,
                     converter=gdata.blogger.CommentEntryFromString)

  def DeleteComment(self, entry=None, uri=None):
    """Delete a comment, located by explicit uri or by the entry's edit link."""
    if not uri:
      uri = entry.GetEditLink().href
    return self.Delete(uri)
class BlogQuery(gdata.service.Query):
  """Query over the feed listing the blogs a user may manage."""

  def __init__(self, feed=None, params=None, categories=None, blog_id=None):
    """Constructs a query object for the list of a user's Blogger blogs.

    Args:
      feed: str (optional) The beginning of the URL to be queried. If the
          feed is not set, and there is no blog_id passed in, the default
          value is used ('/feeds/default/blogs').
      params: dict (optional)
      categories: list (optional)
      blog_id: str (optional)
    """
    # Derive the feed URI only when one was not given explicitly.
    if not feed:
      if blog_id:
        feed = '/feeds/default/blogs/%s' % blog_id
      else:
        feed = '/feeds/default/blogs'
    gdata.service.Query.__init__(
        self, feed=feed, params=params, categories=categories)
class BlogPostQuery(gdata.service.Query):
  """Query over a blog's post feed, or a single post when post_id is given."""

  def __init__(self, feed=None, params=None, categories=None, blog_id=None,
               post_id=None):
    # An explicit feed wins; otherwise build one from the supplied ids.
    if not feed and blog_id:
      if post_id:
        feed = '/feeds/%s/posts/default/%s' % (blog_id, post_id)
      else:
        feed = '/feeds/%s/posts/default' % blog_id
    gdata.service.Query.__init__(
        self, feed=feed, params=params, categories=categories)
class BlogCommentQuery(gdata.service.Query):
  """Query over a blog's comments: all of them, one post's, or a single one."""

  def __init__(self, feed=None, params=None, categories=None, blog_id=None,
               post_id=None, comment_id=None):
    # An explicit feed wins; otherwise pick the most specific URI the
    # supplied ids allow: single comment > post's comments > blog's comments.
    if not feed and blog_id:
      if comment_id:
        feed = '/feeds/%s/comments/default/%s' % (blog_id, comment_id)
      elif post_id:
        feed = '/feeds/%s/%s/comments/default' % (blog_id, post_id)
      else:
        feed = '/feeds/%s/comments/default' % blog_id
    gdata.service.Query.__init__(
        self, feed=feed, params=params, categories=categories)
| apache-2.0 |
krzysztof-magosa/ansible-modules-extras | monitoring/zabbix_group.py | 18 | 7427 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2013-2014, Epic Games, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: zabbix_group
short_description: Zabbix host groups creates/deletes
description:
- Create host groups if they do not exist.
- Delete existing host groups if they exist.
version_added: "1.8"
author:
- "(@cove)"
- "Tony Minfei Ding"
- "Harrison Gu (@harrisongu)"
requirements:
- "python >= 2.6"
- zabbix-api
options:
server_url:
description:
- Url of Zabbix server, with protocol (http or https).
C(url) is an alias for C(server_url).
required: true
aliases: [ "url" ]
login_user:
description:
- Zabbix user name.
required: true
login_password:
description:
- Zabbix user password.
required: true
http_login_user:
description:
- Basic Auth login
required: false
default: None
version_added: "2.1"
http_login_password:
description:
- Basic Auth password
required: false
default: None
version_added: "2.1"
state:
description:
- Create or delete host group.
required: false
default: "present"
choices: [ "present", "absent" ]
timeout:
description:
- The timeout of API request(seconds).
default: 10
host_groups:
description:
- List of host groups to create or delete.
required: true
aliases: [ "host_group" ]
notes:
- Too many concurrent updates to the same group may cause Zabbix to return errors, see examples for a workaround if needed.
'''
EXAMPLES = '''
# Base create host groups example
- name: Create host groups
local_action:
module: zabbix_group
server_url: http://monitor.example.com
login_user: username
login_password: password
state: present
host_groups:
- Example group1
- Example group2
# Limit the Zabbix group creations to one host since Zabbix can return an error when doing concurent updates
- name: Create host groups
local_action:
module: zabbix_group
server_url: http://monitor.example.com
login_user: username
login_password: password
state: present
host_groups:
- Example group1
- Example group2
when: inventory_hostname==groups['group_name'][0]
'''
try:
from zabbix_api import ZabbixAPI, ZabbixAPISubClass
from zabbix_api import Already_Exists
HAS_ZABBIX_API = True
except ImportError:
HAS_ZABBIX_API = False
class HostGroup(object):
    """Thin wrapper around the Zabbix host-group API calls."""

    def __init__(self, module, zbx):
        self._module = module
        self._zapi = zbx

    # create host group(s) if not exists
    def create_host_group(self, group_names):
        """Create every group in group_names that does not already exist.

        Returns the list of group names that were actually created.  In check
        mode, exits the module with changed=True as soon as a creation would
        be needed.
        """
        try:
            group_add_list = []
            for group_name in group_names:
                result = self._zapi.hostgroup.get({'filter': {'name': group_name}})
                if not result:
                    try:
                        if self._module.check_mode:
                            self._module.exit_json(changed=True)
                        self._zapi.hostgroup.create({'name': group_name})
                        group_add_list.append(group_name)
                    except Already_Exists:
                        # The group appeared concurrently between the get()
                        # and create() calls.  Move on to the next group; the
                        # previous early return silently skipped all
                        # remaining groups in the batch.
                        continue
            return group_add_list
        except Exception as e:
            self._module.fail_json(msg="Failed to create host group(s): %s" % e)

    # delete host group(s)
    def delete_host_group(self, group_ids):
        """Delete the host groups identified by group_ids (list of ids)."""
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.hostgroup.delete(group_ids)
        except Exception as e:
            self._module.fail_json(msg="Failed to delete host group(s), Exception: %s" % e)

    # get group ids by name
    def get_group_ids(self, host_groups):
        """Return ([groupid, ...], [group_dict, ...]) for the named groups."""
        group_ids = []
        group_list = self._zapi.hostgroup.get({'output': 'extend', 'filter': {'name': host_groups}})
        for group in group_list:
            group_ids.append(group['groupid'])
        return group_ids, group_list
def main():
    """Module entry point: parse arguments, log in to Zabbix, then create or
    delete the requested host groups depending on state."""
    module = AnsibleModule(
        argument_spec=dict(
            server_url=dict(type='str', required=True, aliases=['url']),
            login_user=dict(type='str', required=True),
            login_password=dict(type='str', required=True, no_log=True),
            http_login_user=dict(type='str', required=False, default=None),
            http_login_password=dict(type='str', required=False, default=None, no_log=True),
            host_groups=dict(type='list', required=True, aliases=['host_group']),
            state=dict(default="present", choices=['present', 'absent']),
            timeout=dict(type='int', default=10)
        ),
        supports_check_mode=True
    )

    if not HAS_ZABBIX_API:
        # Typo fixed in user-facing message: "requried" -> "required".
        module.fail_json(msg="Missing required zabbix-api module (check docs or install with: pip install zabbix-api)")

    server_url = module.params['server_url']
    login_user = module.params['login_user']
    login_password = module.params['login_password']
    http_login_user = module.params['http_login_user']
    http_login_password = module.params['http_login_password']
    host_groups = module.params['host_groups']
    state = module.params['state']
    timeout = module.params['timeout']

    zbx = None

    # login to zabbix
    try:
        zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password)
        zbx.login(login_user, login_password)
    except Exception as e:
        module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)

    hostGroup = HostGroup(module, zbx)

    group_ids = []
    group_list = []
    if host_groups:
        group_ids, group_list = hostGroup.get_group_ids(host_groups)

    if state == "absent":
        # delete host groups
        if group_ids:
            delete_group_names = []
            hostGroup.delete_host_group(group_ids)
            for group in group_list:
                delete_group_names.append(group['name'])
            module.exit_json(changed=True,
                             result="Successfully deleted host group(s): %s." % ",".join(delete_group_names))
        else:
            module.exit_json(changed=False, result="No host group(s) to delete.")
    else:
        # create host groups
        group_add_list = hostGroup.create_host_group(host_groups)
        if len(group_add_list) > 0:
            module.exit_json(changed=True, result="Successfully created host group(s): %s" % group_add_list)
        else:
            module.exit_json(changed=False)
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
suhussai/youtube-dl | youtube_dl/extractor/eporner.py | 129 | 2307 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
parse_duration,
str_to_int,
)
# Extractor for eporner.com HD video pages.  (Deliberately a comment, not a
# docstring, to avoid interacting with youtube-dl's class attributes.)
class EpornerIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?eporner\.com/hd-porn/(?P<id>\d+)/(?P<display_id>[\w-]+)'
    _TEST = {
        'url': 'http://www.eporner.com/hd-porn/95008/Infamous-Tiffany-Teen-Strip-Tease-Video/',
        'md5': '39d486f046212d8e1b911c52ab4691f8',
        'info_dict': {
            'id': '95008',
            'display_id': 'Infamous-Tiffany-Teen-Strip-Tease-Video',
            'ext': 'mp4',
            'title': 'Infamous Tiffany Teen Strip Tease Video',
            'duration': 1838,
            'view_count': int,
            'age_limit': 18,
        }
    }

    def _real_extract(self, url):
        # URL carries both the numeric id and a human-readable display id.
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        display_id = mobj.group('display_id')

        webpage = self._download_webpage(url, display_id)
        title = self._html_search_regex(
            r'<title>(.*?) - EPORNER', webpage, 'title')

        # The player configuration lives on a separate endpoint keyed by id.
        redirect_url = 'http://www.eporner.com/config5/%s' % video_id
        player_code = self._download_webpage(
            redirect_url, display_id, note='Downloading player config')

        # Grab the JS "sources : [ {...} ]" literal from the player config.
        sources = self._search_regex(
            r'(?s)sources\s*:\s*\[\s*({.+?})\s*\]', player_code, 'sources')

        formats = []
        for video_url, format_id in re.findall(r'file\s*:\s*"([^"]+)",\s*label\s*:\s*"([^"]+)"', sources):
            fmt = {
                'url': video_url,
                'format_id': format_id,
            }
            # Labels such as '720p' start with the height in pixels.
            m = re.search(r'^(\d+)', format_id)
            if m:
                fmt['height'] = int(m.group(1))
            formats.append(fmt)
        self._sort_formats(formats)

        duration = parse_duration(self._html_search_meta('duration', webpage))
        # View counter may be absent; fatal=False lets extraction continue.
        view_count = str_to_int(self._search_regex(
            r'id="cinemaviews">\s*([0-9,]+)\s*<small>views',
            webpage, 'view count', fatal=False))

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'duration': duration,
            'view_count': view_count,
            'formats': formats,
            'age_limit': 18,
        }
| unlicense |
theguardian/CherryStrap | mako/ext/beaker_cache.py | 100 | 2365 | """Provide a :class:`.CacheImpl` for the Beaker caching system."""
from mako import exceptions
from mako.cache import CacheImpl
try:
from beaker import cache as beaker_cache
except:
has_beaker = False
else:
has_beaker = True
_beaker_cache = None
class BeakerCacheImpl(CacheImpl):

    """A :class:`.CacheImpl` provided for the Beaker caching system.

    This plugin is used by default, based on the default
    value of ``'beaker'`` for the ``cache_impl`` parameter of the
    :class:`.Template` or :class:`.TemplateLookup` classes.

    """

    def __init__(self, cache):
        if not has_beaker:
            raise exceptions.RuntimeException(
                "Can't initialize Beaker plugin; Beaker is not installed.")
        global _beaker_cache
        # The Beaker CacheManager is module-global and shared by all
        # templates unless an explicit 'manager' cache arg is supplied by
        # the first template to initialize it.
        if _beaker_cache is None:
            if 'manager' in cache.template.cache_args:
                _beaker_cache = cache.template.cache_args['manager']
            else:
                _beaker_cache = beaker_cache.CacheManager()
        super(BeakerCacheImpl, self).__init__(cache)

    def _get_cache(self, **kw):
        """Translate Mako cache kwargs into a Beaker cache object plus the
        per-operation kwargs (expire time, start time) Beaker expects."""
        expiretime = kw.pop('timeout', None)
        if 'dir' in kw:
            kw['data_dir'] = kw.pop('dir')
        elif self.cache.template.module_directory:
            # Fall back to the template's module directory for file storage.
            kw['data_dir'] = self.cache.template.module_directory

        if 'manager' in kw:
            kw.pop('manager')

        # Beaker's memcached backend is registered under 'ext:memcached'.
        if kw.get('type') == 'memcached':
            kw['type'] = 'ext:memcached'

        if 'region' in kw:
            region = kw.pop('region')
            cache = _beaker_cache.get_cache_region(self.cache.id, region, **kw)
        else:
            cache = _beaker_cache.get_cache(self.cache.id, **kw)
        cache_args = {'starttime': self.cache.starttime}
        if expiretime:
            cache_args['expiretime'] = expiretime
        return cache, cache_args

    def get_or_create(self, key, creation_function, **kw):
        cache, kw = self._get_cache(**kw)
        return cache.get(key, createfunc=creation_function, **kw)

    def put(self, key, value, **kw):
        cache, kw = self._get_cache(**kw)
        cache.put(key, value, **kw)

    def get(self, key, **kw):
        cache, kw = self._get_cache(**kw)
        return cache.get(key, **kw)

    def invalidate(self, key, **kw):
        cache, kw = self._get_cache(**kw)
        cache.remove_value(key, **kw)
| gpl-2.0 |
eastbanctech/kubernetes-contrib | service-loadbalancer/Godeps/_workspace/src/github.com/ugorji/go/codec/test.py | 1516 | 4019 | #!/usr/bin/env python
# This will create golden files in a directory passed to it.
# A Test calls this internally to create the golden files
# So it can process them (so we don't have to checkin the files).
# Ensure msgpack-python and cbor are installed first, using:
# sudo apt-get install python-dev
# sudo apt-get install python-pip
# pip install --user msgpack-python msgpack-rpc-python cbor
# Ensure all "string" keys are utf strings (else encoded as bytes)
import cbor, msgpack, msgpackrpc, sys, os, threading
def get_test_data_list():
    """Return the canonical test values: all primitives, then the primitive
    list itself as a nested element, the int 1, and four composite values."""
    primitives = [
        -8,
        -1616,
        -32323232,
        -6464646464646464,
        192,
        1616,
        32323232,
        6464646464646464,
        192,
        -3232.0,
        -6464646464.0,
        3232.0,
        6464.0,
        6464646464.0,
        False,
        True,
        u"null",
        None,
        u"someday",
        1328176922000002000,
        u"",
        -2206187877999998000,
        u"bytestring",
        270,
        u"none",
        -2013855847999995777,
        #-6795364578871345152,
    ]
    composites = [
        {"true": True,
         "false": False},
        {"true": u"True",
         "false": False,
         "uint16(1616)": 1616},
        {"list": [1616, 32323232, True, -3232.0, {"TRUE": True, "FALSE": False}, [True, False]],
         "int32": 32323232, "bool": True,
         "LONG STRING": u"123456789012345678901234567890123456789012345678901234567890",
         "SHORT STRING": u"1234567890"},
        {True: "true", 138: False, "false": 200},
    ]
    data = list(primitives)
    data.append(primitives)
    data.append(1)
    data.extend(composites)
    return data
def build_test_data(destdir):
    """Write each test value to '<i>.msgpack.golden' and '<i>.cbor.golden'
    files inside destdir.

    Uses 'with open(...)' so file handles are closed even if serialization
    or the write raises (the original left handles open on error), and
    enumerate() instead of indexing by range(len(...)).
    """
    for i, value in enumerate(get_test_data_list()):
        serialized = msgpack.dumps(value)
        with open(os.path.join(destdir, str(i) + '.msgpack.golden'), 'wb') as f:
            f.write(serialized)
        serialized = cbor.dumps(value)
        with open(os.path.join(destdir, str(i) + '.cbor.golden'), 'wb') as f:
            f.write(serialized)
def doRpcServer(port, stopTimeSec):
    """Serve a msgpack-rpc echo service on localhost:port.

    If stopTimeSec > 0, a Timer thread stops the server after that many
    seconds; otherwise the server runs until interrupted.
    """
    class EchoHandler(object):
        def Echo123(self, msg1, msg2, msg3):
            return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3))
        def EchoStruct(self, msg):
            return ("%s" % msg)

    addr = msgpackrpc.Address('localhost', port)
    server = msgpackrpc.Server(EchoHandler())
    server.listen(addr)
    # run thread to stop it after stopTimeSec seconds if > 0
    if stopTimeSec > 0:
        def myStopRpcServer():
            server.stop()
        t = threading.Timer(stopTimeSec, myStopRpcServer)
        t.start()
    server.start()
def doRpcClientToPythonSvc(port):
    """Call the Python echo RPC service on localhost:port and print replies.

    Note: uses Python 2 print statements; this script targets Python 2.
    """
    address = msgpackrpc.Address('localhost', port)
    client = msgpackrpc.Client(address, unpack_encoding='utf-8')
    print client.call("Echo123", "A1", "B2", "C3")
    print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doRpcClientToGoSvc(port):
    """Call the Go echo RPC service on localhost:port and print replies.

    Go's net/rpc style expects a single args value, hence the list/map
    arguments (vs the positional args used against the Python service).
    """
    # print ">>>> port: ", port, " <<<<<"
    address = msgpackrpc.Address('localhost', port)
    client = msgpackrpc.Client(address, unpack_encoding='utf-8')
    print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"])
    print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doMain(args):
    """Dispatch on the sub-command given as the first CLI argument."""
    cmd = args[0] if args else None
    rest = args[1:]
    if cmd == "testdata" and len(rest) == 1:
        build_test_data(rest[0])
    elif cmd == "rpc-server" and len(rest) == 2:
        doRpcServer(int(rest[0]), int(rest[1]))
    elif cmd == "rpc-client-python-service" and len(rest) == 1:
        doRpcClientToPythonSvc(int(rest[0]))
    elif cmd == "rpc-client-go-service" and len(rest) == 1:
        doRpcClientToGoSvc(int(rest[0]))
    else:
        print("Usage: test.py " +
              "[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...")
if __name__ == "__main__":
doMain(sys.argv[1:])
| apache-2.0 |
pawpro/spa | spa/static/hashed.py | 2 | 8271 | import hashlib
import mimetypes
import os
import posixpath
import re
from time import time
from urlparse import urlsplit, urlunsplit
from werkzeug.exceptions import NotFound
from werkzeug.http import is_resource_modified, http_date
from spa.static.handlers import StaticHandler
from spa.utils import clean_path
class HashCache(object):
    """In-memory store of per-path content hashes and (possibly rewritten)
    file contents, shared between static handler instances."""

    def __init__(self):
        self.path_hashes = {}
        self.contents = {}

    def get_path_hash(self, path):
        """Return the cached content hash for path, or None if unknown."""
        return self.path_hashes.get(path)

    def set_path_hash(self, path, path_hash):
        """Remember path's content hash."""
        self.path_hashes[path] = path_hash

    def get_contents(self, path):
        """Return the cached contents for path, or None if unknown."""
        return self.contents.get(path)

    def set_contents(self, path, contents):
        """Remember the contents served for path."""
        self.contents[path] = contents
class CacheBustingStaticHandler(StaticHandler):
    """Static handler serving files under hash-stamped names
    (e.g. app.deadbeef1234.js).  Requests whose embedded hash does not match
    the file's actual content hash get a 404, so stale cached URLs can never
    serve stale content.  CSS files additionally have their url()/@import
    references rewritten to hashed names."""

    # (compiled pattern, replacement template) pairs applied to CSS text.
    css_url_patterns = (
        (re.compile(r"""(url\(['"]{0,1}\s*(.*?)["']{0,1}\))""", re.IGNORECASE),
         """url("{hashed_url}")"""),
        (re.compile(r"""(@import\s*["']\s*(.*?)["'])""", re.IGNORECASE),
         """@import url("{hashed_url}")"""),
    )

    def __init__(self, app, req, params, directory, hash_cache, **kwargs):
        # hash_cache is shared across handler instances (see CacheBuster).
        self.hash_cache = hash_cache
        return super(CacheBustingStaticHandler, self).__init__(
            app, req, params, directory, **kwargs
        )

    def get(self, filepath):
        """Serve filepath if its embedded hash matches the file content."""
        unhashed_path, path_hash = parse_hashed_filepath(filepath)
        if unhashed_path is None:
            return NotFound()

        if self.hash_cache.get_path_hash(unhashed_path) is None:
            # compute hash, and cache it.
            file = self.get_file(unhashed_path)
            if file is None:
                return NotFound()
            try:
                hash_str = get_hash(file.handle)
                self.hash_cache.set_path_hash(unhashed_path, hash_str)
            finally:
                file.handle.close()

        # If hash we were passed doesn't equal the one we've computed and
        # cached, then 404.
        if path_hash != self.hash_cache.get_path_hash(unhashed_path):
            return NotFound()

        # For CSS stylesheets only, we'll rewrite content so that url()
        # functions will point to hashed filenames instead of unhashed.  The
        # rewritten CSS content will be kept in memory.
        if mimetypes.guess_type(filepath)[0] == 'text/css':
            return self.make_css_response(unhashed_path)
        return super(CacheBustingStaticHandler, self).get(unhashed_path)

    def make_css_response(self, filepath):
        """Return a WSGI callable serving the CSS at filepath with its
        url()/@import references rewritten; rewritten text is cached."""
        def resp(environ, start_response):
            file = self.get_file(filepath)
            try:
                headers = [('Date', http_date())]
                if self.cache:
                    timeout = self.cache_timeout
                    etag = self.generate_etag(file.mtime, file.size, file.name)
                    headers += [
                        ('Etag', '"%s"' % etag),
                        ('Cache-Control', 'max-age=%d, public' % timeout)
                    ]
                    # Conditional GET: answer 304 when the client is fresh.
                    if not is_resource_modified(environ, etag, last_modified=file.mtime):
                        start_response('304 Not Modified', headers)
                        return []
                    headers.append(('Expires', http_date(time() + timeout)))
                else:
                    headers.append(('Cache-Control', 'public'))

                # Rewrite url()/@import targets once, then serve from cache.
                contents = self.hash_cache.get_contents(filepath)
                if contents is None:
                    contents = file.handle.read()
                    for pat, tpl in self.css_url_patterns:
                        converter = self.get_converter(tpl)
                        contents = pat.sub(converter, contents)
                    self.hash_cache.set_contents(filepath, contents)

                headers.extend((
                    ('Content-Type', file.mimetype),
                    ('Content-Length', len(contents)),
                    ('Last-Modified', http_date(file.mtime))
                ))
                start_response('200 OK', headers)
                return [contents]
            finally:
                file.handle.close()
        return resp

    def get_converter(self, tpl):
        """Return a re.sub callback substituting hashed URLs into tpl."""
        def converter(matchobj):
            matched, url = matchobj.groups()
            # Fragments, absolute/external and data: URLs are not rewritten.
            # NOTE(review): returning the bare url replaces the entire
            # matched "url(...)" text with just the URL, dropping the
            # wrapper - confirm this is intended (returning `matched` would
            # preserve it).
            if url.startswith(('#', 'http:', 'https:', 'data:', '//')):
                return url
            return tpl.format(hashed_url=self.convert_css_url(url))
        return converter

    def convert_css_url(self, css_url):
        """Map a URL found inside CSS to its hash-stamped equivalent."""
        split_url = urlsplit(css_url)
        url_path = split_url.path
        if not url_path.startswith('/'):
            abs_url_path = self.make_path_absolute(url_path)
        else:
            abs_url_path = posixpath.realpath(url_path)

        prefix = self.get_url_prefix()

        # now make the path as it would be passed in to this handler when
        # requested from the web. From there we can use existing methods on the
        # class to resolve to a real file.
        _, _, content_filepath = abs_url_path.partition(prefix)
        content_filepath = clean_path(content_filepath)
        content_file_hash = self.hash_cache.get_path_hash(content_filepath)
        if content_file_hash is None:
            # NOTE(review): the freshly computed hash is not stored back into
            # hash_cache here, so it is recomputed per reference - confirm.
            content_file = self.get_file(content_filepath)
            if content_file is None:
                return 'NOT FOUND: "%s"' % url_path
            try:
                content_file_hash = get_hash(content_file.handle)
            finally:
                content_file.handle.close()

        parts = list(split_url)
        parts[2] = add_hash_to_filepath(url_path, content_file_hash)
        url = urlunsplit(parts)

        # Special casing for a @font-face hack, like url(myfont.eot?#iefix")
        # http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
        if '?#' in css_url:
            parts = list(urlsplit(url))
            if not parts[3]:
                parts[2] += '?'
            url = urlunsplit(parts)
        return url

    def get_url_prefix(self):
        """
        Return the mount point for this handler.  So if you had a route like
        this:

            ('/foo/bar/static/<path:filepath>', 'foo', Handler)

        Then this function should return '/foo/bar/static/'
        """
        env = self.request.environ
        filepath = self.params['filepath']
        # Strip the trailing <filepath> portion off the full request path.
        prefix, _, _ = (env['SCRIPT_NAME'] +
                        env['PATH_INFO']).rpartition(filepath)
        return prefix

    def make_path_absolute(self, path):
        """
        Given a relative url found inside the CSS file we're currently serving,
        return an absolute form of that URL.
        """
        env = self.request.environ
        pinfo = posixpath.dirname(env['PATH_INFO'])
        return posixpath.realpath(env['SCRIPT_NAME'] + pinfo + '/' + path)
def parse_hashed_filepath(filename, hash_len=12):
    """Split a cache-busted filename into its unhashed path and hash.

    Given a name like '/static/my_file.deadbeef1234.txt', return a tuple of
    the file name without the hash, and the hash itself, like this:

        ('/static/my_file.txt', 'deadbeef1234')

    If no hash part of exactly hash_len lowercase hex digits is found,
    return (None, None).
    """
    # Bug fix: the character class was '[0-9,a-f]', which erroneously
    # accepted ',' as a hash digit; hashes are hex only.  Raw string avoids
    # the invalid-escape warning for '\.'.
    pat = r'^(?P<before>.*)\.(?P<hash>[0-9a-f]{%s})(?P<after>.*?)$' % hash_len
    m = re.match(pat, filename)
    if m is None:
        return None, None
    parts = m.groupdict()
    return '{before}{after}'.format(**parts), parts['hash']
def add_hash_to_filepath(filepath, hash_str):
    """Insert hash_str between a file's root name and its extension,
    e.g. ('/s/app.js', 'abc') -> '/s/app.abc.js'."""
    directory, basename = os.path.split(filepath)
    stem, extension = os.path.splitext(basename)
    hashed_name = "{0}.{1}{2}".format(stem, hash_str, extension)
    return os.path.join(directory, hashed_name)
def get_hash(lines, hash_len=12):
    """Return the first hash_len hex characters of the MD5 digest of the
    concatenated byte chunks in lines (e.g. an open file handle)."""
    digest = hashlib.md5()
    for chunk in lines:
        digest.update(chunk)
    return digest.hexdigest()[:hash_len]
class CacheBuster(object):
    """
    A factory for making CacheBustingStaticHandler instances that share a cache
    instance.
    """
    def __init__(self, directory):
        # Root directory the produced handlers will serve files from.
        self.directory = directory
        # One HashCache shared by every handler this factory creates, so
        # hashes and rewritten CSS are computed once per process.
        self.hash_cache = HashCache()

    def __call__(self, app, req, params, **kwargs):
        """Build a handler for one request, injecting the shared cache."""
        return CacheBustingStaticHandler(app, req, params,
                                         directory=self.directory,
                                         hash_cache=self.hash_cache,
                                         **kwargs)
| bsd-3-clause |
ncoghlan/pip | pip/_vendor/requests/models.py | 410 | 29176 | # -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import collections
import datetime
from io import BytesIO, UnsupportedOperation
from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header, _copy_cookie_jar
from .packages.urllib3.fields import RequestField
from .packages.urllib3.filepost import encode_multipart_formdata
from .packages.urllib3.util import parse_url
from .packages.urllib3.exceptions import (
DecodeError, ReadTimeoutError, ProtocolError, LocationParseError)
from .exceptions import (
HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError,
ContentDecodingError, ConnectionError, StreamConsumedError)
from .utils import (
guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len, to_native_string)
from .compat import (
cookielib, urlunparse, urlsplit, urlencode, str, bytes, StringIO,
is_py2, chardet, json, builtin_str, basestring)
from .status_codes import codes
#: The set of HTTP status codes that indicate an automatically
#: processable redirect.
REDIRECT_STATI = (
codes.moved, # 301
codes.found, # 302
codes.other, # 303
codes.temporary_redirect, # 307
codes.permanent_redirect, # 308
)
DEFAULT_REDIRECT_LIMIT = 30
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
json_dumps = json.dumps
class RequestEncodingMixin(object):
    """Mixin providing URL-path and body encoding helpers for requests."""

    @property
    def path_url(self):
        """Build the path URL to use."""

        url = []

        p = urlsplit(self.url)

        path = p.path
        if not path:
            path = '/'

        url.append(path)

        query = p.query
        if query:
            url.append('?')
            url.append(query)

        return ''.join(url)

    @staticmethod
    def _encode_params(data):
        """Encode parameters in a piece of data.

        Will successfully encode parameters when passed as a dict or a list of
        2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
        if parameters are supplied as a dict.
        """

        # Strings/bytes and file-like objects pass through unencoded.
        if isinstance(data, (str, bytes)):
            return data
        elif hasattr(data, 'read'):
            return data
        elif hasattr(data, '__iter__'):
            result = []
            for k, vs in to_key_val_list(data):
                # Treat a scalar value as a one-element list so repeated
                # keys (k=v1&k=v2) encode uniformly; None values are dropped.
                if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
                    vs = [vs]
                for v in vs:
                    if v is not None:
                        result.append(
                            (k.encode('utf-8') if isinstance(k, str) else k,
                             v.encode('utf-8') if isinstance(v, str) else v))
            return urlencode(result, doseq=True)
        else:
            return data

    @staticmethod
    def _encode_files(files, data):
        """Build the body for a multipart/form-data request.

        Will successfully encode files when passed as a dict or a list of
        2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
        if parameters are supplied as a dict.

        """
        if (not files):
            raise ValueError("Files must be provided.")
        elif isinstance(data, basestring):
            raise ValueError("Data must not be a string.")

        new_fields = []
        fields = to_key_val_list(data or {})
        files = to_key_val_list(files or {})

        # Regular form fields come first, each value utf-8 encoded.
        for field, val in fields:
            if isinstance(val, basestring) or not hasattr(val, '__iter__'):
                val = [val]
            for v in val:
                if v is not None:
                    # Don't call str() on bytestrings: in Py3 it all goes wrong.
                    if not isinstance(v, bytes):
                        v = str(v)

                    new_fields.append(
                        (field.decode('utf-8') if isinstance(field, bytes) else field,
                         v.encode('utf-8') if isinstance(v, str) else v))

        for (k, v) in files:
            # support for explicit filename
            # Accepted forms: fileobj, (filename, fileobj),
            # (filename, fileobj, content_type),
            # (filename, fileobj, content_type, custom_headers).
            ft = None
            fh = None
            if isinstance(v, (tuple, list)):
                if len(v) == 2:
                    fn, fp = v
                elif len(v) == 3:
                    fn, fp, ft = v
                else:
                    fn, fp, ft, fh = v
            else:
                fn = guess_filename(v) or k
                fp = v
            if isinstance(fp, (str, bytes, bytearray)):
                fdata = fp
            else:
                fdata = fp.read()

            rf = RequestField(name=k, data=fdata,
                              filename=fn, headers=fh)
            rf.make_multipart(content_type=ft)
            new_fields.append(rf)

        body, content_type = encode_multipart_formdata(new_fields)

        return body, content_type
class RequestHooksMixin(object):
    """Mixin implementing registration and removal of event hooks."""

    def register_hook(self, event, hook):
        """Properly register a hook."""

        if event not in self.hooks:
            raise ValueError('Unsupported event specified, with event name "%s"' % (event))

        # Accept either a single callable or an iterable of callables;
        # non-callables inside an iterable are silently skipped.
        # NOTE(review): collections.Callable was removed in Python 3.10
        # (it lives in collections.abc); fine for the interpreters this
        # vendored copy targets.
        if isinstance(hook, collections.Callable):
            self.hooks[event].append(hook)
        elif hasattr(hook, '__iter__'):
            self.hooks[event].extend(h for h in hook if isinstance(h, collections.Callable))

    def deregister_hook(self, event, hook):
        """Deregister a previously registered hook.
        Returns True if the hook existed, False if not.
        """

        try:
            self.hooks[event].remove(hook)
            return True
        except ValueError:
            return False
class Request(RequestHooksMixin):
    """A user-created :class:`Request <Request>` object.

    Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.

    :param method: HTTP method to use.
    :param url: URL to send.
    :param headers: dictionary of headers to send.
    :param files: dictionary of {filename: fileobject} files to multipart upload.
    :param data: the body to attach to the request. If a dictionary is provided, form-encoding will take place.
    :param json: json for the body to attach to the request (if data is not specified).
    :param params: dictionary of URL parameters to append to the URL.
    :param auth: Auth handler or (user, pass) tuple.
    :param cookies: dictionary or CookieJar of cookies to attach to this request.
    :param hooks: dictionary of callback hooks, for internal usage.

    Usage::

      >>> import requests
      >>> req = requests.Request('GET', 'http://httpbin.org/get')
      >>> req.prepare()
      <PreparedRequest [GET]>

    """
    def __init__(self,
        method=None,
        url=None,
        headers=None,
        files=None,
        data=None,
        params=None,
        auth=None,
        cookies=None,
        hooks=None,
        json=None):

        # Default empty dicts for dict params.
        # (data and files actually default to empty lists here.)
        data = [] if data is None else data
        files = [] if files is None else files
        headers = {} if headers is None else headers
        params = {} if params is None else params
        hooks = {} if hooks is None else hooks

        # Start from the default per-event hook registry, then register any
        # caller-supplied hooks under their event names.
        self.hooks = default_hooks()
        for (k, v) in list(hooks.items()):
            self.register_hook(event=k, hook=v)

        self.method = method
        self.url = url
        self.headers = headers
        self.files = files
        self.data = data
        self.json = json
        self.params = params
        self.auth = auth
        self.cookies = cookies

    def __repr__(self):
        return '<Request [%s]>' % (self.method)

    def prepare(self):
        """Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
        p = PreparedRequest()
        p.prepare(
            method=self.method,
            url=self.url,
            headers=self.headers,
            files=self.files,
            data=self.data,
            json=self.json,
            params=self.params,
            auth=self.auth,
            cookies=self.cookies,
            hooks=self.hooks,
        )
        return p
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
    """The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
    containing the exact bytes that will be sent to the server.
    Generated from either a :class:`Request <Request>` object or manually.
    Usage::
        >>> import requests
        >>> req = requests.Request('GET', 'http://httpbin.org/get')
        >>> r = req.prepare()
        <PreparedRequest [GET]>
        >>> s = requests.Session()
        >>> s.send(r)
        <Response [200]>
    """
    def __init__(self):
        #: HTTP verb to send to the server.
        self.method = None
        #: HTTP URL to send the request to.
        self.url = None
        #: dictionary of HTTP headers.
        self.headers = None
        # The `CookieJar` used to create the Cookie header will be stored here
        # after prepare_cookies is called
        self._cookies = None
        #: request body to send to the server.
        self.body = None
        #: dictionary of callback hooks, for internal usage.
        self.hooks = default_hooks()
    def prepare(self, method=None, url=None, headers=None, files=None,
                data=None, params=None, auth=None, cookies=None, hooks=None,
                json=None):
        """Prepares the entire request with the given parameters.

        The prepare_* calls below are order-sensitive: cookies must be set
        before the body, and auth must run on the otherwise-finished request.
        """
        self.prepare_method(method)
        self.prepare_url(url, params)
        self.prepare_headers(headers)
        self.prepare_cookies(cookies)
        self.prepare_body(data, files, json)
        self.prepare_auth(auth, url)
        # Note that prepare_auth must be last to enable authentication schemes
        # such as OAuth to work on a fully prepared request.
        # This MUST go after prepare_auth. Authenticators could add a hook
        self.prepare_hooks(hooks)
    def __repr__(self):
        return '<PreparedRequest [%s]>' % (self.method)
    def copy(self):
        # Headers and cookies are duplicated; the body and the hooks dict are
        # shared (plain attribute assignment) with the original.
        p = PreparedRequest()
        p.method = self.method
        p.url = self.url
        p.headers = self.headers.copy() if self.headers is not None else None
        p._cookies = _copy_cookie_jar(self._cookies)
        p.body = self.body
        p.hooks = self.hooks
        return p
    def prepare_method(self, method):
        """Prepares the given HTTP method (normalised to uppercase)."""
        self.method = method
        if self.method is not None:
            self.method = self.method.upper()
    def prepare_url(self, url, params):
        """Prepares the given HTTP URL."""
        #: Accept objects that have string representations.
        #: We're unable to blindly call unicode/str functions
        #: as this will include the bytestring indicator (b'')
        #: on python 3.x.
        #: https://github.com/kennethreitz/requests/pull/2238
        if isinstance(url, bytes):
            url = url.decode('utf8')
        else:
            url = unicode(url) if is_py2 else str(url)
        # Don't do any URL preparation for non-HTTP schemes like `mailto`,
        # `data` etc to work around exceptions from `url_parse`, which
        # handles RFC 3986 only.
        if ':' in url and not url.lower().startswith('http'):
            self.url = url
            return
        # Support for unicode domain names and paths.
        try:
            scheme, auth, host, port, path, query, fragment = parse_url(url)
        except LocationParseError as e:
            raise InvalidURL(*e.args)
        if not scheme:
            raise MissingSchema("Invalid URL {0!r}: No schema supplied. "
                                "Perhaps you meant http://{0}?".format(
                                    to_native_string(url, 'utf8')))
        if not host:
            raise InvalidURL("Invalid URL %r: No host supplied" % url)
        # Only want to apply IDNA to the hostname
        try:
            host = host.encode('idna').decode('utf-8')
        except UnicodeError:
            raise InvalidURL('URL has an invalid label.')
        # Carefully reconstruct the network location
        netloc = auth or ''
        if netloc:
            netloc += '@'
        netloc += host
        if port:
            netloc += ':' + str(port)
        # Bare domains aren't valid URLs.
        if not path:
            path = '/'
        if is_py2:
            # On Python 2, urlunparse needs consistently byte-encoded parts.
            if isinstance(scheme, str):
                scheme = scheme.encode('utf-8')
            if isinstance(netloc, str):
                netloc = netloc.encode('utf-8')
            if isinstance(path, str):
                path = path.encode('utf-8')
            if isinstance(query, str):
                query = query.encode('utf-8')
            if isinstance(fragment, str):
                fragment = fragment.encode('utf-8')
        # Merge explicit `params` into any query string already in the URL.
        enc_params = self._encode_params(params)
        if enc_params:
            if query:
                query = '%s&%s' % (query, enc_params)
            else:
                query = enc_params
        url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
        self.url = url
    def prepare_headers(self, headers):
        """Prepares the given HTTP headers (always a CaseInsensitiveDict)."""
        if headers:
            # Header names are converted to the platform's native string type.
            self.headers = CaseInsensitiveDict((to_native_string(name), value) for name, value in headers.items())
        else:
            self.headers = CaseInsensitiveDict()
    def prepare_body(self, data, files, json=None):
        """Prepares the given HTTP body data."""
        # Check if file, fo, generator, iterator.
        # If not, run through normal process.
        # Nottin' on you.
        body = None
        content_type = None
        length = None
        if json is not None:
            # `json` takes effect only when `data` is falsy (see below).
            content_type = 'application/json'
            body = json_dumps(json)
        # A "stream" is any iterable that is not a plain string/sequence/dict.
        is_stream = all([
            hasattr(data, '__iter__'),
            not isinstance(data, (basestring, list, tuple, dict))
        ])
        try:
            length = super_len(data)
        except (TypeError, AttributeError, UnsupportedOperation):
            length = None
        if is_stream:
            body = data
            if files:
                raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
            if length is not None:
                self.headers['Content-Length'] = builtin_str(length)
            else:
                # Unknown length: fall back to chunked transfer encoding.
                self.headers['Transfer-Encoding'] = 'chunked'
        else:
            # Multi-part file uploads.
            if files:
                (body, content_type) = self._encode_files(files, data)
            else:
                if data and json is None:
                    body = self._encode_params(data)
                    # Raw strings and file-like data carry no implied
                    # content type; form-encoded dicts/sequences do.
                    if isinstance(data, basestring) or hasattr(data, 'read'):
                        content_type = None
                    else:
                        content_type = 'application/x-www-form-urlencoded'
            self.prepare_content_length(body)
            # Add content-type if it wasn't explicitly provided.
            if content_type and ('content-type' not in self.headers):
                self.headers['Content-Type'] = content_type
        self.body = body
    def prepare_content_length(self, body):
        # Sets the Content-Length header from the body, when determinable.
        if hasattr(body, 'seek') and hasattr(body, 'tell'):
            # Seekable file-like body: measure by seeking to the end, then
            # rewind so the body can still be sent from the start.
            body.seek(0, 2)
            self.headers['Content-Length'] = builtin_str(body.tell())
            body.seek(0, 0)
        elif body is not None:
            l = super_len(body)
            if l:
                self.headers['Content-Length'] = builtin_str(l)
            # NOTE(review): a zero-length body (e.g. '') sets no
            # Content-Length header on this path -- confirm intended.
        elif (self.method not in ('GET', 'HEAD')) and (self.headers.get('Content-Length') is None):
            # Bodyless non-GET/HEAD requests get an explicit zero length.
            self.headers['Content-Length'] = '0'
    def prepare_auth(self, auth, url=''):
        """Prepares the given HTTP auth data."""
        # If no Auth is explicitly provided, extract it from the URL first.
        if auth is None:
            url_auth = get_auth_from_url(self.url)
            auth = url_auth if any(url_auth) else None
        if auth:
            if isinstance(auth, tuple) and len(auth) == 2:
                # special-case basic HTTP auth
                auth = HTTPBasicAuth(*auth)
            # Allow auth to make its changes.
            r = auth(self)
            # Update self to reflect the auth changes.
            self.__dict__.update(r.__dict__)
            # Recompute Content-Length
            self.prepare_content_length(self.body)
    def prepare_cookies(self, cookies):
        """Prepares the given HTTP cookie data.
        This function eventually generates a ``Cookie`` header from the
        given cookies using cookielib. Due to cookielib's design, the header
        will not be regenerated if it already exists, meaning this function
        can only be called once for the life of the
        :class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls
        to ``prepare_cookies`` will have no actual effect, unless the "Cookie"
        header is removed beforehand."""
        if isinstance(cookies, cookielib.CookieJar):
            self._cookies = cookies
        else:
            self._cookies = cookiejar_from_dict(cookies)
        cookie_header = get_cookie_header(self._cookies, self)
        if cookie_header is not None:
            self.headers['Cookie'] = cookie_header
    def prepare_hooks(self, hooks):
        """Prepares the given hooks."""
        # hooks can be passed as None to the prepare method and to this
        # method. To prevent iterating over None, simply use an empty list
        # if hooks is False-y
        hooks = hooks or []
        for event in hooks:
            self.register_hook(event, hooks[event])
class Response(object):
    """The :class:`Response <Response>` object, which contains a
    server's response to an HTTP request.
    """
    # Attributes preserved across pickling (see __getstate__/__setstate__);
    # the non-picklable ``raw`` stream is deliberately excluded.
    __attrs__ = [
        '_content',
        'status_code',
        'headers',
        'url',
        'history',
        'encoding',
        'reason',
        'cookies',
        'elapsed',
        'request',
    ]
    def __init__(self):
        super(Response, self).__init__()
        # False means "body not read yet"; becomes bytes (or None) once the
        # content has been consumed via the `content` property.
        self._content = False
        self._content_consumed = False
        #: Integer Code of responded HTTP Status, e.g. 404 or 200.
        self.status_code = None
        #: Case-insensitive Dictionary of Response Headers.
        #: For example, ``headers['content-encoding']`` will return the
        #: value of a ``'Content-Encoding'`` response header.
        self.headers = CaseInsensitiveDict()
        #: File-like object representation of response (for advanced usage).
        #: Use of ``raw`` requires that ``stream=True`` be set on the request.
        # This requirement does not apply for use internally to Requests.
        self.raw = None
        #: Final URL location of Response.
        self.url = None
        #: Encoding to decode with when accessing r.text.
        self.encoding = None
        #: A list of :class:`Response <Response>` objects from
        #: the history of the Request. Any redirect responses will end
        #: up here. The list is sorted from the oldest to the most recent request.
        self.history = []
        #: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
        self.reason = None
        #: A CookieJar of Cookies the server sent back.
        self.cookies = cookiejar_from_dict({})
        #: The amount of time elapsed between sending the request
        #: and the arrival of the response (as a timedelta).
        #: This property specifically measures the time taken between sending
        #: the first byte of the request and finishing parsing the headers. It
        #: is therefore unaffected by consuming the response content or the
        #: value of the ``stream`` keyword argument.
        self.elapsed = datetime.timedelta(0)
        #: The :class:`PreparedRequest <PreparedRequest>` object to which this
        #: is a response.
        self.request = None
    def __getstate__(self):
        # Consume everything; accessing the content attribute makes
        # sure the content has been fully read.
        if not self._content_consumed:
            self.content
        return dict(
            (attr, getattr(self, attr, None))
            for attr in self.__attrs__
        )
    def __setstate__(self, state):
        for name, value in state.items():
            setattr(self, name, value)
        # pickled objects do not have .raw
        setattr(self, '_content_consumed', True)
        setattr(self, 'raw', None)
    def __repr__(self):
        return '<Response [%s]>' % (self.status_code)
    def __bool__(self):
        """Returns true if :attr:`status_code` is 'OK'."""
        return self.ok
    def __nonzero__(self):
        # Python 2 name for __bool__.
        """Returns true if :attr:`status_code` is 'OK'."""
        return self.ok
    def __iter__(self):
        """Allows you to use a response as an iterator."""
        return self.iter_content(128)
    @property
    def ok(self):
        # "OK" here means raise_for_status() does not raise, i.e. the status
        # code is below 400.
        try:
            self.raise_for_status()
        except HTTPError:
            return False
        return True
    @property
    def is_redirect(self):
        """True if this Response is a well-formed HTTP redirect that could have
        been processed automatically (by :meth:`Session.resolve_redirects`).
        """
        return ('location' in self.headers and self.status_code in REDIRECT_STATI)
    @property
    def is_permanent_redirect(self):
        """True if this Response one of the permanant versions of redirect"""
        return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect))
    @property
    def apparent_encoding(self):
        """The apparent encoding, provided by the chardet library"""
        return chardet.detect(self.content)['encoding']
    def iter_content(self, chunk_size=1, decode_unicode=False):
        """Iterates over the response data. When stream=True is set on the
        request, this avoids reading the content at once into memory for
        large responses. The chunk size is the number of bytes it should
        read into memory. This is not necessarily the length of each item
        returned as decoding can take place.
        If decode_unicode is True, content will be decoded using the best
        available encoding based on the response.
        """
        def generate():
            # Stream chunks from the raw connection; translates low-level
            # urllib3 errors into requests' exception types, and marks the
            # content as consumed once the stream is exhausted.
            try:
                # Special case for urllib3.
                try:
                    for chunk in self.raw.stream(chunk_size, decode_content=True):
                        yield chunk
                except ProtocolError as e:
                    raise ChunkedEncodingError(e)
                except DecodeError as e:
                    raise ContentDecodingError(e)
                except ReadTimeoutError as e:
                    raise ConnectionError(e)
            except AttributeError:
                # Standard file-like object.
                while True:
                    chunk = self.raw.read(chunk_size)
                    if not chunk:
                        break
                    yield chunk
            self._content_consumed = True
        # A consumed response whose _content is still a bool (never cached)
        # cannot be iterated again.
        if self._content_consumed and isinstance(self._content, bool):
            raise StreamConsumedError()
        # simulate reading small chunks of the content
        reused_chunks = iter_slices(self._content, chunk_size)
        stream_chunks = generate()
        chunks = reused_chunks if self._content_consumed else stream_chunks
        if decode_unicode:
            chunks = stream_decode_response_unicode(chunks, self)
        return chunks
    def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=None, delimiter=None):
        """Iterates over the response data, one line at a time. When
        stream=True is set on the request, this avoids reading the
        content at once into memory for large responses.
        .. note:: This method is not reentrant safe.
        """
        # `pending` carries a partial line whose terminator may fall in the
        # next chunk.
        pending = None
        for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode):
            if pending is not None:
                chunk = pending + chunk
            if delimiter:
                lines = chunk.split(delimiter)
            else:
                lines = chunk.splitlines()
            # If the chunk did not end at a line boundary, hold the last
            # fragment back until the next chunk arrives.
            if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
                pending = lines.pop()
            else:
                pending = None
            for line in lines:
                yield line
        if pending is not None:
            yield pending
    @property
    def content(self):
        """Content of the response, in bytes."""
        if self._content is False:
            # Read the contents.
            try:
                if self._content_consumed:
                    raise RuntimeError(
                        'The content for this response was already consumed')
                if self.status_code == 0:
                    self._content = None
                else:
                    self._content = bytes().join(self.iter_content(CONTENT_CHUNK_SIZE)) or bytes()
            except AttributeError:
                # self.raw was never set (e.g. request failed before a
                # connection was made); there is no content.
                self._content = None
        self._content_consumed = True
        # don't need to release the connection; that's been handled by urllib3
        # since we exhausted the data.
        return self._content
    @property
    def text(self):
        """Content of the response, in unicode.
        If Response.encoding is None, encoding will be guessed using
        ``chardet``.
        The encoding of the response content is determined based solely on HTTP
        headers, following RFC 2616 to the letter. If you can take advantage of
        non-HTTP knowledge to make a better guess at the encoding, you should
        set ``r.encoding`` appropriately before accessing this property.
        """
        # Try charset from content-type
        content = None
        encoding = self.encoding
        if not self.content:
            return str('')
        # Fallback to auto-detected encoding.
        if self.encoding is None:
            encoding = self.apparent_encoding
        # Decode unicode from given encoding.
        try:
            content = str(self.content, encoding, errors='replace')
        except (LookupError, TypeError):
            # A LookupError is raised if the encoding was not found which could
            # indicate a misspelling or similar mistake.
            #
            # A TypeError can be raised if encoding is None
            #
            # So we try blindly encoding.
            content = str(self.content, errors='replace')
        return content
    def json(self, **kwargs):
        """Returns the json-encoded content of a response, if any.
        :param \*\*kwargs: Optional arguments that ``json.loads`` takes.
        """
        if not self.encoding and len(self.content) > 3:
            # No encoding set. JSON RFC 4627 section 3 states we should expect
            # UTF-8, -16 or -32. Detect which one to use; If the detection or
            # decoding fails, fall back to `self.text` (using chardet to make
            # a best guess).
            encoding = guess_json_utf(self.content)
            if encoding is not None:
                try:
                    return json.loads(self.content.decode(encoding), **kwargs)
                except UnicodeDecodeError:
                    # Wrong UTF codec detected; usually because it's not UTF-8
                    # but some other 8-bit codec. This is an RFC violation,
                    # and the server didn't bother to tell us what codec *was*
                    # used.
                    pass
        return json.loads(self.text, **kwargs)
    @property
    def links(self):
        """Returns the parsed header links of the response, if any."""
        header = self.headers.get('link')
        # l = MultiDict()
        l = {}
        if header:
            links = parse_header_links(header)
            for link in links:
                # Prefer the 'rel' attribute as the key; fall back to 'url'.
                key = link.get('rel') or link.get('url')
                l[key] = link
        return l
    def raise_for_status(self):
        """Raises stored :class:`HTTPError`, if one occurred."""
        http_error_msg = ''
        if 400 <= self.status_code < 500:
            http_error_msg = '%s Client Error: %s' % (self.status_code, self.reason)
        elif 500 <= self.status_code < 600:
            http_error_msg = '%s Server Error: %s' % (self.status_code, self.reason)
        if http_error_msg:
            raise HTTPError(http_error_msg, response=self)
    def close(self):
        """Releases the connection back to the pool. Once this method has been
        called the underlying ``raw`` object must not be accessed again.
        *Note: Should not normally need to be called explicitly.*
        """
        return self.raw.release_conn()
| mit |
Tomographer/tomographer | test/py_gen_pickle_data.py | 1 | 4414 | #
# Generate pickled data for current tomographer version and store to data files (which are
# to be included in the git repo).
#
# These pickle files are loaded by pytest_pickle.py to make sure that data pickled by
# earlier versions of Tomographer can be successfully loaded, with full backwards
# compatibility.
#
from __future__ import print_function
import sys
import os
import os.path
import numpy as np
import logging
logging.basicConfig(level=logging.DEBUG)
import tomographer
import tomographer.mhrwtasks
import tomographer.densedm
import tomographer.multiproc
import pickle
# Version of the tomographer package being pickled. Pickle files are stored
# per-version (and per major Python version, since pickles differ between
# Python 2 and 3) so that outputs from old versions remain available for
# backwards-compatibility tests.
version = tomographer.__version__
tveri = tomographer.version.version_info
# Major Python version (2 or 3).
pyversion = sys.version_info[0]
pickledatadirroot = os.path.join(os.path.dirname(os.path.realpath(os.path.abspath(__file__))),
                                 '_pickledata-py{}'.format(pyversion))
if not os.path.isdir(pickledatadirroot):
    os.mkdir(pickledatadirroot)
# Subdirectory for the current tomographer version, e.g. _pickledata-py3/5.0.
pickledatadir = os.path.join(pickledatadirroot, version)
if not os.path.isdir(pickledatadir):
    os.mkdir(pickledatadir)
print("Pickling data to directory '{}', this is Tomographer version {} [{!r}]".format(pickledatadir, version, tveri))
# tomographer: Histogram classes
def do_histogram():
    """Pickle histogram parameter and histogram objects for this version.

    The relevant class names changed in Tomographer 5.0, so the set of
    objects pickled depends on the running version.
    """
    payload = {}
    old_api = tveri < (5,0)  # class names were renamed in Tomographer 5.0
    if old_api:
        # Tomographer < 5.0
        params = tomographer.UniformBinsHistogramParams(0.0, 1.0, 5)
        payload["UniformBinsHistogramParams"] = params
    else:
        # Tomographer >= 5.0
        params = tomographer.HistogramParams(0.0, 1.0, 5)
        payload["HistogramParams"] = params
    def fill_histogram(h, values, errors, off_chart=0):
        # Histograms with error bars take an additional `errors` array.
        if h.has_error_bars:
            h.load(values, errors, off_chart)
        else:
            h.load(values, off_chart)
    if old_api:
        # Tomographer < 5.0
        klasses = [(tomographer.UniformBinsHistogram, 'UniformBinsHistogram'),
                   (tomographer.UniformBinsRealHistogram, 'UniformBinsRealHistogram'),
                   (tomographer.UniformBinsHistogramWithErrorBars, 'UniformBinsHistogramWithErrorBars'),
                   (tomographer.AveragedSimpleHistogram, 'AveragedSimpleHistogram'),
                   (tomographer.AveragedSimpleRealHistogram, 'AveragedSimpleRealHistogram'),
                   (tomographer.AveragedErrorBarHistogram, 'AveragedErrorBarHistogram'),]
    else:
        # Tomographer >= 5.0
        klasses = [(tomographer.Histogram, 'Histogram'),
                   (tomographer.HistogramWithErrorBars, 'HistogramWithErrorBars'),]
    bin_values = np.array([10, 20, 30, 40, 50])
    bin_errors = np.array([1, 2, 3, 4, 5])
    for klass, name in klasses:
        h = klass(params)
        fill_histogram(h, bin_values, bin_errors, 28)
        payload[name] = h
    # Protocol 2 keeps the pickles loadable from Python 2 as well.
    with open(os.path.join(pickledatadir, 'histograms.pickle'), 'wb') as f:
        pickle.dump(payload, f, 2)
do_histogram()
# tomographer.densedm: LLH class
def do_densedm():
    """Pickle a tomographer.densedm.IndepMeasLLH object for this version."""
    if tveri < (5,0):
        # Pickling in tomographer.densedm was broken before Tomographer 5.0
        return
    # Following for Tomographer >= 5.0: a qubit likelihood object with two
    # projective measurement outcomes.
    dmt = tomographer.densedm.DMTypes(dim=2)
    llh = tomographer.densedm.IndepMeasLLH(dmt)
    llh.setMeas(np.array([ [1, 0, 0, 0], [0, 1, 0, 0] ]), np.array([15, 85]))
    payload = {'llh': llh}
    # save all of this stuff as a pickle (protocol 2 for Python 2 support)
    with open(os.path.join(pickledatadir, 'densedm.pickle'), 'wb') as f:
        pickle.dump(payload, f, 2)
do_densedm()
# tomographer.tomorun: Task results & reports
def do_tomorun():
    """Run a small tomorun computation and pickle its result structure."""
    # Two projective measurements along +Y / -Y on a single qubit.
    Emn = [
        # +Y
        np.array([[0.5, -0.5j],
                  [0.5j, 0.5]]),
        # -Y
        np.array([[0.5, 0.5j],
                  [-0.5j, 0.5]])
    ]
    result = tomographer.tomorun.tomorun(
        dim=2,
        Emn=Emn,
        Nm=np.array([ 423, 87 ]),
        fig_of_merit="obs-value",
        observable=np.array([[0.5, -0.5j],
                             [0.5j, 0.5]]),
        mhrw_params=tomographer.MHRWParams(
            step_size=0.1,
            n_sweep=10,
            n_run=32768,
            n_therm=500),
        hist_params=tomographer.UniformBinsHistogramParams(0.7, 0.9, 20),
        progress_fn=lambda report: print(report.getHumanReport())
    )
    # save all of this stuff as a pickle (protocol 2 for Python 2 support)
    with open(os.path.join(pickledatadir, 'tomorun.pickle'), 'wb') as f:
        pickle.dump({'result': result}, f, 2)
do_tomorun()
| mit |
openiitbombayx/edx-platform | lms/djangoapps/django_comment_client/management/commands/assign_role.py | 251 | 1144 | from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django_comment_common.models import Role
from django.contrib.auth.models import User
class Command(BaseCommand):
    """Management command that grants (or, with ``--remove``, revokes) a
    discussion-forum role for a user in a given course.

    Usage: ``assign_role <user|email> <role> <course_id>``
    NOTE: this is Python 2 code (``print`` statement, optparse-based
    ``option_list`` command API).
    """
    option_list = BaseCommand.option_list + (
        make_option('--remove',
                    action='store_true',
                    dest='remove',
                    default=False,
                    help='Remove the role instead of adding it'),
    )
    args = '<user|email> <role> <course_id>'
    help = 'Assign a discussion forum role to a user '
    def handle(self, *args, **options):
        if len(args) != 3:
            raise CommandError('Usage is assign_role {0}'.format(self.args))
        name_or_email, role, course_id = args
        # Roles are scoped per course: look up the Role row for this course.
        # Raises Role.DoesNotExist if no such role/course combination exists.
        role = Role.objects.get(name=role, course_id=course_id)
        # An '@' in the identifier means it is an email address, otherwise
        # it is treated as a username.
        if '@' in name_or_email:
            user = User.objects.get(email=name_or_email)
        else:
            user = User.objects.get(username=name_or_email)
        if options['remove']:
            user.roles.remove(role)
        else:
            user.roles.add(role)
        print 'Success!'
| agpl-3.0 |
ronniehedrick/scapeshift | client/node_modules/node-gyp/gyp/pylib/gyp/xcodeproj_file.py | 1366 | 120842 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Xcode project file generator.
This module is both an Xcode project file generator and a documentation of the
Xcode project file format. Knowledge of the project file format was gained
based on extensive experience with Xcode, and by making changes to projects in
Xcode.app and observing the resultant changes in the associated project files.
XCODE PROJECT FILES
The generator targets the file format as written by Xcode 3.2 (specifically,
3.2.6), but past experience has taught that the format has not changed
significantly in the past several years, and future versions of Xcode are able
to read older project files.
Xcode project files are "bundled": the project "file" from an end-user's
perspective is actually a directory with an ".xcodeproj" extension. The
project file from this module's perspective is actually a file inside this
directory, always named "project.pbxproj". This file contains a complete
description of the project and is all that is needed to use the xcodeproj.
Other files contained in the xcodeproj directory are simply used to store
per-user settings, such as the state of various UI elements in the Xcode
application.
The project.pbxproj file is a property list, stored in a format almost
identical to the NeXTstep property list format. The file is able to carry
Unicode data, and is encoded in UTF-8. The root element in the property list
is a dictionary that contains several properties of minimal interest, and two
properties of immense interest. The most important property is a dictionary
named "objects". The entire structure of the project is represented by the
children of this property. The objects dictionary is keyed by unique 96-bit
values represented by 24 uppercase hexadecimal characters. Each value in the
objects dictionary is itself a dictionary, describing an individual object.
Each object in the dictionary is a member of a class, which is identified by
the "isa" property of each object. A variety of classes are represented in a
project file. Objects can refer to other objects by ID, using the 24-character
hexadecimal object key. A project's objects form a tree, with a root object
of class PBXProject at the root. As an example, the PBXProject object serves
as parent to an XCConfigurationList object defining the build configurations
used in the project, a PBXGroup object serving as a container for all files
referenced in the project, and a list of target objects, each of which defines
a target in the project. There are several different types of target object,
such as PBXNativeTarget and PBXAggregateTarget. In this module, this
relationship is expressed by having each target type derive from an abstract
base named XCTarget.
The project.pbxproj file's root dictionary also contains a property, sibling to
the "objects" dictionary, named "rootObject". The value of rootObject is a
24-character object key referring to the root PBXProject object in the
objects dictionary.
In Xcode, every file used as input to a target or produced as a final product
of a target must appear somewhere in the hierarchy rooted at the PBXGroup
object referenced by the PBXProject's mainGroup property. A PBXGroup is
generally represented as a folder in the Xcode application. PBXGroups can
contain other PBXGroups as well as PBXFileReferences, which are pointers to
actual files.
Each XCTarget contains a list of build phases, represented in this module by
the abstract base XCBuildPhase. Examples of concrete XCBuildPhase derivations
are PBXSourcesBuildPhase and PBXFrameworksBuildPhase, which correspond to the
"Compile Sources" and "Link Binary With Libraries" phases displayed in the
Xcode application. Files used as input to these phases (for example, source
files in the former case and libraries and frameworks in the latter) are
represented by PBXBuildFile objects, referenced by elements of "files" lists
in XCTarget objects. Each PBXBuildFile object refers to a PBXFileReference
object as a "weak" reference: it does not "own" the PBXFileReference, which is
owned by the root object's mainGroup or a descendant group. In most cases, the
layer of indirection between an XCBuildPhase and a PBXFileReference via a
PBXBuildFile appears extraneous, but there's actually one reason for this:
file-specific compiler flags are added to the PBXBuildFile object so as to
allow a single file to be a member of multiple targets while having distinct
compiler flags for each. These flags can be modified in the Xcode application
in the "Build" tab of a File Info window.
When a project is open in the Xcode application, Xcode will rewrite it. As
such, this module is careful to adhere to the formatting used by Xcode, to
avoid insignificant changes appearing in the file when it is used in the
Xcode application. This will keep version control repositories happy, and
makes it possible to compare a project file used in Xcode to one generated by
this module to determine if any significant changes were made in the
application.
Xcode has its own way of assigning 24-character identifiers to each object,
which is not duplicated here. Because the identifier is only generated
once, when an object is created, and is then left unchanged, there is no need
to attempt to duplicate Xcode's behavior in this area. The generator is free
to select any identifier, even at random, to refer to the objects it creates,
and Xcode will retain those identifiers and use them when subsequently
rewriting the project file. However, the generator would choose new random
identifiers each time the project files are generated, leading to difficulties
comparing "used" project files to "pristine" ones produced by this module,
and causing the appearance of changes as every object identifier is changed
when updated projects are checked in to a version control repository. To
mitigate this problem, this module chooses identifiers in a more deterministic
way, by hashing a description of each object as well as its parent and ancestor
objects. This strategy should result in minimal "shift" in IDs as successive
generations of project files are produced.
THIS MODULE
This module introduces several classes, all derived from the XCObject class.
Nearly all of the "brains" are built into the XCObject class, which understands
how to create and modify objects, maintain the proper tree structure, compute
identifiers, and print objects. For the most part, classes derived from
XCObject need only provide a _schema class object, a dictionary that
expresses what properties objects of the class may contain.
Given this structure, it's possible to build a minimal project file by creating
objects of the appropriate types and making the proper connections:
config_list = XCConfigurationList()
group = PBXGroup()
project = PBXProject({'buildConfigurationList': config_list,
'mainGroup': group})
With the project object set up, it can be added to an XCProjectFile object.
XCProjectFile is a pseudo-class in the sense that it is a concrete XCObject
subclass that does not actually correspond to a class type found in a project
file. Rather, it is used to represent the project file's root dictionary.
Printing an XCProjectFile will print the entire project file, including the
full "objects" dictionary.
project_file = XCProjectFile({'rootObject': project})
project_file.ComputeIDs()
project_file.Print()
Xcode project files are always encoded in UTF-8. This module will accept
strings of either the str class or the unicode class. Strings of class str
are assumed to already be encoded in UTF-8. Obviously, if you're just using
ASCII, you won't encounter difficulties because ASCII is a UTF-8 subset.
Strings of class unicode are handled properly and encoded in UTF-8 when
a project file is output.
"""
import gyp.common
import posixpath
import re
import struct
import sys
# hashlib is supplied as of Python 2.5 as the replacement interface for sha
# and other secure hashes. In 2.6, sha is deprecated. Import hashlib if
# available, avoiding a deprecation warning under 2.6. Import sha otherwise,
# preserving 2.4 compatibility.
try:
import hashlib
_new_sha1 = hashlib.sha1
except ImportError:
import sha
_new_sha1 = sha.new
# See XCObject._EncodeString. This pattern is used to determine when a string
# can be printed unquoted. Strings that match this pattern may be printed
# unquoted. Strings that do not match must be quoted and may be further
# transformed to be properly encoded. Note that this expression matches the
# characters listed with "+", for 1 or more occurrences: if a string is empty,
# it must not match this pattern, because it needs to be encoded as "".
_unquoted = re.compile('^[A-Za-z0-9$./_]+$')
# Strings that match this pattern are quoted regardless of what _unquoted says.
# Oddly, Xcode will quote any string with a run of three or more underscores.
_quoted = re.compile('___')
# This pattern should match any character that needs to be escaped by
# XCObject._EncodeString. See that function.
_escaped = re.compile('[\\\\"]|[\x00-\x1f]')
# Used by SourceTreeAndPathFromPath: matches '$(VAR)' optionally followed by
# '/path'. Group 1 is the variable name, group 3 the trailing path (or None).
_path_leading_variable = re.compile(r'^\$\((.*?)\)(/(.*))?$')
def SourceTreeAndPathFromPath(input_path):
  """Split input_path into a (source_tree, output_path) tuple.

  Examples:
    input_path         (source_tree, output_path)
    '$(VAR)/path'      ('VAR', 'path')
    '$(VAR)'           ('VAR', None)
    'path'             (None, 'path')
  """
  match = re.match(r'^\$\((.*?)\)(/(.*))?$', input_path)
  if not match:
    # No leading $(VAR): the whole input is the path.
    return (None, input_path)
  # group(1) is the variable name; group(3) is the part after the slash,
  # which is None when only '$(VAR)' was given.
  return (match.group(1), match.group(3))
def ConvertVariablesToShellSyntax(input_string):
  """Rewrite every Xcode-style $(VAR) reference as shell-style ${VAR}."""
  return re.sub(r'\$\((.*?)\)', lambda m: '${%s}' % m.group(1), input_string)
class XCObject(object):
  """The abstract base of all class types used in Xcode project files.
  Class variables:
    _schema: A dictionary defining the properties of this class.  The keys to
             _schema are string property keys as used in project files.  Values
             are a list of four or five elements:
             [ is_list, property_type, is_strong, is_required, default ]
             is_list: True if the property described is a list, as opposed
                      to a single element.
             property_type: The type to use as the value of the property,
                            or if is_list is True, the type to use for each
                            element of the value's list.  property_type must
                            be an XCObject subclass, or one of the built-in
                            types str, int, or dict.
             is_strong: If property_type is an XCObject subclass, is_strong
                        is True to assert that this class "owns," or serves
                        as parent, to the property value (or, if is_list is
                        True, values).  is_strong must be False if
                        property_type is not an XCObject subclass.
             is_required: True if the property is required for the class.
                          Note that is_required being True does not preclude
                          an empty string ("", in the case of property_type
                          str) or list ([], in the case of is_list True) from
                          being set for the property.
             default: Optional.  If is_required is True, default may be set
                      to provide a default value for objects that do not supply
                      their own value.  If is_required is True and default
                      is not provided, users of the class must supply their own
                      value for the property.
             Note that although the values of the array are expressed in
             boolean terms, subclasses provide values as integers to conserve
             horizontal space.
    _should_print_single_line: False in XCObject.  Subclasses whose objects
                               should be written to the project file in the
                               alternate single-line format, such as
                               PBXFileReference and PBXBuildFile, should
                               set this to True.
    _encode_transforms: Used by _EncodeString to encode unprintable characters.
                        The index into this list is the ordinal of the
                        character to transform; each value is a string
                        used to represent the character in the output.  XCObject
                        provides an _encode_transforms list suitable for most
                        XCObject subclasses.
    _alternate_encode_transforms: Provided for subclasses that wish to use
                                  the alternate encoding rules.  Xcode seems
                                  to use these rules when printing objects in
                                  single-line format.  Subclasses that desire
                                  this behavior should set _encode_transforms
                                  to _alternate_encode_transforms.
    _hashables: A list of XCObject subclasses that can be hashed by ComputeIDs
                to construct this object's ID.  Most classes that need custom
                hashing behavior should do it by overriding Hashables,
                but in some cases an object's parent may wish to push a
                hashable value into its child, and it can do so by appending
                to _hashables.
  Attributes:
    id: The object's identifier, a 24-character uppercase hexadecimal string.
        Usually, objects being created should not set id until the entire
        project file structure is built.  At that point, UpdateIDs() should
        be called on the root object to assign deterministic values for id to
        each object in the tree.
    parent: The object's parent.  This is set by a parent XCObject when a child
            object is added to it.
    _properties: The object's property dictionary.  An object's properties are
                 described by its class' _schema variable.
  """
  _schema = {}
  _should_print_single_line = False
  # See _EncodeString.  Pre-populate the table with a "\U%04x" Unicode-style
  # escape for every ASCII control character (0 through 31); the handful of
  # characters below are then overwritten with their shorter conventional
  # C-style escapes.
  _encode_transforms = []
  i = 0
  while i < ord(' '):
    _encode_transforms.append('\\U%04x' % i)
    i = i + 1
  _encode_transforms[7] = '\\a'
  _encode_transforms[8] = '\\b'
  _encode_transforms[9] = '\\t'
  _encode_transforms[10] = '\\n'
  _encode_transforms[11] = '\\v'
  _encode_transforms[12] = '\\f'
  # Carriage return (13) is deliberately rendered as "\n", making it
  # indistinguishable from newline (10) in the output.  See _EncodeString.
  _encode_transforms[13] = '\\n'
  # The alternate table (used for single-line objects) passes HT (9), NL (10),
  # and VT (11) through unescaped.
  # NOTE(review): _EncodeString's comment says CR (13) is passed through
  # unescaped in the single-line format, but this table leaves 13 as "\n" and
  # instead passes VT (11) through -- confirm against actual Xcode output.
  _alternate_encode_transforms = list(_encode_transforms)
  _alternate_encode_transforms[9] = chr(9)
  _alternate_encode_transforms[10] = chr(10)
  _alternate_encode_transforms[11] = chr(11)
  def __init__(self, properties=None, id=None, parent=None):
    """Initializes the object: applies schema defaults first, then merges in
    the supplied properties (which may override the defaults).

    Args:
      properties: Optional dict of property values conforming to _schema.
      id: Optional identifier; usually left unset until ComputeIDs runs.
      parent: Optional owning XCObject.
    """
    self.id = id
    self.parent = parent
    self._properties = {}
    self._hashables = []
    self._SetDefaultsFromSchema()
    self.UpdateProperties(properties)
  def __repr__(self):
    """Returns a debugging representation; objects that don't implement Name
    fall back to a nameless '<Class at 0x...>' form."""
    try:
      name = self.Name()
    except NotImplementedError:
      return '<%s at 0x%x>' % (self.__class__.__name__, id(self))
    return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
  def Copy(self):
    """Make a copy of this object.
    The new object will have its own copy of lists and dicts.  Any XCObject
    objects owned by this object (marked "strong") will be copied in the
    new object, even those found in lists.  If this object has any weak
    references to other XCObjects, the same references are added to the new
    object without making a copy.
    """
    that = self.__class__(id=self.id, parent=self.parent)
    for key, value in self._properties.iteritems():
      is_strong = self._schema[key][2]
      if isinstance(value, XCObject):
        if is_strong:
          new_value = value.Copy()
          new_value.parent = that
          that._properties[key] = new_value
        else:
          that._properties[key] = value
      elif isinstance(value, str) or isinstance(value, unicode) or \
           isinstance(value, int):
        # Immutable scalar values are shared; no copy is needed.
        that._properties[key] = value
      elif isinstance(value, list):
        if is_strong:
          # If is_strong is True, each element is an XCObject, so it's safe to
          # call Copy.
          that._properties[key] = []
          for item in value:
            new_item = item.Copy()
            new_item.parent = that
            that._properties[key].append(new_item)
        else:
          # Weak list: shallow-copy the list itself but share the elements.
          that._properties[key] = value[:]
      elif isinstance(value, dict):
        # dicts are never strong.
        if is_strong:
          raise TypeError('Strong dict for key ' + key + ' in ' + \
                          self.__class__.__name__)
        else:
          that._properties[key] = value.copy()
      else:
        raise TypeError('Unexpected type ' + value.__class__.__name__ + \
                        ' for key ' + key + ' in ' + self.__class__.__name__)
    return that
  def Name(self):
    """Return the name corresponding to an object.
    Not all objects necessarily need to be nameable, and not all that do have
    a "name" property.  Override as needed.
    """
    # If the schema indicates that "name" is required, try to access the
    # property even if it doesn't exist.  This will result in a KeyError
    # being raised for the property that should be present, which seems more
    # appropriate than NotImplementedError in this case.
    if 'name' in self._properties or \
        ('name' in self._schema and self._schema['name'][3]):
      return self._properties['name']
    raise NotImplementedError(self.__class__.__name__ + ' must implement Name')
  def Comment(self):
    """Return a comment string for the object.
    Most objects just use their name as the comment, but PBXProject uses
    different values.
    The returned comment is not escaped and does not have any comment marker
    strings applied to it.
    """
    return self.Name()
  def Hashables(self):
    """Returns the list of values hashed by ComputeIDs to form this object's
    ID: the class name, the object's name (when it has one), plus anything a
    parent pushed into _hashables."""
    hashables = [self.__class__.__name__]
    name = self.Name()
    if name != None:
      hashables.append(name)
    hashables.extend(self._hashables)
    return hashables
  def HashablesForChild(self):
    """Returns an alternate hashable list to seed children's IDs with, or
    None to let children inherit this object's own hash.  Overridden by
    subclasses (e.g. PBXGroup) that must avoid circular hashing."""
    return None
  def ComputeIDs(self, recursive=True, overwrite=True, seed_hash=None):
    """Set "id" properties deterministically.
    An object's "id" property is set based on a hash of its class type and
    name, as well as the class type and name of all ancestor objects.  As
    such, it is only advisable to call ComputeIDs once an entire project file
    tree is built.
    If recursive is True, recurse into all descendant objects and update their
    hashes.
    If overwrite is True, any existing value set in the "id" property will be
    replaced.
    """
    def _HashUpdate(hash, data):
      """Update hash with data's length and contents.
      If the hash were updated only with the value of data, it would be
      possible for clowns to induce collisions by manipulating the names of
      their objects.  By adding the length, it's exceedingly less likely that
      ID collisions will be encountered, intentionally or not.
      """
      hash.update(struct.pack('>i', len(data)))
      hash.update(data)
    if seed_hash is None:
      seed_hash = _new_sha1()
    # Work on a copy so the caller's seed_hash is never mutated.
    hash = seed_hash.copy()
    hashables = self.Hashables()
    assert len(hashables) > 0
    for hashable in hashables:
      _HashUpdate(hash, hashable)
    if recursive:
      hashables_for_child = self.HashablesForChild()
      if hashables_for_child is None:
        child_hash = hash
      else:
        # Seed children from a separate hash built from the alternate
        # hashables rather than from this object's own running hash.
        assert len(hashables_for_child) > 0
        child_hash = seed_hash.copy()
        for hashable in hashables_for_child:
          _HashUpdate(child_hash, hashable)
      for child in self.Children():
        child.ComputeIDs(recursive, overwrite, child_hash)
    if overwrite or self.id is None:
      # Xcode IDs are only 96 bits (24 hex characters), but a SHA-1 digest
      # is 160 bits.  Instead of throwing out 64 bits of the digest, xor them
      # into the portion that gets used.
      assert hash.digest_size % 4 == 0
      digest_int_count = hash.digest_size / 4
      digest_ints = struct.unpack('>' + 'I' * digest_int_count, hash.digest())
      id_ints = [0, 0, 0]
      for index in xrange(0, digest_int_count):
        id_ints[index % 3] ^= digest_ints[index]
      self.id = '%08X%08X%08X' % tuple(id_ints)
  def EnsureNoIDCollisions(self):
    """Verifies that no two objects have the same ID.  Checks all descendants.

    Raises:
      KeyError: if two descendants share an ID.
    """
    ids = {}
    descendants = self.Descendants()
    for descendant in descendants:
      if descendant.id in ids:
        other = ids[descendant.id]
        raise KeyError(
              'Duplicate ID %s, objects "%s" and "%s" in "%s"' % \
              (descendant.id, str(descendant._properties),
               str(other._properties), self._properties['rootObject'].Name()))
      ids[descendant.id] = descendant
  def Children(self):
    """Returns a list of all of this object's owned (strong) children."""
    children = []
    for property, attributes in self._schema.iteritems():
      (is_list, property_type, is_strong) = attributes[0:3]
      if is_strong and property in self._properties:
        if not is_list:
          children.append(self._properties[property])
        else:
          children.extend(self._properties[property])
    return children
  def Descendants(self):
    """Returns a list of all of this object's descendants, including this
    object.
    """
    children = self.Children()
    descendants = [self]
    for child in children:
      descendants.extend(child.Descendants())
    return descendants
  def PBXProjectAncestor(self):
    """Walks up the parent chain to find the owning PBXProject, or None."""
    # The base case for recursion is defined at PBXProject.PBXProjectAncestor.
    if self.parent:
      return self.parent.PBXProjectAncestor()
    return None
  def _EncodeComment(self, comment):
    """Encodes a comment to be placed in the project file output, mimicking
    Xcode behavior.
    """
    # This mimics Xcode behavior by wrapping the comment in "/*" and "*/".  If
    # the string already contains a "*/", it is turned into "(*)/".  This keeps
    # the file writer from outputting something that would be treated as the
    # end of a comment in the middle of something intended to be entirely a
    # comment.
    return '/* ' + comment.replace('*/', '(*)/') + ' */'
  def _EncodeTransform(self, match):
    # This function works closely with _EncodeString.  It will only be called
    # by re.sub with match.group(0) containing a character matched by the
    # _escaped expression.
    char = match.group(0)
    # Backslashes (\) and quotation marks (") are always replaced with a
    # backslash-escaped version of the same.  Everything else gets its
    # replacement from the class' _encode_transforms array.
    if char == '\\':
      return '\\\\'
    if char == '"':
      return '\\"'
    return self._encode_transforms[ord(char)]
  def _EncodeString(self, value):
    """Encodes a string to be placed in the project file output, mimicking
    Xcode behavior.
    """
    # Use quotation marks when any character outside of the range A-Z, a-z, 0-9,
    # $ (dollar sign), . (period), and _ (underscore) is present.  Also use
    # quotation marks to represent empty strings.
    #
    # Escape " (double-quote) and \ (backslash) by preceding them with a
    # backslash.
    #
    # Some characters below the printable ASCII range are encoded specially:
    #     7 ^G BEL is encoded as "\a"
    #     8 ^H BS  is encoded as "\b"
    #    11 ^K VT  is encoded as "\v"
    #    12 ^L NP  is encoded as "\f"
    #   127 ^? DEL is passed through as-is without escaping
    #  - In PBXFileReference and PBXBuildFile objects:
    #     9 ^I HT  is passed through as-is without escaping
    #    10 ^J NL  is passed through as-is without escaping
    #    13 ^M CR  is passed through as-is without escaping
    #  - In other objects:
    #     9 ^I HT  is encoded as "\t"
    #    10 ^J NL  is encoded as "\n"
    #    13 ^M CR  is encoded as "\n" rendering it indistinguishable from
    #              10 ^J NL
    # All other characters within the ASCII control character range (0 through
    # 31 inclusive) are encoded as "\U001f" referring to the Unicode code point
    # in hexadecimal.  For example, character 14 (^N SO) is encoded as "\U000e".
    # Characters above the ASCII range are passed through to the output encoded
    # as UTF-8 without any escaping.  These mappings are contained in the
    # class' _encode_transforms list.
    if _unquoted.search(value) and not _quoted.search(value):
      return value
    return '"' + _escaped.sub(self._EncodeTransform, value) + '"'
  def _XCPrint(self, file, tabs, line):
    """Writes line to file, preceded by tabs tab characters."""
    file.write('\t' * tabs + line)
  def _XCPrintableValue(self, tabs, value, flatten_list=False):
    """Returns a representation of value that may be printed in a project file,
    mimicking Xcode's behavior.
    _XCPrintableValue can handle str and int values, XCObjects (which are
    made printable by returning their id property), and list and dict objects
    composed of any of the above types.  When printing a list or dict, and
    _should_print_single_line is False, the tabs parameter is used to determine
    how much to indent the lines corresponding to the items in the list or
    dict.
    If flatten_list is True, single-element lists will be transformed into
    strings.
    """
    printable = ''
    comment = None
    if self._should_print_single_line:
      sep = ' '
      element_tabs = ''
      end_tabs = ''
    else:
      sep = '\n'
      element_tabs = '\t' * (tabs + 1)
      end_tabs = '\t' * tabs
    if isinstance(value, XCObject):
      # XCObjects print as their 24-character ID; the comment (typically the
      # object's name) is appended at the bottom of this function.
      printable += value.id
      comment = value.Comment()
    elif isinstance(value, str):
      printable += self._EncodeString(value)
    elif isinstance(value, unicode):
      # unicode values are emitted as UTF-8.
      printable += self._EncodeString(value.encode('utf-8'))
    elif isinstance(value, int):
      printable += str(value)
    elif isinstance(value, list):
      if flatten_list and len(value) <= 1:
        # A zero- or one-element list is flattened to a bare (encoded) string.
        if len(value) == 0:
          printable += self._EncodeString('')
        else:
          printable += self._EncodeString(value[0])
      else:
        printable = '(' + sep
        for item in value:
          printable += element_tabs + \
                       self._XCPrintableValue(tabs + 1, item, flatten_list) + \
                       ',' + sep
        printable += end_tabs + ')'
    elif isinstance(value, dict):
      printable = '{' + sep
      for item_key, item_value in sorted(value.iteritems()):
        printable += element_tabs + \
            self._XCPrintableValue(tabs + 1, item_key, flatten_list) + ' = ' + \
            self._XCPrintableValue(tabs + 1, item_value, flatten_list) + ';' + \
            sep
      printable += end_tabs + '}'
    else:
      raise TypeError("Can't make " + value.__class__.__name__ + ' printable')
    if comment != None:
      printable += ' ' + self._EncodeComment(comment)
    return printable
  def _XCKVPrint(self, file, tabs, key, value):
    """Prints a key and value, members of an XCObject's _properties dictionary,
    to file.
    tabs is an int identifying the indentation level.  If the class'
    _should_print_single_line variable is True, tabs is ignored and the
    key-value pair will be followed by a space instead of a newline.
    """
    if self._should_print_single_line:
      printable = ''
      after_kv = ' '
    else:
      printable = '\t' * tabs
      after_kv = '\n'
    # Xcode usually prints remoteGlobalIDString values in PBXContainerItemProxy
    # objects without comments.  Sometimes it prints them with comments, but
    # the majority of the time, it doesn't.  To avoid unnecessary changes to
    # the project file after Xcode opens it, don't write comments for
    # remoteGlobalIDString.  This is a sucky hack and it would certainly be
    # cleaner to extend the schema to indicate whether or not a comment should
    # be printed, but since this is the only case where the problem occurs and
    # Xcode itself can't seem to make up its mind, the hack will suffice.
    #
    # Also see PBXContainerItemProxy._schema['remoteGlobalIDString'].
    if key == 'remoteGlobalIDString' and isinstance(self,
                                                    PBXContainerItemProxy):
      value_to_print = value.id
    else:
      value_to_print = value
    # PBXBuildFile's settings property is represented in the output as a dict,
    # but a hack here has it represented as a string.  Arrange to strip off the
    # quotes so that it shows up in the output as expected.
    if key == 'settings' and isinstance(self, PBXBuildFile):
      strip_value_quotes = True
    else:
      strip_value_quotes = False
    # In another one-off, let's set flatten_list on buildSettings properties
    # of XCBuildConfiguration objects, because that's how Xcode treats them.
    if key == 'buildSettings' and isinstance(self, XCBuildConfiguration):
      flatten_list = True
    else:
      flatten_list = False
    try:
      printable_key = self._XCPrintableValue(tabs, key, flatten_list)
      printable_value = self._XCPrintableValue(tabs, value_to_print,
                                               flatten_list)
      if strip_value_quotes and len(printable_value) > 1 and \
         printable_value[0] == '"' and printable_value[-1] == '"':
        printable_value = printable_value[1:-1]
      printable += printable_key + ' = ' + printable_value + ';' + after_kv
    except TypeError, e:
      gyp.common.ExceptionAppend(e,
                                 'while printing key "%s"' % key)
      raise
    # tabs=0 here because |printable| already carries its own indentation.
    self._XCPrint(file, 0, printable)
  def Print(self, file=sys.stdout):
    """Prints a representation of this object to file, adhering to Xcode output
    formatting.
    """
    self.VerifyHasRequiredProperties()
    if self._should_print_single_line:
      # When printing an object in a single line, Xcode doesn't put any space
      # between the beginning of a dictionary (or presumably a list) and the
      # first contained item, so you wind up with snippets like
      #   ...CDEF = {isa = PBXFileReference; fileRef = 0123...
      # If it were me, I would have put a space in there after the opening
      # curly, but I guess this is just another one of those inconsistencies
      # between how Xcode prints PBXFileReference and PBXBuildFile objects as
      # compared to other objects.  Mimic Xcode's behavior here by using an
      # empty string for sep.
      sep = ''
      end_tabs = 0
    else:
      sep = '\n'
      end_tabs = 2
    # Start the object.  For example, '\t\tPBXProject = {\n'.
    self._XCPrint(file, 2, self._XCPrintableValue(2, self) + ' = {' + sep)
    # "isa" isn't in the _properties dictionary, it's an intrinsic property
    # of the class which the object belongs to.  Xcode always outputs "isa"
    # as the first element of an object dictionary.
    self._XCKVPrint(file, 3, 'isa', self.__class__.__name__)
    # The remaining elements of an object dictionary are sorted alphabetically.
    for property, value in sorted(self._properties.iteritems()):
      self._XCKVPrint(file, 3, property, value)
    # End the object.
    self._XCPrint(file, end_tabs, '};\n')
  def UpdateProperties(self, properties, do_copy=False):
    """Merge the supplied properties into the _properties dictionary.
    The input properties must adhere to the class schema or a KeyError or
    TypeError exception will be raised.  If adding an object of an XCObject
    subclass and the schema indicates a strong relationship, the object's
    parent will be set to this object.
    If do_copy is True, then lists, dicts, strong-owned XCObjects, and
    strong-owned XCObjects in lists will be copied instead of having their
    references added.
    """
    if properties is None:
      return
    for property, value in properties.iteritems():
      # Make sure the property is in the schema.
      if not property in self._schema:
        raise KeyError(property + ' not in ' + self.__class__.__name__)
      # Make sure the property conforms to the schema.
      (is_list, property_type, is_strong) = self._schema[property][0:3]
      if is_list:
        if value.__class__ != list:
          raise TypeError(
                property + ' of ' + self.__class__.__name__ + \
                ' must be list, not ' + value.__class__.__name__)
        for item in value:
          if not isinstance(item, property_type) and \
             not (item.__class__ == unicode and property_type == str):
            # Accept unicode where str is specified.  str is treated as
            # UTF-8-encoded.
            raise TypeError(
                  'item of ' + property + ' of ' + self.__class__.__name__ + \
                  ' must be ' + property_type.__name__ + ', not ' + \
                  item.__class__.__name__)
      elif not isinstance(value, property_type) and \
           not (value.__class__ == unicode and property_type == str):
        # Accept unicode where str is specified.  str is treated as
        # UTF-8-encoded.
        raise TypeError(
              property + ' of ' + self.__class__.__name__ + ' must be ' + \
              property_type.__name__ + ', not ' + value.__class__.__name__)
      # Checks passed, perform the assignment.
      if do_copy:
        if isinstance(value, XCObject):
          if is_strong:
            self._properties[property] = value.Copy()
          else:
            self._properties[property] = value
        elif isinstance(value, str) or isinstance(value, unicode) or \
             isinstance(value, int):
          self._properties[property] = value
        elif isinstance(value, list):
          if is_strong:
            # If is_strong is True, each element is an XCObject, so it's safe
            # to call Copy.
            self._properties[property] = []
            for item in value:
              self._properties[property].append(item.Copy())
          else:
            self._properties[property] = value[:]
        elif isinstance(value, dict):
          self._properties[property] = value.copy()
        else:
          raise TypeError("Don't know how to copy a " + \
                          value.__class__.__name__ + ' object for ' + \
                          property + ' in ' + self.__class__.__name__)
      else:
        self._properties[property] = value
      # Set up the child's back-reference to this object.  Don't use |value|
      # any more because it may not be right if do_copy is true.
      if is_strong:
        if not is_list:
          self._properties[property].parent = self
        else:
          for item in self._properties[property]:
            item.parent = self
  def HasProperty(self, key):
    """Returns True if key is currently set in _properties."""
    return key in self._properties
  def GetProperty(self, key):
    """Returns the value of property key; raises KeyError if unset."""
    return self._properties[key]
  def SetProperty(self, key, value):
    """Sets property key to value, with full schema validation."""
    self.UpdateProperties({key: value})
  def DelProperty(self, key):
    """Removes property key if present; a no-op when it is not set."""
    if key in self._properties:
      del self._properties[key]
  def AppendProperty(self, key, value):
    """Appends value to the list property key, validating against the schema
    and establishing the parent link for strong properties."""
    # TODO(mark): Support ExtendProperty too (and make this call that)?
    # Schema validation.
    if not key in self._schema:
      raise KeyError(key + ' not in ' + self.__class__.__name__)
    (is_list, property_type, is_strong) = self._schema[key][0:3]
    if not is_list:
      raise TypeError(key + ' of ' + self.__class__.__name__ + ' must be list')
    if not isinstance(value, property_type):
      raise TypeError('item of ' + key + ' of ' + self.__class__.__name__ + \
                      ' must be ' + property_type.__name__ + ', not ' + \
                      value.__class__.__name__)
    # If the property doesn't exist yet, create a new empty list to receive the
    # item.
    if not key in self._properties:
      self._properties[key] = []
    # Set up the ownership link.
    if is_strong:
      value.parent = self
    # Store the item.
    self._properties[key].append(value)
  def VerifyHasRequiredProperties(self):
    """Ensure that all properties identified as required by the schema are
    set.

    Raises:
      KeyError: if a required property is missing.
    """
    # TODO(mark): A stronger verification mechanism is needed.  Some
    # subclasses need to perform validation beyond what the schema can enforce.
    for property, attributes in self._schema.iteritems():
      (is_list, property_type, is_strong, is_required) = attributes[0:4]
      if is_required and not property in self._properties:
        raise KeyError(self.__class__.__name__ + ' requires ' + property)
  def _SetDefaultsFromSchema(self):
    """Assign object default values according to the schema.  This will not
    overwrite properties that have already been set."""
    defaults = {}
    for property, attributes in self._schema.iteritems():
      (is_list, property_type, is_strong, is_required) = attributes[0:4]
      # Only required properties with a fifth (default) schema element have a
      # default to apply.
      if is_required and len(attributes) >= 5 and \
         not property in self._properties:
        default = attributes[4]
        defaults[property] = default
    if len(defaults) > 0:
      # Use do_copy=True so that each new object gets its own copy of strong
      # objects, lists, and dicts.
      self.UpdateProperties(defaults, do_copy=True)
class XCHierarchicalElement(XCObject):
  """Abstract base for PBXGroup and PBXFileReference.  Not represented in a
  project file."""
  # TODO(mark): Do name and path belong here?  Probably so.
  # If path is set and name is not, name may have a default value.  Name will
  # be set to the basename of path, if the basename of path is different from
  # the full value of path.  If path is already just a leaf name, name will
  # not be set.
  _schema = XCObject._schema.copy()
  _schema.update({
    'comments': [0, str, 0, 0],
    'fileEncoding': [0, str, 0, 0],
    'includeInIndex': [0, int, 0, 0],
    'indentWidth': [0, int, 0, 0],
    'lineEnding': [0, int, 0, 0],
    'sourceTree': [0, str, 0, 1, '<group>'],
    'tabWidth': [0, int, 0, 0],
    'usesTabs': [0, int, 0, 0],
    'wrapsLines': [0, int, 0, 0],
  })
  def __init__(self, properties=None, id=None, parent=None):
    """Initializes the element, then normalizes name/path/sourceTree: derives
    a default name from path, and splits a leading "$(VAR)" off of path into
    the sourceTree property."""
    # super
    XCObject.__init__(self, properties, id, parent)
    if 'path' in self._properties and not 'name' in self._properties:
      # Default the name to the basename of path, but only when the basename
      # differs from the full path (i.e. path isn't already a leaf name).
      path = self._properties['path']
      name = posixpath.basename(path)
      if name != '' and path != name:
        self.SetProperty('name', name)
    if 'path' in self._properties and \
       (not 'sourceTree' in self._properties or \
        self._properties['sourceTree'] == '<group>'):
      # If the pathname begins with an Xcode variable like "$(SDKROOT)/", take
      # the variable out and make the path be relative to that variable by
      # assigning the variable name as the sourceTree.
      (source_tree, path) = SourceTreeAndPathFromPath(self._properties['path'])
      if source_tree != None:
        self._properties['sourceTree'] = source_tree
      if path != None:
        self._properties['path'] = path
      if source_tree != None and path is None and \
         not 'name' in self._properties:
        # The path was of the form "$(SDKROOT)" with no path following it.
        # This object is now relative to that variable, so it has no path
        # attribute of its own.  It does, however, keep a name.
        del self._properties['path']
        self._properties['name'] = source_tree
  def Name(self):
    """Returns the element's name, falling back to its path; None for the
    root PBXGroup, which has neither."""
    if 'name' in self._properties:
      return self._properties['name']
    elif 'path' in self._properties:
      return self._properties['path']
    else:
      # This happens in the case of the root PBXGroup.
      return None
  def Hashables(self):
    """Custom hashables for XCHierarchicalElements.
    XCHierarchicalElements are special.  Generally, their hashes shouldn't
    change if the paths don't change.  The normal XCObject implementation of
    Hashables adds a hashable for each object, which means that if
    the hierarchical structure changes (possibly due to changes caused when
    TakeOverOnlyChild runs and encounters slight changes in the hierarchy),
    the hashes will change.  For example, if a project file initially contains
    a/b/f1 and a/b becomes collapsed into a/b, f1 will have a single parent
    a/b.  If someone later adds a/f2 to the project file, a/b can no longer be
    collapsed, and f1 winds up with parent b and grandparent a.  That would
    be sufficient to change f1's hash.
    To counteract this problem, hashables for all XCHierarchicalElements except
    for the main group (which has neither a name nor a path) are taken to be
    just the set of path components.  Because hashables are inherited from
    parents, this provides assurance that a/b/f1 has the same set of hashables
    whether its parent is b or a/b.
    The main group is a special case.  As it is permitted to have no name or
    path, it is permitted to use the standard XCObject hash mechanism.  This
    is not considered a problem because there can be only one main group.
    """
    if self == self.PBXProjectAncestor()._properties['mainGroup']:
      # super
      return XCObject.Hashables(self)
    hashables = []
    # Put the name in first, ensuring that if TakeOverOnlyChild collapses
    # children into a top-level group like "Source", the name always goes
    # into the list of hashables without interfering with path components.
    if 'name' in self._properties:
      # Make it less likely for people to manipulate hashes by following the
      # pattern of always pushing an object type value onto the list first.
      hashables.append(self.__class__.__name__ + '.name')
      hashables.append(self._properties['name'])
    # NOTE: This still has the problem that if an absolute path is encountered,
    # including paths with a sourceTree, they'll still inherit their parents'
    # hashables, even though the paths aren't relative to their parents.  This
    # is not expected to be much of a problem in practice.
    path = self.PathFromSourceTreeAndPath()
    if path != None:
      components = path.split(posixpath.sep)
      for component in components:
        hashables.append(self.__class__.__name__ + '.path')
        hashables.append(component)
    hashables.extend(self._hashables)
    return hashables
  def Compare(self, other):
    """Compares this element to other for sorting, Python 2 cmp-style
    (negative, zero, or positive).  Groups sort before files; elements of
    the same rank compare by name.

    Raises:
      KeyError: if self or other is not a PBXFileReference, PBXGroup, or
          PBXVariantGroup.
    """
    # Allow comparison of these types.  PBXGroup has the highest sort rank;
    # PBXVariantGroup is treated as equal to PBXFileReference.
    valid_class_types = {
      PBXFileReference: 'file',
      PBXGroup: 'group',
      PBXVariantGroup: 'file',
    }
    self_type = valid_class_types[self.__class__]
    other_type = valid_class_types[other.__class__]
    if self_type == other_type:
      # If the two objects are of the same sort rank, compare their names.
      return cmp(self.Name(), other.Name())
    # Otherwise, sort groups before everything else.
    if self_type == 'group':
      return -1
    return 1
  def CompareRootGroup(self, other):
    """cmp-style comparison for direct children of a PBXProject's mainGroup,
    placing well-known group names in a fixed order ahead of everything
    else."""
    # This function should be used only to compare direct children of the
    # containing PBXProject's mainGroup.  These groups should appear in the
    # listed order.
    # TODO(mark): "Build" is used by gyp.generator.xcode, perhaps the
    # generator should have a way of influencing this list rather than having
    # to hardcode for the generator here.
    order = ['Source', 'Intermediates', 'Projects', 'Frameworks', 'Products',
             'Build']
    # If the groups aren't in the listed order, do a name comparison.
    # Otherwise, groups in the listed order should come before those that
    # aren't.
    self_name = self.Name()
    other_name = other.Name()
    self_in = isinstance(self, PBXGroup) and self_name in order
    # NOTE(review): the line below tests isinstance(self, PBXGroup), not
    # isinstance(other, PBXGroup) -- possibly a copy-paste slip; confirm the
    # intent before relying on it when |other| is not a PBXGroup.
    other_in = isinstance(self, PBXGroup) and other_name in order
    if not self_in and not other_in:
      return self.Compare(other)
    if self_name in order and not other_name in order:
      return -1
    if other_name in order and not self_name in order:
      return 1
    # If both groups are in the listed order, go by the defined order.
    self_index = order.index(self_name)
    other_index = order.index(other_name)
    if self_index < other_index:
      return -1
    if self_index > other_index:
      return 1
    return 0
  def PathFromSourceTreeAndPath(self):
    """Returns the element's sourceTree and path combined into one string
    (e.g. '$(SDKROOT)/usr/lib'), or None when it has neither a non-group
    sourceTree nor a path."""
    # Turn the object's sourceTree and path properties into a single flat
    # string of a form comparable to the path parameter.  If there's a
    # sourceTree property other than "<group>", wrap it in $(...) for the
    # comparison.
    components = []
    if self._properties['sourceTree'] != '<group>':
      components.append('$(' + self._properties['sourceTree'] + ')')
    if 'path' in self._properties:
      components.append(self._properties['path'])
    if len(components) > 0:
      return posixpath.join(*components)
    return None
  def FullPath(self):
    """Returns a full path to self relative to the project file, or relative
    to some other source tree."""
    # Start with self, and walk up the chain of parents prepending their
    # paths, if any, until no more parents are available (project-relative
    # path) or until a path relative to some source tree is found (a path
    # starting with '/' or '$').
    xche = self
    path = None
    while isinstance(xche, XCHierarchicalElement) and \
          (path is None or \
           (not path.startswith('/') and not path.startswith('$'))):
      this_path = xche.PathFromSourceTreeAndPath()
      if this_path != None and path != None:
        path = posixpath.join(this_path, path)
      elif this_path != None:
        path = this_path
      xche = xche.parent
    return path
class PBXGroup(XCHierarchicalElement):
  """A group ("folder") node in the Xcode project hierarchy.

  Attributes:
    _children_by_path: Maps pathnames of children of this PBXGroup to the
      actual child XCHierarchicalElement objects.
    _variant_children_by_name_and_path: Maps (name, path) tuples of
      PBXVariantGroup children to the actual child PBXVariantGroup objects.
  """

  _schema = XCHierarchicalElement._schema.copy()
  _schema.update({
    'children': [1, XCHierarchicalElement, 1, 1, []],
    'name': [0, str, 0, 0],
    'path': [0, str, 0, 0],
  })

  def __init__(self, properties=None, id=None, parent=None):
    # super
    XCHierarchicalElement.__init__(self, properties, id, parent)
    # Build the lookup dicts from any children supplied at construction time.
    self._children_by_path = {}
    self._variant_children_by_name_and_path = {}
    for child in self._properties.get('children', []):
      self._AddChildToDicts(child)

  def Hashables(self):
    # super
    hashables = XCHierarchicalElement.Hashables(self)

    # It is not sufficient to just rely on name and parent to build a unique
    # hashable : a node could have two child PBXGroup sharing a common name.
    # To add entropy the hashable is enhanced with the names of all its
    # children.
    for child in self._properties.get('children', []):
      child_name = child.Name()
      if child_name != None:
        hashables.append(child_name)
    return hashables

  def HashablesForChild(self):
    # To avoid a circular reference the hashables used to compute a child id do
    # not include the child names.
    return XCHierarchicalElement.Hashables(self)

  def _AddChildToDicts(self, child):
    """Registers child in this group's lookup dicts.

    Raises ValueError if a child with the same path (or, for PBXVariantGroup
    children, the same (name, path) key) is already registered.
    """
    # Sets up this PBXGroup object's dicts to reference the child properly.
    child_path = child.PathFromSourceTreeAndPath()
    if child_path:
      if child_path in self._children_by_path:
        raise ValueError('Found multiple children with path ' + child_path)
      self._children_by_path[child_path] = child

    if isinstance(child, PBXVariantGroup):
      child_name = child._properties.get('name', None)
      key = (child_name, child_path)
      if key in self._variant_children_by_name_and_path:
        raise ValueError('Found multiple PBXVariantGroup children with ' + \
                         'name ' + str(child_name) + ' and path ' + \
                         str(child_path))
      self._variant_children_by_name_and_path[key] = child

  def AppendChild(self, child):
    """Adds child to this group's children list and lookup dicts."""
    # Callers should use this instead of calling
    # AppendProperty('children', child) directly because this function
    # maintains the group's dicts.
    self.AppendProperty('children', child)
    self._AddChildToDicts(child)

  def GetChildByName(self, name):
    """Returns the first child whose Name() is name, or None."""
    # This is not currently optimized with a dict as GetChildByPath is because
    # it has few callers.  Most callers probably want GetChildByPath.  This
    # function is only useful to get children that have names but no paths,
    # which is rare.  The children of the main group ("Source", "Products",
    # etc.) is pretty much the only case where this likely to come up.
    #
    # TODO(mark): Maybe this should raise an error if more than one child is
    # present with the same name.
    if not 'children' in self._properties:
      return None

    for child in self._properties['children']:
      if child.Name() == name:
        return child

    return None

  def GetChildByPath(self, path):
    """Returns the child registered at path, or None."""
    if not path:
      return None

    if path in self._children_by_path:
      return self._children_by_path[path]

    return None

  def GetChildByRemoteObject(self, remote_object):
    # This method is a little bit esoteric.  Given a remote_object, which
    # should be a PBXFileReference in another project file, this method will
    # return this group's PBXReferenceProxy object serving as a local proxy
    # for the remote PBXFileReference.
    #
    # This function might benefit from a dict optimization as GetChildByPath
    # for some workloads, but profiling shows that it's not currently a
    # problem.
    if not 'children' in self._properties:
      return None

    for child in self._properties['children']:
      if not isinstance(child, PBXReferenceProxy):
        continue

      container_proxy = child._properties['remoteRef']
      if container_proxy._properties['remoteGlobalIDString'] == remote_object:
        return child

    return None

  def AddOrGetFileByPath(self, path, hierarchical):
    """Returns an existing or new file reference corresponding to path.

    If hierarchical is True, this method will create or use the necessary
    hierarchical group structure corresponding to path.  Otherwise, it will
    look in and create an item in the current group only.

    If an existing matching reference is found, it is returned, otherwise, a
    new one will be created, added to the correct group, and returned.

    If path identifies a directory by virtue of carrying a trailing slash,
    this method returns a PBXFileReference of "folder" type.  If path
    identifies a variant, by virtue of it identifying a file inside a directory
    with an ".lproj" extension, this method returns a PBXVariantGroup
    containing the variant named by path, and possibly other variants.  For
    all other paths, a "normal" PBXFileReference will be returned.
    """

    # Adding or getting a directory?  Directories end with a trailing slash.
    is_dir = False
    if path.endswith('/'):
      is_dir = True
    # normpath strips the trailing slash; restore it so the directory flavor
    # of the path survives normalization.
    path = posixpath.normpath(path)
    if is_dir:
      path = path + '/'

    # Adding or getting a variant?  Variants are files inside directories
    # with an ".lproj" extension.  Xcode uses variants for localization.  For
    # a variant path/to/Language.lproj/MainMenu.nib, put a variant group named
    # MainMenu.nib inside path/to, and give it a variant named Language.  In
    # this example, grandparent would be set to path/to and parent_root would
    # be set to Language.
    variant_name = None
    parent = posixpath.dirname(path)
    grandparent = posixpath.dirname(parent)
    parent_basename = posixpath.basename(parent)
    (parent_root, parent_ext) = posixpath.splitext(parent_basename)
    if parent_ext == '.lproj':
      variant_name = parent_root
    if grandparent == '':
      grandparent = None

    # Putting a directory inside a variant group is not currently supported.
    assert not is_dir or variant_name is None

    path_split = path.split(posixpath.sep)
    if len(path_split) == 1 or \
       ((is_dir or variant_name != None) and len(path_split) == 2) or \
       not hierarchical:
      # The PBXFileReference or PBXVariantGroup will be added to or gotten from
      # this PBXGroup, no recursion necessary.
      if variant_name is None:
        # Add or get a PBXFileReference.
        file_ref = self.GetChildByPath(path)
        if file_ref != None:
          assert file_ref.__class__ == PBXFileReference
        else:
          file_ref = PBXFileReference({'path': path})
          self.AppendChild(file_ref)
      else:
        # Add or get a PBXVariantGroup.  The variant group name is the same
        # as the basename (MainMenu.nib in the example above).  grandparent
        # specifies the path to the variant group itself, and path_split[-2:]
        # is the path of the specific variant relative to its group.
        variant_group_name = posixpath.basename(path)
        variant_group_ref = self.AddOrGetVariantGroupByNameAndPath(
            variant_group_name, grandparent)
        variant_path = posixpath.sep.join(path_split[-2:])
        variant_ref = variant_group_ref.GetChildByPath(variant_path)
        if variant_ref != None:
          assert variant_ref.__class__ == PBXFileReference
        else:
          variant_ref = PBXFileReference({'name': variant_name,
                                          'path': variant_path})
          variant_group_ref.AppendChild(variant_ref)
        # The caller is interested in the variant group, not the specific
        # variant file.
        file_ref = variant_group_ref
      return file_ref
    else:
      # Hierarchical recursion.  Add or get a PBXGroup corresponding to the
      # outermost path component, and then recurse into it, chopping off that
      # path component.
      next_dir = path_split[0]
      group_ref = self.GetChildByPath(next_dir)
      if group_ref != None:
        assert group_ref.__class__ == PBXGroup
      else:
        group_ref = PBXGroup({'path': next_dir})
        self.AppendChild(group_ref)
      return group_ref.AddOrGetFileByPath(posixpath.sep.join(path_split[1:]),
                                          hierarchical)

  def AddOrGetVariantGroupByNameAndPath(self, name, path):
    """Returns an existing or new PBXVariantGroup for name and path.

    If a PBXVariantGroup identified by the name and path arguments is already
    present as a child of this object, it is returned.  Otherwise, a new
    PBXVariantGroup with the correct properties is created, added as a child,
    and returned.

    This method will generally be called by AddOrGetFileByPath, which knows
    when to create a variant group based on the structure of the pathnames
    passed to it.
    """
    key = (name, path)
    if key in self._variant_children_by_name_and_path:
      variant_group_ref = self._variant_children_by_name_and_path[key]
      assert variant_group_ref.__class__ == PBXVariantGroup
      return variant_group_ref

    variant_group_properties = {'name': name}
    if path != None:
      variant_group_properties['path'] = path
    variant_group_ref = PBXVariantGroup(variant_group_properties)
    self.AppendChild(variant_group_ref)

    return variant_group_ref

  def TakeOverOnlyChild(self, recurse=False):
    """If this PBXGroup has only one child and it's also a PBXGroup, take
    it over by making all of its children this object's children.

    This function will continue to take over only children when those children
    are groups.  If there are three PBXGroups representing a, b, and c, with
    c inside b and b inside a, and a and b have no other children, this will
    result in a taking over both b and c, forming a PBXGroup for a/b/c.

    If recurse is True, this function will recurse into children and ask them
    to collapse themselves by taking over only children as well.  Assuming
    an example hierarchy with files at a/b/c/d1, a/b/c/d2, and a/b/c/d3/e/f
    (d1, d2, and f are files, the rest are groups), recursion will result in
    a group for a/b/c containing a group for d3/e.
    """
    # At this stage, check that child class types are PBXGroup exactly,
    # instead of using isinstance.  The only subclass of PBXGroup,
    # PBXVariantGroup, should not participate in reparenting in the same way:
    # reparenting by merging different object types would be wrong.
    while len(self._properties['children']) == 1 and \
          self._properties['children'][0].__class__ == PBXGroup:
      # Loop to take over the innermost only-child group possible.

      child = self._properties['children'][0]

      # Assume the child's properties, including its children.  Save a copy
      # of this object's old properties, because they'll still be needed.
      # This object retains its existing id and parent attributes.
      old_properties = self._properties
      self._properties = child._properties
      self._children_by_path = child._children_by_path
      # NOTE(review): _variant_children_by_name_and_path is not refreshed
      # here, so it keeps this object's pre-takeover entries instead of the
      # child's.  Confirm no caller consults that dict after a takeover when
      # the child contained PBXVariantGroup grandchildren.

      if not 'sourceTree' in self._properties or \
         self._properties['sourceTree'] == '<group>':
        # The child was relative to its parent.  Fix up the path.  Note that
        # children with a sourceTree other than "<group>" are not relative to
        # their parents, so no path fix-up is needed in that case.
        if 'path' in old_properties:
          if 'path' in self._properties:
            # Both the original parent and child have paths set.
            self._properties['path'] = posixpath.join(old_properties['path'],
                                                      self._properties['path'])
          else:
            # Only the original parent has a path, use it.
            self._properties['path'] = old_properties['path']
        if 'sourceTree' in old_properties:
          # The original parent had a sourceTree set, use it.
          self._properties['sourceTree'] = old_properties['sourceTree']

      # If the original parent had a name set, keep using it.  If the original
      # parent didn't have a name but the child did, let the child's name
      # live on.  If the name attribute seems unnecessary now, get rid of it.
      if 'name' in old_properties and old_properties['name'] != None and \
         old_properties['name'] != self.Name():
        self._properties['name'] = old_properties['name']
      if 'name' in self._properties and 'path' in self._properties and \
         self._properties['name'] == self._properties['path']:
        del self._properties['name']

      # Notify all children of their new parent.
      for child in self._properties['children']:
        child.parent = self

    # If asked to recurse, recurse.
    if recurse:
      for child in self._properties['children']:
        if child.__class__ == PBXGroup:
          child.TakeOverOnlyChild(recurse)

  def SortGroup(self):
    """Sorts this group's children in place, then recursively sorts
    descendant PBXGroups."""
    # Note: the cmp= keyword argument to sorted is Python 2 only.
    self._properties['children'] = \
        sorted(self._properties['children'], cmp=lambda x,y: x.Compare(y))

    # Recurse.
    for child in self._properties['children']:
      if isinstance(child, PBXGroup):
        child.SortGroup()
class XCFileLikeElement(XCHierarchicalElement):
  # Abstract base for objects that can be used as the fileRef property of
  # PBXBuildFile.

  def PathHashables(self):
    """Returns hashables identifying this object by its full path.

    A PBXBuildFile that refers to this object calls this method for
    additional hashables specific to this XCFileLikeElement, because this
    object's own hashables are not unique enough without the parent chain.
    The result combines the hashables of this object and of every ancestor
    XCHierarchicalElement, with the outermost ancestor's hashables first.
    """
    accumulated = []
    node = self
    while node is not None and isinstance(node, XCHierarchicalElement):
      # Prepend this level's hashables so that ancestors end up in front.
      accumulated = node.Hashables() + accumulated
      node = node.parent
    return accumulated
class XCContainerPortal(XCObject):
  # Abstract base for objects that can be used as the containerPortal property
  # of PBXContainerItemProxy.  This class adds no behavior of its own; it
  # serves only as a marker type for schema checking.
  pass
class XCRemoteObject(XCObject):
  # Abstract base for objects that can be used as the remoteGlobalIDString
  # property of PBXContainerItemProxy.  This class adds no behavior of its
  # own; it serves only as a marker type for schema checking.
  pass
class PBXFileReference(XCFileLikeElement, XCContainerPortal, XCRemoteObject):
  """A reference to a file (or, with a trailing slash, a folder) on disk."""

  _schema = XCFileLikeElement._schema.copy()
  _schema.update({
    'explicitFileType': [0, str, 0, 0],
    'lastKnownFileType': [0, str, 0, 0],
    'name': [0, str, 0, 0],
    'path': [0, str, 0, 1],
  })

  # Weird output rules for PBXFileReference.
  _should_print_single_line = True
  # super
  _encode_transforms = XCFileLikeElement._alternate_encode_transforms

  def __init__(self, properties=None, id=None, parent=None):
    # super
    XCFileLikeElement.__init__(self, properties, id, parent)
    # A trailing slash marks a folder reference; strip it and remember.
    if 'path' in self._properties and self._properties['path'].endswith('/'):
      self._properties['path'] = self._properties['path'][:-1]
      is_dir = True
    else:
      is_dir = False

    # When no file type was supplied, infer one from the path's extension.
    if 'path' in self._properties and \
       not 'lastKnownFileType' in self._properties and \
       not 'explicitFileType' in self._properties:
      # TODO(mark): This is the replacement for a replacement for a quick hack.
      # It is no longer incredibly sucky, but this list needs to be extended.
      # Maps lowercased extensions (without the dot) to Xcode file types.
      extension_map = {
        'a': 'archive.ar',
        'app': 'wrapper.application',
        'bdic': 'file',
        'bundle': 'wrapper.cfbundle',
        'c': 'sourcecode.c.c',
        'cc': 'sourcecode.cpp.cpp',
        'cpp': 'sourcecode.cpp.cpp',
        'css': 'text.css',
        'cxx': 'sourcecode.cpp.cpp',
        'dart': 'sourcecode',
        'dylib': 'compiled.mach-o.dylib',
        'framework': 'wrapper.framework',
        'gyp': 'sourcecode',
        'gypi': 'sourcecode',
        'h': 'sourcecode.c.h',
        'hxx': 'sourcecode.cpp.h',
        'icns': 'image.icns',
        'java': 'sourcecode.java',
        'js': 'sourcecode.javascript',
        'kext': 'wrapper.kext',
        'm': 'sourcecode.c.objc',
        'mm': 'sourcecode.cpp.objcpp',
        'nib': 'wrapper.nib',
        'o': 'compiled.mach-o.objfile',
        'pdf': 'image.pdf',
        'pl': 'text.script.perl',
        'plist': 'text.plist.xml',
        'pm': 'text.script.perl',
        'png': 'image.png',
        'py': 'text.script.python',
        'r': 'sourcecode.rez',
        'rez': 'sourcecode.rez',
        's': 'sourcecode.asm',
        'storyboard': 'file.storyboard',
        'strings': 'text.plist.strings',
        'swift': 'sourcecode.swift',
        'ttf': 'file',
        'xcassets': 'folder.assetcatalog',
        'xcconfig': 'text.xcconfig',
        'xcdatamodel': 'wrapper.xcdatamodel',
        'xcdatamodeld': 'wrapper.xcdatamodeld',
        'xib': 'file.xib',
        'y': 'sourcecode.yacc',
      }

      # Extensions whose inferred type should be stored as explicitFileType
      # rather than lastKnownFileType.
      prop_map = {
        'dart': 'explicitFileType',
        'gyp': 'explicitFileType',
        'gypi': 'explicitFileType',
      }

      if is_dir:
        file_type = 'folder'
        prop_name = 'lastKnownFileType'
      else:
        basename = posixpath.basename(self._properties['path'])
        (root, ext) = posixpath.splitext(basename)
        # Check the map using a lowercase extension.
        # TODO(mark): Maybe it should try with the original case first and fall
        # back to lowercase, in case there are any instances where case
        # matters.  There currently aren't.
        if ext != '':
          ext = ext[1:].lower()

        # TODO(mark): "text" is the default value, but "file" is appropriate
        # for unrecognized files not containing text.  Xcode seems to choose
        # based on content.
        file_type = extension_map.get(ext, 'text')
        prop_name = prop_map.get(ext, 'lastKnownFileType')

      self._properties[prop_name] = file_type
class PBXVariantGroup(PBXGroup, XCFileLikeElement):
  """PBXVariantGroup is used by Xcode to represent localizations."""
  # No additions to the schema relative to PBXGroup.  Inherits PBXGroup's
  # child management and, via XCFileLikeElement, can serve as the fileRef
  # property of a PBXBuildFile.
  pass
# PBXReferenceProxy is also an XCFileLikeElement subclass. It is defined below
# because it uses PBXContainerItemProxy, defined below.
class XCBuildConfiguration(XCObject):
  _schema = XCObject._schema.copy()
  _schema.update({
    'baseConfigurationReference': [0, PBXFileReference, 0, 0],
    'buildSettings': [0, dict, 0, 1, {}],
    'name': [0, str, 0, 1],
  })

  def HasBuildSetting(self, key):
    """Returns whether key is present in this configuration's buildSettings."""
    return key in self._properties['buildSettings']

  def GetBuildSetting(self, key):
    """Returns the value stored for build setting key."""
    return self._properties['buildSettings'][key]

  def SetBuildSetting(self, key, value):
    """Stores value as build setting key, replacing any prior value."""
    # TODO(mark): If a list, copy?
    self._properties['buildSettings'][key] = value

  def AppendBuildSetting(self, key, value):
    """Appends value to build setting key, which is treated as a list."""
    self._properties['buildSettings'].setdefault(key, []).append(value)

  def DelBuildSetting(self, key):
    """Removes build setting key if present; does nothing otherwise."""
    self._properties['buildSettings'].pop(key, None)

  def SetBaseConfiguration(self, value):
    """Points baseConfigurationReference at value."""
    self._properties['baseConfigurationReference'] = value
class XCConfigurationList(XCObject):
  # _configs is the default list of configurations.
  # NOTE(review): these default XCBuildConfiguration instances are created
  # once at class-definition time -- confirm that XCObject copies schema
  # defaults per instance so lists don't end up shared between projects.
  _configs = [ XCBuildConfiguration({'name': 'Debug'}),
               XCBuildConfiguration({'name': 'Release'}) ]

  _schema = XCObject._schema.copy()
  _schema.update({
    'buildConfigurations': [1, XCBuildConfiguration, 1, 1, _configs],
    'defaultConfigurationIsVisible': [0, int, 0, 1, 1],
    'defaultConfigurationName': [0, str, 0, 1, 'Release'],
  })

  def Name(self):
    """Returns a descriptive name derived from the owning object."""
    return 'Build configuration list for ' + \
           self.parent.__class__.__name__ + ' "' + self.parent.Name() + '"'

  def ConfigurationNamed(self, name):
    """Convenience accessor to obtain an XCBuildConfiguration by name."""
    for configuration in self._properties['buildConfigurations']:
      if configuration._properties['name'] == name:
        return configuration

    raise KeyError(name)

  def DefaultConfiguration(self):
    """Convenience accessor to obtain the default XCBuildConfiguration."""
    return self.ConfigurationNamed(self._properties['defaultConfigurationName'])

  def HasBuildSetting(self, key):
    """Determines the state of a build setting in all XCBuildConfiguration
    child objects.

    If all child objects have key in their build settings, and the value is the
    same in all child objects, returns 1.

    If no child objects have the key in their build settings, returns 0.

    If some, but not all, child objects have the key in their build settings,
    or if any children have different values for the key, returns -1.
    """
    has = None
    value = None
    for configuration in self._properties['buildConfigurations']:
      configuration_has = configuration.HasBuildSetting(key)
      if has is None:
        has = configuration_has
      elif has != configuration_has:
        return -1

      if configuration_has:
        configuration_value = configuration.GetBuildSetting(key)
        if value is None:
          value = configuration_value
        elif value != configuration_value:
          return -1

    if not has:
      return 0

    return 1

  def GetBuildSetting(self, key):
    """Gets the build setting for key.

    All child XCConfiguration objects must have the same value set for the
    setting, or a ValueError will be raised.
    """
    # TODO(mark): This is wrong for build settings that are lists.  The list
    # contents should be compared (and a list copy returned?)
    value = None
    for configuration in self._properties['buildConfigurations']:
      configuration_value = configuration.GetBuildSetting(key)
      if value is None:
        value = configuration_value
      else:
        if value != configuration_value:
          raise ValueError('Variant values for ' + key)

    return value

  def SetBuildSetting(self, key, value):
    """Sets the build setting for key to value in all child
    XCBuildConfiguration objects.
    """
    for configuration in self._properties['buildConfigurations']:
      configuration.SetBuildSetting(key, value)

  def AppendBuildSetting(self, key, value):
    """Appends value to the build setting for key, which is treated as a list,
    in all child XCBuildConfiguration objects.
    """
    for configuration in self._properties['buildConfigurations']:
      configuration.AppendBuildSetting(key, value)

  def DelBuildSetting(self, key):
    """Deletes the build setting key from all child XCBuildConfiguration
    objects.
    """
    for configuration in self._properties['buildConfigurations']:
      configuration.DelBuildSetting(key)

  def SetBaseConfiguration(self, value):
    """Sets the build configuration in all child XCBuildConfiguration objects.
    """
    for configuration in self._properties['buildConfigurations']:
      configuration.SetBaseConfiguration(value)
class PBXBuildFile(XCObject):
  _schema = XCObject._schema.copy()
  _schema.update({
    'fileRef': [0, XCFileLikeElement, 0, 1],
    'settings': [0, str, 0, 0],  # hack, it's a dict
  })

  # Weird output rules for PBXBuildFile.
  _should_print_single_line = True
  # super
  _encode_transforms = XCObject._alternate_encode_transforms

  def Name(self):
    """Returns a name like "main.cc in Sources"."""
    return self._properties['fileRef'].Name() + ' in ' + self.parent.Name()

  def Hashables(self):
    # super
    result = XCObject.Hashables(self)

    # Name() alone is not enough: it is not a complete pathname, so two
    # PBXBuildFiles could collide.  PathHashables yields hashables unique
    # enough that no two PBXBuildFiles share a set unless the same file is
    # added to the same target more than once, which would be invalid anyway.
    result.extend(self._properties['fileRef'].PathHashables())
    return result
class XCBuildPhase(XCObject):
  """Abstract base for build phase classes.  Not represented in a project
  file.

  Attributes:
    _files_by_path: A dict mapping each path of a child in the files list by
      path (keys) to the corresponding PBXBuildFile children (values).
    _files_by_xcfilelikeelement: A dict mapping each XCFileLikeElement (keys)
      to the corresponding PBXBuildFile children (values).
  """

  # TODO(mark): Some build phase types, like PBXShellScriptBuildPhase, don't
  # actually have a "files" list.  XCBuildPhase should not have "files" but
  # another abstract subclass of it should provide this, and concrete build
  # phase types that do have "files" lists should be derived from that new
  # abstract subclass.  XCBuildPhase should only provide buildActionMask and
  # runOnlyForDeploymentPostprocessing, and not files or the various
  # file-related methods and attributes.

  _schema = XCObject._schema.copy()
  _schema.update({
    'buildActionMask': [0, int, 0, 1, 0x7fffffff],
    'files': [1, PBXBuildFile, 1, 1, []],
    'runOnlyForDeploymentPostprocessing': [0, int, 0, 1, 0],
  })

  def __init__(self, properties=None, id=None, parent=None):
    # super
    XCObject.__init__(self, properties, id, parent)
    # Populate the lookup dicts from any files supplied at construction time.
    self._files_by_path = {}
    self._files_by_xcfilelikeelement = {}
    for pbxbuildfile in self._properties.get('files', []):
      self._AddBuildFileToDicts(pbxbuildfile)

  def FileGroup(self, path):
    # Subclasses must override this by returning a two-element tuple.  The
    # first item in the tuple should be the PBXGroup to which "path" should be
    # added, either as a child or deeper descendant.  The second item should
    # be a boolean indicating whether files should be added into hierarchical
    # groups or one single flat group.
    raise NotImplementedError(
        self.__class__.__name__ + ' must implement FileGroup')

  def _AddPathToDict(self, pbxbuildfile, path):
    """Adds path to the dict tracking paths belonging to this build phase.

    If the path is already a member of this build phase, raises an exception.
    """
    if path in self._files_by_path:
      raise ValueError('Found multiple build files with path ' + path)
    self._files_by_path[path] = pbxbuildfile

  def _AddBuildFileToDicts(self, pbxbuildfile, path=None):
    """Maintains the _files_by_path and _files_by_xcfilelikeelement dicts.

    If path is specified, then it is the path that is being added to the
    phase, and pbxbuildfile must contain either a PBXFileReference directly
    referencing that path, or it must contain a PBXVariantGroup that itself
    contains a PBXFileReference referencing the path.

    If path is not specified, either the PBXFileReference's path or the paths
    of all children of the PBXVariantGroup are taken as being added to the
    phase.

    If the path is already present in the phase, raises an exception.

    If the PBXFileReference or PBXVariantGroup referenced by pbxbuildfile
    are already present in the phase, referenced by a different PBXBuildFile
    object, raises an exception.  This does not raise an exception when
    a PBXFileReference or PBXVariantGroup reappear and are referenced by the
    same PBXBuildFile that has already introduced them, because in the case
    of PBXVariantGroup objects, they may correspond to multiple paths that are
    not all added simultaneously.  When this situation occurs, the path needs
    to be added to _files_by_path, but nothing needs to change in
    _files_by_xcfilelikeelement, and the caller should have avoided adding
    the PBXBuildFile if it is already present in the list of children.
    """
    xcfilelikeelement = pbxbuildfile._properties['fileRef']

    paths = []
    if path != None:
      # It's best when the caller provides the path.
      # Note: when the caller supplies a path for a non-PBXVariantGroup
      # fileRef, paths stays empty, so no entry is added to _files_by_path
      # in that case.
      if isinstance(xcfilelikeelement, PBXVariantGroup):
        paths.append(path)
    else:
      # If the caller didn't provide a path, there can be either multiple
      # paths (PBXVariantGroup) or one.
      if isinstance(xcfilelikeelement, PBXVariantGroup):
        for variant in xcfilelikeelement._properties['children']:
          paths.append(variant.FullPath())
      else:
        paths.append(xcfilelikeelement.FullPath())

    # Add the paths first, because if something's going to raise, the
    # messages provided by _AddPathToDict are more useful owing to its
    # having access to a real pathname and not just an object's Name().
    for a_path in paths:
      self._AddPathToDict(pbxbuildfile, a_path)

    # If another PBXBuildFile references this XCFileLikeElement, there's a
    # problem.
    if xcfilelikeelement in self._files_by_xcfilelikeelement and \
       self._files_by_xcfilelikeelement[xcfilelikeelement] != pbxbuildfile:
      raise ValueError('Found multiple build files for ' + \
                       xcfilelikeelement.Name())
    self._files_by_xcfilelikeelement[xcfilelikeelement] = pbxbuildfile

  def AppendBuildFile(self, pbxbuildfile, path=None):
    """Appends pbxbuildfile to this phase, keeping the lookup dicts current."""
    # Callers should use this instead of calling
    # AppendProperty('files', pbxbuildfile) directly because this function
    # maintains the object's dicts.  Better yet, callers can just call AddFile
    # with a pathname and not worry about building their own PBXBuildFile
    # objects.
    self.AppendProperty('files', pbxbuildfile)
    self._AddBuildFileToDicts(pbxbuildfile, path)

  def AddFile(self, path, settings=None):
    """Adds the file at path to this phase, reusing or creating the
    PBXBuildFile and group placement as needed.  settings, if given, is
    stored on the new PBXBuildFile.
    """
    (file_group, hierarchical) = self.FileGroup(path)
    file_ref = file_group.AddOrGetFileByPath(path, hierarchical)

    if file_ref in self._files_by_xcfilelikeelement and \
       isinstance(file_ref, PBXVariantGroup):
      # There's already a PBXBuildFile in this phase corresponding to the
      # PBXVariantGroup.  path just provides a new variant that belongs to
      # the group.  Add the path to the dict.
      pbxbuildfile = self._files_by_xcfilelikeelement[file_ref]
      self._AddBuildFileToDicts(pbxbuildfile, path)
    else:
      # Add a new PBXBuildFile to get file_ref into the phase.
      if settings is None:
        pbxbuildfile = PBXBuildFile({'fileRef': file_ref})
      else:
        pbxbuildfile = PBXBuildFile({'fileRef': file_ref, 'settings': settings})
      self.AppendBuildFile(pbxbuildfile, path)
class PBXHeadersBuildPhase(XCBuildPhase):
  # No additions to the schema relative to XCBuildPhase.

  def Name(self):
    """Returns the name Xcode displays for this phase."""
    return 'Headers'

  def FileGroup(self, path):
    """Returns (group, hierarchical) for path; see XCBuildPhase.FileGroup."""
    return self.PBXProjectAncestor().RootGroupForPath(path)
class PBXResourcesBuildPhase(XCBuildPhase):
  # No additions to the schema relative to XCBuildPhase.

  def Name(self):
    """Returns the name Xcode displays for this phase."""
    return 'Resources'

  def FileGroup(self, path):
    """Returns (group, hierarchical) for path; see XCBuildPhase.FileGroup."""
    return self.PBXProjectAncestor().RootGroupForPath(path)
class PBXSourcesBuildPhase(XCBuildPhase):
  # No additions to the schema relative to XCBuildPhase.

  def Name(self):
    """Returns the name Xcode displays for this phase."""
    return 'Sources'

  def FileGroup(self, path):
    """Returns (group, hierarchical) for path; see XCBuildPhase.FileGroup."""
    return self.PBXProjectAncestor().RootGroupForPath(path)
class PBXFrameworksBuildPhase(XCBuildPhase):
  # No additions to the schema relative to XCBuildPhase.

  def Name(self):
    """Returns the name Xcode displays for this phase."""
    return 'Frameworks'

  def FileGroup(self, path):
    """Chooses the group that should hold a file added to this phase."""
    # Lowercased extension without its leading dot; '' when there is none.
    extension = posixpath.splitext(path)[1][1:].lower()
    if extension == 'o':
      # .o files are added to Xcode Frameworks phases, but conceptually aren't
      # frameworks, they're more like sources or intermediates.  Redirect them
      # to show up in one of those other groups.
      return self.PBXProjectAncestor().RootGroupForPath(path)
    return (self.PBXProjectAncestor().FrameworksGroup(), False)
class PBXShellScriptBuildPhase(XCBuildPhase):
  _schema = XCBuildPhase._schema.copy()
  _schema.update({
    'inputPaths': [1, str, 0, 1, []],
    'name': [0, str, 0, 0],
    'outputPaths': [1, str, 0, 1, []],
    'shellPath': [0, str, 0, 1, '/bin/sh'],
    'shellScript': [0, str, 0, 1],
    'showEnvVarsInLog': [0, int, 0, 0],
  })

  def Name(self):
    """Returns the phase's display name, "ShellScript" when unnamed."""
    return self._properties.get('name', 'ShellScript')
class PBXCopyFilesBuildPhase(XCBuildPhase):
  _schema = XCBuildPhase._schema.copy()
  _schema.update({
    'dstPath': [0, str, 0, 1],
    'dstSubfolderSpec': [0, int, 0, 1],
    'name': [0, str, 0, 0],
  })

  # path_tree_re matches "$(DIR)/path" or just "$(DIR)".  Match group 1 is
  # "DIR", match group 3 is "path" or None.
  path_tree_re = re.compile('^\\$\\((.*)\\)(/(.*)|)$')

  # path_tree_to_subfolder maps names of Xcode variables to the associated
  # dstSubfolderSpec property value used in a PBXCopyFilesBuildPhase object.
  path_tree_to_subfolder = {
    'BUILT_FRAMEWORKS_DIR': 10,  # Frameworks Directory
    'BUILT_PRODUCTS_DIR': 16,  # Products Directory
    # Other types that can be chosen via the Xcode UI.
    # TODO(mark): Map Xcode variable names to these.
    # : 1,  # Wrapper
    # : 6,  # Executables: 6
    # : 7,  # Resources
    # : 15,  # Java Resources
    # : 11,  # Shared Frameworks
    # : 12,  # Shared Support
    # : 13,  # PlugIns
  }

  def Name(self):
    """Returns the phase's display name, "CopyFiles" when unnamed."""
    return self._properties.get('name', 'CopyFiles')

  def FileGroup(self, path):
    """Returns (group, hierarchical) for path; see XCBuildPhase.FileGroup."""
    return self.PBXProjectAncestor().RootGroupForPath(path)

  def SetDestination(self, path):
    """Set the dstSubfolderSpec and dstPath properties from path.

    path may be specified in the same notation used for
    XCHierarchicalElements, specifically, "$(DIR)/path".
    """
    match = self.path_tree_re.search(path)
    if match:
      path_tree = match.group(1)
      relative_path = match.group(3)
      if path_tree in self.path_tree_to_subfolder:
        # A recognized Xcode variable: map it to its dstSubfolderSpec code.
        subfolder = self.path_tree_to_subfolder[path_tree]
        if relative_path is None:
          relative_path = ''
      else:
        # The path starts with an unrecognized Xcode variable
        # name like $(SRCROOT).  Xcode will still handle this
        # as an "absolute path" that starts with the variable.
        subfolder = 0
        relative_path = path
    elif path.startswith('/'):
      # Special case.  Absolute paths are in dstSubfolderSpec 0.
      subfolder = 0
      relative_path = path[1:]
    else:
      raise ValueError('Can\'t use path %s in a %s' % \
                       (path, self.__class__.__name__))

    self._properties['dstPath'] = relative_path
    self._properties['dstSubfolderSpec'] = subfolder
class PBXBuildRule(XCObject):
  _schema = XCObject._schema.copy()
  _schema.update({
    'compilerSpec': [0, str, 0, 1],
    'filePatterns': [0, str, 0, 0],
    'fileType': [0, str, 0, 1],
    'isEditable': [0, int, 0, 1, 1],
    'outputFiles': [1, str, 0, 1, []],
    'script': [0, str, 0, 0],
  })

  def Name(self):
    """Returns the class name; not very inspired, but it's what Xcode uses."""
    return self.__class__.__name__

  def Hashables(self):
    # super
    result = XCObject.Hashables(self)

    # Mix in the properties that distinguish one build rule from another.
    result.append(self._properties['fileType'])
    if 'filePatterns' in self._properties:
      result.append(self._properties['filePatterns'])
    return result
class PBXContainerItemProxy(XCObject):
  # When referencing an item in this project file, containerPortal is the
  # PBXProject root object of this project file.  When referencing an item in
  # another project file, containerPortal is a PBXFileReference identifying
  # the other project file.
  #
  # When serving as a proxy to an XCTarget (in this project file or another),
  # proxyType is 1.  When serving as a proxy to a PBXFileReference (in another
  # project file), proxyType is 2.  Type 2 is used for references to the
  # products of the other project file's targets.
  #
  # Xcode is weird about remoteGlobalIDString.  Usually, it's printed without
  # a comment, indicating that it's tracked internally simply as a string, but
  # sometimes it's printed with a comment (usually when the object is initially
  # created), indicating that it's tracked as a project file object at least
  # sometimes.  This module always tracks it as an object, but contains a hack
  # to prevent it from printing the comment in the project file output.  See
  # _XCKVPrint.
  _schema = XCObject._schema.copy()
  _schema.update({
    'containerPortal': [0, XCContainerPortal, 0, 1],
    'proxyType': [0, int, 0, 1],
    'remoteGlobalIDString': [0, XCRemoteObject, 0, 1],
    'remoteInfo': [0, str, 0, 1],
  })

  def __repr__(self):
    portal_name = self._properties['containerPortal'].Name()
    remote_info = self._properties['remoteInfo']
    return '<%s %r at 0x%x>' % (self.__class__.__name__,
                                '%s.gyp:%s' % (portal_name, remote_info),
                                id(self))

  def Name(self):
    # Admittedly not the best name, but it's what Xcode uses.
    return self.__class__.__name__

  def Hashables(self):
    # super
    result = XCObject.Hashables(self)

    # Fold in the hashables of the weak objects this proxy refers to.
    result.extend(self._properties['containerPortal'].Hashables())
    result.extend(self._properties['remoteGlobalIDString'].Hashables())
    return result
class PBXTargetDependency(XCObject):
  """Records that one target depends on another, via a PBXContainerItemProxy.

  The "target" property accepts an XCTarget object, and obviously not
  NoneType.  But XCTarget is defined below, so it can't be put into the
  schema yet.  The definition of PBXTargetDependency can't be moved below
  XCTarget because XCTarget's own schema references PBXTargetDependency.
  Python doesn't deal well with this circular relationship, and doesn't have
  a real way to do forward declarations.  To work around, the type of the
  "target" property is reset below, after XCTarget is defined.

  At least one of "name" and "target" is required.
  """

  _schema = XCObject._schema.copy()
  _schema['name'] = [0, str, 0, 0]
  # NoneType is a placeholder; patched to XCTarget once that class exists.
  _schema['target'] = [0, None.__class__, 0, 0]
  _schema['targetProxy'] = [0, PBXContainerItemProxy, 1, 1]

  def __repr__(self):
    label = self._properties.get('name')
    if not label:
      label = self._properties['target'].Name()
    return '<%s %r at 0x%x>' % (self.__class__.__name__, label, id(self))

  def Name(self):
    # Admittedly not the best name, but it's what Xcode uses.
    return self.__class__.__name__

  def Hashables(self):
    hashables = XCObject.Hashables(self)  # super
    # targetProxy is held weakly, so fold its hashables in explicitly.
    hashables.extend(self._properties['targetProxy'].Hashables())
    return hashables
class PBXReferenceProxy(XCFileLikeElement):
  """File-like reference to a product built by another project file's
  target, reached through a PBXContainerItemProxy (remoteRef)."""

  _schema = XCFileLikeElement._schema.copy()
  _schema['fileType'] = [0, str, 0, 1]
  _schema['path'] = [0, str, 0, 1]
  _schema['remoteRef'] = [0, PBXContainerItemProxy, 1, 1]
class XCTarget(XCRemoteObject):
  """Base class for buildable targets.

  An XCTarget is really just an XCObject; deriving from XCRemoteObject only
  exists to allow targets to be used in the remoteGlobalIDString property of
  PBXContainerItemProxy.

  Setting a "name" property at instantiation may also affect "productName",
  which may in turn affect the "PRODUCT_NAME" build setting in children of
  "buildConfigurationList".  See __init__ below.
  """

  _schema = XCRemoteObject._schema.copy()
  _schema['buildConfigurationList'] = [0, XCConfigurationList, 1, 1,
                                       XCConfigurationList()]
  _schema['buildPhases'] = [1, XCBuildPhase, 1, 1, []]
  _schema['dependencies'] = [1, PBXTargetDependency, 1, 1, []]
  _schema['name'] = [0, str, 0, 1]
  _schema['productName'] = [0, str, 0, 1]

  def __init__(self, properties=None, id=None, parent=None,
               force_outdir=None, force_prefix=None, force_extension=None):
    XCRemoteObject.__init__(self, properties, id, parent)  # super

    # Set up additional defaults not expressed in the schema: derive
    # "productName" from "name" when absent, then seed the PRODUCT_NAME
    # build setting, but only if no build configuration defines it already.
    props = self._properties
    if 'name' in props and 'productName' not in props:
      self.SetProperty('productName', props['name'])
    if 'productName' in props and 'buildConfigurationList' in props:
      configs = props['buildConfigurationList']
      if configs.HasBuildSetting('PRODUCT_NAME') == 0:
        configs.SetBuildSetting('PRODUCT_NAME', props['productName'])

  def AddDependency(self, other):
    """Makes this target depend on target |other|, which may live in this
    project file or in a different one."""
    pbxproject = self.PBXProjectAncestor()
    other_pbxproject = other.PBXProjectAncestor()

    if pbxproject == other_pbxproject:
      # Same project file: proxy through this project's root object.
      container = PBXContainerItemProxy({
          'containerPortal': pbxproject,
          'proxyType': 1,
          'remoteGlobalIDString': other,
          'remoteInfo': other.Name(),
      })
      dependency = PBXTargetDependency({'target': other,
                                        'targetProxy': container})
    else:
      # Different project file: proxy through a PBXFileReference to it.
      other_project_ref = \
          pbxproject.AddOrGetProjectReference(other_pbxproject)[1]
      container = PBXContainerItemProxy({
          'containerPortal': other_project_ref,
          'proxyType': 1,
          'remoteGlobalIDString': other,
          'remoteInfo': other.Name(),
      })
      dependency = PBXTargetDependency({'name': other.Name(),
                                        'targetProxy': container})

    self.AppendProperty('dependencies', dependency)

  # All build-setting accessors proxy through to buildConfigurationList.
  def _ConfigList(self):
    # Shared accessor for the proxied configuration list.
    return self._properties['buildConfigurationList']

  def ConfigurationNamed(self, name):
    return self._ConfigList().ConfigurationNamed(name)

  def DefaultConfiguration(self):
    return self._ConfigList().DefaultConfiguration()

  def HasBuildSetting(self, key):
    return self._ConfigList().HasBuildSetting(key)

  def GetBuildSetting(self, key):
    return self._ConfigList().GetBuildSetting(key)

  def SetBuildSetting(self, key, value):
    return self._ConfigList().SetBuildSetting(key, value)

  def AppendBuildSetting(self, key, value):
    return self._ConfigList().AppendBuildSetting(key, value)

  def DelBuildSetting(self, key):
    return self._ConfigList().DelBuildSetting(key)
# Now that XCTarget exists, patch the type of PBXTargetDependency's "target"
# property, which was declared above with a NoneType placeholder to break the
# circular dependency between the two schemas.  See PBXTargetDependency._schema.
PBXTargetDependency._schema['target'][1] = XCTarget
class PBXNativeTarget(XCTarget):
  """A buildable target that produces a concrete product file.

  buildPhases is overridden in the schema to be able to set defaults.

  NOTE: Contrary to most objects, it is advisable to set parent when
  constructing PBXNativeTarget.  A parent of an XCTarget must be a PBXProject
  object.  A parent reference is required for a PBXNativeTarget during
  construction to be able to set up the target defaults for productReference,
  because a PBXBuildFile object must be created for the target and it must
  be added to the PBXProject's mainGroup hierarchy.
  """
  _schema = XCTarget._schema.copy()
  _schema.update({
    'buildPhases': [1, XCBuildPhase, 1, 1,
                    [PBXSourcesBuildPhase(), PBXFrameworksBuildPhase()]],
    'buildRules': [1, PBXBuildRule, 1, 1, []],
    'productReference': [0, PBXFileReference, 0, 1],
    'productType': [0, str, 0, 1],
  })

  # Mapping from Xcode product-types to settings.  The settings are:
  #   filetype : used for explicitFileType in the project file
  #   prefix : the prefix for the file name
  #   suffix : the suffix for the file name
  _product_filetypes = {
    'com.apple.product-type.application': ['wrapper.application',
                                           '', '.app'],
    'com.apple.product-type.application.watchapp': ['wrapper.application',
                                                    '', '.app'],
    'com.apple.product-type.watchkit-extension': ['wrapper.app-extension',
                                                  '', '.appex'],
    'com.apple.product-type.app-extension': ['wrapper.app-extension',
                                             '', '.appex'],
    'com.apple.product-type.bundle': ['wrapper.cfbundle',
                                      '', '.bundle'],
    'com.apple.product-type.framework': ['wrapper.framework',
                                         '', '.framework'],
    'com.apple.product-type.library.dynamic': ['compiled.mach-o.dylib',
                                               'lib', '.dylib'],
    'com.apple.product-type.library.static': ['archive.ar',
                                              'lib', '.a'],
    'com.apple.product-type.tool': ['compiled.mach-o.executable',
                                    '', ''],
    'com.apple.product-type.bundle.unit-test': ['wrapper.cfbundle',
                                                '', '.xctest'],
    'com.googlecode.gyp.xcode.bundle': ['compiled.mach-o.dylib',
                                        '', '.so'],
    'com.apple.product-type.kernel-extension': ['wrapper.kext',
                                                '', '.kext'],
  }

  def __init__(self, properties=None, id=None, parent=None,
               force_outdir=None, force_prefix=None, force_extension=None):
    """Creates the target and, when productName/productType are known, also
    creates the productReference PBXFileReference in the project's Products
    group.  force_outdir, force_prefix and force_extension override the
    defaults taken from _product_filetypes."""
    # super
    XCTarget.__init__(self, properties, id, parent)

    if 'productName' in self._properties and \
       'productType' in self._properties and \
       not 'productReference' in self._properties and \
       self._properties['productType'] in self._product_filetypes:
      products_group = None
      pbxproject = self.PBXProjectAncestor()
      if pbxproject != None:
        products_group = pbxproject.ProductsGroup()

      if products_group != None:
        (filetype, prefix, suffix) = \
            self._product_filetypes[self._properties['productType']]

        # Xcode does not have a distinct type for loadable modules that are
        # pure BSD targets (not in a bundle wrapper). GYP allows such modules
        # to be specified by setting a target type to loadable_module without
        # having mac_bundle set. These are mapped to the pseudo-product type
        # com.googlecode.gyp.xcode.bundle.
        #
        # By picking up this special type and converting it to a dynamic
        # library (com.apple.product-type.library.dynamic) with fix-ups,
        # single-file loadable modules can be produced.
        #
        # MACH_O_TYPE is changed to mh_bundle to produce the proper file type
        # (as opposed to mh_dylib). In order for linking to succeed,
        # DYLIB_CURRENT_VERSION and DYLIB_COMPATIBILITY_VERSION must be
        # cleared. They are meaningless for type mh_bundle.
        #
        # Finally, the .so extension is forcibly applied over the default
        # (.dylib), unless another forced extension is already selected.
        # .dylib is plainly wrong, and .bundle is used by loadable_modules in
        # bundle wrappers (com.apple.product-type.bundle). .so seems an odd
        # choice because it's used as the extension on many other systems that
        # don't distinguish between linkable shared libraries and non-linkable
        # loadable modules, but there's precedent: Python loadable modules on
        # Mac OS X use an .so extension.
        if self._properties['productType'] == 'com.googlecode.gyp.xcode.bundle':
          self._properties['productType'] = \
              'com.apple.product-type.library.dynamic'
          self.SetBuildSetting('MACH_O_TYPE', 'mh_bundle')
          self.SetBuildSetting('DYLIB_CURRENT_VERSION', '')
          self.SetBuildSetting('DYLIB_COMPATIBILITY_VERSION', '')
          if force_extension is None:
            force_extension = suffix[1:]

        # Unit-test bundles default to the .xctest extension.  (Bug fix: this
        # comparison previously used the misspelled identifier
        # 'com.apple.product-type-bundle.unit.test', which never matches the
        # key used in _product_filetypes above, so unit-test bundles never
        # received the default extension.)
        if self._properties['productType'] == \
           'com.apple.product-type.bundle.unit-test':
          if force_extension is None:
            force_extension = suffix[1:]

        if force_extension is not None:
          # If it's a wrapper (bundle), set WRAPPER_EXTENSION.
          # Extension override.
          suffix = '.' + force_extension
          if filetype.startswith('wrapper.'):
            self.SetBuildSetting('WRAPPER_EXTENSION', force_extension)
          else:
            self.SetBuildSetting('EXECUTABLE_EXTENSION', force_extension)

          if filetype.startswith('compiled.mach-o.executable'):
            product_name = self._properties['productName']
            product_name += suffix
            suffix = ''
            self.SetProperty('productName', product_name)
            self.SetBuildSetting('PRODUCT_NAME', product_name)

        # Xcode handles most prefixes based on the target type, however there
        # are exceptions.  If a "BSD Dynamic Library" target is added in the
        # Xcode UI, Xcode sets EXECUTABLE_PREFIX.  This check duplicates that
        # behavior.
        if force_prefix is not None:
          prefix = force_prefix
          if filetype.startswith('wrapper.'):
            self.SetBuildSetting('WRAPPER_PREFIX', prefix)
          else:
            self.SetBuildSetting('EXECUTABLE_PREFIX', prefix)

        if force_outdir is not None:
          self.SetBuildSetting('TARGET_BUILD_DIR', force_outdir)

        # TODO(tvl): Remove the below hack.
        #    http://code.google.com/p/gyp/issues/detail?id=122
        # Some targets include the prefix in the target_name.  These targets
        # really should just add a product_name setting that doesn't include
        # the prefix.  For example:
        #  target_name = 'libevent', product_name = 'event'
        # This check cleans up for them.
        product_name = self._properties['productName']
        prefix_len = len(prefix)
        if prefix_len and (product_name[:prefix_len] == prefix):
          product_name = product_name[prefix_len:]
          self.SetProperty('productName', product_name)
          self.SetBuildSetting('PRODUCT_NAME', product_name)

        ref_props = {
          'explicitFileType': filetype,
          'includeInIndex': 0,
          'path': prefix + product_name + suffix,
          'sourceTree': 'BUILT_PRODUCTS_DIR',
        }
        file_ref = PBXFileReference(ref_props)
        products_group.AppendChild(file_ref)
        self.SetProperty('productReference', file_ref)

  def GetBuildPhaseByType(self, type):
    """Returns the single build phase of class |type|, or None.

    Some phases may be present in multiples in a well-formed project file,
    but phases like PBXSourcesBuildPhase may only be present singly.  This
    loops over the entire list of phases and asserts if more than one of the
    desired type is found.
    """
    if not 'buildPhases' in self._properties:
      return None

    the_phase = None
    for phase in self._properties['buildPhases']:
      if isinstance(phase, type):
        assert the_phase is None
        the_phase = phase

    return the_phase

  def HeadersPhase(self):
    """Returns the PBXHeadersBuildPhase, creating and inserting it if absent."""
    headers_phase = self.GetBuildPhaseByType(PBXHeadersBuildPhase)
    if headers_phase is None:
      headers_phase = PBXHeadersBuildPhase()

      # The headers phase should come before the resources, sources, and
      # frameworks phases, if any.
      insert_at = len(self._properties['buildPhases'])
      for index in xrange(0, len(self._properties['buildPhases'])):
        phase = self._properties['buildPhases'][index]
        if isinstance(phase, PBXResourcesBuildPhase) or \
           isinstance(phase, PBXSourcesBuildPhase) or \
           isinstance(phase, PBXFrameworksBuildPhase):
          insert_at = index
          break

      self._properties['buildPhases'].insert(insert_at, headers_phase)
      headers_phase.parent = self

    return headers_phase

  def ResourcesPhase(self):
    """Returns the PBXResourcesBuildPhase, creating and inserting it if
    absent."""
    resources_phase = self.GetBuildPhaseByType(PBXResourcesBuildPhase)
    if resources_phase is None:
      resources_phase = PBXResourcesBuildPhase()

      # The resources phase should come before the sources and frameworks
      # phases, if any.
      insert_at = len(self._properties['buildPhases'])
      for index in xrange(0, len(self._properties['buildPhases'])):
        phase = self._properties['buildPhases'][index]
        if isinstance(phase, PBXSourcesBuildPhase) or \
           isinstance(phase, PBXFrameworksBuildPhase):
          insert_at = index
          break

      self._properties['buildPhases'].insert(insert_at, resources_phase)
      resources_phase.parent = self

    return resources_phase

  def SourcesPhase(self):
    """Returns the PBXSourcesBuildPhase, creating and appending it if absent."""
    sources_phase = self.GetBuildPhaseByType(PBXSourcesBuildPhase)
    if sources_phase is None:
      sources_phase = PBXSourcesBuildPhase()
      self.AppendProperty('buildPhases', sources_phase)

    return sources_phase

  def FrameworksPhase(self):
    """Returns the PBXFrameworksBuildPhase, creating and appending it if
    absent."""
    frameworks_phase = self.GetBuildPhaseByType(PBXFrameworksBuildPhase)
    if frameworks_phase is None:
      frameworks_phase = PBXFrameworksBuildPhase()
      self.AppendProperty('buildPhases', frameworks_phase)

    return frameworks_phase

  def AddDependency(self, other):
    """Adds a dependency on |other| and, when |other| produces a linkable
    product (static library, or non-bundle dynamic library/framework), also
    links that product into this target's frameworks phase."""
    # super
    XCTarget.AddDependency(self, other)

    static_library_type = 'com.apple.product-type.library.static'
    shared_library_type = 'com.apple.product-type.library.dynamic'
    framework_type = 'com.apple.product-type.framework'
    if isinstance(other, PBXNativeTarget) and \
       'productType' in self._properties and \
       self._properties['productType'] != static_library_type and \
       'productType' in other._properties and \
       (other._properties['productType'] == static_library_type or \
        ((other._properties['productType'] == shared_library_type or \
          other._properties['productType'] == framework_type) and \
         ((not other.HasBuildSetting('MACH_O_TYPE')) or
          other.GetBuildSetting('MACH_O_TYPE') != 'mh_bundle'))):

      file_ref = other.GetProperty('productReference')

      pbxproject = self.PBXProjectAncestor()
      other_pbxproject = other.PBXProjectAncestor()
      if pbxproject != other_pbxproject:
        # Cross-project link: use the reference proxy in this project's
        # product group for the other project, not the foreign file_ref.
        other_project_product_group = \
            pbxproject.AddOrGetProjectReference(other_pbxproject)[0]
        file_ref = other_project_product_group.GetChildByRemoteObject(file_ref)

      self.FrameworksPhase().AppendProperty('files',
                                            PBXBuildFile({'fileRef': file_ref}))
class PBXAggregateTarget(XCTarget):
  # An aggregate target produces no product of its own; it only groups
  # dependencies and build phases.  All behavior comes from XCTarget.
  pass
class PBXProject(XCContainerPortal):
  # A PBXProject is really just an XCObject, the XCContainerPortal thing is
  # just to allow PBXProject to be used in the containerPortal property of
  # PBXContainerItemProxy.
  """
  Attributes:
    path: "sample.xcodeproj". TODO(mark) Document me!
    _other_pbxprojects: A dictionary, keyed by other PBXProject objects. Each
                        value is a reference to the dict in the
                        projectReferences list associated with the keyed
                        PBXProject.
  """

  _schema = XCContainerPortal._schema.copy()
  _schema.update({
    'attributes': [0, dict, 0, 0],
    'buildConfigurationList': [0, XCConfigurationList, 1, 1,
                               XCConfigurationList()],
    'compatibilityVersion': [0, str, 0, 1, 'Xcode 3.2'],
    'hasScannedForEncodings': [0, int, 0, 1, 1],
    'mainGroup': [0, PBXGroup, 1, 1, PBXGroup()],
    'projectDirPath': [0, str, 0, 1, ''],
    'projectReferences': [1, dict, 0, 0],
    'projectRoot': [0, str, 0, 1, ''],
    'targets': [1, XCTarget, 1, 1, []],
  })

  def __init__(self, properties=None, id=None, parent=None, path=None):
    """Records |path| (the .xcodeproj path) and initialises base state."""
    self.path = path
    self._other_pbxprojects = {}
    # super
    return XCContainerPortal.__init__(self, properties, id, parent)

  def Name(self):
    """Returns |path| with any '.xcodeproj' suffix and leading directory
    components stripped."""
    name = self.path
    if name[-10:] == '.xcodeproj':
      name = name[:-10]
    return posixpath.basename(name)

  def Path(self):
    """Returns the project file path supplied at construction."""
    return self.path

  def Comment(self):
    # Fixed annotation string for the root object.
    return 'Project object'

  def Children(self):
    # super
    children = XCContainerPortal.Children(self)

    # Add children that the schema doesn't know about. Maybe there's a more
    # elegant way around this, but this is the only case where we need to own
    # objects in a dictionary (that is itself in a list), and three lines for
    # a one-off isn't that big a deal.
    if 'projectReferences' in self._properties:
      for reference in self._properties['projectReferences']:
        children.append(reference['ProductGroup'])

    return children

  def PBXProjectAncestor(self):
    # A PBXProject is its own ancestor; this terminates the lookup chain.
    return self

  def _GroupByName(self, name):
    """Returns the top-level group named |name|, creating the mainGroup
    and/or the named group on first access."""
    if not 'mainGroup' in self._properties:
      self.SetProperty('mainGroup', PBXGroup())

    main_group = self._properties['mainGroup']
    group = main_group.GetChildByName(name)
    if group is None:
      group = PBXGroup({'name': name})
      main_group.AppendChild(group)

    return group

  # SourceGroup and ProductsGroup are created by default in Xcode's own
  # templates.
  def SourceGroup(self):
    return self._GroupByName('Source')

  def ProductsGroup(self):
    return self._GroupByName('Products')

  # IntermediatesGroup is used to collect source-like files that are generated
  # by rules or script phases and are placed in intermediate directories such
  # as DerivedSources.
  def IntermediatesGroup(self):
    return self._GroupByName('Intermediates')

  # FrameworksGroup and ProjectsGroup are top-level groups used to collect
  # frameworks and projects.
  def FrameworksGroup(self):
    return self._GroupByName('Frameworks')

  def ProjectsGroup(self):
    return self._GroupByName('Projects')

  def RootGroupForPath(self, path):
    """Returns a PBXGroup child of this object to which path should be added.

    This method is intended to choose between SourceGroup and
    IntermediatesGroup on the basis of whether path is present in a source
    directory or an intermediates directory.  For the purposes of this
    determination, any path located within a derived file directory such as
    PROJECT_DERIVED_FILE_DIR is treated as being in an intermediates
    directory.

    The returned value is a two-element tuple.  The first element is the
    PBXGroup, and the second element specifies whether that group should be
    organized hierarchically (True) or as a single flat list (False).
    """
    # TODO(mark): make this a class variable and bind to self on call?
    # Also, this list is nowhere near exhaustive.
    # INTERMEDIATE_DIR and SHARED_INTERMEDIATE_DIR are used by
    # gyp.generator.xcode.  There should probably be some way for that module
    # to push the names in, rather than having to hard-code them here.
    source_tree_groups = {
      'DERIVED_FILE_DIR': (self.IntermediatesGroup, True),
      'INTERMEDIATE_DIR': (self.IntermediatesGroup, True),
      'PROJECT_DERIVED_FILE_DIR': (self.IntermediatesGroup, True),
      'SHARED_INTERMEDIATE_DIR': (self.IntermediatesGroup, True),
    }

    (source_tree, path) = SourceTreeAndPathFromPath(path)
    if source_tree != None and source_tree in source_tree_groups:
      (group_func, hierarchical) = source_tree_groups[source_tree]
      group = group_func()
      return (group, hierarchical)

    # TODO(mark): make additional choices based on file extension.
    return (self.SourceGroup(), True)

  def AddOrGetFileInRootGroup(self, path):
    """Returns a PBXFileReference corresponding to path in the correct group
    according to RootGroupForPath's heuristics.

    If an existing PBXFileReference for path exists, it will be returned.
    Otherwise, one will be created and returned.
    """
    (group, hierarchical) = self.RootGroupForPath(path)
    return group.AddOrGetFileByPath(path, hierarchical)

  def RootGroupsTakeOverOnlyChildren(self, recurse=False):
    """Calls TakeOverOnlyChild for all groups in the main group."""
    for group in self._properties['mainGroup']._properties['children']:
      if isinstance(group, PBXGroup):
        group.TakeOverOnlyChild(recurse)

  def SortGroups(self):
    """Sorts the main group's children and, recursively, their contents."""
    # Sort the children of the mainGroup (like "Source" and "Products")
    # according to their defined order.
    self._properties['mainGroup']._properties['children'] = \
        sorted(self._properties['mainGroup']._properties['children'],
               cmp=lambda x,y: x.CompareRootGroup(y))

    # Sort everything else by putting group before files, and going
    # alphabetically by name within sections of groups and files.  SortGroup
    # is recursive.
    for group in self._properties['mainGroup']._properties['children']:
      if not isinstance(group, PBXGroup):
        continue

      if group.Name() == 'Products':
        # The Products group is a special case.  Instead of sorting
        # alphabetically, sort things in the order of the targets that
        # produce the products.  To do this, just build up a new list of
        # products based on the targets.
        products = []
        for target in self._properties['targets']:
          if not isinstance(target, PBXNativeTarget):
            continue
          product = target._properties['productReference']
          # Make sure that the product is already in the products group.
          assert product in group._properties['children']
          products.append(product)

        # Make sure that this process doesn't miss anything that was already
        # in the products group.
        assert len(products) == len(group._properties['children'])
        group._properties['children'] = products
      else:
        group.SortGroup()

  def AddOrGetProjectReference(self, other_pbxproject):
    """Add a reference to another project file (via PBXProject object) to this
    one.

    Returns [ProductGroup, ProjectRef].  ProductGroup is a PBXGroup object in
    this project file that contains a PBXReferenceProxy object for each
    product of each PBXNativeTarget in the other project file.  ProjectRef is
    a PBXFileReference to the other project file.

    If this project file already references the other project file, the
    existing ProductGroup and ProjectRef are returned.  The ProductGroup will
    still be updated if necessary.
    """
    if not 'projectReferences' in self._properties:
      self._properties['projectReferences'] = []

    product_group = None
    project_ref = None

    if not other_pbxproject in self._other_pbxprojects:
      # This project file isn't yet linked to the other one.  Establish the
      # link.
      product_group = PBXGroup({'name': 'Products'})

      # ProductGroup is strong.
      product_group.parent = self

      # There's nothing unique about this PBXGroup, and if left alone, it will
      # wind up with the same set of hashables as all other PBXGroup objects
      # owned by the projectReferences list.  Add the hashables of the
      # remote PBXProject that it's related to.
      product_group._hashables.extend(other_pbxproject.Hashables())

      # The other project reports its path as relative to the same directory
      # that this project's path is relative to.  The other project's path
      # is not necessarily already relative to this project.  Figure out the
      # pathname that this project needs to use to refer to the other one.
      this_path = posixpath.dirname(self.Path())
      projectDirPath = self.GetProperty('projectDirPath')
      if projectDirPath:
        if posixpath.isabs(projectDirPath[0]):
          this_path = projectDirPath
        else:
          this_path = posixpath.join(this_path, projectDirPath)
      other_path = gyp.common.RelativePath(other_pbxproject.Path(), this_path)

      # ProjectRef is weak (it's owned by the mainGroup hierarchy).
      project_ref = PBXFileReference({
            'lastKnownFileType': 'wrapper.pb-project',
            'path': other_path,
            'sourceTree': 'SOURCE_ROOT',
          })
      self.ProjectsGroup().AppendChild(project_ref)

      ref_dict = {'ProductGroup': product_group, 'ProjectRef': project_ref}
      self._other_pbxprojects[other_pbxproject] = ref_dict
      self.AppendProperty('projectReferences', ref_dict)

      # Xcode seems to sort this list case-insensitively
      self._properties['projectReferences'] = \
          sorted(self._properties['projectReferences'], cmp=lambda x,y:
                 cmp(x['ProjectRef'].Name().lower(),
                     y['ProjectRef'].Name().lower()))
    else:
      # The link already exists.  Pull out the relevant data.
      project_ref_dict = self._other_pbxprojects[other_pbxproject]
      product_group = project_ref_dict['ProductGroup']
      project_ref = project_ref_dict['ProjectRef']

    self._SetUpProductReferences(other_pbxproject, product_group, project_ref)

    inherit_unique_symroot = self._AllSymrootsUnique(other_pbxproject, False)
    targets = other_pbxproject.GetProperty('targets')
    if all(self._AllSymrootsUnique(t, inherit_unique_symroot) for t in targets):
      dir_path = project_ref._properties['path']
      # NOTE(review): dir_path is a string, so extend() adds it one character
      # at a time; this only feeds the ID hash so it appears harmless, but
      # confirm before "fixing" it to append().
      product_group._hashables.extend(dir_path)

    return [product_group, project_ref]

  def _AllSymrootsUnique(self, target, inherit_unique_symroot):
    # Returns True if all configurations have a unique 'SYMROOT' attribute.
    # The value of inherit_unique_symroot decides, if a configuration is assumed
    # to inherit a unique 'SYMROOT' attribute from its parent, if it doesn't
    # define an explicit value for 'SYMROOT'.
    symroots = self._DefinedSymroots(target)
    for s in self._DefinedSymroots(target):
      if (s is not None and not self._IsUniqueSymrootForTarget(s) or
          s is None and not inherit_unique_symroot):
        return False
    return True if symroots else inherit_unique_symroot

  def _DefinedSymroots(self, target):
    # Returns all values for the 'SYMROOT' attribute defined in all
    # configurations for this target. If any configuration doesn't define the
    # 'SYMROOT' attribute, None is added to the returned set. If all
    # configurations don't define the 'SYMROOT' attribute, an empty set is
    # returned.
    config_list = target.GetProperty('buildConfigurationList')
    symroots = set()
    for config in config_list.GetProperty('buildConfigurations'):
      setting = config.GetProperty('buildSettings')
      if 'SYMROOT' in setting:
        symroots.add(setting['SYMROOT'])
      else:
        symroots.add(None)
    if len(symroots) == 1 and None in symroots:
      return set()
    return symroots

  def _IsUniqueSymrootForTarget(self, symroot):
    # This method returns True if all configurations in target contain a
    # 'SYMROOT' attribute that is unique for the given target. A value is
    # unique, if the Xcode macro '$SRCROOT' appears in it in any form.
    uniquifier = ['$SRCROOT', '$(SRCROOT)']
    if any(x in symroot for x in uniquifier):
      return True
    return False

  def _SetUpProductReferences(self, other_pbxproject, product_group,
                              project_ref):
    """Ensures |product_group| holds a PBXReferenceProxy for each
    PBXNativeTarget product in |other_pbxproject|."""
    # TODO(mark): This only adds references to products in other_pbxproject
    # when they don't exist in this pbxproject.  Perhaps it should also
    # remove references from this pbxproject that are no longer present in
    # other_pbxproject.  Perhaps it should update various properties if they
    # change.
    for target in other_pbxproject._properties['targets']:
      if not isinstance(target, PBXNativeTarget):
        continue

      other_fileref = target._properties['productReference']
      if product_group.GetChildByRemoteObject(other_fileref) is None:
        # Xcode sets remoteInfo to the name of the target and not the name
        # of its product, despite this proxy being a reference to the product.
        container_item = PBXContainerItemProxy({
              'containerPortal': project_ref,
              'proxyType': 2,
              'remoteGlobalIDString': other_fileref,
              'remoteInfo': target.Name()
            })
        # TODO(mark): Does sourceTree get copied straight over from the other
        # project?  Can the other project ever have lastKnownFileType here
        # instead of explicitFileType?  (Use it if so?)  Can path ever be
        # unset?  (I don't think so.)  Can other_fileref have name set, and
        # does it impact the PBXReferenceProxy if so?  These are the questions
        # that perhaps will be answered one day.
        reference_proxy = PBXReferenceProxy({
              'fileType': other_fileref._properties['explicitFileType'],
              'path': other_fileref._properties['path'],
              'sourceTree': other_fileref._properties['sourceTree'],
              'remoteRef': container_item,
            })

        product_group.AppendChild(reference_proxy)

  def SortRemoteProductReferences(self):
    # For each remote project file, sort the associated ProductGroup in the
    # same order that the targets are sorted in the remote project file.  This
    # is the sort order used by Xcode.

    def CompareProducts(x, y, remote_products):
      # x and y are PBXReferenceProxy objects.  Go through their associated
      # PBXContainerItem to get the remote PBXFileReference, which will be
      # present in the remote_products list.
      x_remote = x._properties['remoteRef']._properties['remoteGlobalIDString']
      y_remote = y._properties['remoteRef']._properties['remoteGlobalIDString']
      x_index = remote_products.index(x_remote)
      y_index = remote_products.index(y_remote)

      # Use the order of each remote PBXFileReference in remote_products to
      # determine the sort order.
      return cmp(x_index, y_index)

    for other_pbxproject, ref_dict in self._other_pbxprojects.iteritems():
      # Build up a list of products in the remote project file, ordered the
      # same as the targets that produce them.
      remote_products = []
      for target in other_pbxproject._properties['targets']:
        if not isinstance(target, PBXNativeTarget):
          continue
        remote_products.append(target._properties['productReference'])

      # Sort the PBXReferenceProxy children according to the list of remote
      # products.
      product_group = ref_dict['ProductGroup']
      product_group._properties['children'] = sorted(
          product_group._properties['children'],
          cmp=lambda x, y, rp=remote_products: CompareProducts(x, y, rp))
class XCProjectFile(XCObject):
  """Root wrapper for an entire project file: the rootObject (a PBXProject)
  plus the archive metadata (archiveVersion, objectVersion, classes)."""

  _schema = XCObject._schema.copy()
  _schema.update({
    'archiveVersion': [0, int, 0, 1, 1],
    'classes': [0, dict, 0, 1, {}],
    'objectVersion': [0, int, 0, 1, 46],
    'rootObject': [0, PBXProject, 1, 1],
  })

  def ComputeIDs(self, recursive=True, overwrite=True, hash=None):
    # Although XCProjectFile is implemented here as an XCObject, it's not a
    # proper object in the Xcode sense, and it certainly doesn't have its own
    # ID.  Pass through an attempt to update IDs to the real root object.
    if recursive:
      self._properties['rootObject'].ComputeIDs(recursive, overwrite, hash)

  def Print(self, file=sys.stdout):
    """Writes the whole project file in Xcode plist format to |file|."""
    self.VerifyHasRequiredProperties()

    # Add the special "objects" property, which will be caught and handled
    # separately during printing.  This structure allows a fairly standard
    # loop do the normal printing.
    self._properties['objects'] = {}
    self._XCPrint(file, 0, '// !$*UTF8*$!\n')
    if self._should_print_single_line:
      self._XCPrint(file, 0, '{ ')
    else:
      self._XCPrint(file, 0, '{\n')
    # Tuples sort by key first and keys are unique, so the default sort order
    # is identical to the old explicit cmp comparator.
    for property, value in sorted(self._properties.iteritems()):
      if property == 'objects':
        self._PrintObjects(file)
      else:
        self._XCKVPrint(file, 1, property, value)
    self._XCPrint(file, 0, '}\n')
    del self._properties['objects']

  def _PrintObjects(self, file):
    """Prints every descendant object grouped into per-class sections, each
    section sorted by object ID."""
    if self._should_print_single_line:
      self._XCPrint(file, 0, 'objects = {')
    else:
      self._XCPrint(file, 1, 'objects = {\n')

    # Bucket every descendant (except this pseudo-object itself) by class.
    objects_by_class = {}
    for object in self.Descendants():
      if object == self:
        continue
      class_name = object.__class__.__name__
      if not class_name in objects_by_class:
        objects_by_class[class_name] = []
      objects_by_class[class_name].append(object)

    for class_name in sorted(objects_by_class):
      self._XCPrint(file, 0, '\n')
      self._XCPrint(file, 0, '/* Begin ' + class_name + ' section */\n')
      # key= is equivalent to the old cmp-on-id comparator and clearer.
      for object in sorted(objects_by_class[class_name],
                           key=lambda o: o.id):
        object.Print(file)
      self._XCPrint(file, 0, '/* End ' + class_name + ' section */\n')

    if self._should_print_single_line:
      self._XCPrint(file, 0, '}; ')
    else:
      self._XCPrint(file, 1, '};\n')
| apache-2.0 |
linfuzki/autokey | src/lib/service.py | 47 | 17056 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Chris Dekter
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import time, logging, threading, traceback
import common
from iomediator import Key, IoMediator
from configmanager import *
if common.USING_QT:
from qtui.popupmenu import *
from PyKDE4.kdecore import i18n
else:
from gtkui.popupmenu import *
from macro import MacroManager
import scripting, model
# Module-level logger shared by the definitions below.
logger = logging.getLogger("service")
# Presumably caps the typed-key buffer (inputStack); usage is outside this
# excerpt — confirm at the call sites.
MAX_STACK_LENGTH = 150
def threaded(f):
    """Decorator that runs the wrapped callable on a new non-daemon thread.

    The call returns immediately and provides no handle on the result.
    Keyword arguments are now forwarded as well (previously only positional
    arguments were passed through, so keyword calls raised TypeError).
    """
    def wrapper(*args, **kwargs):
        t = threading.Thread(target=f, args=args, kwargs=kwargs,
                             name="Phrase-thread")
        t.setDaemon(False)
        t.start()
    # Preserve the wrapped function's metadata for logging/introspection.
    wrapper.__name__ = f.__name__
    wrapper.__dict__ = f.__dict__
    wrapper.__doc__ = f.__doc__
    return wrapper
def synchronized(lock):
    """Decorator factory: serialise every call to the wrapped function on
    *lock*.  The lock is acquired before the call and always released
    afterwards, even if the call raises."""
    def decorate(func):
        def guarded(*args, **kwargs):
            # ``with`` provides the same acquire / try-finally-release
            # semantics as the explicit form.
            with lock:
                return func(*args, **kwargs)
        return guarded
    return decorate
class Service:
    """
    Handles general functionality and dispatching of results down to the correct
    execution service (phrase or script).
    """

    def __init__(self, app):
        logger.info("Starting service")
        self.configManager = app.configManager
        ConfigManager.SETTINGS[SERVICE_RUNNING] = False
        self.mediator = None
        self.app = app
        self.inputStack = []      # recently typed printable keys (bounded)
        self.lastStackState = ''  # buffer contents when the last match occurred
        self.lastMenu = None      # (folders, items) tuple of the last popup shown

    def start(self):
        """Create and start the I/O mediator and the phrase/script runners."""
        self.mediator = IoMediator(self)
        self.mediator.interface.initialise()
        self.mediator.interface.start()
        self.mediator.start()
        ConfigManager.SETTINGS[SERVICE_RUNNING] = True
        self.scriptRunner = ScriptRunner(self.mediator, self.app)
        self.phraseRunner = PhraseRunner(self)
        scripting.Store.GLOBALS = ConfigManager.SETTINGS[SCRIPT_GLOBALS]
        logger.info("Service now marked as running")

    def unpause(self):
        ConfigManager.SETTINGS[SERVICE_RUNNING] = True
        logger.info("Unpausing - service now marked as running")

    def pause(self):
        ConfigManager.SETTINGS[SERVICE_RUNNING] = False
        logger.info("Pausing - service now marked as stopped")

    def is_running(self):
        return ConfigManager.SETTINGS[SERVICE_RUNNING]

    def shutdown(self, save=True):
        """Stop the mediator and optionally persist the configuration."""
        logger.info("Service shutting down")
        if self.mediator is not None: self.mediator.shutdown()
        if save: save_config(self.configManager)

    def handle_mouseclick(self, rootX, rootY, relX, relY, button, windowTitle):
        logger.debug("Received mouse click - resetting buffer")
        self.inputStack = []
        # If we had a menu and receive a mouse click, means we already
        # hid the menu. Don't need to do it again
        self.lastMenu = None
        # Clear last to prevent undo of previous phrase in unexpected places
        self.phraseRunner.clear_last()

    def handle_keypress(self, rawKey, modifiers, key, windowName, windowClass):
        """
        Top-level handler for every key event delivered by the IoMediator.

        Checks global and per-item hotkeys first; then, if no modifier other
        than shift is held, feeds the key into the input buffer and looks for
        abbreviation/predictive matches.
        """
        logger.debug("Raw key: %r, modifiers: %r, Key: %s", rawKey, modifiers, key.encode("utf-8"))
        # Lazy %-args instead of eager string interpolation (logging best practice)
        logger.debug("Window visible title: %r, Window class: %r", windowName, windowClass)
        self.configManager.lock.acquire()
        windowInfo = (windowName, windowClass)
        # Always check global hotkeys
        for hotkey in self.configManager.globalHotkeys:
            hotkey.check_hotkey(modifiers, rawKey, windowInfo)
        if self.__shouldProcess(windowInfo):
            itemMatch = None
            menu = None
            for item in self.configManager.hotKeys:
                if item.check_hotkey(modifiers, rawKey, windowInfo):
                    itemMatch = item
                    break
            if itemMatch is not None:
                if not itemMatch.prompt:
                    logger.info("Matched hotkey phrase/script with prompt=False")
                else:
                    logger.info("Matched hotkey phrase/script with prompt=True")
                    #menu = PopupMenu(self, [], [itemMatch])
                    menu = ([], [itemMatch])
            else:
                logger.debug("No phrase/script matched hotkey")
                for folder in self.configManager.hotKeyFolders:
                    if folder.check_hotkey(modifiers, rawKey, windowInfo):
                        #menu = PopupMenu(self, [folder], [])
                        menu = ([folder], [])
            if menu is not None:
                logger.debug("Folder matched hotkey - showing menu")
                if self.lastMenu is not None:
                    #self.lastMenu.remove_from_desktop()
                    self.app.hide_menu()
                self.lastStackState = ''
                self.lastMenu = menu
                #self.lastMenu.show_on_desktop()
                self.app.show_popup_menu(*menu)
            if itemMatch is not None:
                # Release before executing: the phrase/script may re-enter
                self.__tryReleaseLock()
                self.__processItem(itemMatch)
            ### --- end of hotkey processing --- ###
            modifierCount = len(modifiers)
            if modifierCount > 1 or (modifierCount == 1 and Key.SHIFT not in modifiers):
                self.inputStack = []
                self.__tryReleaseLock()
                return
            ### --- end of processing if non-printing modifiers are on --- ###
            if self.__updateStack(key):
                currentInput = ''.join(self.inputStack)
                item, menu = self.__checkTextMatches([], self.configManager.abbreviations,
                                                     currentInput, windowInfo, True)
                if not item or menu:
                    item, menu = self.__checkTextMatches(self.configManager.allFolders,
                                                         self.configManager.allItems,
                                                         currentInput, windowInfo)
                if item:
                    self.__tryReleaseLock()
                    self.__processItem(item, currentInput)
                elif menu:
                    if self.lastMenu is not None:
                        #self.lastMenu.remove_from_desktop()
                        self.app.hide_menu()
                    self.lastMenu = menu
                    #self.lastMenu.show_on_desktop()
                    self.app.show_popup_menu(*menu)
            logger.debug("Input stack at end of handle_keypress: %s", self.inputStack)
        self.__tryReleaseLock()

    def __tryReleaseLock(self):
        """Release the config lock, tolerating double-release."""
        try:
            self.configManager.lock.release()
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are not swallowed; releasing an unheld lock raises an Exception
        # subclass, which is what we want to ignore here.
        except Exception:
            logger.debug("Ignored locking error in handle_keypress")

    def run_folder(self, name):
        """Show the popup menu for the folder titled *name* (last match wins)."""
        folder = None
        for f in self.configManager.allFolders:
            if f.title == name:
                folder = f
        if folder is None:
            raise Exception("No folder found with name '%s'" % name)
        self.app.show_popup_menu([folder])

    def run_phrase(self, name):
        phrase = self.__findItem(name, model.Phrase, "phrase")
        self.phraseRunner.execute(phrase)

    def run_script(self, name):
        script = self.__findItem(name, model.Script, "script")
        self.scriptRunner.execute(script)

    def __findItem(self, name, objType, typeDescription):
        """Return the first item of type *objType* whose description is *name*."""
        for item in self.configManager.allItems:
            if item.description == name and isinstance(item, objType):
                return item
        raise Exception("No %s found with name '%s'" % (typeDescription, name))

    @threaded
    def item_selected(self, item):
        time.sleep(0.25)  # wait for window to be active
        self.lastMenu = None  # if an item has been selected, the menu has been hidden
        self.__processItem(item, self.lastStackState)

    def calculate_extra_keys(self, buffer):
        """
        Determine extra keys pressed since the given buffer was built
        """
        extraBs = len(self.inputStack) - len(buffer)
        if extraBs > 0:
            # Bug fix: slice from len(buffer) onward.  The original indexed a
            # single element (inputStack[len(buffer)]), so when more than one
            # extra key had been typed only the first was ever returned.
            extraKeys = ''.join(self.inputStack[len(buffer):])
        else:
            extraBs = 0
            extraKeys = ''
        return (extraBs, extraKeys)

    def __updateStack(self, key):
        """
        Update the input stack in non-hotkey mode, and determine if anything
        further is needed.

        @return: True if further action is needed
        """
        #if self.lastMenu is not None:
        #    if not ConfigManager.SETTINGS[MENU_TAKES_FOCUS]:
        #        self.app.hide_menu()
        #
        #    self.lastMenu = None
        if key == Key.ENTER:
            # Special case - map Enter to \n
            key = '\n'
        if key == Key.TAB:
            # Special case - map Tab to \t
            key = '\t'
        if key == Key.BACKSPACE:
            if ConfigManager.SETTINGS[UNDO_USING_BACKSPACE] and self.phraseRunner.can_undo():
                self.phraseRunner.undo_expansion()
            else:
                # handle backspace by dropping the last saved character
                self.inputStack = self.inputStack[:-1]
            return False
        elif len(key) > 1:
            # non-simple key
            self.inputStack = []
            self.phraseRunner.clear_last()
            return False
        else:
            # Key is a character
            self.phraseRunner.clear_last()
            self.inputStack.append(key)
            if len(self.inputStack) > MAX_STACK_LENGTH:
                self.inputStack.pop(0)
            return True

    def __checkTextMatches(self, folders, items, buffer, windowInfo, immediate=False):
        """
        Check for an abbreviation/predictive match among the given folder and items
        (scripts, phrases).

        @return: a tuple possibly containing an item to execute, or a menu to show
        """
        itemMatches = []
        folderMatches = []
        for item in items:
            if item.check_input(buffer, windowInfo):
                if not item.prompt and immediate:
                    return (item, None)
                else:
                    itemMatches.append(item)
        for folder in folders:
            if folder.check_input(buffer, windowInfo):
                folderMatches.append(folder)
                break # There should never be more than one folder match anyway
        if self.__menuRequired(folderMatches, itemMatches, buffer):
            self.lastStackState = buffer
            #return (None, PopupMenu(self, folderMatches, itemMatches))
            return (None, (folderMatches, itemMatches))
        elif len(itemMatches) == 1:
            self.lastStackState = buffer
            return (itemMatches[0], None)
        else:
            return (None, None)

    def __shouldProcess(self, windowInfo):
        """
        Return a boolean indicating whether we should take any action on the keypress
        """
        return windowInfo[0] != "Set Abbreviations" and self.is_running()

    def __processItem(self, item, buffer=''):
        """Reset the buffer state and dispatch *item* to the right runner."""
        self.inputStack = []
        self.lastStackState = ''
        if isinstance(item, model.Phrase):
            self.phraseRunner.execute(item, buffer)
        else:
            self.scriptRunner.execute(item, buffer)

    def __haveMatch(self, data):
        """Return True if *data* (a (folderMatch, itemMatches) tuple) holds a match."""
        folderMatch, itemMatches = data
        # Bug fix: the original body referenced undefined names ``folder``
        # and ``items``, raising NameError whenever this helper was called.
        if folderMatch is not None:
            return True
        if len(itemMatches) > 0:
            return True
        return False

    def __menuRequired(self, folders, items, buffer):
        """
        @return: a boolean indicating whether a menu is needed to allow the user to choose
        """
        if len(folders) > 0:
            # Folders always need a menu
            return True
        if len(items) == 1:
            return items[0].should_prompt(buffer)
        elif len(items) > 1:
            # More than one 'item' (phrase/script) needs a menu
            return True
        return False
class PhraseRunner:
    """Expands phrases via the I/O mediator and remembers the most recent
    expansion so that it can be undone."""

    def __init__(self, service):
        self.service = service
        self.macroManager = MacroManager(service.scriptRunner.engine)
        # State of the most recent expansion, used by can_undo()/undo_expansion()
        self.lastExpansion = None
        self.lastPhrase = None
        self.lastBuffer = None

    @threaded
    #@synchronized(iomediator.SEND_LOCK)
    def execute(self, phrase, buffer=''):
        """Send the expansion of *phrase*, erasing *buffer* first.

        Runs on a background thread (see @threaded).  Records the expansion
        so a subsequent undo_expansion() can reverse it.
        """
        io = self.service.mediator
        io.interface.begin_send()
        expansion = phrase.build_phrase(buffer)
        self.macroManager.process_expansion(expansion)
        io.send_backspace(expansion.backspaces)
        if phrase.sendMode != model.SendMode.KEYBOARD:
            io.paste_string(expansion.string, phrase.sendMode)
        else:
            io.send_string(expansion.string)
        io.interface.finish_send()
        self.lastExpansion = expansion
        self.lastPhrase = phrase
        self.lastBuffer = buffer

    def can_undo(self):
        """Whether the previous expansion was abbreviation-triggered (None
        when there is nothing to undo)."""
        if self.lastExpansion is None:
            return None
        return model.TriggerMode.ABBREVIATION in self.lastPhrase.modes

    def clear_last(self):
        """Forget the previous expansion, disabling undo."""
        self.lastPhrase = None
        self.lastExpansion = None

    @synchronized(iomediator.SEND_LOCK)
    def undo_expansion(self):
        """Erase the last expansion and re-type its trigger characters."""
        logger.info("Undoing last abbreviation expansion")
        replay = self.lastPhrase.get_trigger_chars(self.lastBuffer)
        logger.debug("Replay string: %s", replay)
        logger.debug("Erase string: %r", self.lastExpansion.string)
        io = self.service.mediator
        #io.send_right(self.lastExpansion.lefts)
        io.interface.begin_send()
        io.remove_string(self.lastExpansion.string)
        io.send_string(replay)
        io.interface.finish_send()
        self.clear_last()
class ScriptRunner:
    """Executes user scripts via the Python 2 ``exec`` statement in a
    prepared scripting scope (keyboard, mouse, system, window, engine,
    dialog, clipboard)."""
    def __init__(self, mediator, app):
        self.mediator = mediator
        self.app = app
        self.error = ''  # last error report, shown to the user on demand
        # NOTE(review): this is the module's own globals() dict, so the
        # scripting objects installed below also become visible module-wide
        # -- presumably intentional; confirm before changing.
        self.scope = globals()
        self.scope["keyboard"]= scripting.Keyboard(mediator)
        self.scope["mouse"]= scripting.Mouse(mediator)
        self.scope["system"] = scripting.System()
        self.scope["window"] = scripting.Window(mediator)
        self.scope["engine"] = scripting.Engine(app.configManager, self)
        # Toolkit-specific dialog/clipboard implementations
        if common.USING_QT:
            self.scope["dialog"] = scripting.QtDialog()
            self.scope["clipboard"] = scripting.QtClipboard(app)
        else:
            self.scope["dialog"] = scripting.GtkDialog()
            self.scope["clipboard"] = scripting.GtkClipboard(app)
        self.engine = self.scope["engine"]
    @threaded
    def execute(self, script, buffer=''):
        """Run *script* on a worker thread, erasing *buffer* first.
        Errors are stored in self.error and reported via the app notifier;
        any trailing string from the script's buffer processing is sent
        afterwards regardless of success.
        """
        logger.debug("Script runner executing: %r", script)
        # Shallow copy so the script's own names don't pollute the shared scope
        scope = self.scope.copy()
        scope["store"] = script.store
        backspaces, stringAfter = script.process_buffer(buffer)
        self.mediator.send_backspace(backspaces)
        try:
            exec script.code in scope
        except Exception, e:
            logger.exception("Script error")
            if common.USING_QT:
                self.error = i18n("Script name: '%1'\n%2", script.description, traceback.format_exc())
                self.app.notify_error(i18n("The script '%1' encountered an error", script.description))
            else:
                self.error = _("Script name: '%s'\n%s") % (script.description, traceback.format_exc())
                self.app.notify_error(_("The script '%s' encountered an error") % script.description)
        self.mediator.send_string(stringAfter)
    def run_subscript(self, script):
        # Synchronous variant used when one script invokes another; errors
        # propagate to the calling script rather than being reported here.
        scope = self.scope.copy()
        scope["store"] = script.store
        exec script.code in scope
| gpl-3.0 |
itkovian/sqlalchemy | test/sql/test_text.py | 23 | 25927 | """Test the TextClause and related constructs."""
from sqlalchemy.testing import fixtures, AssertsCompiledSQL, eq_, \
assert_raises_message, expect_warnings, assert_warnings
from sqlalchemy import text, select, Integer, String, Float, \
bindparam, and_, func, literal_column, exc, MetaData, Table, Column,\
asc, func, desc, union
from sqlalchemy.types import NullType
from sqlalchemy.sql import table, column, util as sql_util
from sqlalchemy import util
# Lightweight table()/column() fixtures shared by every test class in this
# module; no MetaData or engine is required for compilation tests.
table1 = table(
    'mytable',
    column('myid', Integer),
    column('name', String),
    column('description', String),
)
table2 = table('myothertable',
               column('otherid', Integer),
               column('othername', String))
class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
    """Baseline: a bare text() construct compiles to its own string."""
    __dialect__ = 'default'

    def test_basic(self):
        stmt = text("select * from foo where lala = bar")
        self.assert_compile(stmt, "select * from foo where lala = bar")
class SelectCompositionTest(fixtures.TestBase, AssertsCompiledSQL):
    """test the usage of text() implicit within the select() construct
    when strings are passed."""
    __dialect__ = 'default'
    def test_select_composition_one(self):
        self.assert_compile(select(
            [
                literal_column("foobar(a)"),
                literal_column("pk_foo_bar(syslaal)")
            ],
            text("a = 12"),
            from_obj=[
                text("foobar left outer join lala on foobar.foo = lala.foo")
            ]
        ),
            "SELECT foobar(a), pk_foo_bar(syslaal) FROM foobar "
            "left outer join lala on foobar.foo = lala.foo WHERE a = 12"
        )
    def test_select_composition_two(self):
        # builds the statement via the (legacy) in-place append_* mutators
        s = select()
        s.append_column(column("column1"))
        s.append_column(column("column2"))
        s.append_whereclause(text("column1=12"))
        s.append_whereclause(text("column2=19"))
        s = s.order_by("column1")
        s.append_from(text("table1"))
        self.assert_compile(s, "SELECT column1, column2 FROM table1 WHERE "
                            "column1=12 AND column2=19 ORDER BY column1")
    def test_select_composition_three(self):
        self.assert_compile(
            select([column("column1"), column("column2")],
                   from_obj=table1).alias('somealias').select(),
            "SELECT somealias.column1, somealias.column2 FROM "
            "(SELECT column1, column2 FROM mytable) AS somealias"
        )
    def test_select_composition_four(self):
        # test that use_labels doesn't interfere with literal columns
        self.assert_compile(
            select([
                text("column1"), column("column2"),
                column("column3").label("bar"), table1.c.myid],
                from_obj=table1,
                use_labels=True),
            "SELECT column1, column2, column3 AS bar, "
            "mytable.myid AS mytable_myid "
            "FROM mytable"
        )
    def test_select_composition_five(self):
        # test that use_labels doesn't interfere
        # with literal columns that have textual labels
        self.assert_compile(
            select([
                text("column1 AS foobar"), text("column2 AS hoho"),
                table1.c.myid],
                from_obj=table1, use_labels=True),
            "SELECT column1 AS foobar, column2 AS hoho, "
            "mytable.myid AS mytable_myid FROM mytable"
        )
    def test_select_composition_six(self):
        # test that "auto-labeling of subquery columns"
        # doesn't interfere with literal columns,
        # exported columns don't get quoted
        self.assert_compile(
            select([
                literal_column("column1 AS foobar"),
                literal_column("column2 AS hoho"), table1.c.myid],
                from_obj=[table1]).select(),
            "SELECT column1 AS foobar, column2 AS hoho, myid FROM "
            "(SELECT column1 AS foobar, column2 AS hoho, "
            "mytable.myid AS myid FROM mytable)"
        )
    def test_select_composition_seven(self):
        self.assert_compile(
            select([
                literal_column('col1'),
                literal_column('col2')
            ], from_obj=table('tablename')).alias('myalias'),
            "SELECT col1, col2 FROM tablename"
        )
    def test_select_composition_eight(self):
        self.assert_compile(select(
            [table1.alias('t'), text("foo.f")],
            text("foo.f = t.id"),
            from_obj=[text("(select f from bar where lala=heyhey) foo")]
        ),
            "SELECT t.myid, t.name, t.description, foo.f FROM mytable AS t, "
            "(select f from bar where lala=heyhey) foo WHERE foo.f = t.id")
    def test_select_bundle_columns(self):
        # textual fragments mix freely with real columns in the columns
        # clause and in and_()-joined criteria
        self.assert_compile(select(
            [table1, table2.c.otherid,
             text("sysdate()"), text("foo, bar, lala")],
            and_(
                text("foo.id = foofoo(lala)"),
                text("datetime(foo) = Today"),
                table1.c.myid == table2.c.otherid,
            )
        ),
            "SELECT mytable.myid, mytable.name, mytable.description, "
            "myothertable.otherid, sysdate(), foo, bar, lala "
            "FROM mytable, myothertable WHERE foo.id = foofoo(lala) AND "
            "datetime(foo) = Today AND mytable.myid = myothertable.otherid")
class BindParamTest(fixtures.TestBase, AssertsCompiledSQL):
    """Tests for bound parameters on text() constructs.

    Covers the legacy ``bindparams=`` argument, the generative
    ``.bindparams()`` method (positional bindparam() objects and keyword
    values), literal-bind rendering, bind type inference, dialect-specific
    parameter styles, colon escaping, and the errors raised for undefined
    parameter names.
    """

    __dialect__ = 'default'

    def test_legacy(self):
        t = text("select * from foo where lala=:bar and hoho=:whee",
                 bindparams=[bindparam('bar', 4), bindparam('whee', 7)])
        self.assert_compile(
            t,
            "select * from foo where lala=:bar and hoho=:whee",
            checkparams={'bar': 4, 'whee': 7},
        )

    def test_positional(self):
        t = text("select * from foo where lala=:bar and hoho=:whee")
        t = t.bindparams(bindparam('bar', 4), bindparam('whee', 7))
        self.assert_compile(
            t,
            "select * from foo where lala=:bar and hoho=:whee",
            checkparams={'bar': 4, 'whee': 7},
        )

    def test_kw(self):
        t = text("select * from foo where lala=:bar and hoho=:whee")
        t = t.bindparams(bar=4, whee=7)
        self.assert_compile(
            t,
            "select * from foo where lala=:bar and hoho=:whee",
            checkparams={'bar': 4, 'whee': 7},
        )

    def test_positional_plus_kw(self):
        t = text("select * from foo where lala=:bar and hoho=:whee")
        t = t.bindparams(bindparam('bar', 4), whee=7)
        self.assert_compile(
            t,
            "select * from foo where lala=:bar and hoho=:whee",
            checkparams={'bar': 4, 'whee': 7},
        )

    def test_literal_binds(self):
        t = text("select * from foo where lala=:bar and hoho=:whee")
        t = t.bindparams(bindparam('bar', 4), whee='whee')
        self.assert_compile(
            t,
            "select * from foo where lala=4 and hoho='whee'",
            checkparams={},
            literal_binds=True
        )

    def _assert_type_map(self, t, compare):
        """Assert each bind in *t* has the type affinity given in *compare*."""
        map_ = dict(
            (b.key, b.type) for b in t._bindparams.values()
        )
        for k in compare:
            assert compare[k]._type_affinity is map_[k]._type_affinity

    def test_typing_construction(self):
        # binds start untyped and pick up types as they are (re)declared
        t = text("select * from table :foo :bar :bat")
        self._assert_type_map(t, {"foo": NullType(),
                                  "bar": NullType(),
                                  "bat": NullType()})
        t = t.bindparams(bindparam('foo', type_=String))
        self._assert_type_map(t, {"foo": String(),
                                  "bar": NullType(),
                                  "bat": NullType()})
        t = t.bindparams(bindparam('bar', type_=Integer))
        self._assert_type_map(t, {"foo": String(),
                                  "bar": Integer(),
                                  "bat": NullType()})
        t = t.bindparams(bat=45.564)
        self._assert_type_map(t, {"foo": String(),
                                  "bar": Integer(),
                                  "bat": Float()})

    def test_binds_compiled_named(self):
        self.assert_compile(
            text("select * from foo where lala=:bar and hoho=:whee").
            bindparams(bar=4, whee=7),
            "select * from foo where lala=%(bar)s and hoho=%(whee)s",
            checkparams={'bar': 4, 'whee': 7},
            dialect="postgresql"
        )

    def test_binds_compiled_positional(self):
        self.assert_compile(
            text("select * from foo where lala=:bar and hoho=:whee").
            bindparams(bar=4, whee=7),
            "select * from foo where lala=? and hoho=?",
            checkparams={'bar': 4, 'whee': 7},
            dialect="sqlite"
        )

    def test_missing_bind_kw(self):
        # Raw string: '\(' is an invalid escape sequence in a plain string
        # literal (DeprecationWarning since Python 3.6); the regex value is
        # byte-identical either way.
        assert_raises_message(
            exc.ArgumentError,
            r"This text\(\) construct doesn't define a bound parameter named 'bar'",
            text(":foo").bindparams,
            foo=5,
            bar=7)

    def test_missing_bind_posn(self):
        assert_raises_message(
            exc.ArgumentError,
            r"This text\(\) construct doesn't define a bound parameter named 'bar'",
            text(":foo").bindparams,
            bindparam(
                'foo',
                value=5),
            bindparam(
                'bar',
                value=7))

    def test_escaping_colons(self):
        # test escaping out text() params with a backslash; the raw string
        # keeps the literal backslash without an invalid '\:' escape.
        self.assert_compile(
            text(r"select * from foo where clock='05:06:07' "
                 r"and mork='\:mindy'"),
            "select * from foo where clock='05:06:07' and mork=':mindy'",
            checkparams={},
            params={},
            dialect="postgresql"
        )

    def test_text_in_select_nonfrom(self):
        generate_series = text("generate_series(:x, :y, :z) as s(a)").\
            bindparams(x=None, y=None, z=None)

        s = select([
            (func.current_date() + literal_column("s.a")).label("dates")
        ]).select_from(generate_series)

        self.assert_compile(
            s,
            "SELECT CURRENT_DATE + s.a AS dates FROM "
            "generate_series(:x, :y, :z) as s(a)",
            checkparams={'y': None, 'x': None, 'z': None}
        )

        self.assert_compile(
            s.params(x=5, y=6, z=7),
            "SELECT CURRENT_DATE + s.a AS dates FROM "
            "generate_series(:x, :y, :z) as s(a)",
            checkparams={'y': 6, 'x': 5, 'z': 7}
        )
class AsFromTest(fixtures.TestBase, AssertsCompiledSQL):
    """Tests for text().columns(): adapting a textual statement into a
    selectable with a typed column collection (result-map generation,
    aliasing, CTEs, scalar subqueries, and bind construction)."""
    __dialect__ = 'default'
    def test_basic_toplevel_resultmap_positional(self):
        t = text("select id, name from user").columns(
            column('id', Integer),
            column('name')
        )
        compiled = t.compile()
        eq_(compiled._create_result_map(),
            {'id': ('id',
                    (t.c.id._proxies[0],
                     'id',
                     'id'),
                    t.c.id.type),
             'name': ('name',
                      (t.c.name._proxies[0],
                       'name',
                       'name'),
                      t.c.name.type)})
    def test_basic_toplevel_resultmap(self):
        # same expectation as above, built with the keyword (name=type) form
        t = text("select id, name from user").columns(id=Integer, name=String)
        compiled = t.compile()
        eq_(compiled._create_result_map(),
            {'id': ('id',
                    (t.c.id._proxies[0],
                     'id',
                     'id'),
                    t.c.id.type),
             'name': ('name',
                      (t.c.name._proxies[0],
                       'name',
                       'name'),
                      t.c.name.type)})
    def test_basic_subquery_resultmap(self):
        t = text("select id, name from user").columns(id=Integer, name=String)
        stmt = select([table1.c.myid]).select_from(
            table1.join(t, table1.c.myid == t.c.id))
        compiled = stmt.compile()
        eq_(
            compiled._create_result_map(),
            {
                "myid": ("myid",
                         (table1.c.myid, "myid", "myid"), table1.c.myid.type),
            }
        )
    def test_column_collection_ordered(self):
        t = text("select a, b, c from foo").columns(column('a'),
                                                    column('b'), column('c'))
        eq_(t.c.keys(), ['a', 'b', 'c'])
    def test_column_collection_pos_plus_bykey(self):
        # overlapping positional names + type names
        t = text("select a, b, c from foo").columns(
            column('a'),
            column('b'),
            b=Integer,
            c=String)
        eq_(t.c.keys(), ['a', 'b', 'c'])
        eq_(t.c.b.type._type_affinity, Integer)
        eq_(t.c.c.type._type_affinity, String)
    def _xy_table_fixture(self):
        # small two-column Table used by the label/alias mapping tests below
        m = MetaData()
        t = Table('t', m, Column('x', Integer), Column('y', Integer))
        return t
    def _mapping(self, stmt):
        # invert the compiled result map: mapped element -> result-map key
        compiled = stmt.compile()
        return dict(
            (elem, key)
            for key, elements in compiled._create_result_map().items()
            for elem in elements[1]
        )
    def test_select_label_alt_name(self):
        t = self._xy_table_fixture()
        l1, l2 = t.c.x.label('a'), t.c.y.label('b')
        s = text("select x AS a, y AS b FROM t").columns(l1, l2)
        mapping = self._mapping(s)
        assert l1 in mapping
        assert t.c.x not in mapping
    def test_select_alias_label_alt_name(self):
        t = self._xy_table_fixture()
        l1, l2 = t.c.x.label('a'), t.c.y.label('b')
        s = text("select x AS a, y AS b FROM t").columns(l1, l2).alias()
        mapping = self._mapping(s)
        assert l1 in mapping
        assert t.c.x not in mapping
    def test_select_column(self):
        t = self._xy_table_fixture()
        x, y = t.c.x, t.c.y
        s = text("select x, y FROM t").columns(x, y)
        mapping = self._mapping(s)
        assert t.c.x in mapping
    def test_select_alias_column(self):
        t = self._xy_table_fixture()
        x, y = t.c.x, t.c.y
        s = text("select x, y FROM t").columns(x, y).alias()
        mapping = self._mapping(s)
        assert t.c.x in mapping
    def test_select_table_alias_column(self):
        t = self._xy_table_fixture()
        x, y = t.c.x, t.c.y
        ta = t.alias()
        s = text("select ta.x, ta.y FROM t AS ta").columns(ta.c.x, ta.c.y)
        mapping = self._mapping(s)
        assert x not in mapping
    def test_select_label_alt_name_table_alias_column(self):
        t = self._xy_table_fixture()
        x, y = t.c.x, t.c.y
        ta = t.alias()
        l1, l2 = ta.c.x.label('a'), ta.c.y.label('b')
        s = text("SELECT ta.x AS a, ta.y AS b FROM t AS ta").columns(l1, l2)
        mapping = self._mapping(s)
        assert x not in mapping
        assert l1 in mapping
        assert ta.c.x not in mapping
    def test_cte(self):
        t = text("select id, name from user").columns(
            id=Integer,
            name=String).cte('t')
        s = select([table1]).where(table1.c.myid == t.c.id)
        self.assert_compile(
            s,
            "WITH t AS (select id, name from user) "
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable, t WHERE mytable.myid = t.id"
        )
    def test_alias(self):
        t = text("select id, name from user").columns(
            id=Integer,
            name=String).alias('t')
        s = select([table1]).where(table1.c.myid == t.c.id)
        self.assert_compile(
            s,
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable, (select id, name from user) AS t "
            "WHERE mytable.myid = t.id"
        )
    def test_scalar_subquery(self):
        t = text("select id from user").columns(id=Integer)
        subq = t.as_scalar()
        assert subq.type._type_affinity is Integer()._type_affinity
        s = select([table1.c.myid, subq]).where(table1.c.myid == subq)
        self.assert_compile(
            s,
            "SELECT mytable.myid, (select id from user) AS anon_1 "
            "FROM mytable WHERE mytable.myid = (select id from user)"
        )
    def test_build_bindparams(self):
        # binds declared before and after .columns() accumulate on .element
        t = text("select id from user :foo :bar :bat")
        t = t.bindparams(bindparam("foo", type_=Integer))
        t = t.columns(id=Integer)
        t = t.bindparams(bar=String)
        t = t.bindparams(bindparam('bat', value='bat'))
        eq_(
            set(t.element._bindparams),
            set(["bat", "foo", "bar"])
        )
class TextWarningsTest(fixtures.TestBase, AssertsCompiledSQL):
    """Verify that plain strings passed where SQL expressions are expected
    emit the "Textual SQL" coercion warning but still compile."""
    __dialect__ = 'default'
    def _test(self, fn, arg, offending_clause, expected):
        # Compile once under expect_warnings() to check the rendered SQL,
        # then call again to assert the exact warning message text.
        with expect_warnings("Textual "):
            stmt = fn(arg)
            self.assert_compile(stmt, expected)
        assert_raises_message(
            exc.SAWarning,
            r"Textual (?:SQL|column|SQL FROM) expression %(stmt)r should be "
            r"explicitly declared (?:with|as) text\(%(stmt)r\)" % {
                "stmt": util.ellipses_string(offending_clause),
            },
            fn, arg
        )
    def test_where(self):
        self._test(
            select([table1.c.myid]).where, "myid == 5", "myid == 5",
            "SELECT mytable.myid FROM mytable WHERE myid == 5"
        )
    def test_column(self):
        self._test(
            select, ["myid"], "myid",
            "SELECT myid"
        )
    def test_having(self):
        self._test(
            select([table1.c.myid]).having, "myid == 5", "myid == 5",
            "SELECT mytable.myid FROM mytable HAVING myid == 5"
        )
    def test_from(self):
        self._test(
            select([table1.c.myid]).select_from, "mytable", "mytable",
            "SELECT mytable.myid FROM mytable, mytable" # two FROMs
        )
class OrderByLabelResolutionTest(fixtures.TestBase, AssertsCompiledSQL):
    """Tests for resolving string label references in ORDER BY, GROUP BY,
    DISTINCT ON and OVER clauses, including the convert-to-text warning
    when a reference cannot be resolved."""
    __dialect__ = 'default'
    def _test_warning(self, stmt, offending_clause, expected):
        # compile under expect_warnings() to check the SQL, then again to
        # assert the exact warning message
        with expect_warnings(
                "Can't resolve label reference %r;" % offending_clause):
            self.assert_compile(
                stmt,
                expected
            )
        assert_raises_message(
            exc.SAWarning,
            "Can't resolve label reference %r; converting to text" %
            offending_clause,
            stmt.compile
        )
    def test_order_by_label(self):
        stmt = select([table1.c.myid.label('foo')]).order_by('foo')
        self.assert_compile(
            stmt,
            "SELECT mytable.myid AS foo FROM mytable ORDER BY foo"
        )
    def test_order_by_colname(self):
        stmt = select([table1.c.myid]).order_by('name')
        self.assert_compile(
            stmt,
            "SELECT mytable.myid FROM mytable ORDER BY mytable.name"
        )
    def test_order_by_alias_colname(self):
        t1 = table1.alias()
        stmt = select([t1.c.myid]).apply_labels().order_by('name')
        self.assert_compile(
            stmt,
            "SELECT mytable_1.myid AS mytable_1_myid "
            "FROM mytable AS mytable_1 ORDER BY mytable_1.name"
        )
    def test_order_by_named_label_from_anon_label(self):
        s1 = select([table1.c.myid.label(None).label("foo"), table1.c.name])
        stmt = s1.order_by("foo")
        self.assert_compile(
            stmt,
            "SELECT mytable.myid AS foo, mytable.name "
            "FROM mytable ORDER BY foo"
        )
    def test_order_by_outermost_label(self):
        # test [ticket:3335], assure that order_by("foo")
        # catches the label named "foo" in the columns clause only,
        # and not the label named "foo" in the FROM clause
        s1 = select([table1.c.myid.label("foo"), table1.c.name]).alias()
        stmt = select([s1.c.name, func.bar().label("foo")]).order_by("foo")
        self.assert_compile(
            stmt,
            "SELECT anon_1.name, bar() AS foo FROM "
            "(SELECT mytable.myid AS foo, mytable.name AS name "
            "FROM mytable) AS anon_1 ORDER BY foo"
        )
    def test_unresolvable_warning_order_by(self):
        stmt = select([table1.c.myid]).order_by('foobar')
        self._test_warning(
            stmt, "foobar",
            "SELECT mytable.myid FROM mytable ORDER BY foobar"
        )
    def test_group_by_label(self):
        stmt = select([table1.c.myid.label('foo')]).group_by('foo')
        self.assert_compile(
            stmt,
            "SELECT mytable.myid AS foo FROM mytable GROUP BY foo"
        )
    def test_group_by_colname(self):
        stmt = select([table1.c.myid]).group_by('name')
        self.assert_compile(
            stmt,
            "SELECT mytable.myid FROM mytable GROUP BY mytable.name"
        )
    def test_unresolvable_warning_group_by(self):
        stmt = select([table1.c.myid]).group_by('foobar')
        self._test_warning(
            stmt, "foobar",
            "SELECT mytable.myid FROM mytable GROUP BY foobar"
        )
    def test_asc(self):
        stmt = select([table1.c.myid]).order_by(asc('name'), 'description')
        self.assert_compile(
            stmt,
            "SELECT mytable.myid FROM mytable "
            "ORDER BY mytable.name ASC, mytable.description"
        )
    def test_group_by_subquery(self):
        stmt = select([table1]).alias()
        stmt = select([stmt]).apply_labels().group_by("myid")
        self.assert_compile(
            stmt,
            "SELECT anon_1.myid AS anon_1_myid, anon_1.name AS anon_1_name, "
            "anon_1.description AS anon_1_description FROM "
            "(SELECT mytable.myid AS myid, mytable.name AS name, "
            "mytable.description AS description FROM mytable) AS anon_1 "
            "GROUP BY anon_1.myid"
        )
    def test_order_by_func_label_desc(self):
        stmt = select([func.foo('bar').label('fb'), table1]).\
            order_by(desc('fb'))
        self.assert_compile(
            stmt,
            "SELECT foo(:foo_1) AS fb, mytable.myid, mytable.name, "
            "mytable.description FROM mytable ORDER BY fb DESC"
        )
    def test_pg_distinct(self):
        stmt = select([table1]).distinct('name')
        self.assert_compile(
            stmt,
            "SELECT DISTINCT ON (mytable.name) mytable.myid, "
            "mytable.name, mytable.description FROM mytable",
            dialect="postgresql"
        )
    def test_over(self):
        # string references in over()'s order_by / partition_by also resolve
        stmt = select([column("foo"), column("bar")])
        stmt = select(
            [func.row_number().
             over(order_by='foo', partition_by='bar')]
        ).select_from(stmt)
        self.assert_compile(
            stmt,
            "SELECT row_number() OVER (PARTITION BY bar ORDER BY foo) "
            "AS anon_1 FROM (SELECT foo, bar)"
        )
    def test_union_column(self):
        s1 = select([table1])
        s2 = select([table1])
        stmt = union(s1, s2).order_by("name")
        self.assert_compile(
            stmt,
            "SELECT mytable.myid, mytable.name, mytable.description FROM "
            "mytable UNION SELECT mytable.myid, mytable.name, "
            "mytable.description FROM mytable ORDER BY name"
        )
    def test_union_label(self):
        s1 = select([func.foo("hoho").label('x')])
        s2 = select([func.foo("Bar").label('y')])
        stmt = union(s1, s2).order_by("x")
        self.assert_compile(
            stmt,
            "SELECT foo(:foo_1) AS x UNION SELECT foo(:foo_2) AS y ORDER BY x"
        )
    def test_standalone_units_stringable(self):
        self.assert_compile(
            desc("somelabel"),
            "somelabel DESC"
        )
    def test_columnadapter_anonymized(self):
        """test issue #3148
        Testing the anonymization applied from the ColumnAdapter.columns
        collection, typically as used in eager loading.
        """
        exprs = [
            table1.c.myid,
            table1.c.name.label('t1name'),
            func.foo("hoho").label('x')]
        ta = table1.alias()
        adapter = sql_util.ColumnAdapter(ta, anonymize_labels=True)
        s1 = select([adapter.columns[expr] for expr in exprs]).\
            apply_labels().order_by("myid", "t1name", "x")
        def go():
            # the labels here are anonymized, so label naming
            # can't catch these.
            self.assert_compile(
                s1,
                "SELECT mytable_1.myid AS mytable_1_myid, "
                "mytable_1.name AS name_1, foo(:foo_2) AS foo_1 "
                "FROM mytable AS mytable_1 ORDER BY mytable_1.myid, t1name, x"
            )
        assert_warnings(
            go,
            ["Can't resolve label reference 't1name'",
             "Can't resolve label reference 'x'"], regex=True)
    def test_columnadapter_non_anonymized(self):
        """test issue #3148
        Testing the anonymization applied from the ColumnAdapter.columns
        collection, typically as used in eager loading.
        """
        exprs = [
            table1.c.myid,
            table1.c.name.label('t1name'),
            func.foo("hoho").label('x')]
        ta = table1.alias()
        adapter = sql_util.ColumnAdapter(ta)
        s1 = select([adapter.columns[expr] for expr in exprs]).\
            apply_labels().order_by("myid", "t1name", "x")
        # labels are maintained
        self.assert_compile(
            s1,
            "SELECT mytable_1.myid AS mytable_1_myid, "
            "mytable_1.name AS t1name, foo(:foo_1) AS x "
            "FROM mytable AS mytable_1 ORDER BY mytable_1.myid, t1name, x"
        )
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.