repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
houzhenggang/hiwifi-openwrt-HC5661-HC5761 | staging_dir/toolchain-mipsel_r2_gcc-4.6-linaro_uClibc-0.9.33.2/mipsel-openwrt-linux/lib/libstdc++.so.6.0.16-gdb.py | 15 | 2477 | # -*- python -*-
# Copyright (C) 2009, 2010 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import gdb
import os
import os.path

# Toolchain-specific install locations baked in at build time; used only to
# compute a RELATIVE path below, so the tree stays relocatable.
pythondir = '/home/hiwifi/hc5761/staging_dir/toolchain-mipsel_r2_gcc-4.6-linaro_uClibc-0.9.33.2/share/gcc-4.6.4/python'
libdir = '/home/hiwifi/hc5761/staging_dir/toolchain-mipsel_r2_gcc-4.6-linaro_uClibc-0.9.33.2/mipsel-openwrt-linux-uclibc/lib'

# This file might be loaded when there is no current objfile.  This
# can happen if the user loads it manually.  In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
if gdb.current_objfile () is not None:
    # Update module path.  We want to find the relative path from libdir
    # to pythondir, and then we want to apply that relative path to the
    # directory holding the objfile with which this file is associated.
    # This preserves relocatability of the gcc tree.

    # Do a simple normalization that removes duplicate separators.
    pythondir = os.path.normpath (pythondir)
    libdir = os.path.normpath (libdir)
    prefix = os.path.commonprefix ([libdir, pythondir])

    # In some bizarre configuration we might have found a match in the
    # middle of a directory name.
    if prefix[-1] != '/':
        prefix = os.path.dirname (prefix) + '/'

    # Strip off the prefix.
    pythondir = pythondir[len (prefix):]
    libdir = libdir[len (prefix):]

    # Compute the ".."s needed to get from libdir to the prefix.
    dotdots = ('..' + os.sep) * len (libdir.split (os.sep))

    objfile = gdb.current_objfile ().filename
    dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir)

    # Only add the computed directory once, no matter how many shared
    # libraries trigger this script.
    if not dir_ in sys.path:
        sys.path.insert(0, dir_)

# Load the pretty-printers.
from libstdcxx.v6.printers import register_libstdcxx_printers
register_libstdcxx_printers (gdb.current_objfile ())
| gpl-2.0 |
tuxfux-hlp-notes/python-batches | archieves/batch-59/modules/sheets/lib/python2.7/site-packages/pip/_vendor/requests/packages/charade/__init__.py | 200 | 2221 | ######################## BEGIN LICENSE BLOCK ########################
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
__version__ = "1.0.3"
from sys import version_info
def detect(aBuf):
    """Run universal encoding detection over *aBuf* and return the result dict."""
    # Reject text input up front: detection operates on raw bytes only.
    # (The py2 branch is evaluated first so `unicode` is never touched on py3.)
    running_py2 = version_info < (3, 0)
    if (running_py2 and isinstance(aBuf, unicode)) or \
            (not running_py2 and not isinstance(aBuf, bytes)):
        raise ValueError('Expected a bytes object, not a unicode object')

    from . import universaldetector
    detector = universaldetector.UniversalDetector()
    detector.reset()
    detector.feed(aBuf)
    detector.close()
    return detector.result
def _description_of(path):
    """Return a string describing the probable encoding of a file.

    Reads *path* in binary mode line by line, feeding a UniversalDetector,
    and formats either "path: encoding with confidence c" or "path: no result".
    """
    from charade.universaldetector import UniversalDetector
    u = UniversalDetector()
    # Close the file deterministically; the original left the handle to the
    # garbage collector (a resource leak on non-refcounting interpreters).
    with open(path, 'rb') as fp:
        for line in fp:
            u.feed(line)
    u.close()
    result = u.result
    if result['encoding']:
        return '%s: %s with confidence %s' % (path,
                                              result['encoding'],
                                              result['confidence'])
    else:
        return '%s: no result' % path
def charade_cli():
    """Report the detected encoding of each file path given on the command line.

    Example::

        % chardetect.py somefile someotherfile
        somefile: windows-1252 with confidence 0.5
        someotherfile: ascii with confidence 1.0
    """
    from sys import argv
    descriptions = (_description_of(p) for p in argv[1:])
    for description in descriptions:
        print(description)
| gpl-3.0 |
hradec/gaffer | python/GafferUI/PathPlugValueWidget.py | 7 | 6869 | ##########################################################################
#
# Copyright (c) 2011-2012, Image Engine Design Inc. All rights reserved.
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import Gaffer
import GafferUI
import os
## Supported plug metadata - used to provide arguments to a
# PathChooserDialogue :
#
# - "path:leaf"
# - "path:valid"
# - "path:bookmarks"
class PathPlugValueWidget( GafferUI.PlugValueWidget ) :
	"""PlugValueWidget for plugs holding a path: shows an editable PathWidget
	plus a browse button that opens a PathChooserDialogue."""

	## path should be an instance of Gaffer.Path, optionally with
	# filters applied. It will be updated with the contents of the plug.
	#
	# \deprecated The pathChooserDialogueKeywords argument will be removed
	# in a future version - use metadata instead.
	def __init__( self, plug, path=None, pathChooserDialogueKeywords=None, **kw ) :

		self.__row = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing = 4 )

		GafferUI.PlugValueWidget.__init__( self, self.__row, plug, **kw )

		self.__path = path if path is not None else Gaffer.FileSystemPath()
		self.__pathChooserDialogueKeywords = pathChooserDialogueKeywords

		pathWidget = GafferUI.PathWidget( self.__path )
		self._addPopupMenu( pathWidget )
		self.__row.append( pathWidget )

		button = GafferUI.Button( image = "pathChooser.png", hasFrame=False )
		button.clickedSignal().connect( Gaffer.WeakMethod( self.__buttonClicked ), scoped = False )
		self.__row.append( button )

		pathWidget.editingFinishedSignal().connect( Gaffer.WeakMethod( self.__setPlugValue ), scoped = False )

		self._updateFromPlug()

	def path( self ) :
		# The live Gaffer.Path instance mirrored into/out of the plug.
		return self.__path

	## Returns the PathWidget used to display the path.
	def pathWidget( self ) :

		return self.__row[0]

	def setHighlighted( self, highlighted ) :
		# Forward highlight state to the embedded PathWidget too.
		GafferUI.PlugValueWidget.setHighlighted( self, highlighted )
		self.pathWidget().setHighlighted( highlighted )

	def getToolTip( self ) :
		# Append the interaction cheat-sheet below the base tooltip.
		result = GafferUI.PlugValueWidget.getToolTip( self )

		if result :
			result += "\n\n"
		result += "## Actions\n\n"
		result += "- <kbd>Tab</kbd> to autocomplete path component\n"
		result += "- Select path component (or hit <kbd>&darr;</kbd>) to show path-level contents menu\n"
		result += "- Select all to show path hierarchy menu\n"

		return result

	## May be reimplemented in derived classes to customise the
	# creation of the PathChooserDialogue. Implementations should
	# call the base class method and apply customisations to the
	# result, rather than construct their own dialogue directly.
	def _pathChooserDialogue( self ) :

		# make a copy so we're not updating the main path as users browse
		pathCopy = self.__path.copy()

		# get the keywords for the dialogue constructor
		# from the plug metadata.
		pathChooserDialogueKeywords = {}
		pathChooserDialogueKeywords["leaf"] = self.__metadataValue( "leaf" )
		pathChooserDialogueKeywords["valid"] = self.__metadataValue( "valid" )

		bookmarks = self.__metadataValue( "bookmarks" )
		if bookmarks is not None :
			pathChooserDialogueKeywords["bookmarks"] = GafferUI.Bookmarks.acquire( self.getPlug(), type( pathCopy ), bookmarks )

		# support deprecated keywords passed to our constructor
		if self.__pathChooserDialogueKeywords is not None :
			if callable( self.__pathChooserDialogueKeywords ) :
				pathChooserDialogueKeywords.update( self.__pathChooserDialogueKeywords() )
			else :
				pathChooserDialogueKeywords.update( self.__pathChooserDialogueKeywords )

		# choose a sensible starting location if the path is empty.
		if pathCopy.isEmpty() :
			bookmarks = pathChooserDialogueKeywords.get( "bookmarks", None )
			if bookmarks is not None :
				pathCopy.setFromString( bookmarks.getDefault() )
			elif isinstance( pathCopy, Gaffer.FileSystemPath ) :
				pathCopy.setFromString( os.path.expanduser( "~" ) )

		return GafferUI.PathChooserDialogue( pathCopy, **pathChooserDialogueKeywords )

	def _updateFromPlug( self ) :
		# Mirror the plug value into our Path, tolerating strings that are
		# not valid paths rather than raising.
		with self.getContext() :
			with IECore.IgnoredExceptions( ValueError ) :
				self.__path.setFromString( self.getPlug().getValue() )

		self.pathWidget().setEditable( self._editable() )
		self.__row[1].setEnabled( self._editable() ) # button

	def _setPlugFromPath( self, path ) :
		# NOTE(review): writes self.__path rather than the `path` argument -
		# presumably intentional since they are kept in sync; confirm.
		self.getPlug().setValue( str( self.__path ) )

	def __setPlugValue( self, *args ) :

		if not self._editable() :
			return

		with Gaffer.UndoScope( self.getPlug().ancestor( Gaffer.ScriptNode ) ) :
			self._setPlugFromPath( self.__path )

		# now we've transferred the text changes to the global undo queue, we remove them
		# from the widget's private text editing undo queue. it will then ignore undo shortcuts,
		# allowing them to fall through to the global undo shortcut.
		self.pathWidget().clearUndo()

	def __buttonClicked( self, widget ) :
		# Browse button handler: open the chooser and commit any chosen path.
		dialogue = self._pathChooserDialogue()
		chosenPath = dialogue.waitForPath( parentWindow = self.ancestor( GafferUI.Window ) )

		if chosenPath is not None :
			self.__path.setFromString( str( chosenPath ) )
			self.__setPlugValue()

	def __metadataValue( self, name ) :
		# Look up "path:<name>" metadata on the plug, falling back to the
		# legacy "pathPlugValueWidget:<name>" key.
		v = Gaffer.Metadata.value( self.getPlug(), "path:" + name )
		if v is None :
			# Fall back to old metadata names
			v = Gaffer.Metadata.value( self.getPlug(), "pathPlugValueWidget:" + name )
		return v
| bsd-3-clause |
danieljaouen/ansible | lib/ansible/modules/cloud/amazon/ec2_vpc_vpn_facts.py | 20 | 6885 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: ec2_vpc_vpn_facts
short_description: Gather facts about VPN Connections in AWS.
description:
- Gather facts about VPN Connections in AWS.
version_added: "2.6"
requirements: [ boto3 ]
author: Madhura Naniwadekar(@Madhura-CSI)
options:
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value.
See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpnConnections.html) for possible filters.
required: false
vpn_connection_ids:
description:
- Get details of a specific VPN connections using vpn connection ID/IDs. This value should be provided as a list.
required: false
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# # Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Gather facts about all vpn connections
ec2_vpc_vpn_facts:
- name: Gather facts about a filtered list of vpn connections, based on tags
ec2_vpc_vpn_facts:
filters:
"tag:Name": test-connection
register: vpn_conn_facts
- name: Gather facts about vpn connections by specifying connection IDs.
ec2_vpc_vpn_facts:
filters:
vpn-gateway-id: vgw-cbe66beb
register: vpn_conn_facts
'''
RETURN = '''
vpn_connections:
description: List of one or more VPN Connections.
returned: always
type: complex
contains:
category:
description: The category of the VPN connection.
returned: always
type: string
sample: VPN
customer_gatway_configuration:
description: The configuration information for the VPN connection's customer gateway (in the native XML format).
returned: always
type: string
customer_gateway_id:
description: The ID of the customer gateway at your end of the VPN connection.
returned: always
type: string
sample: cgw-17a53c37
options:
description: The VPN connection options.
returned: always
type: dict
sample: {
"static_routes_only": false
}
routes:
description: List of static routes associated with the VPN connection.
returned: always
type: complex
contains:
destination_cidr_block:
description: The CIDR block associated with the local subnet of the customer data center.
returned: always
type: string
sample: 10.0.0.0/16
state:
description: The current state of the static route.
returned: always
type: string
sample: available
state:
description: The current state of the VPN connection.
returned: always
type: string
sample: available
tags:
description: Any tags assigned to the VPN connection.
returned: always
type: dict
sample: {
"Name": "test-conn"
}
type:
description: The type of VPN connection.
returned: always
type: string
sample: ipsec.1
vgw_telemetry:
description: Information about the VPN tunnel.
returned: always
type: complex
contains:
accepted_route_count:
description: The number of accepted routes.
returned: always
type: int
sample: 0
last_status_change:
description: The date and time of the last change in status.
returned: always
type: datetime
sample: 2018-02-09T14:35:27+00:00
outside_ip_address:
description: The Internet-routable IP address of the virtual private gateway's outside interface.
returned: always
type: string
sample: 13.127.79.191
status:
description: The status of the VPN tunnel.
returned: always
type: string
sample: DOWN
status_message:
description: If an error occurs, a description of the error.
returned: always
type: string
sample: IPSEC IS DOWN
vpn_connection_id:
description: The ID of the VPN connection.
returned: always
type: string
sample: vpn-f700d5c0
vpn_gateway_id:
description: The ID of the virtual private gateway at the AWS side of the VPN connection.
returned: always
type: string
sample: vgw-cbe56bfb
'''
import json
try:
from botocore.exceptions import ClientError, BotoCoreError
except ImportError:
pass # caught by AnsibleAWSModule
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import (ansible_dict_to_boto3_filter_list, ec2_argument_spec,
boto3_conn, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict)
def date_handler(obj):
    """json.dumps fallback: serialize date/datetime-like objects via isoformat().

    Anything without an ``isoformat`` attribute is returned unchanged.
    """
    if hasattr(obj, 'isoformat'):
        return obj.isoformat()
    return obj
def list_vpn_connections(connection, module):
    """Describe VPN connections matching the module's filters/IDs and exit.

    Round-trips the boto3 response through JSON (with date_handler) to make
    datetimes serializable, snake-cases the keys, normalises tags, then calls
    module.exit_json - this function does not return to the caller.
    """
    params = {
        'Filters': ansible_dict_to_boto3_filter_list(module.params.get('filters')),
        'VpnConnectionIds': module.params.get('vpn_connection_ids'),
    }
    try:
        raw = connection.describe_vpn_connections(**params)
        result = json.loads(json.dumps(raw, default=date_handler))
    except ValueError as e:
        module.fail_json_aws(e, msg="Cannot validate JSON data")
    except (ClientError, BotoCoreError) as e:
        module.fail_json_aws(e, msg="Could not describe customer gateways")
    vpn_connections = [camel_dict_to_snake_dict(conn) for conn in result['VpnConnections']]
    for vpn_connection in vpn_connections:
        vpn_connection['tags'] = boto3_tag_list_to_ansible_dict(vpn_connection.get('tags', []))
    module.exit_json(changed=False, vpn_connections=vpn_connections)
def main():
    """Module entry point: build the argument spec, connect to EC2, report facts."""
    spec = ec2_argument_spec()
    spec.update(dict(
        vpn_connection_ids=dict(default=[], type='list'),
        filters=dict(default={}, type='dict'),
    ))
    module = AnsibleAWSModule(
        argument_spec=spec,
        mutually_exclusive=[['vpn_connection_ids', 'filters']],
        supports_check_mode=True,
    )
    list_vpn_connections(module.client('ec2'), module)
if __name__ == '__main__':
main()
| gpl-3.0 |
procangroup/edx-platform | cms/djangoapps/contentstore/migrations/0001_initial.py | 25 | 1948 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial contentstore migration: creates two ConfigurationModel-style
    # tables (PushNotificationConfig, VideoUploadConfig), each recording who
    # changed the switch and when, newest change first.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='PushNotificationConfig',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('change_date', models.DateTimeField(auto_now_add=True, verbose_name='Change date')),
                ('enabled', models.BooleanField(default=False, verbose_name='Enabled')),
                # PROTECT: a user who toggled config may not be deleted.
                ('changed_by', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='Changed by')),
            ],
            options={
                'ordering': ('-change_date',),
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='VideoUploadConfig',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('change_date', models.DateTimeField(auto_now_add=True, verbose_name='Change date')),
                ('enabled', models.BooleanField(default=False, verbose_name='Enabled')),
                ('profile_whitelist', models.TextField(help_text=b'A comma-separated list of names of profiles to include in video encoding downloads.', blank=True)),
                ('changed_by', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='Changed by')),
            ],
            options={
                'ordering': ('-change_date',),
                'abstract': False,
            },
        ),
    ]
| agpl-3.0 |
Linux-Box/lbgui | lib/python/Plugins/Extensions/TuxboxPlugins/plugin.py | 161 | 1201 | # must be fixed for the new plugin interface
from Tools.BoundFunction import boundFunction
from Tools.Directories import pathExists
from Plugins.Plugin import PluginDescriptor
from pluginrunner import PluginRunner
from os import listdir
TUXBOX_PLUGINS_PATH = "/usr/lib/tuxbox/plugins/"
def getPlugins():
	"""Scan TUXBOX_PLUGINS_PATH for *.cfg files and build a PluginDescriptor
	for each native tuxbox plugin found."""
	descriptors = []
	if pathExists(TUXBOX_PLUGINS_PATH):
		for entry in listdir(TUXBOX_PLUGINS_PATH):
			if entry[-3:] != "cfg":
				continue
			params = getPluginParams(entry)
			descriptors.append(PluginDescriptor(
				name=params["name"],
				description=params["desc"],
				where = PluginDescriptor.WHERE_PLUGINMENU,
				icon="tuxbox.png",
				needsRestart = True,
				fnc=boundFunction(main, plugin=entry)))
	return descriptors
def getPluginParams(file):
	"""Parse a tuxbox plugin .cfg file into a dict of key -> value.

	Historical behaviour is preserved: values keep their trailing newline,
	and for lines with several '=' only the second field is kept.
	Returns an empty dict (after printing a note) when the file is missing.
	"""
	params = {}
	try:
		cfg = open(TUXBOX_PLUGINS_PATH + file, "r")
		try:
			for line in cfg.readlines():
				fields = line.split("=")
				params[fields[0]] = fields[1]
		finally:
			# always release the handle - the original leaked it if a
			# malformed line raised while parsing
			cfg.close()
	except IOError:
		print("no tuxbox plugins found")
	return params
def main(session, plugin, **kwargs):
	# Debug trace, then launch the native plugin (its .so) via PluginRunner;
	# `plugin` is the name of the .cfg file.
	print "Running plugin " + plugin[:-4] + ".so with config file", plugin
	print getPluginParams(plugin)
	session.open(PluginRunner, plugin[:-4].split(".so")[0])
def Plugins(**kwargs):
	# Enigma2 plugin entry point - advertise every discovered tuxbox plugin.
	return getPlugins()
| gpl-2.0 |
javierTerry/odoo | openerp/addons/base/tests/test_menu.py | 501 | 1450 | import openerp.tests.common as common
class test_menu(common.TransactionCase):
    # Tests for ir.ui.menu unlink behaviour (old-style registry API).

    def setUp(self):
        super(test_menu,self).setUp()
        self.Menus = self.registry('ir.ui.menu')

    def test_00_menu_deletion(self):
        """Verify that menu deletion works properly when there are child menus, and those
        are indeed made orphans"""
        cr, uid, Menus = self.cr, self.uid, self.Menus

        # Generic trick necessary for search() calls to avoid hidden menus
        ctx = {'ir.ui.menu.full_list': True}

        # Build a 3-level tree: root -> (child1, child2), child2 -> child21.
        root_id = Menus.create(cr, uid, {'name': 'Test root'})
        child1_id = Menus.create(cr, uid, {'name': 'Test child 1', 'parent_id': root_id})
        child2_id = Menus.create(cr, uid, {'name': 'Test child 2', 'parent_id': root_id})
        child21_id = Menus.create(cr, uid, {'name': 'Test child 2-1', 'parent_id': child2_id})

        all_ids = [root_id, child1_id, child2_id, child21_id]

        # delete and check that direct children are promoted to top-level
        # cfr. explanation in menu.unlink()
        Menus.unlink(cr, uid, [root_id])

        remaining_ids = Menus.search(cr, uid, [('id', 'in', all_ids)], order="id", context=ctx)
        self.assertEqual([child1_id, child2_id, child21_id], remaining_ids)

        # Only the root's direct children become orphans; child21 keeps child2.
        orphan_ids = Menus.search(cr, uid, [('id', 'in', all_ids), ('parent_id', '=', False)], order="id", context=ctx)
        self.assertEqual([child1_id, child2_id], orphan_ids)
| agpl-3.0 |
Eric89GXL/scikit-learn | sklearn/datasets/tests/test_samples_generator.py | 6 | 9808 | import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
    """make_classification: output shapes, class count and requested weights."""
    X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
                               n_redundant=1, n_repeated=1, n_classes=3,
                               n_clusters_per_class=1, hypercube=False,
                               shift=None, scale=None, weights=[0.1, 0.25],
                               random_state=0)
    assert_equal(X.shape, (100, 20), "X shape mismatch")
    assert_equal(y.shape, (100,), "y shape mismatch")
    assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
    # weights=[0.1, 0.25] -> 10 and 25 samples; the remainder (65) in class 2
    assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
    assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
    assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")


def test_make_multilabel_classification():
    """make_multilabel_classification: label-list output respects allow_unlabeled."""
    for allow_unlabeled, min_length in zip((True, False), (0, 1)):
        X, Y = make_multilabel_classification(n_samples=100, n_features=20,
                                              n_classes=3, random_state=0,
                                              allow_unlabeled=allow_unlabeled)
        assert_equal(X.shape, (100, 20), "X shape mismatch")
        if not allow_unlabeled:
            assert_equal(max([max(y) for y in Y]), 2)
        assert_equal(min([len(y) for y in Y]), min_length)
        assert_true(max([len(y) for y in Y]) <= 3)


def test_make_multilabel_classification_return_indicator():
    """make_multilabel_classification: label-indicator-matrix output."""
    for allow_unlabeled, min_length in zip((True, False), (0, 1)):
        X, Y = make_multilabel_classification(n_samples=25, n_features=20,
                                              n_classes=3, random_state=0,
                                              return_indicator=True,
                                              allow_unlabeled=allow_unlabeled)
        assert_equal(X.shape, (25, 20), "X shape mismatch")
        assert_equal(Y.shape, (25, 3), "Y shape mismatch")
        assert_true(np.all(np.sum(Y, axis=0) > min_length))


def test_make_hastie_10_2():
    """make_hastie_10_2: shapes and binary target."""
    X, y = make_hastie_10_2(n_samples=100, random_state=0)
    assert_equal(X.shape, (100, 10), "X shape mismatch")
    assert_equal(y.shape, (100,), "y shape mismatch")
    assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
    """make_regression: shapes, number of informative coefs, noise level."""
    X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
                              effective_rank=5, coef=True, bias=0.0,
                              noise=1.0, random_state=0)
    assert_equal(X.shape, (100, 10), "X shape mismatch")
    assert_equal(y.shape, (100,), "y shape mismatch")
    assert_equal(c.shape, (10,), "coef shape mismatch")
    assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")

    # Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
    assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)


def test_make_regression_multitarget():
    """make_regression with n_targets=3: multi-output shapes and coefs."""
    X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
                              n_targets=3, coef=True, noise=1., random_state=0)
    assert_equal(X.shape, (100, 10), "X shape mismatch")
    assert_equal(y.shape, (100, 3), "y shape mismatch")
    assert_equal(c.shape, (10, 3), "coef shape mismatch")
    assert_array_equal(sum(c != 0.0), 3,
                       "Unexpected number of informative features")

    # Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
    assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)


def test_make_blobs():
    """make_blobs: shapes and cluster count for explicit centers."""
    X, y = make_blobs(n_samples=50, n_features=2,
                      centers=[[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]],
                      random_state=0)
    assert_equal(X.shape, (50, 2), "X shape mismatch")
    assert_equal(y.shape, (50,), "y shape mismatch")
    assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")


def test_make_friedman1():
    """make_friedman1: noiseless output matches the analytic formula."""
    X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
                          random_state=0)
    assert_equal(X.shape, (5, 10), "X shape mismatch")
    assert_equal(y.shape, (5,), "y shape mismatch")
    assert_array_almost_equal(y,
                              10 * np.sin(np.pi * X[:, 0] * X[:, 1])
                              + 20 * (X[:, 2] - 0.5) ** 2
                              + 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
    """make_friedman2: noiseless output matches the analytic formula."""
    X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
    assert_equal(X.shape, (5, 4), "X shape mismatch")
    assert_equal(y.shape, (5,), "y shape mismatch")
    assert_array_almost_equal(y,
                              (X[:, 0] ** 2
                               + (X[:, 1] * X[:, 2] - 1
                                  / (X[:, 1] * X[:, 3])) ** 2) ** 0.5)


def test_make_friedman3():
    """make_friedman3: noiseless output matches the analytic formula."""
    X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
    assert_equal(X.shape, (5, 4), "X shape mismatch")
    assert_equal(y.shape, (5,), "y shape mismatch")
    assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
                                            - 1 / (X[:, 1] * X[:, 3]))
                                           / X[:, 0]))


def test_make_low_rank_matrix():
    """make_low_rank_matrix: shape and approximate effective rank."""
    X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
                             tail_strength=0.01, random_state=0)
    assert_equal(X.shape, (50, 25), "X shape mismatch")

    from numpy.linalg import svd
    u, s, v = svd(X)
    # sum of singular values ~ effective rank for a low-rank matrix
    assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")


def test_make_sparse_coded_signal():
    """make_sparse_coded_signal: shapes, sparsity and Y == D @ X."""
    Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
                                       n_features=10, n_nonzero_coefs=3,
                                       random_state=0)
    assert_equal(Y.shape, (10, 5), "Y shape mismatch")
    assert_equal(D.shape, (10, 8), "D shape mismatch")
    assert_equal(X.shape, (8, 5), "X shape mismatch")
    for col in X.T:
        assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
    assert_array_equal(np.dot(D, X), Y)
    # dictionary atoms are unit-norm
    assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
                              np.ones(D.shape[1]))


def test_make_sparse_uncorrelated():
    """make_sparse_uncorrelated: output shapes."""
    X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
    assert_equal(X.shape, (5, 10), "X shape mismatch")
    assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
    """make_spd_matrix: symmetric and positive-definite."""
    X = make_spd_matrix(n_dim=5, random_state=0)
    assert_equal(X.shape, (5, 5), "X shape mismatch")
    assert_array_almost_equal(X, X.T)

    from numpy.linalg import eig
    eigenvalues, _ = eig(X)
    assert_array_equal(eigenvalues > 0, np.array([True] * 5),
                       "X is not positive-definite")


def test_make_swiss_roll():
    """make_swiss_roll: shapes and parametric relation between X and t."""
    X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
    assert_equal(X.shape, (5, 3), "X shape mismatch")
    assert_equal(t.shape, (5,), "t shape mismatch")
    assert_array_equal(X[:, 0], t * np.cos(t))
    assert_array_equal(X[:, 2], t * np.sin(t))


def test_make_s_curve():
    """make_s_curve: shapes and parametric relation between X and t."""
    X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
    assert_equal(X.shape, (5, 3), "X shape mismatch")
    assert_equal(t.shape, (5,), "t shape mismatch")
    assert_array_equal(X[:, 0], np.sin(t))
    assert_array_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))


def test_make_biclusters():
    """make_biclusters: shapes, finiteness and determinism for fixed seed."""
    X, rows, cols = make_biclusters(
        shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
    assert_equal(X.shape, (100, 100), "X shape mismatch")
    assert_equal(rows.shape, (4, 100), "rows shape mismatch")
    assert_equal(cols.shape, (4, 100,), "columns shape mismatch")
    assert_all_finite(X)
    assert_all_finite(rows)
    assert_all_finite(cols)

    X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
                               shuffle=True, random_state=0)
    assert_array_equal(X, X2)


def test_make_checkerboard():
    """make_checkerboard: shapes, finiteness and determinism for fixed seed."""
    X, rows, cols = make_checkerboard(
        shape=(100, 100), n_clusters=(20, 5),
        shuffle=True, random_state=0)
    assert_equal(X.shape, (100, 100), "X shape mismatch")
    assert_equal(rows.shape, (100, 100), "rows shape mismatch")
    assert_equal(cols.shape, (100, 100,), "columns shape mismatch")

    X, rows, cols = make_checkerboard(
        shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
    assert_all_finite(X)
    assert_all_finite(rows)
    assert_all_finite(cols)

    X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
                                 shuffle=True, random_state=0)
    X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
                                 shuffle=True, random_state=0)
    assert_array_equal(X1, X2)
| bsd-3-clause |
miguelfervi/SSBW-Restaurantes | restaurantes/lib/python2.7/site-packages/pip/__init__.py | 75 | 10604 | #!/usr/bin/env python
from __future__ import absolute_import
import locale
import logging
import os
import optparse
import warnings
import sys
import re
from pip.exceptions import InstallationError, CommandError, PipError
from pip.utils import get_installed_distributions, get_prog
from pip.utils import deprecation, dist_is_editable
from pip.vcs import git, mercurial, subversion, bazaar # noqa
from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
from pip.commands import get_summaries, get_similar_commands
from pip.commands import commands_dict
from pip._vendor.requests.packages.urllib3.exceptions import (
InsecureRequestWarning,
)
# assignment for flake8 to be happy
# This fixes a peculiarity when importing via __import__ - as we are
# initialising the pip module, "from pip import cmdoptions" is recursive
# and appears not to work properly in that situation.
import pip.cmdoptions
cmdoptions = pip.cmdoptions
# The version as used in the setup.py and the docs conf.py
__version__ = "8.1.2"
logger = logging.getLogger(__name__)
# Hide the InsecureRequestWArning from urllib3
warnings.filterwarnings("ignore", category=InsecureRequestWarning)
def autocomplete():
    """Command and option completion for the main option parser (and options)
    and its subcommands (and options).

    Enable by sourcing one of the completion shell scripts (bash or zsh).
    """
    # Don't complete if user hasn't sourced bash_completion file.
    if 'PIP_AUTO_COMPLETE' not in os.environ:
        return
    # COMP_WORDS / COMP_CWORD are exported by the shell completion hooks.
    cwords = os.environ['COMP_WORDS'].split()[1:]
    cword = int(os.environ['COMP_CWORD'])
    try:
        current = cwords[cword - 1]
    except IndexError:
        current = ''

    subcommands = [cmd for cmd, summary in get_summaries()]
    options = []
    # subcommand: first word on the line that names a known subcommand
    try:
        subcommand_name = [w for w in cwords if w in subcommands][0]
    except IndexError:
        subcommand_name = None

    parser = create_main_parser()
    # subcommand options
    if subcommand_name:
        # special case: 'help' subcommand has no options
        if subcommand_name == 'help':
            sys.exit(1)
        # special case: list locally installed dists for uninstall command
        if subcommand_name == 'uninstall' and not current.startswith('-'):
            installed = []
            lc = current.lower()
            for dist in get_installed_distributions(local_only=True):
                if dist.key.startswith(lc) and dist.key not in cwords[1:]:
                    installed.append(dist.key)
            # if there are no dists installed, fall back to option completion
            if installed:
                for dist in installed:
                    print(dist)
                sys.exit(1)

        subcommand = commands_dict[subcommand_name]()
        options += [(opt.get_opt_string(), opt.nargs)
                    for opt in subcommand.parser.option_list_all
                    if opt.help != optparse.SUPPRESS_HELP]

        # filter out previously specified options from available options
        prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
        options = [(x, v) for (x, v) in options if x not in prev_opts]
        # filter options by current input
        options = [(k, v) for k, v in options if k.startswith(current)]
        for option in options:
            opt_label = option[0]
            # append '=' to options which require args
            if option[1]:
                opt_label += '='
            print(opt_label)
    else:
        # show main parser options only when necessary (current word looks
        # like an option); otherwise complete subcommand names only
        if current.startswith('-') or current.startswith('--'):
            opts = [i.option_list for i in parser.option_groups]
            opts.append(parser.option_list)
            opts = (o for it in opts for o in it)

            subcommands += [i.get_opt_string() for i in opts
                            if i.help != optparse.SUPPRESS_HELP]

        print(' '.join([x for x in subcommands if x.startswith(current)]))
    sys.exit(1)
def create_main_parser():
    """Build pip's top-level option parser.

    Returns a ConfigOptionParser with interspersed args disabled, the
    general option group attached, and a description listing every
    available subcommand.
    """
    parser_kw = {
        'usage': '\n%prog <command> [options]',
        'add_help_option': False,
        'formatter': UpdatingDefaultsHelpFormatter(),
        'name': 'global',
        'prog': get_prog(),
    }

    parser = ConfigOptionParser(**parser_kw)
    parser.disable_interspersed_args()

    pip_pkg_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    # Use sys.version_info rather than slicing sys.version: 'sys.version[:3]'
    # yields "3.1" for Python 3.10+ because it truncates the version string.
    parser.version = 'pip %s from %s (python %s)' % (
        __version__, pip_pkg_dir, '%d.%d' % sys.version_info[:2])

    # add the general options
    gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, parser)
    parser.add_option_group(gen_opts)

    parser.main = True  # so the help formatter knows

    # create command listing for description
    command_summaries = get_summaries()
    description = [''] + ['%-27s %s' % (i, j) for i, j in command_summaries]
    parser.description = '\n'.join(description)

    return parser
def parseopts(args):
    """Split *args* into the subcommand name and the remaining arguments."""
    parser = create_main_parser()

    # Interspersed args are disabled on the parser, so parse_args() splits
    # the input into general options (before the subcommand) and the rest.
    # For example:
    #   args: ['--timeout=5', 'install', '--user', 'INITools']
    #   general_options: ['--timeout==5']
    #   args_else: ['install', '--user', 'INITools']
    general_options, args_else = parser.parse_args(args)

    # --version
    if general_options.version:
        sys.stdout.write(parser.version)
        sys.stdout.write(os.linesep)
        sys.exit()

    # Bare "pip" or "pip help" just prints usage.
    if not args_else or (args_else[0] == 'help' and len(args_else) == 1):
        parser.print_help()
        sys.exit()

    cmd_name = args_else[0]
    if cmd_name not in commands_dict:
        guess = get_similar_commands(cmd_name)
        msg = ['unknown command "%s"' % cmd_name]
        if guess:
            msg.append('maybe you meant "%s"' % guess)
        raise CommandError(' - '.join(msg))

    # All the args minus the first occurrence of the subcommand token.
    remaining = list(args)
    remaining.remove(cmd_name)
    return cmd_name, remaining
def check_isolated(args):
    """Return True if the ``--isolated`` flag appears in *args*.

    :param args: list of command-line argument strings
    """
    # 'in' already yields a bool; no need for the manual flag variable.
    return "--isolated" in args
def main(args=None):
    """pip's command-line entry point.

    Parses *args* (defaults to sys.argv[1:]), resolves the subcommand and
    delegates to its ``main``; returns that command's exit status.
    """
    if args is None:
        args = sys.argv[1:]

    # Configure our deprecation warnings to be sent through loggers
    deprecation.install_warning_logger()

    autocomplete()

    try:
        cmd_name, cmd_args = parseopts(args)
    except PipError as exc:
        sys.stderr.write("ERROR: %s" % exc)
        sys.stderr.write(os.linesep)
        sys.exit(1)

    # Needed for locale.getpreferredencoding(False) to work
    # in pip.utils.encoding.auto_decode
    try:
        locale.setlocale(locale.LC_ALL, '')
    except locale.Error as e:
        # setlocale can apparently crash if locale are uninitialized
        logger.debug("Ignoring error %s when setting locale", e)
    command = commands_dict[cmd_name](isolated=check_isolated(cmd_args))
    return command.main(cmd_args)
# ###########################################################
# # Writing freeze files
class FrozenRequirement(object):
    """One requirement line for ``pip freeze`` output.

    Wraps a requirement string (or ``-e`` editable VCS URL) together with
    any explanatory comment lines that should precede it in the freeze
    file.
    """

    def __init__(self, name, req, editable, comments=()):
        self.name = name
        self.req = req
        self.editable = editable
        self.comments = comments

    # SVN-style revision suffixes in version strings, e.g. "1.0-r1234"
    # or date-stamped "1.0-20100101".
    _rev_re = re.compile(r'-r(\d+)$')
    _date_re = re.compile(r'-(20\d\d\d\d\d\d)$')

    @classmethod
    def from_dist(cls, dist, dependency_links):
        """Build a FrozenRequirement from an installed distribution.

        Editable VCS checkouts are frozen as ``-e <url>``; otherwise the
        pinned ``name==version`` requirement is used, with special handling
        for SVN revision/date suffixes in the version.
        """
        location = os.path.normcase(os.path.abspath(dist.location))
        comments = []
        from pip.vcs import vcs, get_src_requirement
        if dist_is_editable(dist) and vcs.get_backend_name(location):
            editable = True
            try:
                req = get_src_requirement(dist, location)
            except InstallationError as exc:
                logger.warning(
                    "Error when trying to get requirement for VCS system %s, "
                    "falling back to uneditable format", exc
                )
                req = None
            if req is None:
                # Could not derive a VCS URL; fall back to a pinned spec.
                logger.warning(
                    'Could not determine repository location of %s', location
                )
                comments.append(
                    '## !! Could not determine repository location'
                )
                req = dist.as_requirement()
                editable = False
        else:
            editable = False
            req = dist.as_requirement()
            specs = req.specs
            assert len(specs) == 1 and specs[0][0] in ["==", "==="], \
                'Expected 1 spec with == or ===; specs = %r; dist = %r' % \
                (specs, dist)
            version = specs[0][1]
            ver_match = cls._rev_re.search(version)
            date_match = cls._date_re.search(version)
            if ver_match or date_match:
                # Version carries an SVN revision/date suffix; try to turn
                # it back into an editable SVN URL.
                svn_backend = vcs.get_backend('svn')
                if svn_backend:
                    svn_location = svn_backend().get_location(
                        dist,
                        dependency_links,
                    )
                if not svn_location:
                    logger.warning(
                        'Warning: cannot find svn location for %s', req)
                    comments.append(
                        '## FIXME: could not find svn URL in dependency_links '
                        'for this package:'
                    )
                else:
                    comments.append(
                        '# Installing as editable to satisfy requirement %s:' %
                        req
                    )
                    if ver_match:
                        rev = ver_match.group(1)
                    else:
                        rev = '{%s}' % date_match.group(1)
                    editable = True
                    req = '%s@%s#egg=%s' % (
                        svn_location,
                        rev,
                        cls.egg_name(dist)
                    )
        return cls(dist.project_name, req, editable, comments)

    @staticmethod
    def egg_name(dist):
        """Return the egg name with any trailing '-pyX.Y' tag stripped."""
        name = dist.egg_name()
        # \d+ so that e.g. '-py3.10' is also stripped; the previous
        # r'-py\d\.\d$' stopped matching at two-digit minor versions.
        match = re.search(r'-py\d+\.\d+$', name)
        if match:
            name = name[:match.start()]
        return name

    def __str__(self):
        req = self.req
        if self.editable:
            req = '-e %s' % req
        # Comments first, then the requirement line, newline-terminated.
        return '\n'.join(list(self.comments) + [str(req)]) + '\n'
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 |
haad/ansible | lib/ansible/modules/network/avi/avi_api_version.py | 41 | 2544 | #!/usr/bin/python
"""
# Created on July 24, 2017
#
# @author: Vilian Atmadzhov (vilian.atmadzhov@paddypowerbetfair.com) GitHub ID: vivobg
#
# module_check: not supported
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# Vilian Atmadzhov, <vilian.atmadzhov@paddypowerbetfair.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
"""
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_api_version
author: Vilian Atmadzhov (vilian.atmadzhov@paddypowerbetfair.com)
short_description: Avi API Version Module
description:
- This module can be used to obtain the version of the Avi REST API. U(https://avinetworks.com/)
version_added: 2.5
requirements: [ avisdk ]
options: {}
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Get AVI API version
avi_api_version:
controller: ""
username: ""
password: ""
tenant: ""
register: avi_controller_version
'''
RETURN = '''
obj:
description: Avi REST resource
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, AviCredentials)
from avi.sdk.avi_api import ApiSession
except ImportError:
HAS_AVI = False
def main():
    """Ansible entry point: report the remote Avi controller's API version.

    Exits via module.exit_json with the version dict (keys lower-cased) or
    module.fail_json on any SDK/connection failure.
    """
    module = AnsibleModule(argument_spec=avi_common_argument_spec())
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    try:
        api_creds = AviCredentials()
        api_creds.update_from_ansible_module(module)
        api = ApiSession.get_session(
            api_creds.controller, api_creds.username,
            password=api_creds.password,
            timeout=api_creds.timeout, tenant=api_creds.tenant,
            tenant_uuid=api_creds.tenant_uuid, token=api_creds.token,
            port=api_creds.port)
        # Normalise version keys to lower case for a stable return shape.
        # dict() over a generator instead of a mutate-in-loop over .keys().
        remote = dict((key.lower(), value)
                      for key, value in api.remote_api_version.items())
        api.close()
        module.exit_json(changed=False, obj=remote)
    except Exception as e:
        # Deliberately broad: any SDK error becomes a clean module failure
        # instead of a traceback. (exit_json raises SystemExit, which is
        # not an Exception, so the success path is unaffected.)
        module.fail_json(msg="Unable to get an AVI session. {}".format(e))
if __name__ == '__main__':
main()
| gpl-3.0 |
class AbstractWriter(object):
    """Interface for objects that write payloads to a Makerbot printer.

    Concrete subclasses implement the actual transport; this base class
    only defines the contract and the shared external-stop flag.
    """

    def __init__(self, file, condition):
        # Set to True (under self._condition) to request that writing stop.
        self.external_stop = False
        self._condition = condition
        self.file = file

    def open(self):
        """ Opens the currently set port"""
        raise NotImplementedError()

    def is_open(self):
        """ Returns whether the underlying port is currently open. """
        raise NotImplementedError()

    def close(self):
        # Close the underlying port; subclasses must implement.
        raise NotImplementedError()

    def send_action_payload(self, payload):
        """ Send the given payload as an action command

        @param bytearray payload Payload to send as an action payload
        """
        raise NotImplementedError()

    def send_query_payload(self, payload):
        """ Send the given payload as a query command

        @param bytearray payload Payload to send as a query packet
        @return The packet returned by send_command
        """
        raise NotImplementedError()

    def set_external_stop(self, value=True):
        # Take the shared condition so other threads observe a consistent
        # value of the flag.
        with self._condition:
            self.external_stop = value
| agpl-3.0 |
Split-Screen/android_kernel_samsung_msm8930-common | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | 1935 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
    """Return total / n.

    NOTE(review): this script targets Python 2 (see the print statement
    below), where int / int is integer division.
    """
    return total / n
def nsecs(secs, nsecs):
    """Combine a (seconds, nanoseconds) pair into total nanoseconds."""
    return secs * NSECS_PER_SEC + nsecs

def nsecs_secs(nsecs):
    """Whole-seconds part of a nanosecond count."""
    # Floor division keeps the Python 2 integer semantics this script was
    # written for (plain '/' returns a float on Python 3).
    return nsecs // NSECS_PER_SEC

def nsecs_nsecs(nsecs):
    """Sub-second remainder (in nanoseconds) of a nanosecond count."""
    return nsecs % NSECS_PER_SEC

def nsecs_str(nsecs):
    """Format a nanosecond count as 'SSSSS.NNNNNNNNN'."""
    # BUGFIX: the original line ended with a trailing comma, which made
    # this function return a 1-tuple instead of a string.  It also bound
    # the result to a local named 'str', shadowing the builtin.
    return "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs))
def add_stats(dict, key, value):
    """Fold *value* into the (min, max, avg, count) tuple at dict[key].

    NOTE: the parameter names ('dict', 'min', ...) shadow builtins but are
    kept for interface compatibility with existing perf scripts.  The
    'avg' field is a running pairwise average (avg + value) / 2, as in the
    original, not a true arithmetic mean.
    """
    if key not in dict:
        # dict.has_key() was removed in Python 3; 'in' works on both.
        dict[key] = (value, value, value, 1)
    else:
        min, max, avg, count = dict[key]
        if value < min:
            min = value
        if value > max:
            max = value
        avg = (avg + value) / 2
        dict[key] = (min, max, avg, count + 1)
def clear_term():
    """Home the cursor and clear the screen using ANSI escape codes."""
    home_and_clear = "\x1b[H\x1b[2J"
    print(home_and_clear)
audit_package_warned = False
try:
    import audit
    # Map uname machine names to audit's machine constants so syscall
    # numbers can be translated for the running architecture.
    machine_to_id = {
        'x86_64': audit.MACH_86_64,
        'alpha' : audit.MACH_ALPHA,
        'ia64'  : audit.MACH_IA64,
        'ppc'   : audit.MACH_PPC,
        'ppc64' : audit.MACH_PPC64,
        's390'  : audit.MACH_S390,
        's390x' : audit.MACH_S390X,
        'i386'  : audit.MACH_X86,
        'i586'  : audit.MACH_X86,
        'i686'  : audit.MACH_X86,
    }
    try:
        machine_to_id['armeb'] = audit.MACH_ARMEB
    except:
        # Older audit bindings lack MACH_ARMEB; ignore.
        pass
    machine_id = machine_to_id[os.uname()[4]]
except:
    if not audit_package_warned:
        audit_package_warned = True
        # BUGFIX: use the print() call form; the original Python 2 print
        # statement is a syntax error under Python 3.  The call form
        # behaves identically on both interpreters for a single string.
        print("Install the audit-libs-python package to get syscall names")
def syscall_name(id):
    """Translate a syscall number to its name, falling back to str(id).

    The fallback also covers the case where the audit module (and hence
    machine_id) is unavailable, which raises NameError here.
    """
    try:
        return audit.audit_syscall_to_name(id, machine_id)
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # still propagate; NameError/lookup failures fall back as before.
        return str(id)
def strerror(nr):
    """Return the symbolic errno name (e.g. 'ENOENT') for nr.

    Negative values are treated like their absolute value; unknown errno
    numbers produce a 'Unknown N errno' string.
    """
    try:
        return errno.errorcode[abs(nr)]
    except KeyError:
        # Narrowed from a bare 'except:'; only missing codes fall through.
        return "Unknown %d errno" % nr
| gpl-2.0 |
nitzmahone/ansible | lib/ansible/modules/network/f5/bigip_qkview.py | 21 | 19430 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_qkview
short_description: Manage qkviews on the device
description:
- Manages creating and downloading qkviews from a BIG-IP. Various
options can be provided when creating qkviews. The qkview is important
when dealing with F5 support. It may be required that you upload this
qkview to the supported channels during resolution of an SRs that you
may have opened.
version_added: 2.4
options:
filename:
description:
- Name of the qkview to create on the remote BIG-IP.
default: "localhost.localdomain.qkview"
dest:
description:
- Destination on your local filesystem when you want to save the qkview.
required: True
asm_request_log:
description:
- When C(True), includes the ASM request log data. When C(False),
excludes the ASM request log data.
default: no
type: bool
max_file_size:
description:
- Max file size, in bytes, of the qkview to create. By default, no max
file size is specified.
default: 0
complete_information:
description:
- Include complete information in the qkview.
default: no
type: bool
exclude_core:
description:
- Exclude core files from the qkview.
default: no
type: bool
exclude:
description:
- Exclude various file from the qkview.
choices:
- all
- audit
- secure
- bash_history
force:
description:
- If C(no), the file will only be transferred if the destination does not
exist.
default: yes
type: bool
notes:
- This module does not include the "max time" or "restrict to blade" options.
- If you are using this module with either Ansible Tower or Ansible AWX, you
should be aware of how these Ansible products execute jobs in restricted
environments. More informat can be found here
https://clouddocs.f5.com/products/orchestration/ansible/devel/usage/module-usage-with-tower.html
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Fetch a qkview from the remote device
bigip_qkview:
asm_request_log: yes
exclude:
- audit
- secure
dest: /tmp/localhost.localdomain.qkview
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
# only common fields returned
'''
import os
import re
import socket
import ssl
import time
from ansible.module_utils.basic import AnsibleModule
from distutils.version import LooseVersion
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.icontrol import download_file
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.icontrol import download_file
class Parameters(AnsibleF5Parameters):
    """Maps Ansible module parameters to qkview command-line flags.

    Each entry in api_attributes corresponds to a property below that
    renders the matching qkview CLI argument, or None when unset.
    """
    api_attributes = [
        'asm_request_log',
        'complete_information',
        'exclude',
        'exclude_core',
        'filename_cmd',
        'max_file_size',
    ]

    returnables = ['stdout', 'stdout_lines', 'warnings']

    @property
    def exclude(self):
        # Rendered --exclude='...' flag, or None when nothing is excluded.
        if self._values['exclude'] is None:
            return None
        exclude = ' '.join(self._values['exclude'])
        return "--exclude='{0}'".format(exclude)

    @property
    def exclude_raw(self):
        # The unrendered list form, used for choice validation.
        return self._values['exclude']

    @property
    def exclude_core(self):
        # BUGFIX: this previously tested self._values['exclude'], so -C was
        # emitted whenever *exclude* was set rather than when the user
        # actually asked to exclude core files.
        if self._values['exclude_core']:
            return '-C'
        else:
            return None

    @property
    def complete_information(self):
        if self._values['complete_information']:
            return '-c'
        return None

    @property
    def max_file_size(self):
        if self._values['max_file_size'] in [None]:
            return None
        return '-s {0}'.format(self._values['max_file_size'])

    @property
    def asm_request_log(self):
        if self._values['asm_request_log']:
            return '-o asm-request-log'
        return None

    @property
    def filename(self):
        """Basename of the requested qkview, restricted to word chars/dots."""
        pattern = r'^[\w\.]+$'
        filename = os.path.basename(self._values['filename'])
        if re.match(pattern, filename):
            return filename
        else:
            raise F5ModuleError(
                "The provided filename must contain word characters only."
            )

    @property
    def filename_cmd(self):
        return '-f {0}'.format(self.filename)

    def to_return(self):
        """Collect returnable values, silently skipping on any failure."""
        result = {}
        try:
            for returnable in self.returnables:
                result[returnable] = getattr(self, returnable)
            result = self._filter_params(result)
        except Exception:
            pass
        return result

    def api_params(self):
        """Render all api_attributes into a dict of CLI argument strings."""
        result = {}
        for api_attribute in self.api_attributes:
            if self.api_map is not None and api_attribute in self.api_map:
                result[api_attribute] = getattr(self, self.api_map[api_attribute])
            else:
                result[api_attribute] = getattr(self, api_attribute)
        result = self._filter_params(result)
        return result
class ModuleManager(object):
    """Dispatches to the correct location manager based on BIG-IP version.

    Pre-14.0 devices expose the download location under /madm; 14.0 and
    later use /bulk.
    """
    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = kwargs.get('client', None)
        self.kwargs = kwargs

    def exec_module(self):
        # Pick the manager matching the device's REST download location.
        if self.is_version_less_than_14():
            manager = self.get_manager('madm')
        else:
            manager = self.get_manager('bulk')
        return manager.exec_module()

    def get_manager(self, type):
        if type == 'madm':
            return MadmLocationManager(**self.kwargs)
        elif type == 'bulk':
            return BulkLocationManager(**self.kwargs)

    def is_version_less_than_14(self):
        """Query /mgmt/tm/sys and compare its 'ver' query param to 14.0.0."""
        uri = "https://{0}:{1}/mgmt/tm/sys".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        # The software version is embedded in the selfLink's query string.
        version = urlparse.parse_qs(urlparse.urlparse(response['selfLink']).query)['ver'][0]
        if LooseVersion(version) < LooseVersion('14.0.0'):
            return True
        else:
            return False
class BaseManager(object):
    """Shared qkview workflow: create on-device, move, download, clean up.

    Subclasses only supply ``remote_dir`` and ``_download_file`` for their
    REST download location (/madm vs /bulk).
    """
    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = kwargs.get('client', None)
        self.have = None
        self.want = Parameters(params=self.module.params)
        self.changes = Parameters()

    def _set_changed_options(self):
        # Snapshot the desired values that are reportable back to Ansible.
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = Parameters(params=changed)

    def exec_module(self):
        """Run the workflow; generating a qkview never reports 'changed'."""
        result = dict()

        self.present()

        result.update(**self.changes.to_return())
        result.update(dict(changed=False))
        return result

    def present(self):
        """Validate local dest and exclude options, then run the workflow."""
        if os.path.exists(self.want.dest) and not self.want.force:
            raise F5ModuleError(
                "The specified 'dest' file already exists."
            )
        if not os.path.exists(os.path.dirname(self.want.dest)):
            raise F5ModuleError(
                "The directory of your 'dest' file does not exist."
            )
        if self.want.exclude:
            choices = ['all', 'audit', 'secure', 'bash_history']
            if not all(x in choices for x in self.want.exclude_raw):
                raise F5ModuleError(
                    "The specified excludes must be in the following list: "
                    "{0}".format(','.join(choices))
                )
        self.execute()

    def exists(self):
        """Return True if the qkview file is listed in the remote dir."""
        params = dict(
            command='run',
            utilCmdArgs=self.remote_dir
        )
        uri = "https://{0}:{1}/mgmt/tm/util/unix-ls".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        resp = self.client.api.post(uri, json=params)

        try:
            response = resp.json()
        except ValueError:
            return False
        if resp.status == 404 or 'code' in response and response['code'] == 404:
            return False

        try:
            # unix-ls returns the directory listing as one text blob.
            if self.want.filename in response['commandResult']:
                return True
        except KeyError:
            return False

    def execute(self):
        """Create the qkview, download it locally, then remove the remote copy."""
        response = self.execute_on_device()
        if not response:
            raise F5ModuleError(
                "Failed to create qkview on device."
            )

        result = self._move_qkview_to_download()
        if not result:
            raise F5ModuleError(
                "Failed to move the file to a downloadable location"
            )

        self._download_file()
        if not os.path.exists(self.want.dest):
            raise F5ModuleError(
                "Failed to save the qkview to local disk"
            )

        self._delete_qkview()
        result = self.exists()
        if result:
            raise F5ModuleError(
                "Failed to remove the remote qkview"
            )

    def _delete_qkview(self):
        # Remove the staged qkview from the remote download directory.
        tpath_name = '{0}/{1}'.format(self.remote_dir, self.want.filename)
        params = dict(
            command='run',
            utilCmdArgs=tpath_name
        )
        uri = "https://{0}:{1}/mgmt/tm/util/unix-rm".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError:
            return False
        if resp.status == 404 or 'code' in response and response['code'] == 404:
            return False

    def execute_on_device(self):
        """Drive qkview generation through a temporary async cli script task."""
        self._upsert_temporary_cli_script_on_device()
        task_id = self._create_async_task_on_device()
        self._exec_async_task_on_device(task_id)
        self._wait_for_async_task_to_finish_on_device(task_id)
        self._remove_temporary_cli_script_from_device()
        return True

    def _upsert_temporary_cli_script_on_device(self):
        # Create the helper tmsh script, or update it if it already exists.
        args = {
            "name": "__ansible_mkqkview",
            "apiAnonymous": """
                proc script::run {} {
                    set cmd [lreplace $tmsh::argv 0 0]; eval "exec $cmd 2> /dev/null"
                }
            """
        }
        result = self._create_temporary_cli_script_on_device(args)
        if result:
            return True
        return self._update_temporary_cli_script_on_device(args)

    def _create_temporary_cli_script_on_device(self, args):
        uri = "https://{0}:{1}/mgmt/tm/cli/script".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        resp = self.client.api.post(uri, json=args)
        try:
            response = resp.json()
            # 404/409 means missing collection or an existing script;
            # fall through so the caller updates instead.
            if 'code' in response and response['code'] in [404, 409]:
                return False
        except ValueError:
            pass
        if resp.status in [404, 409]:
            return False
        return True

    def _update_temporary_cli_script_on_device(self, args):
        uri = "https://{0}:{1}/mgmt/tm/cli/script/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name('Common', '__ansible_mkqkview')
        )
        resp = self.client.api.put(uri, json=args)
        try:
            resp.json()
            return True
        except ValueError:
            raise F5ModuleError(
                "Failed to update temporary cli script on device."
            )

    def _create_async_task_on_device(self):
        """Creates an async cli script task in the REST API

        Returns:
            int: The ID of the task staged for running.

        :return:
        """
        command = ' '.join(self.want.api_params().values())
        args = {
            "command": "run",
            "name": "__ansible_mkqkview",
            "utilCmdArgs": "/usr/bin/qkview {0}".format(command)
        }
        uri = "https://{0}:{1}/mgmt/tm/task/cli/script".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        resp = self.client.api.post(uri, json=args)
        try:
            response = resp.json()
            return response['_taskId']
        except ValueError:
            raise F5ModuleError(
                "Failed to create the async task on the device."
            )

    def _exec_async_task_on_device(self, task_id):
        # Kick off the staged task by moving it into the VALIDATING state.
        args = {"_taskState": "VALIDATING"}
        uri = "https://{0}:{1}/mgmt/tm/task/cli/script/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            task_id
        )
        resp = self.client.api.put(uri, json=args)
        try:
            resp.json()
            return True
        except ValueError:
            raise F5ModuleError(
                "Failed to execute the async task on the device"
            )

    def _wait_for_async_task_to_finish_on_device(self, task_id):
        """Poll the task result endpoint until COMPLETED (or raise on FAILED)."""
        uri = "https://{0}:{1}/mgmt/tm/task/cli/script/{2}/result".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            task_id
        )
        while True:
            try:
                resp = self.client.api.get(uri, timeout=10)
            except (socket.timeout, ssl.SSLError):
                # Transient network hiccups while the device is busy; retry.
                continue
            try:
                response = resp.json()
            except ValueError:
                # It is possible that the API call can return invalid JSON.
                # This invalid JSON appears to be just empty strings.
                continue
            if response['_taskState'] == 'FAILED':
                raise F5ModuleError(
                    "qkview creation task failed unexpectedly."
                )
            if response['_taskState'] == 'COMPLETED':
                return True
            time.sleep(3)

    def _remove_temporary_cli_script_from_device(self):
        uri = "https://{0}:{1}/mgmt/tm/task/cli/script/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name('Common', '__ansible_mkqkview')
        )
        try:
            self.client.api.delete(uri)
            return True
        except ValueError:
            raise F5ModuleError(
                "Failed to remove the temporary cli script from the device."
            )

    def _move_qkview_to_download(self):
        # qkview writes to /var/tmp; move it into the REST download dir.
        uri = "https://{0}:{1}/mgmt/tm/util/unix-mv/".format(
            self.client.provider['server'],
            self.client.provider['server_port']
        )
        args = dict(
            command='run',
            utilCmdArgs='/var/tmp/{0} {1}/{0}'.format(self.want.filename, self.remote_dir)
        )
        self.client.api.post(uri, json=args)
        return True
class BulkLocationManager(BaseManager):
    """Manager for devices that serve downloads from the /bulk location."""
    def __init__(self, *args, **kwargs):
        super(BulkLocationManager, self).__init__(**kwargs)
        self.remote_dir = '/var/config/rest/bulk'

    def _download_file(self):
        """Download the staged qkview via the bulk file-transfer endpoint."""
        uri = "https://{0}:{1}/mgmt/shared/file-transfer/bulk/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            self.want.filename
        )
        download_file(self.client, uri, self.want.dest)
        if os.path.exists(self.want.dest):
            return True
        return False
class MadmLocationManager(BaseManager):
    """Manager for devices that serve downloads from the /madm location."""
    def __init__(self, *args, **kwargs):
        super(MadmLocationManager, self).__init__(**kwargs)
        self.remote_dir = '/var/config/rest/madm'

    def _download_file(self):
        """Download the staged qkview via the madm file-transfer endpoint."""
        uri = "https://{0}:{1}/mgmt/shared/file-transfer/madm/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            self.want.filename
        )
        download_file(self.client, uri, self.want.dest)
        if os.path.exists(self.want.dest):
            return True
        return False
class ArgumentSpec(object):
    """Argument specification for the bigip_qkview module.

    Combines the module-specific options with the common F5 connection
    arguments (f5_argument_spec).
    """
    def __init__(self):
        self.supports_check_mode = True
        argument_spec = dict(
            filename=dict(
                default='localhost.localdomain.qkview'
            ),
            asm_request_log=dict(
                type='bool',
                default='no',
            ),
            max_file_size=dict(
                type='int',
            ),
            complete_information=dict(
                default='no',
                type='bool'
            ),
            exclude_core=dict(
                default="no",
                type='bool'
            ),
            force=dict(
                default=True,
                type='bool'
            ),
            exclude=dict(
                type='list',
                choices=[
                    'all', 'audit', 'secure', 'bash_history'
                ]
            ),
            dest=dict(
                type='path',
                required=True
            )
        )
        self.argument_spec = {}
        self.argument_spec.update(f5_argument_spec)
        self.argument_spec.update(argument_spec)
def main():
    """Module entry point: build the spec, run the manager, emit results."""
    spec = ArgumentSpec()

    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
    )

    client = F5RestClient(**module.params)

    try:
        mm = ModuleManager(module=module, client=client)
        results = mm.exec_module()
        # Drop any auth tokens before exiting, on success and on failure.
        cleanup_tokens(client)
        exit_json(module, results, client)
    except F5ModuleError as ex:
        cleanup_tokens(client)
        fail_json(module, ex, client)
if __name__ == '__main__':
main()
| gpl-3.0 |
xecle/git-repo | subcmds/cherry_pick.py | 45 | 3410 | #
# Copyright (C) 2010 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import re
import sys
from command import Command
from git_command import GitCommand
CHANGE_ID_RE = re.compile(r'^\s*Change-Id: I([0-9a-f]{40})\s*$')
class CherryPick(Command):
    """repo subcommand that cherry-picks a commit and rewrites its Change-Id."""
    common = True
    helpSummary = "Cherry-pick a change."
    helpUsage = """
%prog <sha1>
"""
    helpDescription = """
'%prog' cherry-picks a change from one branch to another.
The change id will be updated, and a reference to the old
change id will be added.
"""

    def _Options(self, p):
        # No subcommand-specific options.
        pass

    def Execute(self, opt, args):
        if len(args) != 1:
            self.Usage()

        reference = args[0]

        # Resolve the user-supplied reference to a full commit sha1.
        p = GitCommand(None,
                       ['rev-parse', '--verify', reference],
                       capture_stdout = True,
                       capture_stderr = True)
        if p.Wait() != 0:
            print(p.stderr, file=sys.stderr)
            sys.exit(1)
        sha1 = p.stdout.strip()

        # Fetch the original commit message so it can be reformatted later.
        p = GitCommand(None, ['cat-file', 'commit', sha1], capture_stdout=True)
        if p.Wait() != 0:
            print("error: Failed to retrieve old commit message", file=sys.stderr)
            sys.exit(1)
        old_msg = self._StripHeader(p.stdout)

        p = GitCommand(None,
                       ['cherry-pick', sha1],
                       capture_stdout = True,
                       capture_stderr = True)
        status = p.Wait()

        print(p.stdout, file=sys.stdout)
        print(p.stderr, file=sys.stderr)

        if status == 0:
            # The cherry-pick was applied correctly. We just need to edit the
            # commit message.
            new_msg = self._Reformat(old_msg, sha1)
            p = GitCommand(None, ['commit', '--amend', '-F', '-'],
                           provide_stdin = True,
                           capture_stdout = True,
                           capture_stderr = True)
            p.stdin.write(new_msg)
            if p.Wait() != 0:
                print("error: Failed to update commit message", file=sys.stderr)
                sys.exit(1)
        else:
            # The cherry-pick conflicted; tell the user how to finish up
            # the message edit by hand.
            print('NOTE: When committing (please see above) and editing the commit '
                  'message, please remove the old Change-Id-line and add:')
            print(self._GetReference(sha1), file=sys.stderr)
            print(file=sys.stderr)

    def _IsChangeId(self, line):
        # Matches a Gerrit "Change-Id: I<40 hex chars>" trailer line.
        return CHANGE_ID_RE.match(line)

    def _GetReference(self, sha1):
        # Provenance line in the same style as 'git cherry-pick -x'.
        return "(cherry picked from commit %s)" % sha1

    def _StripHeader(self, commit_msg):
        # Drop everything up to and including the first blank line: the raw
        # 'git cat-file commit' header that precedes the message body.
        lines = commit_msg.splitlines()
        return "\n".join(lines[lines.index("")+1:])

    def _Reformat(self, old_msg, sha1):
        """Return old_msg with Change-Id lines removed and the provenance
        reference appended."""
        new_msg = []

        for line in old_msg.splitlines():
            if not self._IsChangeId(line):
                new_msg.append(line)

        # Add a blank line between the message and the change id/reference
        try:
            if new_msg[-1].strip() != "":
                new_msg.append("")
        except IndexError:
            # Empty message; just append the reference.
            pass

        new_msg.append(self._GetReference(sha1))
        return "\n".join(new_msg)
| apache-2.0 |
marckuz/django | tests/gis_tests/distapp/models.py | 259 | 1365 | from django.utils.encoding import python_2_unicode_compatible
from ..models import models
from ..utils import gisfield_may_be_null
@python_2_unicode_compatible
class NamedModel(models.Model):
    """Abstract base for the distance-test models: a name plus GeoManager.

    Only usable on databases with GIS support (required_db_features).
    """
    name = models.CharField(max_length=30)
    objects = models.GeoManager()

    class Meta:
        abstract = True
        required_db_features = ['gis_enabled']

    def __str__(self):
        return self.name
class SouthTexasCity(NamedModel):
    "City model on projected coordinate system for South Texas."
    # srid=32140 — presumably the South Texas projected CRS the tests
    # expect; confirm against the distance test fixtures.
    point = models.PointField(srid=32140)
class SouthTexasCityFt(NamedModel):
    "Same City model as above, but U.S. survey feet are the units."
    # Different srid (2278) than SouthTexasCity, same region per docstring.
    point = models.PointField(srid=2278)
class AustraliaCity(NamedModel):
    "City model for Australia, using WGS84."
    # No srid given, so the field uses the default (WGS84 per docstring).
    point = models.PointField()
class CensusZipcode(NamedModel):
    "Model for a few South Texas ZIP codes (in original Census NAD83)."
    # srid=4269 — the NAD83 geographic system referenced in the docstring.
    poly = models.PolygonField(srid=4269)
class SouthTexasZipcode(NamedModel):
    "Model for a few South Texas ZIP codes."
    # null-ability depends on backend capabilities (see ..utils helper).
    poly = models.PolygonField(srid=32140, null=gisfield_may_be_null)
class Interstate(NamedModel):
    "Geodetic model for U.S. Interstates."
    # Default (geographic) SRID, in contrast to SouthTexasInterstate below.
    path = models.LineStringField()
class SouthTexasInterstate(NamedModel):
    "Projected model for South Texas Interstates."
    # Same projected srid as SouthTexasCity for distance comparisons.
    path = models.LineStringField(srid=32140)
| bsd-3-clause |
ShiYw/Sigil | 3rdparty/python/Lib/ftplib.py | 9 | 38299 | """An FTP client class and some helper functions.
Based on RFC 959: File Transfer Protocol (FTP), by J. Postel and J. Reynolds
Example:
>>> from ftplib import FTP
>>> ftp = FTP('ftp.python.org') # connect to host, default port
>>> ftp.login() # default, i.e.: user anonymous, passwd anonymous@
'230 Guest login ok, access restrictions apply.'
>>> ftp.retrlines('LIST') # list directory contents
total 9
drwxr-xr-x 8 root wheel 1024 Jan 3 1994 .
drwxr-xr-x 8 root wheel 1024 Jan 3 1994 ..
drwxr-xr-x 2 root wheel 1024 Jan 3 1994 bin
drwxr-xr-x 2 root wheel 1024 Jan 3 1994 etc
d-wxrwxr-x 2 ftp wheel 1024 Sep 5 13:43 incoming
drwxr-xr-x 2 root wheel 1024 Nov 17 1993 lib
drwxr-xr-x 6 1094 wheel 1024 Sep 13 19:07 pub
drwxr-xr-x 3 root wheel 1024 Jan 3 1994 usr
-rw-r--r-- 1 root root 312 Aug 1 1994 welcome.msg
'226 Transfer complete.'
>>> ftp.quit()
'221 Goodbye.'
>>>
A nice test that reveals some of the network dialogue would be:
python ftplib.py -d localhost -l -p -l
"""
#
# Changes and improvements suggested by Steve Majewski.
# Modified by Jack to work on the mac.
# Modified by Siebren to support docstrings and PASV.
# Modified by Phil Schwartz to add storbinary and storlines callbacks.
# Modified by Giampaolo Rodola' to add TLS support.
#
import os
import sys
import socket
import warnings
from socket import _GLOBAL_DEFAULT_TIMEOUT
# Public API of this module (Netrc is kept only for backward compatibility).
__all__ = ["FTP", "Netrc"]

# Magic number from <socket.h>
MSG_OOB = 0x1                           # Process data out of band

# The standard FTP server control port
FTP_PORT = 21

# The sizehint parameter passed to readline() calls; lines longer than
# this are treated as a protocol error to avoid unbounded memory use.
MAXLINE = 8192
# Exception raised when an error or invalid response is received
class Error(Exception):
    """Base class for all ftplib exceptions."""
    pass
class error_reply(Error):
    """Unexpected [123]xx reply."""
    pass
class error_temp(Error):
    """4xx reply: transient failure (the request may succeed if retried)."""
    pass
class error_perm(Error):
    """5xx reply: permanent failure."""
    pass
class error_proto(Error):
    """Response does not begin with a digit in [1-5] (protocol violation)."""
    pass
# All exceptions (hopefully) that may be raised here and that aren't
# (always) programming errors on our side
# NOTE: extended with ssl.SSLError further down when ssl is available.
all_errors = (Error, OSError, EOFError)

# Line terminators (we always output CRLF, but accept any of CRLF, CR, LF)
CRLF = '\r\n'
B_CRLF = b'\r\n'
# The class itself
class FTP:
    '''An FTP client class.

    To create a connection, call the class using these arguments:
        host, user, passwd, acct, timeout

    The first four arguments are all strings, and have default value ''.
    timeout must be numeric and defaults to None if not passed,
    meaning that no timeout will be set on any ftp socket(s)
    If a timeout is passed, then this is now the default timeout for all ftp
    socket operations for this instance.

    Then use self.connect() with optional host and port argument.

    To download a file, use ftp.retrlines('RETR ' + filename),
    or ftp.retrbinary() with slightly different arguments.
    To upload a file, use ftp.storlines() or ftp.storbinary(),
    which have an open file as argument (see their definitions
    below for details).
    The download/upload functions first issue appropriate TYPE
    and PORT or PASV commands.
    '''

    debugging = 0
    host = ''
    port = FTP_PORT
    maxline = MAXLINE
    sock = None
    file = None
    welcome = None
    passiveserver = 1
    # Encoding for the control channel; latin-1 round-trips arbitrary bytes.
    encoding = "latin-1"

    # Initialization method (called by class instantiation).
    # Initialize host to localhost, port to standard ftp port
    # Optional arguments are host (for connect()),
    # and user, passwd, acct (for login())
    def __init__(self, host='', user='', passwd='', acct='',
                 timeout=_GLOBAL_DEFAULT_TIMEOUT, source_address=None):
        self.source_address = source_address
        self.timeout = timeout
        if host:
            self.connect(host)
            if user:
                self.login(user, passwd, acct)

    def __enter__(self):
        return self

    # Context management protocol: try to quit() if active
    def __exit__(self, *args):
        if self.sock is not None:
            try:
                self.quit()
            except (OSError, EOFError):
                pass
            finally:
                if self.sock is not None:
                    self.close()

    def connect(self, host='', port=0, timeout=-999, source_address=None):
        '''Connect to host.  Arguments are:
         - host: hostname to connect to (string, default previous host)
         - port: port to connect to (integer, default previous port)
         - timeout: the timeout to set against the ftp socket(s)
         - source_address: a 2-tuple (host, port) for the socket to bind
           to as its source address before connecting.
        '''
        if host != '':
            self.host = host
        if port > 0:
            self.port = port
        # -999 is a sentinel meaning "keep the timeout given to __init__".
        if timeout != -999:
            self.timeout = timeout
        if source_address is not None:
            self.source_address = source_address
        self.sock = socket.create_connection((self.host, self.port), self.timeout,
                                             source_address=self.source_address)
        self.af = self.sock.family
        self.file = self.sock.makefile('r', encoding=self.encoding)
        self.welcome = self.getresp()
        return self.welcome

    def getwelcome(self):
        '''Get the welcome message from the server.
        (this is read and squirreled away by connect())'''
        if self.debugging:
            print('*welcome*', self.sanitize(self.welcome))
        return self.welcome

    def set_debuglevel(self, level):
        '''Set the debugging level.
        The required argument level means:
        0: no debugging output (default)
        1: print commands and responses but not body text etc.
        2: also print raw lines read and sent before stripping CR/LF'''
        self.debugging = level
    debug = set_debuglevel

    def set_pasv(self, val):
        '''Use passive or active mode for data transfers.
        With a false argument, use the normal PORT mode,
        With a true argument, use the PASV command.'''
        self.passiveserver = val

    # Internal: "sanitize" a string for printing
    def sanitize(self, s):
        # Mask the argument of PASS commands so passwords never hit logs.
        if s[:5] in {'pass ', 'PASS '}:
            i = len(s.rstrip('\r\n'))
            s = s[:5] + '*'*(i-5) + s[i:]
        return repr(s)

    # Internal: send one line to the server, appending CRLF
    def putline(self, line):
        line = line + CRLF
        if self.debugging > 1:
            print('*put*', self.sanitize(line))
        self.sock.sendall(line.encode(self.encoding))

    # Internal: send one command to the server (through putline())
    def putcmd(self, line):
        if self.debugging: print('*cmd*', self.sanitize(line))
        self.putline(line)

    # Internal: return one line from the server, stripping CRLF.
    # Raise EOFError if the connection is closed
    def getline(self):
        line = self.file.readline(self.maxline + 1)
        if len(line) > self.maxline:
            raise Error("got more than %d bytes" % self.maxline)
        if self.debugging > 1:
            print('*get*', self.sanitize(line))
        if not line:
            raise EOFError
        if line[-2:] == CRLF:
            line = line[:-2]
        elif line[-1:] in CRLF:
            line = line[:-1]
        return line

    # Internal: get a response from the server, which may possibly
    # consist of multiple lines.  Return a single string with no
    # trailing CRLF.  If the response consists of multiple lines,
    # these are separated by '\n' characters in the string
    def getmultiline(self):
        line = self.getline()
        # A '-' after the 3-digit code marks a multi-line reply; read
        # until a line repeats the code without the '-'.
        if line[3:4] == '-':
            code = line[:3]
            while 1:
                nextline = self.getline()
                line = line + ('\n' + nextline)
                if nextline[:3] == code and \
                        nextline[3:4] != '-':
                    break
        return line

    # Internal: get a response from the server.
    # Raise various errors if the response indicates an error
    def getresp(self):
        resp = self.getmultiline()
        if self.debugging:
            print('*resp*', self.sanitize(resp))
        self.lastresp = resp[:3]
        c = resp[:1]
        if c in {'1', '2', '3'}:
            return resp
        if c == '4':
            raise error_temp(resp)
        if c == '5':
            raise error_perm(resp)
        raise error_proto(resp)

    def voidresp(self):
        """Expect a response beginning with '2'."""
        resp = self.getresp()
        if resp[:1] != '2':
            raise error_reply(resp)
        return resp

    def abort(self):
        '''Abort a file transfer.  Uses out-of-band data.
        This does not follow the procedure from the RFC to send Telnet
        IP and Synch; that doesn't seem to work with the servers I've
        tried.  Instead, just send the ABOR command as OOB data.'''
        line = b'ABOR' + B_CRLF
        if self.debugging > 1:
            print('*put urgent*', self.sanitize(line))
        self.sock.sendall(line, MSG_OOB)
        resp = self.getmultiline()
        if resp[:3] not in {'426', '225', '226'}:
            raise error_proto(resp)
        return resp

    def sendcmd(self, cmd):
        '''Send a command and return the response.'''
        self.putcmd(cmd)
        return self.getresp()

    def voidcmd(self, cmd):
        """Send a command and expect a response beginning with '2'."""
        self.putcmd(cmd)
        return self.voidresp()

    def sendport(self, host, port):
        '''Send a PORT command with the current host and the given
        port number.
        '''
        hbytes = host.split('.')
        # PORT wants the port split into high and low bytes.
        pbytes = [repr(port//256), repr(port%256)]
        bytes = hbytes + pbytes
        cmd = 'PORT ' + ','.join(bytes)
        return self.voidcmd(cmd)

    def sendeprt(self, host, port):
        '''Send a EPRT command with the current host and the given port number.'''
        # RFC 2428 address-family codes: 1 = IPv4, 2 = IPv6.
        af = 0
        if self.af == socket.AF_INET:
            af = 1
        if self.af == socket.AF_INET6:
            af = 2
        if af == 0:
            raise error_proto('unsupported address family')
        fields = ['', repr(af), host, repr(port), '']
        cmd = 'EPRT ' + '|'.join(fields)
        return self.voidcmd(cmd)

    def makeport(self):
        '''Create a new socket and send a PORT command for it.'''
        err = None
        sock = None
        for res in socket.getaddrinfo(None, 0, self.af, socket.SOCK_STREAM, 0, socket.AI_PASSIVE):
            af, socktype, proto, canonname, sa = res
            try:
                sock = socket.socket(af, socktype, proto)
                sock.bind(sa)
            except OSError as _:
                err = _
                if sock:
                    sock.close()
                sock = None
                continue
            break
        if sock is None:
            if err is not None:
                raise err
            else:
                raise OSError("getaddrinfo returns an empty list")
        sock.listen(1)
        port = sock.getsockname()[1] # Get proper port
        host = self.sock.getsockname()[0] # Get proper host
        if self.af == socket.AF_INET:
            resp = self.sendport(host, port)
        else:
            resp = self.sendeprt(host, port)
        if self.timeout is not _GLOBAL_DEFAULT_TIMEOUT:
            sock.settimeout(self.timeout)
        return sock

    def makepasv(self):
        # PASV for IPv4, EPSV (RFC 2428) otherwise; EPSV replies carry no
        # host, so the control connection's peer address is reused.
        if self.af == socket.AF_INET:
            host, port = parse227(self.sendcmd('PASV'))
        else:
            host, port = parse229(self.sendcmd('EPSV'), self.sock.getpeername())
        return host, port

    def ntransfercmd(self, cmd, rest=None):
        """Initiate a transfer over the data connection.

        If the transfer is active, send a port command and the
        transfer command, and accept the connection.  If the server is
        passive, send a pasv command, connect to it, and start the
        transfer command.  Either way, return the socket for the
        connection and the expected size of the transfer.  The
        expected size may be None if it could not be determined.

        Optional `rest' argument can be a string that is sent as the
        argument to a REST command.  This is essentially a server
        marker used to tell the server to skip over any data up to the
        given marker.
        """
        size = None
        if self.passiveserver:
            host, port = self.makepasv()
            conn = socket.create_connection((host, port), self.timeout,
                                            source_address=self.source_address)
            try:
                if rest is not None:
                    self.sendcmd("REST %s" % rest)
                resp = self.sendcmd(cmd)
                # Some servers apparently send a 200 reply to
                # a LIST or STOR command, before the 150 reply
                # (and way before the 226 reply). This seems to
                # be in violation of the protocol (which only allows
                # 1xx or error messages for LIST), so we just discard
                # this response.
                if resp[0] == '2':
                    resp = self.getresp()
                if resp[0] != '1':
                    raise error_reply(resp)
            except:
                conn.close()
                raise
        else:
            with self.makeport() as sock:
                if rest is not None:
                    self.sendcmd("REST %s" % rest)
                resp = self.sendcmd(cmd)
                # See above.
                if resp[0] == '2':
                    resp = self.getresp()
                if resp[0] != '1':
                    raise error_reply(resp)
                conn, sockaddr = sock.accept()
                if self.timeout is not _GLOBAL_DEFAULT_TIMEOUT:
                    conn.settimeout(self.timeout)
        if resp[:3] == '150':
            # this is conditional in case we received a 125
            size = parse150(resp)
        return conn, size

    def transfercmd(self, cmd, rest=None):
        """Like ntransfercmd() but returns only the socket."""
        return self.ntransfercmd(cmd, rest)[0]

    def login(self, user = '', passwd = '', acct = ''):
        '''Login, default anonymous.'''
        if not user:
            user = 'anonymous'
        if not passwd:
            passwd = ''
        if not acct:
            acct = ''
        if user == 'anonymous' and passwd in {'', '-'}:
            # If there is no anonymous ftp password specified
            # then we'll just use anonymous@
            # We don't send any other thing because:
            # - We want to remain anonymous
            # - We want to stop SPAM
            # - We don't want to let ftp sites to discriminate by the user,
            #   host or country.
            passwd = passwd + 'anonymous@'
        resp = self.sendcmd('USER ' + user)
        # A 3xx reply means more credentials are needed (PASS, then ACCT).
        if resp[0] == '3':
            resp = self.sendcmd('PASS ' + passwd)
        if resp[0] == '3':
            resp = self.sendcmd('ACCT ' + acct)
        if resp[0] != '2':
            raise error_reply(resp)
        return resp

    def retrbinary(self, cmd, callback, blocksize=8192, rest=None):
        """Retrieve data in binary mode.  A new port is created for you.

        Args:
          cmd: A RETR command.
          callback: A single parameter callable to be called on each
                    block of data read.
          blocksize: The maximum number of bytes to read from the
                     socket at one time.  [default: 8192]
          rest: Passed to transfercmd().  [default: None]

        Returns:
          The response code.
        """
        self.voidcmd('TYPE I')
        with self.transfercmd(cmd, rest) as conn:
            while 1:
                data = conn.recv(blocksize)
                if not data:
                    break
                callback(data)
            # shutdown ssl layer
            if _SSLSocket is not None and isinstance(conn, _SSLSocket):
                conn.unwrap()
        return self.voidresp()

    def retrlines(self, cmd, callback = None):
        """Retrieve data in line mode.  A new port is created for you.

        Args:
          cmd: A RETR, LIST, or NLST command.
          callback: An optional single parameter callable that is called
                    for each line with the trailing CRLF stripped.
                    [default: print_line()]

        Returns:
          The response code.
        """
        if callback is None:
            callback = print_line
        resp = self.sendcmd('TYPE A')
        with self.transfercmd(cmd) as conn, \
                 conn.makefile('r', encoding=self.encoding) as fp:
            while 1:
                line = fp.readline(self.maxline + 1)
                if len(line) > self.maxline:
                    raise Error("got more than %d bytes" % self.maxline)
                if self.debugging > 2:
                    print('*retr*', repr(line))
                if not line:
                    break
                if line[-2:] == CRLF:
                    line = line[:-2]
                elif line[-1:] == '\n':
                    line = line[:-1]
                callback(line)
            # shutdown ssl layer
            if _SSLSocket is not None and isinstance(conn, _SSLSocket):
                conn.unwrap()
        return self.voidresp()

    def storbinary(self, cmd, fp, blocksize=8192, callback=None, rest=None):
        """Store a file in binary mode.  A new port is created for you.

        Args:
          cmd: A STOR command.
          fp: A file-like object with a read(num_bytes) method.
          blocksize: The maximum data size to read from fp and send over
                     the connection at once.  [default: 8192]
          callback: An optional single parameter callable that is called on
                    each block of data after it is sent.  [default: None]
          rest: Passed to transfercmd().  [default: None]

        Returns:
          The response code.
        """
        self.voidcmd('TYPE I')
        with self.transfercmd(cmd, rest) as conn:
            while 1:
                buf = fp.read(blocksize)
                if not buf:
                    break
                conn.sendall(buf)
                if callback:
                    callback(buf)
            # shutdown ssl layer
            if _SSLSocket is not None and isinstance(conn, _SSLSocket):
                conn.unwrap()
        return self.voidresp()

    def storlines(self, cmd, fp, callback=None):
        """Store a file in line mode.  A new port is created for you.

        Args:
          cmd: A STOR command.
          fp: A file-like object with a readline() method.
          callback: An optional single parameter callable that is called on
                    each line after it is sent.  [default: None]

        Returns:
          The response code.
        """
        self.voidcmd('TYPE A')
        with self.transfercmd(cmd) as conn:
            while 1:
                buf = fp.readline(self.maxline + 1)
                if len(buf) > self.maxline:
                    raise Error("got more than %d bytes" % self.maxline)
                if not buf:
                    break
                # Normalize line endings to CRLF as required on the wire.
                if buf[-2:] != B_CRLF:
                    if buf[-1] in B_CRLF: buf = buf[:-1]
                    buf = buf + B_CRLF
                conn.sendall(buf)
                if callback:
                    callback(buf)
            # shutdown ssl layer
            if _SSLSocket is not None and isinstance(conn, _SSLSocket):
                conn.unwrap()
        return self.voidresp()

    def acct(self, password):
        '''Send new account name.'''
        cmd = 'ACCT ' + password
        return self.voidcmd(cmd)

    def nlst(self, *args):
        '''Return a list of files in a given directory (default the current).'''
        cmd = 'NLST'
        for arg in args:
            cmd = cmd + (' ' + arg)
        files = []
        self.retrlines(cmd, files.append)
        return files

    def dir(self, *args):
        '''List a directory in long form.
        By default list current directory to stdout.
        Optional last argument is callback function; all
        non-empty arguments before it are concatenated to the
        LIST command.  (This *should* only be used for a pathname.)'''
        cmd = 'LIST'
        func = None
        if args[-1:] and type(args[-1]) != type(''):
            args, func = args[:-1], args[-1]
        for arg in args:
            if arg:
                cmd = cmd + (' ' + arg)
        self.retrlines(cmd, func)

    def mlsd(self, path="", facts=[]):
        '''List a directory in a standardized format by using MLSD
        command (RFC-3659). If path is omitted the current directory
        is assumed. "facts" is a list of strings representing the type
        of information desired (e.g. ["type", "size", "perm"]).

        Return a generator object yielding a tuple of two elements
        for every file found in path.
        First element is the file name, the second one is a dictionary
        including a variable number of "facts" depending on the server
        and whether "facts" argument has been provided.
        '''
        if facts:
            self.sendcmd("OPTS MLST " + ";".join(facts) + ";")
        if path:
            cmd = "MLSD %s" % path
        else:
            cmd = "MLSD"
        lines = []
        self.retrlines(cmd, lines.append)
        for line in lines:
            # Each MLSD line is "fact1=val1;fact2=val2; name".
            facts_found, _, name = line.rstrip(CRLF).partition(' ')
            entry = {}
            for fact in facts_found[:-1].split(";"):
                key, _, value = fact.partition("=")
                entry[key.lower()] = value
            yield (name, entry)

    def rename(self, fromname, toname):
        '''Rename a file.'''
        resp = self.sendcmd('RNFR ' + fromname)
        if resp[0] != '3':
            raise error_reply(resp)
        return self.voidcmd('RNTO ' + toname)

    def delete(self, filename):
        '''Delete a file.'''
        resp = self.sendcmd('DELE ' + filename)
        if resp[:3] in {'250', '200'}:
            return resp
        else:
            raise error_reply(resp)

    def cwd(self, dirname):
        '''Change to a directory.'''
        if dirname == '..':
            try:
                return self.voidcmd('CDUP')
            except error_perm as msg:
                # 500 means CDUP unsupported; fall through to plain CWD.
                if msg.args[0][:3] != '500':
                    raise
        elif dirname == '':
            dirname = '.'  # does nothing, but could return error
        cmd = 'CWD ' + dirname
        return self.voidcmd(cmd)

    def size(self, filename):
        '''Retrieve the size of a file.'''
        # The SIZE command is defined in RFC-3659
        resp = self.sendcmd('SIZE ' + filename)
        if resp[:3] == '213':
            s = resp[3:].strip()
            return int(s)

    def mkd(self, dirname):
        '''Make a directory, return its full pathname.'''
        resp = self.voidcmd('MKD ' + dirname)
        # fix around non-compliant implementations such as IIS shipped
        # with Windows server 2003
        if not resp.startswith('257'):
            return ''
        return parse257(resp)

    def rmd(self, dirname):
        '''Remove a directory.'''
        return self.voidcmd('RMD ' + dirname)

    def pwd(self):
        '''Return current working directory.'''
        resp = self.voidcmd('PWD')
        # fix around non-compliant implementations such as IIS shipped
        # with Windows server 2003
        if not resp.startswith('257'):
            return ''
        return parse257(resp)

    def quit(self):
        '''Quit, and close the connection.'''
        resp = self.voidcmd('QUIT')
        self.close()
        return resp

    def close(self):
        '''Close the connection without assuming anything about it.'''
        if self.file is not None:
            self.file.close()
        if self.sock is not None:
            self.sock.close()
        self.file = self.sock = None
try:
    import ssl
except ImportError:
    # ssl is optional; without it only plain FTP is available.
    _SSLSocket = None
else:
    _SSLSocket = ssl.SSLSocket

    class FTP_TLS(FTP):
        '''A FTP subclass which adds TLS support to FTP as described
        in RFC-4217.

        Connect as usual to port 21 implicitly securing the FTP control
        connection before authenticating.

        Securing the data connection requires user to explicitly ask
        for it by calling prot_p() method.

        Usage example:
        >>> from ftplib import FTP_TLS
        >>> ftps = FTP_TLS('ftp.python.org')
        >>> ftps.login()  # login anonymously previously securing control channel
        '230 Guest login ok, access restrictions apply.'
        >>> ftps.prot_p()  # switch to secure data connection
        '200 Protection level set to P'
        >>> ftps.retrlines('LIST')  # list directory content securely
        total 9
        drwxr-xr-x   8 root     wheel        1024 Jan  3  1994 .
        drwxr-xr-x   8 root     wheel        1024 Jan  3  1994 ..
        drwxr-xr-x   2 root     wheel        1024 Jan  3  1994 bin
        drwxr-xr-x   2 root     wheel        1024 Jan  3  1994 etc
        d-wxrwxr-x   2 ftp      wheel        1024 Sep  5 13:43 incoming
        drwxr-xr-x   2 root     wheel        1024 Nov 17  1993 lib
        drwxr-xr-x   6 1094     wheel        1024 Sep 13 19:07 pub
        drwxr-xr-x   3 root     wheel        1024 Jan  3  1994 usr
        -rw-r--r--   1 root     root          312 Aug  1  1994 welcome.msg
        '226 Transfer complete.'
        >>> ftps.quit()
        '221 Goodbye.'
        >>>
        '''
        ssl_version = ssl.PROTOCOL_SSLv23

        def __init__(self, host='', user='', passwd='', acct='', keyfile=None,
                     certfile=None, context=None,
                     timeout=_GLOBAL_DEFAULT_TIMEOUT, source_address=None):
            if context is not None and keyfile is not None:
                raise ValueError("context and keyfile arguments are mutually "
                                 "exclusive")
            if context is not None and certfile is not None:
                raise ValueError("context and certfile arguments are mutually "
                                 "exclusive")
            self.keyfile = keyfile
            self.certfile = certfile
            if context is None:
                context = ssl._create_stdlib_context(self.ssl_version,
                                                     certfile=certfile,
                                                     keyfile=keyfile)
            self.context = context
            self._prot_p = False
            FTP.__init__(self, host, user, passwd, acct, timeout, source_address)

        def login(self, user='', passwd='', acct='', secure=True):
            # Secure the control channel (AUTH) before sending credentials.
            if secure and not isinstance(self.sock, ssl.SSLSocket):
                self.auth()
            return FTP.login(self, user, passwd, acct)

        def auth(self):
            '''Set up secure control connection by using TLS/SSL.'''
            if isinstance(self.sock, ssl.SSLSocket):
                raise ValueError("Already using TLS")
            if self.ssl_version >= ssl.PROTOCOL_SSLv23:
                resp = self.voidcmd('AUTH TLS')
            else:
                resp = self.voidcmd('AUTH SSL')
            self.sock = self.context.wrap_socket(self.sock,
                                                 server_hostname=self.host)
            self.file = self.sock.makefile(mode='r', encoding=self.encoding)
            return resp

        def ccc(self):
            '''Switch back to a clear-text control connection.'''
            if not isinstance(self.sock, ssl.SSLSocket):
                raise ValueError("not using TLS")
            resp = self.voidcmd('CCC')
            self.sock = self.sock.unwrap()
            return resp

        def prot_p(self):
            '''Set up secure data connection.'''
            # PROT defines whether or not the data channel is to be protected.
            # Though RFC-2228 defines four possible protection levels,
            # RFC-4217 only recommends two, Clear and Private.
            # Clear (PROT C) means that no security is to be used on the
            # data-channel, Private (PROT P) means that the data-channel
            # should be protected by TLS.
            # PBSZ command MUST still be issued, but must have a parameter of
            # '0' to indicate that no buffering is taking place and the data
            # connection should not be encapsulated.
            self.voidcmd('PBSZ 0')
            resp = self.voidcmd('PROT P')
            self._prot_p = True
            return resp

        def prot_c(self):
            '''Set up clear text data connection.'''
            resp = self.voidcmd('PROT C')
            self._prot_p = False
            return resp

        # --- Overridden FTP methods

        def ntransfercmd(self, cmd, rest=None):
            conn, size = FTP.ntransfercmd(self, cmd, rest)
            if self._prot_p:
                conn = self.context.wrap_socket(conn,
                                                server_hostname=self.host)
            return conn, size

        def abort(self):
            # overridden as we can't pass MSG_OOB flag to sendall()
            line = b'ABOR' + B_CRLF
            self.sock.sendall(line)
            resp = self.getmultiline()
            if resp[:3] not in {'426', '225', '226'}:
                raise error_proto(resp)
            return resp

    __all__.append('FTP_TLS')
    all_errors = (Error, OSError, EOFError, ssl.SSLError)
# Lazily-compiled pattern shared across parse150() calls.
_150_re = None

def parse150(resp):
    '''Parse the '150' response for a RETR request.

    Returns the expected transfer size or None; size is not guaranteed to
    be present in the 150 message.

    Raises error_reply if *resp* is not a 150 reply at all.
    '''
    if resp[:3] != '150':
        raise error_reply(resp)
    global _150_re
    if _150_re is None:
        import re
        # Raw string: the original non-raw pattern relied on the invalid
        # escape sequences '\(' and '\d' surviving, which is deprecated
        # (and a SyntaxWarning in newer Python versions).
        _150_re = re.compile(
            r"150 .* \((\d+) bytes\)", re.IGNORECASE | re.ASCII)
    m = _150_re.match(resp)
    if not m:
        return None
    return int(m.group(1))
# Lazily-compiled pattern shared across parse227() calls.
_227_re = None

def parse227(resp):
    '''Parse the '227' response for a PASV request.
    Raises error_proto if it does not contain '(h1,h2,h3,h4,p1,p2)'
    Return ('host.addr.as.numbers', port#) tuple.'''
    if resp[:3] != '227':
        raise error_reply(resp)
    global _227_re
    if _227_re is None:
        import re
        _227_re = re.compile(r'(\d+),(\d+),(\d+),(\d+),(\d+),(\d+)', re.ASCII)
    match = _227_re.search(resp)
    if match is None:
        raise error_proto(resp)
    # First four groups are the IPv4 octets, last two the port bytes.
    *octets, port_hi, port_lo = match.groups()
    return '.'.join(octets), (int(port_hi) << 8) + int(port_lo)
def parse229(resp, peer):
    '''Parse the '229' response for a EPSV request.
    Raises error_proto if it does not contain '(|||port|)'
    Return ('host.addr.as.numbers', port#) tuple.'''
    if resp[:3] != '229':
        raise error_reply(resp)
    start = resp.find('(')
    if start < 0:
        raise error_proto(resp)
    end = resp.find(')', start + 1)
    if end < 0:
        raise error_proto(resp)  # should contain '(|||port|)'
    # The character right after '(' is the delimiter; it must also
    # appear right before ')'.
    delim = resp[start + 1]
    if resp[end - 1] != delim:
        raise error_proto(resp)
    fields = resp[start + 1:end].split(delim)
    if len(fields) != 5:
        raise error_proto(resp)
    # EPSV replies never carry a host, so reuse the control connection's peer.
    return peer[0], int(fields[3])
def parse257(resp):
    '''Parse the '257' response for a MKD or PWD request.
    This is a response to a MKD or PWD request: a directory name.
    Returns the directoryname in the 257 reply.'''
    if resp[:3] != '257':
        raise error_reply(resp)
    if resp[3:5] != ' "':
        return ''  # Not compliant to RFC 959, but UNIX ftpd does this
    chars = []
    pos = 5
    length = len(resp)
    while pos < length:
        ch = resp[pos]
        pos += 1
        if ch == '"':
            # A doubled quote is an escaped literal quote; a lone quote
            # terminates the directory name.
            if pos >= length or resp[pos] != '"':
                break
            pos += 1
        chars.append(ch)
    return ''.join(chars)
def print_line(line):
    '''Default retrlines callback to print a line.'''
    # One line per call, trailing CRLF already stripped by the caller.
    print(line)
def ftpcp(source, sourcename, target, targetname = '', type = 'I'):
    '''Copy file from one FTP-instance to another.

    Both *source* and *target* are connected FTP instances; the copy is
    server-to-server (FXP style): the source sends directly to the target.
    '''
    targetname = targetname or sourcename
    mode_cmd = 'TYPE ' + type
    source.voidcmd(mode_cmd)
    target.voidcmd(mode_cmd)
    host, port = parse227(source.sendcmd('PASV'))
    target.sendport(host, port)
    # RFC 959: the user must "listen" [...] BEFORE sending the
    # transfer request.
    # So: STOR before RETR, because here the target is a "user".
    treply = target.sendcmd('STOR ' + targetname)
    if treply[:3] not in {'125', '150'}:
        raise error_proto  # RFC 959
    sreply = source.sendcmd('RETR ' + sourcename)
    if sreply[:3] not in {'125', '150'}:
        raise error_proto  # RFC 959
    source.voidresp()
    target.voidresp()
class Netrc:
    """Class to parse & provide access to 'netrc' format files.

    See the netrc(4) man page for information on the file format.

    WARNING: This class is obsolete -- use module netrc instead.
    """
    # Fallback credentials taken from a 'default' entry, if any.
    __defuser = None
    __defpasswd = None
    __defacct = None

    def __init__(self, filename=None):
        warnings.warn("This class is deprecated, use the netrc module instead",
                      DeprecationWarning, 2)
        if filename is None:
            if "HOME" in os.environ:
                filename = os.path.join(os.environ["HOME"],
                                        ".netrc")
            else:
                raise OSError("specify file to load or set $HOME")
        self.__hosts = {}
        self.__macros = {}
        fp = open(filename, "r")
        in_macro = 0
        while 1:
            line = fp.readline()
            if not line:
                break
            # A 'macdef' body runs until the first blank line.
            if in_macro and line.strip():
                macro_lines.append(line)
                continue
            elif in_macro:
                self.__macros[macro_name] = tuple(macro_lines)
                in_macro = 0
            words = line.split()
            host = user = passwd = acct = None
            default = 0
            i = 0
            # Scan keyword/value pairs on this line.
            while i < len(words):
                w1 = words[i]
                if i+1 < len(words):
                    w2 = words[i + 1]
                else:
                    w2 = None
                if w1 == 'default':
                    default = 1
                elif w1 == 'machine' and w2:
                    host = w2.lower()
                    i = i + 1
                elif w1 == 'login' and w2:
                    user = w2
                    i = i + 1
                elif w1 == 'password' and w2:
                    passwd = w2
                    i = i + 1
                elif w1 == 'account' and w2:
                    acct = w2
                    i = i + 1
                elif w1 == 'macdef' and w2:
                    macro_name = w2
                    macro_lines = []
                    in_macro = 1
                    break
                i = i + 1
            if default:
                # 'default' entries only fill in values not already set.
                self.__defuser = user or self.__defuser
                self.__defpasswd = passwd or self.__defpasswd
                self.__defacct = acct or self.__defacct
            if host:
                # Merge with any earlier entry for the same host.
                if host in self.__hosts:
                    ouser, opasswd, oacct = \
                           self.__hosts[host]
                    user = user or ouser
                    passwd = passwd or opasswd
                    acct = acct or oacct
                self.__hosts[host] = user, passwd, acct
        fp.close()

    def get_hosts(self):
        """Return a list of hosts mentioned in the .netrc file."""
        return self.__hosts.keys()

    def get_account(self, host):
        """Returns login information for the named host.

        The return value is a triple containing userid,
        password, and the accounting field.
        """
        host = host.lower()
        user = passwd = acct = None
        if host in self.__hosts:
            user, passwd, acct = self.__hosts[host]
        # Fall back to the 'default' entry for any missing field.
        user = user or self.__defuser
        passwd = passwd or self.__defpasswd
        acct = acct or self.__defacct
        return user, passwd, acct

    def get_macros(self):
        """Return a list of all defined macro names."""
        return self.__macros.keys()

    def get_macro(self, macro):
        """Return a sequence of lines which define a named macro."""
        return self.__macros[macro]
def test():
    '''Test program.
    Usage: ftp [-d] [-r[file]] host [-l[dir]] [-d[dir]] [-p] [file] ...

    -d dir
    -l list
    -p password
    '''
    if len(sys.argv) < 2:
        print(test.__doc__)
        sys.exit(0)

    debugging = 0
    rcfile = None
    # Each leading -d bumps the debug level by one.
    while sys.argv[1] == '-d':
        debugging = debugging+1
        del sys.argv[1]
    if sys.argv[1][:2] == '-r':
        # get name of alternate ~/.netrc file:
        rcfile = sys.argv[1][2:]
        del sys.argv[1]
    host = sys.argv[1]
    ftp = FTP(host)
    ftp.set_debuglevel(debugging)
    userid = passwd = acct = ''
    try:
        netrc = Netrc(rcfile)
    except OSError:
        # Only complain when the user explicitly asked for a file;
        # a missing default ~/.netrc silently falls back to anonymous.
        if rcfile is not None:
            sys.stderr.write("Could not open account file"
                             " -- using anonymous login.")
    else:
        try:
            userid, passwd, acct = netrc.get_account(host)
        except KeyError:
            # no account for host
            sys.stderr.write(
                    "No account -- using anonymous login.")
    ftp.login(userid, passwd, acct)
    # Remaining arguments: -l lists, -d changes directory, -p toggles
    # passive mode, anything else is retrieved as a binary file.
    for file in sys.argv[2:]:
        if file[:2] == '-l':
            ftp.dir(file[2:])
        elif file[:2] == '-d':
            cmd = 'CWD'
            if file[2:]: cmd = cmd + ' ' + file[2:]
            resp = ftp.sendcmd(cmd)
        elif file == '-p':
            ftp.set_pasv(not ftp.passiveserver)
        else:
            ftp.retrbinary('RETR ' + file, \
                           sys.stdout.write, 1024)
    ftp.quit()
# Run the interactive test driver when executed as a script.
if __name__ == '__main__':
    test()
| gpl-3.0 |
berkmancenter/mediacloud | apps/common/tests/python/mediawords/job/test_broker_lock.py | 1 | 1763 | import time
from typing import List, Type
from mediawords.job import JobBroker
from mediawords.util.log import create_logger
from .setup_broker_test import AbstractBrokerTestCase, Worker
log = create_logger(__name__)
class TestBrokerLock(AbstractBrokerTestCase):
    # Exercises job-level locking: while one worker holds the lock for a
    # given test_id, a second identical job must be rejected.

    @classmethod
    def worker_paths(cls) -> List[Worker]:
        workers_path = '/opt/mediacloud/tests/python/mediawords/job/test_broker_lock'

        # Need 2+ workers to see the effect of locking
        worker_count = 2

        return [
            Worker(
                queue_name='TestPerlWorkerLock',
                worker_path=f"{workers_path}/perl_worker.pl",
                worker_count=worker_count,
            ),
            Worker(
                queue_name='TestPythonWorkerLock',
                worker_path=f"{workers_path}/python_worker.py",
                worker_count=worker_count,
            ),
        ]

    @classmethod
    def broker_class(cls) -> Type[JobBroker]:
        return JobBroker

    def test_lock(self):
        # Same test_id for both jobs so they contend for the same lock.
        lock_test_id = 123

        for worker in self.WORKERS:
            log.info("Adding the first job to the queue which will take 10+ seconds to run...")
            job_id = worker.app.add_to_queue(test_id=lock_test_id, x=2, y=3)

            log.info("Waiting for the job to reach the queue...")
            time.sleep(2)

            # While assuming that the first job is currently running (and thus is "locked"):
            log.info("Testing if a subsequent job fails with a lock problem...")
            assert worker.app.run_remotely(test_id=lock_test_id, x=3, y=4) is None, "Second job shouldn't work"

            log.info("Waiting for the first job to finish...")
            assert worker.app.get_result(job_id=job_id) == 5
| agpl-3.0 |
dsm054/pandas | pandas/tests/indexes/datetimes/test_partial_slicing.py | 1 | 15592 | """ test partial slicing on Series/Frame """
from datetime import datetime
import operator as op
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, Index, Series, Timedelta, Timestamp, date_range)
from pandas.core.indexing import IndexingError
from pandas.util import testing as tm
class TestSlicing(object):
    """Tests for slicing Series/DataFrames backed by a DatetimeIndex:
    positional slicing, slicing with negative/zero step, and partial
    string indexing at various resolutions (year through microsecond).
    """
    def test_dti_slicing(self):
        # Fancy (list) indexing keeps values but must drop the freq.
        dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
        dti2 = dti[[1, 3, 5]]
        v1 = dti2[0]
        v2 = dti2[1]
        v3 = dti2[2]
        assert v1 == Timestamp('2/28/2005')
        assert v2 == Timestamp('4/30/2005')
        assert v3 == Timestamp('6/30/2005')
        # don't carry freq through irregular slicing
        assert dti2.freq is None
    def test_slice_keeps_name(self):
        # GH4226
        st = pd.Timestamp('2013-07-01 00:00:00', tz='America/Los_Angeles')
        et = pd.Timestamp('2013-07-02 00:00:00', tz='America/Los_Angeles')
        dr = pd.date_range(st, et, freq='H', name='timebucket')
        assert dr[1:].name == dr.name
    def test_slice_with_negative_step(self):
        ts = Series(np.arange(20),
                    date_range('2014-01-01', periods=20, freq='MS'))
        SLC = pd.IndexSlice
        # Label-based, .loc and plain [] slicing must all agree with the
        # equivalent positional (iloc) slice.
        def assert_slices_equivalent(l_slc, i_slc):
            tm.assert_series_equal(ts[l_slc], ts.iloc[i_slc])
            tm.assert_series_equal(ts.loc[l_slc], ts.iloc[i_slc])
            tm.assert_series_equal(ts.loc[l_slc], ts.iloc[i_slc])
        assert_slices_equivalent(SLC[Timestamp('2014-10-01')::-1], SLC[9::-1])
        assert_slices_equivalent(SLC['2014-10-01'::-1], SLC[9::-1])
        assert_slices_equivalent(SLC[:Timestamp('2014-10-01'):-1], SLC[:8:-1])
        assert_slices_equivalent(SLC[:'2014-10-01':-1], SLC[:8:-1])
        assert_slices_equivalent(SLC['2015-02-01':'2014-10-01':-1],
                                 SLC[13:8:-1])
        assert_slices_equivalent(SLC[Timestamp('2015-02-01'):Timestamp(
            '2014-10-01'):-1], SLC[13:8:-1])
        assert_slices_equivalent(SLC['2015-02-01':Timestamp('2014-10-01'):-1],
                                 SLC[13:8:-1])
        assert_slices_equivalent(SLC[Timestamp('2015-02-01'):'2014-10-01':-1],
                                 SLC[13:8:-1])
        # Reversed bounds with a negative step yield an empty slice.
        assert_slices_equivalent(SLC['2014-10-01':'2015-02-01':-1], SLC[:0])
    def test_slice_with_zero_step_raises(self):
        ts = Series(np.arange(20),
                    date_range('2014-01-01', periods=20, freq='MS'))
        with pytest.raises(ValueError, match='slice step cannot be zero'):
            ts[::0]
        with pytest.raises(ValueError, match='slice step cannot be zero'):
            ts.loc[::0]
        with pytest.raises(ValueError, match='slice step cannot be zero'):
            ts.loc[::0]
    def test_slice_bounds_empty(self):
        # GH 14354
        empty_idx = DatetimeIndex(freq='1H', periods=0, end='2015')
        right = empty_idx._maybe_cast_slice_bound('2015-01-02', 'right', 'loc')
        exp = Timestamp('2015-01-02 23:59:59.999999999')
        assert right == exp
        left = empty_idx._maybe_cast_slice_bound('2015-01-02', 'left', 'loc')
        exp = Timestamp('2015-01-02 00:00:00')
        assert left == exp
    def test_slice_duplicate_monotonic(self):
        # https://github.com/pandas-dev/pandas/issues/16515
        idx = pd.DatetimeIndex(['2017', '2017'])
        result = idx._maybe_cast_slice_bound('2017-01-01', 'left', 'loc')
        expected = Timestamp('2017-01-01')
        assert result == expected
    def test_monotone_DTI_indexing_bug(self):
        # GH 19362
        # Testing accessing the first element in a montononic descending
        # partial string indexing.
        df = pd.DataFrame(list(range(5)))
        date_list = ['2018-01-02', '2017-02-10', '2016-03-10',
                     '2015-03-15', '2014-03-16']
        date_index = pd.to_datetime(date_list)
        df['date'] = date_index
        expected = pd.DataFrame({0: list(range(5)), 'date': date_index})
        tm.assert_frame_equal(df, expected)
        df = pd.DataFrame({'A': [1, 2, 3]},
                          index=pd.date_range('20170101',
                                              periods=3)[::-1])
        expected = pd.DataFrame({'A': 1},
                                index=pd.date_range('20170103',
                                                    periods=1))
        tm.assert_frame_equal(df.loc['2017-01-03'], expected)
    def test_slice_year(self):
        dti = DatetimeIndex(freq='B', start=datetime(2005, 1, 1), periods=500)
        s = Series(np.arange(len(dti)), index=dti)
        result = s['2005']
        expected = s[s.index.year == 2005]
        tm.assert_series_equal(result, expected)
        df = DataFrame(np.random.rand(len(dti), 5), index=dti)
        result = df.loc['2005']
        expected = df[df.index.year == 2005]
        tm.assert_frame_equal(result, expected)
        # A year string against a daily index resolves to a slice.
        rng = date_range('1/1/2000', '1/1/2010')
        result = rng.get_loc('2009')
        expected = slice(3288, 3653)
        assert result == expected
    def test_slice_quarter(self):
        dti = DatetimeIndex(freq='D', start=datetime(2000, 6, 1), periods=500)
        s = Series(np.arange(len(dti)), index=dti)
        assert len(s['2001Q1']) == 90
        df = DataFrame(np.random.rand(len(dti), 5), index=dti)
        assert len(df.loc['1Q01']) == 90
    def test_slice_month(self):
        dti = DatetimeIndex(freq='D', start=datetime(2005, 1, 1), periods=500)
        s = Series(np.arange(len(dti)), index=dti)
        assert len(s['2005-11']) == 30
        df = DataFrame(np.random.rand(len(dti), 5), index=dti)
        assert len(df.loc['2005-11']) == 30
        tm.assert_series_equal(s['2005-11'], s['11-2005'])
    def test_partial_slice(self):
        rng = DatetimeIndex(freq='D', start=datetime(2005, 1, 1), periods=500)
        s = Series(np.arange(len(rng)), index=rng)
        result = s['2005-05':'2006-02']
        expected = s['20050501':'20060228']
        tm.assert_series_equal(result, expected)
        result = s['2005-05':]
        expected = s['20050501':]
        tm.assert_series_equal(result, expected)
        result = s[:'2006-02']
        expected = s[:'20060228']
        tm.assert_series_equal(result, expected)
        result = s['2005-1-1']
        assert result == s.iloc[0]
        pytest.raises(Exception, s.__getitem__, '2004-12-31')
    def test_partial_slice_daily(self):
        rng = DatetimeIndex(freq='H', start=datetime(2005, 1, 31), periods=500)
        s = Series(np.arange(len(rng)), index=rng)
        result = s['2005-1-31']
        tm.assert_series_equal(result, s.iloc[:24])
        pytest.raises(Exception, s.__getitem__, '2004-12-31 00')
    def test_partial_slice_hourly(self):
        rng = DatetimeIndex(freq='T', start=datetime(2005, 1, 1, 20, 0, 0),
                            periods=500)
        s = Series(np.arange(len(rng)), index=rng)
        result = s['2005-1-1']
        tm.assert_series_equal(result, s.iloc[:60 * 4])
        result = s['2005-1-1 20']
        tm.assert_series_equal(result, s.iloc[:60])
        assert s['2005-1-1 20:00'] == s.iloc[0]
        pytest.raises(Exception, s.__getitem__, '2004-12-31 00:15')
    def test_partial_slice_minutely(self):
        rng = DatetimeIndex(freq='S', start=datetime(2005, 1, 1, 23, 59, 0),
                            periods=500)
        s = Series(np.arange(len(rng)), index=rng)
        result = s['2005-1-1 23:59']
        tm.assert_series_equal(result, s.iloc[:60])
        result = s['2005-1-1']
        tm.assert_series_equal(result, s.iloc[:60])
        assert s[Timestamp('2005-1-1 23:59:00')] == s.iloc[0]
        pytest.raises(Exception, s.__getitem__, '2004-12-31 00:00:00')
    def test_partial_slice_second_precision(self):
        rng = DatetimeIndex(start=datetime(2005, 1, 1, 0, 0, 59,
                                           microsecond=999990),
                            periods=20, freq='US')
        s = Series(np.arange(20), rng)
        tm.assert_series_equal(s['2005-1-1 00:00'], s.iloc[:10])
        tm.assert_series_equal(s['2005-1-1 00:00:59'], s.iloc[:10])
        tm.assert_series_equal(s['2005-1-1 00:01'], s.iloc[10:])
        tm.assert_series_equal(s['2005-1-1 00:01:00'], s.iloc[10:])
        assert s[Timestamp('2005-1-1 00:00:59.999990')] == s.iloc[0]
        with pytest.raises(KeyError, match='2005-1-1 00:00:00'):
            s['2005-1-1 00:00:00']
    def test_partial_slicing_dataframe(self):
        # GH14856
        # Test various combinations of string slicing resolution vs.
        # index resolution
        # - If string resolution is less precise than index resolution,
        # string is considered a slice
        # - If string resolution is equal to or more precise than index
        # resolution, string is considered an exact match
        formats = ['%Y', '%Y-%m', '%Y-%m-%d', '%Y-%m-%d %H',
                   '%Y-%m-%d %H:%M', '%Y-%m-%d %H:%M:%S']
        resolutions = ['year', 'month', 'day', 'hour', 'minute', 'second']
        for rnum, resolution in enumerate(resolutions[2:], 2):
            # we check only 'day', 'hour', 'minute' and 'second'
            unit = Timedelta("1 " + resolution)
            middate = datetime(2012, 1, 1, 0, 0, 0)
            index = DatetimeIndex([middate - unit,
                                   middate, middate + unit])
            values = [1, 2, 3]
            df = DataFrame({'a': values}, index, dtype=np.int64)
            assert df.index.resolution == resolution
            # Timestamp with the same resolution as index
            # Should be exact match for Series (return scalar)
            # and raise KeyError for Frame
            for timestamp, expected in zip(index, values):
                ts_string = timestamp.strftime(formats[rnum])
                # make ts_string as precise as index
                result = df['a'][ts_string]
                assert isinstance(result, np.int64)
                assert result == expected
                pytest.raises(KeyError, df.__getitem__, ts_string)
            # Timestamp with resolution less precise than index
            for fmt in formats[:rnum]:
                for element, theslice in [[0, slice(None, 1)],
                                          [1, slice(1, None)]]:
                    ts_string = index[element].strftime(fmt)
                    # Series should return slice
                    result = df['a'][ts_string]
                    expected = df['a'][theslice]
                    tm.assert_series_equal(result, expected)
                    # Frame should return slice as well
                    result = df[ts_string]
                    expected = df[theslice]
                    tm.assert_frame_equal(result, expected)
            # Timestamp with resolution more precise than index
            # Compatible with existing key
            # Should return scalar for Series
            # and raise KeyError for Frame
            for fmt in formats[rnum + 1:]:
                ts_string = index[1].strftime(fmt)
                result = df['a'][ts_string]
                assert isinstance(result, np.int64)
                assert result == 2
                pytest.raises(KeyError, df.__getitem__, ts_string)
            # Not compatible with existing key
            # Should raise KeyError
            for fmt, res in list(zip(formats, resolutions))[rnum + 1:]:
                ts = index[1] + Timedelta("1 " + res)
                ts_string = ts.strftime(fmt)
                pytest.raises(KeyError, df['a'].__getitem__, ts_string)
                pytest.raises(KeyError, df.__getitem__, ts_string)
    def test_partial_slicing_with_multiindex(self):
        # GH 4758
        # partial string indexing with a multi-index buggy
        df = DataFrame({'ACCOUNT': ["ACCT1", "ACCT1", "ACCT1", "ACCT2"],
                        'TICKER': ["ABC", "MNP", "XYZ", "XYZ"],
                        'val': [1, 2, 3, 4]},
                       index=date_range("2013-06-19 09:30:00",
                                        periods=4, freq='5T'))
        df_multi = df.set_index(['ACCOUNT', 'TICKER'], append=True)
        expected = DataFrame([
            [1]
        ], index=Index(['ABC'], name='TICKER'), columns=['val'])
        result = df_multi.loc[('2013-06-19 09:30:00', 'ACCT1')]
        tm.assert_frame_equal(result, expected)
        expected = df_multi.loc[
            (pd.Timestamp('2013-06-19 09:30:00', tz=None), 'ACCT1', 'ABC')]
        result = df_multi.loc[('2013-06-19 09:30:00', 'ACCT1', 'ABC')]
        tm.assert_series_equal(result, expected)
        # this is an IndexingError as we don't do partial string selection on
        # multi-levels.
        def f():
            df_multi.loc[('2013-06-19', 'ACCT1', 'ABC')]
        pytest.raises(IndexingError, f)
        # GH 4294
        # partial slice on a series mi
        s = pd.DataFrame(np.random.rand(1000, 1000), index=pd.date_range(
            '2000-1-1', periods=1000)).stack()
        s2 = s[:-1].copy()
        expected = s2['2000-1-4']
        result = s2[pd.Timestamp('2000-1-4')]
        tm.assert_series_equal(result, expected)
        result = s[pd.Timestamp('2000-1-4')]
        expected = s['2000-1-4']
        tm.assert_series_equal(result, expected)
        df2 = pd.DataFrame(s)
        expected = df2.xs('2000-1-4')
        result = df2.loc[pd.Timestamp('2000-1-4')]
        tm.assert_frame_equal(result, expected)
    def test_partial_slice_doesnt_require_monotonicity(self):
        # For historical reasons.
        s = pd.Series(np.arange(10), pd.date_range('2014-01-01', periods=10))
        nonmonotonic = s[[3, 5, 4]]
        expected = nonmonotonic.iloc[:0]
        timestamp = pd.Timestamp('2014-01-10')
        # String slicing succeeds (empty result); Timestamp slicing raises.
        tm.assert_series_equal(nonmonotonic['2014-01-10':], expected)
        with pytest.raises(KeyError,
                           match=r"Timestamp\('2014-01-10 00:00:00'\)"):
            nonmonotonic[timestamp:]
        tm.assert_series_equal(nonmonotonic.loc['2014-01-10':], expected)
        with pytest.raises(KeyError,
                           match=r"Timestamp\('2014-01-10 00:00:00'\)"):
            nonmonotonic.loc[timestamp:]
    def test_loc_datetime_length_one(self):
        # GH16071
        df = pd.DataFrame(columns=['1'],
                          index=pd.date_range('2016-10-01T00:00:00',
                                              '2016-10-01T23:59:59'))
        result = df.loc[datetime(2016, 10, 1):]
        tm.assert_frame_equal(result, df)
        result = df.loc['2016-10-01T00:00:00':]
        tm.assert_frame_equal(result, df)
    @pytest.mark.parametrize('datetimelike', [
        Timestamp('20130101'), datetime(2013, 1, 1),
        np.datetime64('2013-01-01T00:00', 'ns')])
    @pytest.mark.parametrize('op,expected', [
        (op.lt, [True, False, False, False]),
        (op.le, [True, True, False, False]),
        (op.eq, [False, True, False, False]),
        (op.gt, [False, False, False, True])])
    def test_selection_by_datetimelike(self, datetimelike, op, expected):
        # GH issue #17965, test for ability to compare datetime64[ns] columns
        # to datetimelike
        df = DataFrame({'A': [pd.Timestamp('20120101'),
                              pd.Timestamp('20130101'),
                              np.nan, pd.Timestamp('20130103')]})
        result = op(df.A, datetimelike)
        expected = Series(expected, name='A')
        tm.assert_series_equal(result, expected)
| bsd-3-clause |
blutooth/gp-svi | examples/data_mnist.py | 4 | 1590 | from __future__ import absolute_import
from __future__ import print_function
from future.standard_library import install_aliases
install_aliases()
import os
import gzip
import struct
import array
import numpy as np
from urllib.request import urlretrieve
def download(url, filename):
    """Fetch *url* into ``data/<filename>``, skipping already-present files.

    The ``data`` directory is created on first use.  BUGFIX: the previous
    ``os.path.exists`` / ``os.makedirs`` pair raced with concurrent callers
    (TOCTOU): a second process could create the directory between the check
    and the creation, making ``makedirs`` raise OSError.  We now attempt the
    creation unconditionally and only re-raise when the directory genuinely
    could not be created (works on both Python 2 and 3, which this file
    targets via ``future``).
    """
    try:
        os.makedirs('data')
    except OSError:
        # Ignore "already exists"; re-raise real failures (permissions, ...).
        if not os.path.isdir('data'):
            raise
    out_file = os.path.join('data', filename)
    if not os.path.isfile(out_file):
        urlretrieve(url, out_file)
def mnist():
    """Download (if necessary) and load the MNIST dataset.

    Returns a 4-tuple ``(train_images, train_labels, test_images,
    test_labels)``: images as uint8 arrays of shape (n, rows, cols),
    labels as uint8 vectors.
    """
    base_url = 'http://yann.lecun.com/exdb/mnist/'

    def read_label_file(path):
        # IDX1 format: 8-byte big-endian header (magic, count), then one
        # unsigned byte per label.
        with gzip.open(path, 'rb') as fh:
            struct.unpack(">II", fh.read(8))
            return np.array(array.array("B", fh.read()), dtype=np.uint8)

    def read_image_file(path):
        # IDX3 format: 16-byte big-endian header (magic, count, rows, cols),
        # then row-major unsigned-byte pixel data.
        with gzip.open(path, 'rb') as fh:
            _, count, rows, cols = struct.unpack(">IIII", fh.read(16))
            pixels = np.array(array.array("B", fh.read()), dtype=np.uint8)
            return pixels.reshape(count, rows, cols)

    archives = ('train-images-idx3-ubyte.gz',
                'train-labels-idx1-ubyte.gz',
                't10k-images-idx3-ubyte.gz',
                't10k-labels-idx1-ubyte.gz')
    for archive in archives:
        download(base_url + archive, archive)

    return (read_image_file('data/train-images-idx3-ubyte.gz'),
            read_label_file('data/train-labels-idx1-ubyte.gz'),
            read_image_file('data/t10k-images-idx3-ubyte.gz'),
            read_label_file('data/t10k-labels-idx1-ubyte.gz'))
| mit |
ppiotr/Bibedit-some-refactoring | modules/websession/lib/inveniogc.py | 4 | 21858 | ## -*- mode: python; coding: utf-8; -*-
##
## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Invenio garbage collector.
"""
__revision__ = "$Id$"
import datetime
import os
import subprocess
import sys
import time
try:
from invenio.dbquery import run_sql
from invenio.config import CFG_LOGDIR, CFG_TMPDIR, CFG_CACHEDIR, \
CFG_WEBSEARCH_RSS_TTL, \
CFG_WEBSESSION_NOT_CONFIRMED_EMAIL_ADDRESS_EXPIRE_IN_DAYS
from invenio.bibtask import task_init, task_set_option, task_get_option, \
write_message, write_messages
from invenio.access_control_mailcookie import mail_cookie_gc
from invenio.bibdocfile import BibDoc
from invenio.bibsched import gc_tasks
except ImportError, e:
print "Error: %s" % (e, )
sys.exit(1)
# configure variables
# Maximum number of ids packed into a single SQL "IN (...)" clause.
CFG_MYSQL_ARGUMENTLIST_SIZE = 100
# After how many days to remove obsolete log/err files
CFG_MAX_ATIME_RM_LOG = 28
# After how many days to zip obsolete log/err files
CFG_MAX_ATIME_ZIP_LOG = 7
# After how many days to remove obsolete bibreformat fmt xml files
CFG_MAX_ATIME_RM_FMT = 28
# After how many days to zip obsolete bibreformat fmt xml files
CFG_MAX_ATIME_ZIP_FMT = 7
# After how many days to remove obsolete bibharvest fmt xml files
CFG_MAX_ATIME_RM_OAI = 28
# After how many days to zip obsolete bibharvest fmt xml files
CFG_MAX_ATIME_ZIP_OAI = 7
# After how many days to remove deleted bibdocs
CFG_DELETED_BIBDOC_MAXLIFE = 365*10
# After how many days to remove old cached webjournal files
CFG_WEBJOURNAL_TTL = 7
def gc_exec_command(command):
    """Execute a shell *command*, logging its stderr and stdout.

    BUGFIX/modernization: os.popen3() is deprecated and reading its streams
    sequentially can deadlock once a pipe buffer fills (stderr was read to
    completion before stdout).  subprocess.Popen.communicate() drains both
    pipes concurrently and is available on both Python 2.6+ and 3.
    """
    write_message(' %s' % command, verbose=9)
    process = subprocess.Popen(command, shell=True,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    output, errors = process.communicate()
    # Preserve the original logging order: errors first, then output.
    write_messages(errors)
    write_messages(output)
def clean_logs():
    """ Clean the logs from obsolete files.

    Uses GNU find: ``-size 0c`` removes empty files immediately, while
    ``-atime +N`` selects files not accessed for more than N days (the
    CFG_MAX_ATIME_* thresholds defined at module level).
    """
    write_message("""CLEANING OF LOG FILES STARTED""")
    write_message("- deleting/gzipping bibsched empty/old err/log "
                  "BibSched files")
    # '-v' propagates verbosity to rm/gzip when the task runs verbosely.
    vstr = task_get_option('verbose') > 1 and '-v' or ''
    gc_exec_command('find %s -name "bibsched_task_*"'
                    ' -size 0c -exec rm %s -f {} \;' \
                    % (CFG_LOGDIR, vstr))
    gc_exec_command('find %s -name "bibsched_task_*"'
                    ' -atime +%s -exec rm %s -f {} \;' \
                    % (CFG_LOGDIR, CFG_MAX_ATIME_RM_LOG, vstr))
    gc_exec_command('find %s -name "bibsched_task_*"'
                    ' -atime +%s -exec gzip %s -9 {} \;' \
                    % (CFG_LOGDIR, CFG_MAX_ATIME_ZIP_LOG, vstr))
    write_message("- deleting/gzipping temporary empty/old "
                  "BibReformat xml files")
    gc_exec_command('find %s -name "rec_fmt_*"'
                    ' -size 0c -exec rm %s -f {} \;' \
                    % (CFG_TMPDIR, vstr))
    gc_exec_command('find %s -name "rec_fmt_*"'
                    ' -atime +%s -exec rm %s -f {} \;' \
                    % (CFG_TMPDIR, CFG_MAX_ATIME_RM_FMT, vstr))
    gc_exec_command('find %s -name "rec_fmt_*"'
                    ' -atime +%s -exec gzip %s -9 {} \;' \
                    % (CFG_TMPDIR, CFG_MAX_ATIME_ZIP_FMT, vstr))
    write_message("- deleting/gzipping temporary old "
                  "BibHarvest xml files")
    gc_exec_command('find %s -name "bibharvestadmin.*"'
                    ' -exec rm %s -f {} \;' \
                    % (CFG_TMPDIR, vstr))
    gc_exec_command('find %s -name "bibconvertrun.*"'
                    ' -exec rm %s -f {} \;' \
                    % (CFG_TMPDIR, vstr))
    gc_exec_command('find %s -name "oaiharvest*"'
                    ' -atime +%s -exec gzip %s -9 {} \;' \
                    % (CFG_TMPDIR, CFG_MAX_ATIME_ZIP_OAI, vstr))
    gc_exec_command('find %s -name "oaiharvest*"'
                    ' -atime +%s -exec rm %s -f {} \;' \
                    % (CFG_TMPDIR, CFG_MAX_ATIME_RM_OAI, vstr))
    gc_exec_command('find %s -name "oai_archive*"'
                    ' -atime +%s -exec rm %s -f {} \;' \
                    % (CFG_TMPDIR, CFG_MAX_ATIME_RM_OAI, vstr))
    write_message("""CLEANING OF LOG FILES FINISHED""")
def clean_cache():
    """Clean the cache for expired and old files.

    Two passes: RSS cache entries older than CFG_WEBSEARCH_RSS_TTL minutes,
    then webjournal cache entries older than CFG_WEBJOURNAL_TTL days.
    Ages are judged by each file's mtime.
    """
    write_message("""CLEANING OF OLD CACHED RSS REQUEST STARTED""")
    rss_cache_dir = "%s/rss/" % CFG_CACHEDIR
    try:
        filenames = os.listdir(rss_cache_dir)
    except OSError:
        # Cache directory may not exist yet; nothing to clean.
        filenames = []
    count = 0
    for filename in filenames:
        filename = os.path.join(rss_cache_dir, filename)
        last_update_time = datetime.datetime.fromtimestamp(os.stat(os.path.abspath(filename)).st_mtime)
        if not (datetime.datetime.now() < last_update_time + datetime.timedelta(minutes=CFG_WEBSEARCH_RSS_TTL)):
            try:
                os.remove(filename)
                count += 1
            except OSError, e:
                # File may have vanished concurrently; log and continue.
                write_message("Error: %s" % e)
    write_message("""%s rss cache file pruned out of %s.""" % (count, len(filenames)))
    write_message("""CLEANING OF OLD CACHED RSS REQUEST FINISHED""")
    write_message("""CLEANING OF OLD CACHED WEBJOURNAL FILES STARTED""")
    webjournal_cache_dir = "%s/webjournal/" % CFG_CACHEDIR
    try:
        filenames = os.listdir(webjournal_cache_dir)
    except OSError:
        filenames = []
    count = 0
    for filename in filenames:
        filename = os.path.join(webjournal_cache_dir, filename)
        last_update_time = datetime.datetime.fromtimestamp(os.stat(os.path.abspath(filename)).st_mtime)
        if not (datetime.datetime.now() < last_update_time + datetime.timedelta(days=CFG_WEBJOURNAL_TTL)):
            try:
                os.remove(filename)
                count += 1
            except OSError, e:
                write_message("Error: %s" % e)
    write_message("""%s webjournal cache file pruned out of %s.""" % (count, len(filenames)))
    write_message("""CLEANING OF OLD CACHED WEBJOURNAL FILES FINISHED""")
def clean_bibxxx():
    """
    Clean unreferenced bibliographic values from bibXXx tables.
    This is useful to prettify browse results, as it removes
    old, no longer used values.

    WARNING: this function must be run only when no bibupload is
    running and/or sleeping.
    """
    write_message("""CLEANING OF UNREFERENCED bibXXx VALUES STARTED""")
    # Table names are generated locally from the loop index (bib00x..bib99x),
    # so interpolating them into the SQL string is safe.
    for xx in range(0, 100):
        bibxxx = 'bib%02dx' % xx
        bibrec_bibxxx = 'bibrec_bib%02dx' % xx
        if task_get_option('verbose') >= 9:
            # Count first (only at high verbosity) so the number of rows
            # about to be deleted can be reported after the DELETE.
            num_unref_values = run_sql("""SELECT COUNT(*) FROM %(bibxxx)s
                LEFT JOIN %(bibrec_bibxxx)s
                ON %(bibxxx)s.id=%(bibrec_bibxxx)s.id_bibxxx
                WHERE %(bibrec_bibxxx)s.id_bibrec IS NULL""" % \
                {'bibxxx': bibxxx,
                 'bibrec_bibxxx': bibrec_bibxxx,})[0][0]
        run_sql("""DELETE %(bibxxx)s FROM %(bibxxx)s
                 LEFT JOIN %(bibrec_bibxxx)s
                 ON %(bibxxx)s.id=%(bibrec_bibxxx)s.id_bibxxx
                 WHERE %(bibrec_bibxxx)s.id_bibrec IS NULL""" % \
                {'bibxxx': bibxxx,
                 'bibrec_bibxxx': bibrec_bibxxx,})
        if task_get_option('verbose') >= 9:
            write_message(""" - %d unreferenced %s values cleaned""" % \
                          (num_unref_values, bibxxx))
    write_message("""CLEANING OF UNREFERENCED bibXXx VALUES FINISHED""")
def clean_documents():
    """Delete all the bibdocs that have been set as deleted and have not been
    modified since CFG_DELETED_BIBDOC_MAXLIFE days. Returns the number of
    bibdocs involved."""
    write_message("""CLEANING OF OBSOLETED DELETED DOCUMENTS STARTED""")
    write_message("select id from bibdoc where status='DELETED' and NOW()>ADDTIME(modification_date, '%s 0:0:0')" % CFG_DELETED_BIBDOC_MAXLIFE, verbose=9)
    records = run_sql("select id from bibdoc where status='DELETED' and NOW()>ADDTIME(modification_date, '%s 0:0:0')" % CFG_DELETED_BIBDOC_MAXLIFE)
    for record in records:
        # Expunge removes the document's files from disk before the row
        # itself is deleted from the database.
        bibdoc = BibDoc(record[0])
        bibdoc.expunge()
        write_message("DELETE FROM bibdoc WHERE id=%i" % int(record[0]), verbose=9)
        run_sql("DELETE FROM bibdoc WHERE id=%s", (record[0], ))
    write_message("""%s obsoleted deleted documents cleaned""" % len(records))
    write_message("""CLEANING OF OBSOLETED DELETED DOCUMENTS FINISHED""")
    return len(records)
def guest_user_garbage_collector():
    """Session Garbage Collector.

    Program flow/tasks:
    1: delete expired sessions
    1b: delete guest users without session
    2: delete queries not attached to any user
    3: delete baskets not attached to any user
    4: delete alerts not attached to any user
    5: delete expired mailcookies
    5b: delete expired not confirmed email address
    6: delete expired roles memberships

    verbose - level of program output:
    0 - nothing, 1 - default, 9 - max/debug.
    """
    # dictionary used to keep track of number of deleted entries
    delcount = {'session': 0,
                'user': 0,
                'user_query': 0,
                'query': 0,
                'bskBASKET': 0,
                'user_bskBASKET': 0,
                'bskREC': 0,
                'bskRECORDCOMMENT': 0,
                'bskEXTREC': 0,
                'bskEXTFMT': 0,
                'user_query_basket': 0,
                'mail_cookie': 0,
                'email_addresses': 0,
                'role_membership' : 0}
    write_message("CLEANING OF GUEST SESSIONS STARTED")
    # 1 - DELETE EXPIRED SESSIONS
    write_message("- deleting expired sessions")
    timelimit = time.time()
    write_message(" DELETE FROM session WHERE"
                  " session_expiry < %d \n" % (timelimit, ), verbose=9)
    delcount['session'] += run_sql("DELETE FROM session WHERE"
                                   " session_expiry < %s """ % (timelimit, ))
    # 1b - DELETE GUEST USERS WITHOUT SESSION
    write_message("- deleting guest users without session")
    # get uids
    write_message(""" SELECT u.id\n FROM user AS u LEFT JOIN session AS s\n ON u.id = s.uid\n WHERE s.uid IS NULL AND u.email = ''""", verbose=9)
    result = run_sql("""SELECT u.id
                     FROM user AS u LEFT JOIN session AS s
                     ON u.id = s.uid
                     WHERE s.uid IS NULL AND u.email = ''""")
    write_message(result, verbose=9)
    if result:
        # work on slices of result list in case of big result
        for i in range(0, len(result), CFG_MYSQL_ARGUMENTLIST_SIZE):
            # create string of uids
            uidstr = ''
            for (id_user, ) in result[i:i+CFG_MYSQL_ARGUMENTLIST_SIZE]:
                if uidstr: uidstr += ','
                uidstr += "%s" % (id_user, )
            # delete users
            write_message(" DELETE FROM user WHERE"
                          " id IN (TRAVERSE LAST RESULT) AND email = '' \n", verbose=9)
            delcount['user'] += run_sql("DELETE FROM user WHERE"
                                        " id IN (%s) AND email = ''" % (uidstr, ))
    # 2 - DELETE QUERIES NOT ATTACHED TO ANY USER
    # first step, delete from user_query
    write_message("- deleting user_queries referencing"
                  " non-existent users")
    # find user_queries referencing non-existent users
    write_message(" SELECT DISTINCT uq.id_user\n"
                  " FROM user_query AS uq LEFT JOIN user AS u\n"
                  " ON uq.id_user = u.id\n WHERE u.id IS NULL", verbose=9)
    result = run_sql("""SELECT DISTINCT uq.id_user
                     FROM user_query AS uq LEFT JOIN user AS u
                     ON uq.id_user = u.id
                     WHERE u.id IS NULL""")
    write_message(result, verbose=9)
    # delete in user_query one by one
    write_message(" DELETE FROM user_query WHERE"
                  " id_user = 'TRAVERSE LAST RESULT' \n", verbose=9)
    for (id_user, ) in result:
        delcount['user_query'] += run_sql("""DELETE FROM user_query
            WHERE id_user = %s""" % (id_user, ))
    # delete the actual queries
    write_message("- deleting queries not attached to any user")
    # select queries that must be deleted
    write_message(""" SELECT DISTINCT q.id\n FROM query AS q LEFT JOIN user_query AS uq\n ON uq.id_query = q.id\n WHERE uq.id_query IS NULL AND\n q.type <> 'p' """, verbose=9)
    result = run_sql("""SELECT DISTINCT q.id
                     FROM query AS q LEFT JOIN user_query AS uq
                     ON uq.id_query = q.id
                     WHERE uq.id_query IS NULL AND
                     q.type <> 'p'""")
    write_message(result, verbose=9)
    # delete queries one by one
    write_message(""" DELETE FROM query WHERE id = 'TRAVERSE LAST RESULT \n""", verbose=9)
    for (id_user, ) in result:
        delcount['query'] += run_sql("""DELETE FROM query WHERE id = %s""", (id_user, ))
    # 3 - DELETE BASKETS NOT OWNED BY ANY USER
    write_message("- deleting baskets not owned by any user")
    # select basket ids
    write_message(""" SELECT ub.id_bskBASKET\n FROM user_bskBASKET AS ub LEFT JOIN user AS u\n ON u.id = ub.id_user\n WHERE u.id IS NULL""", verbose=9)
    try:
        result = run_sql("""SELECT ub.id_bskBASKET
                         FROM user_bskBASKET AS ub LEFT JOIN user AS u
                         ON u.id = ub.id_user
                         WHERE u.id IS NULL""")
    except:
        # basket tables may be absent in minimal installations
        result = []
    write_message(result, verbose=9)
    # delete from user_basket and basket one by one
    write_message(""" DELETE FROM user_bskBASKET WHERE id_bskBASKET = 'TRAVERSE LAST RESULT' """, verbose=9)
    write_message(""" DELETE FROM bskBASKET WHERE id = 'TRAVERSE LAST RESULT' """, verbose=9)
    write_message(""" DELETE FROM bskREC WHERE id_bskBASKET = 'TRAVERSE LAST RESULT'""", verbose=9)
    write_message(""" DELETE FROM bskRECORDCOMMENT WHERE id_bskBASKET = 'TRAVERSE LAST RESULT' \n""", verbose=9)
    for (id_basket, ) in result:
        delcount['user_bskBASKET'] += run_sql("""DELETE FROM user_bskBASKET WHERE id_bskBASKET = %s""", (id_basket, ))
        delcount['bskBASKET'] += run_sql("""DELETE FROM bskBASKET WHERE id = %s""", (id_basket, ))
        delcount['bskREC'] += run_sql("""DELETE FROM bskREC WHERE id_bskBASKET = %s""", (id_basket, ))
        delcount['bskRECORDCOMMENT'] += run_sql("""DELETE FROM bskRECORDCOMMENT WHERE id_bskBASKET = %s""", (id_basket, ))
    write_message(""" SELECT DISTINCT ext.id, rec.id_bibrec_or_bskEXTREC FROM bskEXTREC AS ext \nLEFT JOIN bskREC AS rec ON ext.id=-rec.id_bibrec_or_bskEXTREC WHERE id_bibrec_or_bskEXTREC is NULL""", verbose=9)
    try:
        result = run_sql("""SELECT DISTINCT ext.id FROM bskEXTREC AS ext
                         LEFT JOIN bskREC AS rec ON ext.id=-rec.id_bibrec_or_bskEXTREC
                         WHERE id_bibrec_or_bskEXTREC is NULL""")
    except:
        result = []
    write_message(result, verbose=9)
    write_message(""" DELETE FROM bskEXTREC WHERE id = 'TRAVERSE LAST RESULT' """, verbose=9)
    write_message(""" DELETE FROM bskEXTFMT WHERE id_bskEXTREC = 'TRAVERSE LAST RESULT' \n""", verbose=9)
    for (id_basket, ) in result:
        delcount['bskEXTREC'] += run_sql("""DELETE FROM bskEXTREC WHERE id=%s""", (id_basket,))
        delcount['bskEXTFMT'] += run_sql("""DELETE FROM bskEXTFMT WHERE id_bskEXTREC=%s""", (id_basket,))
    # 4 - DELETE ALERTS NOT OWNED BY ANY USER
    write_message('- deleting alerts not owned by any user')
    # select user ids in uqb that reference non-existent users
    write_message("""SELECT DISTINCT uqb.id_user FROM user_query_basket AS uqb LEFT JOIN user AS u ON uqb.id_user = u.id WHERE u.id IS NULL""", verbose=9)
    result = run_sql("""SELECT DISTINCT uqb.id_user FROM user_query_basket AS uqb LEFT JOIN user AS u ON uqb.id_user = u.id WHERE u.id IS NULL""")
    write_message(result, verbose=9)
    # delete all these entries
    for (id_user, ) in result:
        write_message("""DELETE FROM user_query_basket WHERE id_user = 'TRAVERSE LAST RESULT """, verbose=9)
        delcount['user_query_basket'] += run_sql("""DELETE FROM user_query_basket WHERE id_user = %s """, (id_user, ))
    # 5 - delete expired mailcookies
    write_message("""mail_cookie_gc()""", verbose=9)
    delcount['mail_cookie'] = mail_cookie_gc()
    ## 5b - delete expired not confirmed email address
    write_message("""DELETE FROM user WHERE note='2' AND NOW()>ADDTIME(last_login, '%s 0:0:0')""" % CFG_WEBSESSION_NOT_CONFIRMED_EMAIL_ADDRESS_EXPIRE_IN_DAYS, verbose=9)
    delcount['email_addresses'] = run_sql("""DELETE FROM user WHERE note='2' AND NOW()>ADDTIME(last_login, '%s 0:0:0')""" % CFG_WEBSESSION_NOT_CONFIRMED_EMAIL_ADDRESS_EXPIRE_IN_DAYS)
    # 6 - delete expired roles memberships
    write_message("""DELETE FROM user_accROLE WHERE expiration<NOW()""", verbose=9)
    delcount['role_membership'] = run_sql("""DELETE FROM user_accROLE WHERE expiration<NOW()""")
    # print STATISTICS
    write_message("""- statistics about deleted data: """)
    write_message(""" %7s sessions.""" % (delcount['session'], ))
    write_message(""" %7s users.""" % (delcount['user'], ))
    write_message(""" %7s user_queries.""" % (delcount['user_query'], ))
    write_message(""" %7s queries.""" % (delcount['query'], ))
    write_message(""" %7s baskets.""" % (delcount['bskBASKET'], ))
    write_message(""" %7s user_baskets.""" % (delcount['user_bskBASKET'], ))
    write_message(""" %7s basket_records.""" % (delcount['bskREC'], ))
    write_message(""" %7s basket_external_records.""" % (delcount['bskEXTREC'], ))
    write_message(""" %7s basket_external_formats.""" % (delcount['bskEXTFMT'], ))
    write_message(""" %7s basket_comments.""" % (delcount['bskRECORDCOMMENT'], ))
    write_message(""" %7s user_query_baskets.""" % (delcount['user_query_basket'], ))
    write_message(""" %7s mail_cookies.""" % (delcount['mail_cookie'], ))
    write_message(""" %7s non confirmed email addresses.""" % delcount['email_addresses'])
    write_message(""" %7s role_memberships.""" % (delcount['role_membership'], ))
    write_message("""CLEANING OF GUEST SESSIONS FINISHED""")
def main():
    """Main that construct all the bibtask.

    Registers option parsing, validation and the core callback with the
    BibSched task framework; actual work happens in task_run_core().
    """
    task_init(authorization_action='runinveniogc',
              authorization_msg="InvenioGC Task Submission",
              help_specific_usage=" -l, --logs\t\tClean old logs and temporary files.\n" \
                  " -g, --guests\t\tClean expired guest user related information. [default action]\n" \
                  " -b, --bibxxx\t\tClean unreferenced bibliographic values in bibXXx tables.\n" \
                  " -c, --cache\t\tClean cache by removing old files.\n" \
                  " -d, --documents\tClean deleted documents and revisions older than %s days.\n" \
                  " -T, --tasks\t\tClean the BibSched queue removing/archiving old DONE tasks.\n" \
                  " -a, --all\t\tClean all of the above.\n" % CFG_DELETED_BIBDOC_MAXLIFE,
              version=__revision__,
              specific_params=("lgbdacT", ["logs", "guests", "bibxxx", "documents", "all", "cache", "tasks"]),
              task_submit_elaborate_specific_parameter_fnc=task_submit_elaborate_specific_parameter,
              task_submit_check_options_fnc=task_submit_check_options,
              task_run_fnc=task_run_core)
def task_submit_check_options():
    """Ensure at least one action is selected; default to guest cleaning.

    BUGFIX: the fallback previously set the option 'sessions', which no
    code reads -- task_run_core() checks 'guests' (documented as the
    default action in the task help), so invoking the task without flags
    silently did nothing.
    """
    if not task_get_option('logs') and \
       not task_get_option('guests') and \
       not task_get_option('bibxxx') and \
       not task_get_option('documents') and \
       not task_get_option('cache') and \
       not task_get_option('tasks'):
        task_set_option('guests', True)
    return True
def task_submit_elaborate_specific_parameter(key, value, opts, args):
    """ Given the string key it checks it's meaning, eventually using the
    value. Usually it fills some key in the options dict.
    It must return True if it has elaborated the key, False, if it doesn't
    know that key.
    eg:
    if key in ['-n', '--number']:
        self.options['number'] = value
        return True
    return False

    BUGFIX: the tasks option is registered as uppercase '-T' in main()
    (specific_params "lgbdacT", help text "-T, --tasks"), but this handler
    used to match lowercase '-t', so '-T' was rejected as unknown.
    """
    if key in ('-l', '--logs'):
        task_set_option('logs', True)
        return True
    elif key in ('-g', '--guests'):
        task_set_option('guests', True)
        return True
    elif key in ('-b', '--bibxxx'):
        task_set_option('bibxxx', True)
        return True
    elif key in ('-d', '--documents'):
        task_set_option('documents', True)
        return True
    elif key in ('-c', '--cache'):
        task_set_option('cache', True)
        return True
    elif key in ('-T', '--tasks'):
        task_set_option('tasks', True)
        return True
    elif key in ('-a', '--all'):
        # -a switches every individual action on.
        task_set_option('logs', True)
        task_set_option('guests', True)
        task_set_option('bibxxx', True)
        task_set_option('documents', True)
        task_set_option('cache', True)
        task_set_option('tasks', True)
        return True
    return False
def task_run_core():
    """ Reimplement to add the body of the task.

    Dispatches to each cleaning routine whose option was selected at
    submission time; always reports success to BibSched.
    """
    if task_get_option('guests'):
        guest_user_garbage_collector()
    if task_get_option('logs'):
        clean_logs()
    if task_get_option('bibxxx'):
        clean_bibxxx()
    if task_get_option('documents'):
        clean_documents()
    if task_get_option('cache'):
        clean_cache()
    if task_get_option('tasks'):
        gc_tasks()
    return True
# Script entry point (the module is also importable without side effects).
if __name__ == '__main__':
    main()
| gpl-2.0 |
sajeeshcs/nested_quota_latest | nova/scheduler/filters/trusted_filter.py | 6 | 9890 | # Copyright (c) 2012 Intel, Inc.
# Copyright (c) 2011-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Filter to add support for Trusted Computing Pools.
Filter that only schedules tasks on a host if the integrity (trust)
of that host matches the trust requested in the ``extra_specs`` for the
flavor. The ``extra_specs`` will contain a key/value pair where the
key is ``trust``. The value of this pair (``trusted``/``untrusted``) must
match the integrity of that host (obtained from the Attestation
service) before the task can be scheduled on that host.
Note that the parameters to control access to the Attestation Service
are in the ``nova.conf`` file in a separate ``trust`` section. For example,
the config file will look something like:
[DEFAULT]
verbose=True
...
[trust]
server=attester.mynetwork.com
Details on the specific parameters can be found in the file
``trust_attest.py``.
Details on setting up and using an Attestation Service can be found at
the Open Attestation project at:
https://github.com/OpenAttestation/OpenAttestation
"""
from oslo.config import cfg
from oslo.serialization import jsonutils
from oslo.utils import timeutils
import requests
from nova import context
from nova import db
from nova.openstack.common import log as logging
from nova.scheduler import filters
LOG = logging.getLogger(__name__)

# Options for reaching the external Open Attestation (OAT) service.
# They are registered below under the [trusted_computing] config group.
trusted_opts = [
    cfg.StrOpt('attestation_server',
               help='Attestation server HTTP'),
    cfg.StrOpt('attestation_server_ca_file',
               help='Attestation server Cert file for Identity verification'),
    cfg.StrOpt('attestation_port',
               default='8443',
               help='Attestation server port'),
    cfg.StrOpt('attestation_api_url',
               default='/OpenAttestationWebServices/V1.0',
               help='Attestation web API URL'),
    cfg.StrOpt('attestation_auth_blob',
               help='Attestation authorization blob - must change'),
    cfg.IntOpt('attestation_auth_timeout',
               default=60,
               help='Attestation status cache valid period length'),
    cfg.BoolOpt('attestation_insecure_ssl',
                default=False,
                help='Disable SSL cert verification for Attestation service')
]

CONF = cfg.CONF
trust_group = cfg.OptGroup(name='trusted_computing', title='Trust parameters')
CONF.register_group(trust_group)
CONF.register_opts(trusted_opts, group=trust_group)
class AttestationService(object):
    """Access wrapper for the Open Attestation (OAT) service.

    Issues HTTPS requests to the attestation server to retrieve host
    integrity (trust) reports.
    """

    def __init__(self):
        self.api_url = CONF.trusted_computing.attestation_api_url
        self.host = CONF.trusted_computing.attestation_server
        self.port = CONF.trusted_computing.attestation_port
        self.auth_blob = CONF.trusted_computing.attestation_auth_blob
        self.key_file = None
        self.cert_file = None
        self.ca_file = CONF.trusted_computing.attestation_server_ca_file
        self.request_count = 100
        # requests' ``verify`` accepts a boolean (enable/disable cert
        # verification) or a CA bundle path.  Honor the insecure option
        # first: the previous expression
        # ``not insecure and ca_file or True`` always evaluated to True
        # when attestation_insecure_ssl was set (``and`` binds tighter
        # than ``or``), so verification was never actually disabled.
        if CONF.trusted_computing.attestation_insecure_ssl:
            self.verify = False
        else:
            # Use the CA file when provided, otherwise default trust.
            self.verify = self.ca_file or True
        self.cert = (self.cert_file, self.key_file)

    def _do_request(self, method, action_url, body, headers):
        """Issue one HTTPS request to the attestation server.

        :returns: tuple of (status, payload); payload is the decoded
                  JSON body, the raw text if not JSON, or None on
                  failure.  On a connection-level error the status is
                  the IOError class (sentinel preserved for callers).
        """
        action_url = "https://%s:%s%s/%s" % (self.host, self.port,
                                             self.api_url, action_url)
        try:
            res = requests.request(method, action_url, data=body,
                                   headers=headers, cert=self.cert,
                                   verify=self.verify)
            status_code = res.status_code
            if status_code in (requests.codes.OK,
                               requests.codes.CREATED,
                               requests.codes.ACCEPTED,
                               requests.codes.NO_CONTENT):
                try:
                    return requests.codes.OK, jsonutils.loads(res.text)
                except (TypeError, ValueError):
                    # Body is not JSON; hand back the raw text.
                    return requests.codes.OK, res.text
            return status_code, None
        except requests.exceptions.RequestException:
            # Callers treat a None payload as "no attestation data".
            return IOError, None

    def _request(self, cmd, subcmd, hosts):
        """POST the host list *hosts* to the attestation server."""
        body = {'count': len(hosts), 'hosts': hosts}
        cooked = jsonutils.dumps(body)
        headers = {'content-type': 'application/json',
                   'Accept': 'application/json'}
        if self.auth_blob:
            headers['x-auth-blob'] = self.auth_blob
        status, res = self._do_request(cmd, subcmd, cooked, headers)
        return status, res

    def do_attestation(self, hosts):
        """Attests compute nodes through OAT service.

        :param hosts: hosts list to be attested
        :returns: dictionary for trust level and validate time
        """
        result = None
        status, data = self._request("POST", "PollHosts", hosts)
        if data is not None:
            result = data.get('hosts')
        return result
class ComputeAttestationCache(object):
    """Cache for compute node attestation.

    Caches each compute node's trust level for a while; when an entry
    is out of date the whole cache is refreshed by polling the OAT
    service.

    The OAT service may cache results as well.  The OAT service's cache
    valid time should be set shorter than this filter's cache valid
    time (attestation_auth_timeout).
    """

    def __init__(self):
        self.attestservice = AttestationService()
        # host name -> {'trust_lvl': str, 'vtime': datetime}
        self.compute_nodes = {}
        admin = context.get_admin_context()

        # Fetch compute node list to initialize the compute_nodes,
        # so that we don't need poll OAT service one by one for each
        # host in the first round that scheduler invokes us.
        computes = db.compute_node_get_all(admin)
        for compute in computes:
            host = compute['hypervisor_hostname']
            self._init_cache_entry(host)

    def _cache_valid(self, host):
        # An entry is valid while its vtime is newer than the configured
        # attestation_auth_timeout window.
        cachevalid = False
        if host in self.compute_nodes:
            node_stats = self.compute_nodes.get(host)
            if not timeutils.is_older_than(
                    node_stats['vtime'],
                    CONF.trusted_computing.attestation_auth_timeout):
                cachevalid = True
        return cachevalid

    def _init_cache_entry(self, host):
        # Seed with an 'unknown' level and an epoch vtime so the entry
        # is immediately considered stale and gets refreshed.
        self.compute_nodes[host] = {
            'trust_lvl': 'unknown',
            'vtime': timeutils.normalize_time(
                timeutils.parse_isotime("1970-01-01T00:00:00Z"))}

    def _invalidate_caches(self):
        for host in self.compute_nodes:
            self._init_cache_entry(host)

    def _update_cache_entry(self, state):
        entry = {}
        host = state['host_name']
        entry['trust_lvl'] = state['trust_lvl']
        try:
            # Normalize as naive object to interoperate with utcnow().
            entry['vtime'] = timeutils.normalize_time(
                timeutils.parse_isotime(state['vtime']))
        except ValueError:
            try:
                # Mt. Wilson does not necessarily return an ISO8601 formatted
                # `vtime`, so we should try to parse it as a string formatted
                # datetime.
                vtime = timeutils.parse_strtime(state['vtime'], fmt="%c")
                entry['vtime'] = timeutils.normalize_time(vtime)
            except ValueError:
                # Mark the system as un-trusted if get invalid vtime.
                entry['trust_lvl'] = 'unknown'
                entry['vtime'] = timeutils.utcnow()
        self.compute_nodes[host] = entry

    def _update_cache(self):
        # Refresh every known host in a single OAT poll.
        self._invalidate_caches()
        states = self.attestservice.do_attestation(self.compute_nodes.keys())
        if states is None:
            return
        for state in states:
            self._update_cache_entry(state)

    def get_host_attestation(self, host):
        """Check host's trust level."""
        if host not in self.compute_nodes:
            self._init_cache_entry(host)
        if not self._cache_valid(host):
            self._update_cache()
        level = self.compute_nodes.get(host).get('trust_lvl')
        return level
class ComputeAttestation(object):
    """Facade over the attestation cache for host trust checks."""

    def __init__(self):
        self.caches = ComputeAttestationCache()

    def is_trusted(self, host, trust):
        """Return True when *host*'s attested level equals *trust*."""
        attested_level = self.caches.get_host_attestation(host)
        return attested_level == trust
class TrustedFilter(filters.BaseHostFilter):
    """Trusted filter to support Trusted Compute Pools."""

    # The hosts the instances are running on doesn't change within a request
    run_filter_once_per_request = True

    def __init__(self):
        self.compute_attestation = ComputeAttestation()

    def host_passes(self, host_state, filter_properties):
        """Pass the host unless the flavor requires a trust level the
        host does not have."""
        instance_type = filter_properties.get('instance_type', {})
        required_trust = instance_type.get('extra_specs', {}).get(
            'trust:trusted_host')
        if not required_trust:
            # Flavor does not request a trust level; any host qualifies.
            return True
        return self.compute_attestation.is_trusted(host_state.nodename,
                                                   required_trust)
| apache-2.0 |
glennbarrett/TekDefense-Automater | utilities.py | 16 | 18133 | """
The utilities.py module handles all utility functions that Automater
requires.
Class(es):
Parser -- Class to handle standard argparse functions with
a class-based structure.
IPWrapper -- Class to provide IP Address formatting and parsing.
Function(s):
No global exportable functions are defined.
Exception(s):
No exceptions exported.
"""
import argparse
import re
import os
class Parser(object):
    """
    Parser represents an argparse object representing the
    program's input parameters.

    The has*() methods report whether an option was supplied on the
    command line; each matching property returns the option's value, or
    None when the option was not supplied.

    Instance variable(s):
    _parser
    args
    """

    def __init__(self, desc):
        """
        Class constructor. Adds the argparse info into the instance variables.

        Argument(s):
        desc -- ArgumentParser description.

        Return value(s):
        Nothing is returned from this Method.
        """
        # Adding arguments
        self._parser = argparse.ArgumentParser(description=desc)
        self._parser.add_argument('target', help='List one IP Address (CIDR or dash notation accepted), URL or Hash to query or pass the filename of a file containing IP Address info, URL or Hash to query each separated by a newline.')
        self._parser.add_argument('-o', '--output', help='This option will output the results to a file.')
        self._parser.add_argument('-f', '--cef', help='This option will output the results to a CEF formatted file.')
        self._parser.add_argument('-w', '--web', help='This option will output the results to an HTML file.')
        self._parser.add_argument('-c', '--csv', help='This option will output the results to a CSV file.')
        self._parser.add_argument('-d', '--delay', type=int, default=2, help='This will change the delay to the inputted seconds. Default is 2.')
        self._parser.add_argument('-s', '--source', help='This option will only run the target against a specific source engine to pull associated domains. Options are defined in the name attribute of the site element in the XML configuration file')
        self._parser.add_argument('--p', '--post', action="store_true", help='This option tells the program to post information to sites that allow posting. By default the program will NOT post to sites that require a post.')
        self._parser.add_argument('--proxy', help='This option will set a proxy to use (eg. proxy.example.com:8080)')
        self._parser.add_argument('-a', '--useragent', default='Automater/2.1', help='This option allows the user to set the user-agent seen by web servers being utilized. By default, the user-agent is set to Automater/version')
        self.args = self._parser.parse_args()

    def hasCEFOutFile(self):
        """Return True if CEF-formatted output (-f) was requested."""
        return bool(self.args.cef)

    @property
    def CEFOutFile(self):
        """Name of the CEF output file, or None if not requested."""
        return self.args.cef if self.hasCEFOutFile() else None

    def hasHTMLOutFile(self):
        """Return True if HTML output (-w) was requested."""
        return bool(self.args.web)

    @property
    def HTMLOutFile(self):
        """Name of the HTML output file, or None if not requested."""
        return self.args.web if self.hasHTMLOutFile() else None

    def hasTextOutFile(self):
        """Return True if text-file output (-o) was requested."""
        return bool(self.args.output)

    @property
    def TextOutFile(self):
        """Name of the text output file, or None if not requested."""
        return self.args.output if self.hasTextOutFile() else None

    def hasCSVOutSet(self):
        """Return True if comma-delimited output (-c) was requested."""
        return bool(self.args.csv)

    @property
    def CSVOutFile(self):
        """Name of the CSV output file, or None if not requested."""
        return self.args.csv if self.hasCSVOutSet() else None

    @property
    def Delay(self):
        """Seconds to delay between each site query (default 2)."""
        return self.args.delay

    def hasProxy(self):
        """Return True if a proxy (--proxy) was requested."""
        return bool(self.args.proxy)

    @property
    def Proxy(self):
        """Proxy in server:port format, or None if not requested."""
        return self.args.proxy if self.hasProxy() else None

    def print_help(self):
        """Print standard argparse usage information for the program."""
        self._parser.print_help()

    def hasTarget(self):
        """Return True if a target argument was provided."""
        return self.args.target is not None

    def hasNoTarget(self):
        """Return True if no target argument was provided."""
        return not self.hasTarget()

    @property
    def Target(self):
        """Target info (or filename), or None if no target provided."""
        return None if self.hasNoTarget() else self.args.target

    def hasInputFile(self):
        """Return True if the target names an existing regular file."""
        return os.path.exists(self.args.target) and \
            os.path.isfile(self.args.target)

    @property
    def Source(self):
        """Source engine name (-s), or None if not provided."""
        return self.args.source if self.hasSource() else None

    def hasSource(self):
        """Return True if a specific source engine (-s) was requested."""
        return bool(self.args.source)

    def hasPost(self):
        """Return True if posting to sites (--p) was requested."""
        return bool(self.args.p)

    @property
    def InputFile(self):
        """Target filename when the target is an existing file on disk,
        otherwise None."""
        # Check hasTarget() first so a missing target never reaches the
        # os.path calls in hasInputFile().
        if self.hasTarget() and self.hasInputFile():
            return self.Target
        return None

    @property
    def UserAgent(self):
        """User-agent string presented to web servers being queried."""
        return self.args.useragent
class IPWrapper(object):
    """
    IPWrapper provides Class Methods to enable checks
    against strings to determine if the string is an IP Address
    or an IP Address in CIDR or dash notation.

    Public Method(s):
    (Class Method) isIPorIPList
    (Class Method) getTarget

    Instance variable(s):
    No instance variables.
    """

    @classmethod
    def isIPorIPList(cls, target):
        """
        Checks if an input string is an IP Address or if it is
        an IP Address in CIDR or dash notation.
        Returns True if IP Address or CIDR/dash. Returns False if not.

        Argument(s):
        target -- string target provided as the first argument to the program.

        Return value(s):
        Boolean.

        Restriction(s):
        This Method is tagged as a Class Method
        """
        # NOTE: the original checks used
        # ``findall(...) is not None or len(...) != 0`` which is always
        # True because re.findall() always returns a list (never None);
        # every target, including plain domain names, was classified as
        # an IP.  The conditions below require an actual regex match.
        # IP Address range using prefix syntax
        if re.findall(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\/\d{1,2}', target):
            return True
        # IP Address range using dash syntax
        if re.findall(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}-\d{1,3}', target):
            return True
        # Plain IP Address
        if re.findall(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', target):
            return True
        return False

    @classmethod
    def getTarget(cls, target):
        """
        Determines whether the target provided is an IP Address or
        an IP Address in CIDR or dash notation. Then creates a list
        that can be utilized as targets by the program.
        Returns a list of string IP Addresses that can be used as targets.

        Argument(s):
        target -- string target provided as the first argument to the program.

        Return value(s):
        Iterator of string(s) representing IP Addresses.

        Restriction(s):
        This Method is tagged as a Class Method
        """
        if re.findall(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\/\d{1,2}', target):
            # CIDR notation: only the last octet is expanded, so the
            # prefix is clamped to the /24../32 range; /31 is treated
            # as a single host, matching the prior intent.
            # NOTE: the previous bytearray-based implementation crashed
            # on /24 (assigning 256 to a byte) and produced incorrect
            # ranges via XOR arithmetic; this computes the real block.
            base = target[:target.rindex(".") + 1]
            last_octet = int(target[target.rindex(".") + 1:target.index("/")])
            prefix = int(target[target.index("/") + 1:])
            if prefix < 24:
                prefix = 24
            if prefix > 32 or prefix == 31:
                prefix = 32
            size = 2 ** (32 - prefix)
            # Align the last octet down to its network boundary.
            start = last_octet - (last_octet % size)
            for octet in range(start, min(start + size, 256)):
                yield base + str(octet)
        # IP Address range seperated with a dash
        elif re.findall(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}-\d{1,3}', target):
            base = target[:target.rindex(".") + 1]
            first = int(target[target.rindex(".") + 1:target.index("-")])
            last = int(target[target.index("-") + 1:])
            if first < last:
                # Inclusive range over the last octet.
                for octet in range(first, last + 1):
                    yield base + str(octet)
            else:
                # Degenerate/backwards range: yield the first octet only.
                yield base + str(first)
        # it's just an IP address at this point
        else:
            yield target
| mit |
shadmazumder/infer | infer/bin/jwlib.py | 3 | 1925 | # Copyright (c) 2009 - 2013 Monoidics ltd.
# Copyright (c) 2013 - present Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import argparse
import os
import tempfile
import subprocess
import utils
# javac options recognized by Infer's wrapper; anything else is passed
# through to javac untouched (parse_known_args).
parser = argparse.ArgumentParser()

current_directory = os.getcwd()

parser.add_argument('-version', action='store_true')
# -cp/-classpath are synonyms; both stored under 'classpath'.
parser.add_argument('-cp', '-classpath', type=str, dest='classpath')
parser.add_argument('-bootclasspath', type=str)
# Class output directory (-d); defaults to the current working directory.
parser.add_argument('-d', dest='classes_out', default=current_directory)
class CompilerCall:
    """Wrapper around a single javac invocation.

    Re-runs javac with ``-verbose -g`` and captures the verbose output
    into a temporary file (``self.verbose_out``) for later analysis.
    """

    def __init__(self, arguments):
        # Raw argv destined for javac; recognized options are also
        # parsed into self.args (unknown options are ignored).
        self.original_arguments = arguments
        self.args, _ = parser.parse_known_args(arguments)
        self.verbose_out = None

    def run(self):
        """Run javac; return its exit status (os.EX_OK on success)."""
        if self.args.version:
            # 'javac -version': just proxy the call, nothing to capture.
            return subprocess.call(['javac'] + self.original_arguments)
        else:
            javac_cmd = ['javac', '-verbose', '-g'] + self.original_arguments
            # Force English output so downstream parsing is
            # locale-independent.
            javac_cmd.append('-J-Duser.language=en')

            with tempfile.NamedTemporaryFile(
                    mode='w',
                    suffix='.out',
                    prefix='javac_',
                    delete=False) as file_out:
                self.verbose_out = file_out.name

                try:
                    subprocess.check_call(javac_cmd, stderr=file_out)
                except subprocess.CalledProcessError:
                    error_msg = 'Javac compilation error with: \n\n{}\n'
                    failing_cmd = [arg for arg in javac_cmd
                                   if arg != '-verbose']
                    utils.error(error_msg.format(failing_cmd))
                    # Re-run without -verbose so the user sees the raw
                    # javac error output on the console.
                    subprocess.check_call(failing_cmd)

        return os.EX_OK
| bsd-3-clause |
pch957/python-bts-v0.9 | scripts/bts_delegate_watch.py | 1 | 5880 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from bts.api import BTS
import json
import logging
import logging.handlers
import os
import time
import smtplib
class DelegateWatch(object):
    """Watch BTS delegate block production and e-mail delegates when
    they miss a block."""

    def __init__(self):
        # Stay this many blocks behind the chain head so we never act
        # on blocks that could still be reorganized.
        self.confirm = 2
        self.load_config()
        self.init_bts()
        self.setup_log()
        self.init_watch()
        self.init_contact()

    def load_config(self):
        """Load watcher and BTS client settings from ~/.python-bts."""
        config_file = os.getenv("HOME")+"/.python-bts/delegate_watch.json"
        fd_config = open(config_file)
        self.config = json.load(fd_config)["delegate_watch"]
        fd_config.close()
        config_file = os.getenv("HOME")+"/.python-bts/bts_client.json"
        fd_config = open(config_file)
        self.config_bts = json.load(fd_config)[self.config["bts_client"]]
        fd_config.close()

    def init_bts(self):
        """Connect to the BTS client RPC and cache chain parameters."""
        config_bts = self.config_bts
        self.bts_client = BTS(config_bts["user"], config_bts["password"],
                              config_bts["host"], config_bts["port"])
        self.delegate_num = int(self.bts_client.chain_info["delegate_num"])
        # Block interval in seconds.
        self.period = float(self.bts_client.chain_info["block_interval"])

    def setup_log(self):
        # Setting up Logger
        self.logger = logging.getLogger('bts')
        self.logger.setLevel(logging.INFO)
        formatter = logging.Formatter(
            '%(asctime)s[%(levelname)s]: %(message)s')
        fh = logging.handlers.RotatingFileHandler(
            "/tmp/bts_delegate_watch.log")
        fh.setFormatter(formatter)
        self.logger.addHandler(fh)

    def init_watch(self):
        """Snapshot chain height/round position and the active delegate
        list, then derive the offset that maps a block height to its
        producing delegate."""
        client_info = self.bts_client.get_info()
        self.height = int(client_info["blockchain_head_block_num"])
        self.round_left = int(client_info["blockchain_blocks_left_in_round"])
        self.active_delegates = self.bts_client.list_active_delegates()
        self.active_offset = self.height
        # Walk the active list to find which delegate produced the
        # current head; every earlier entry shifts the offset by one.
        for delegate in self.active_delegates:
            last_block_num = int(
                delegate["delegate_info"]["last_block_num_produced"])
            if last_block_num == self.height:
                break
            self.active_offset -= 1

    def init_contact(self):
        """Build the delegate-name -> [e-mail] map from enabled
        contact entries in the config."""
        self.contact = {}
        mail_list = self.config["contact"]
        for mail in mail_list:
            if mail_list[mail]["enable"] == 0:
                continue
            for delegate in mail_list[mail]["delegates"]:
                if delegate not in self.contact:
                    self.contact[delegate] = [mail]
                else:
                    self.contact[delegate].append(mail)

    def process_missed_block(self, height):
        """Log and notify the delegate that should have produced block
        *height*; shift the offset past the missed slot."""
        index = (height-self.active_offset) % self.delegate_num
        account = self.active_delegates[index]["name"]
        print("missed", height, account)
        self.logger.info("missed %s", account)
        self.active_offset -= 1
        self.notify(account, height)
        return account

    def get_block_delegate(self, height):
        """Resolve and log the delegate that produced block *height*."""
        index = (height-self.active_offset) % self.delegate_num
        account = self.active_delegates[index]["name"]
        print("......", height, account)
        self.logger.info("%d %s", height, account)
        return account

    def check_missed_block(self, height):
        """Scan blocks from the last seen height up to *height*;
        timestamp gaps larger than one block interval indicate missed
        production slots."""
        limit = height - self.height + 1
        list_blocks = self.bts_client.list_blocks(height, limit)
        last_timestamp = -1
        for block in reversed(list_blocks):
            # Only the seconds field of the timestamp is compared.
            timestamp = int(block["timestamp"][-2:])
            block_num = int(block["block_num"])
            if last_timestamp != -1:
                period = (timestamp - last_timestamp + 60) % 60
                # Each extra block interval in the gap is one miss.
                while period != self.period:
                    period -= self.period
                    self.process_missed_block(block_num)
            self.get_block_delegate(block_num)
            last_timestamp = timestamp

    def notify(self, account, height):
        """E-mail every registered contact of *account* about the
        missed block via the local SMTP server."""
        if account not in self.contact:
            print("no contact")
            return
        print ("sent notify mail")
        sender = self.config["sender"]
        msg_from = "From: %s <%s>\n" % (self.config["name"], sender)
        msg_to = ""
        for receiver in self.contact[account]:
            msg_to = msg_to+"To: <%s>\n" % receiver
        msg_subject = "Subject: missed block attention for %s\n" % account
        msg_content = "you have missed block %d\n" % height
        message = msg_from+msg_to+msg_subject+msg_content
        print(message)
        smtpObj = smtplib.SMTP('localhost')
        smtpObj.sendmail(sender, self.contact[account], message)

    def execute(self):
        """Main loop: poll the client once per block interval and check
        for missed blocks, refreshing the delegate list each round."""
        while True:
            try:
                client_info = self.bts_client.get_info()
                height = int(
                    client_info["blockchain_head_block_num"]) - self.confirm
                if height > self.height:
                    round_left = (int(
                        client_info["blockchain_blocks_left_in_round"]
                    ) + self.confirm - 1) % self.delegate_num + 1
                    if round_left > self.round_left:
                        # A new round started; first finish processing
                        # the remainder of the previous round, then
                        # refresh the active delegate list.
                        if self.round_left != 1:
                            round_left = 1
                            height = self.height+self.round_left-1
                        else:
                            self.active_delegates = \
                                self.bts_client.list_active_delegates()
                    self.check_missed_block(height)
                    self.height = height
                    self.round_left = round_left
            except Exception as e:
                self.logger.exception(e)
            # Sleep until just after the next block-interval boundary.
            now = time.time()
            nexttime = int(now/self.period+1)*self.period - now
            time.sleep(nexttime+1)
# Script entry point: construct the watcher and run its polling loop.
if __name__ == '__main__':
    delegate_watch = DelegateWatch()
    delegate_watch.execute()
| mit |
CyanogenMod/android_kernel_acer_t30 | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys

# Make the Perf-Trace-Util library (shipped with perf) importable.
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
	'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from Core import *
from Util import syscall_name

usage = "perf script -s syscall-counts-by-pid.py [comm]\n";

# Optional single argument: a pid (numeric) or a comm name to filter on.
for_comm = None
for_pid = None

if len(sys.argv) > 2:
	sys.exit(usage)

if len(sys.argv) > 1:
	try:
		for_pid = int(sys.argv[1])
	except:
		for_comm = sys.argv[1]

# syscalls[comm][pid][syscall_id] -> count (autodict nests on demand).
syscalls = autodict()
def trace_begin():
	# Called by perf before event processing starts.
	print "Press control+C to stop and show the summary"
def trace_end():
	# Called by perf after the last event; emit the summary table.
	print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	id, args):
	# Per-event handler: count one sys_enter, honoring the comm/pid
	# filter set at startup.
	if (for_comm and common_comm != for_comm) or \
	   (for_pid and common_pid != for_pid ):
		return
	try:
		syscalls[common_comm][common_pid][id] += 1
	except TypeError:
		# First hit for this comm/pid/id: the autodict leaf is not an
		# int yet, so initialize the counter.
		syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
    # Emit a per-comm/pid table of syscall counts, sorted by count
    # (descending) within each pid.
    if for_comm is not None:
	    print "\nsyscall events for %s:\n\n" % (for_comm),
    else:
	    print "\nsyscall events by comm/pid:\n\n",

    print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
                                 "----------"),

    comm_keys = syscalls.keys()
    for comm in comm_keys:
	    pid_keys = syscalls[comm].keys()
	    for pid in pid_keys:
		    print "\n%s [%d]\n" % (comm, pid),
		    id_keys = syscalls[comm][pid].keys()
		    for id, val in sorted(syscalls[comm][pid].iteritems(), \
			      key = lambda(k, v): (v, k), reverse = True):
			    print " %-38s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
atosatto/ansible | test/units/playbook/test_taggable.py | 293 | 4452 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.playbook.taggable import Taggable
from units.mock.loader import DictDataLoader
class TaggableTestObj(Taggable):
    """Minimal Taggable implementation used as the fixture under test."""

    def __init__(self):
        # Taggable requires a loader; an empty dict-backed one suffices.
        self._loader = DictDataLoader({})
        self.tags = []
class TestTaggable(unittest.TestCase):
def assert_evaluate_equal(self, test_value, tags, only_tags, skip_tags):
taggable_obj = TaggableTestObj()
taggable_obj.tags = tags
evaluate = taggable_obj.evaluate_tags(only_tags, skip_tags, {})
self.assertEqual(test_value, evaluate)
def test_evaluate_tags_tag_in_only_tags(self):
self.assert_evaluate_equal(True, ['tag1', 'tag2'], ['tag1'], [])
def test_evaluate_tags_tag_in_skip_tags(self):
self.assert_evaluate_equal(False, ['tag1', 'tag2'], [], ['tag1'])
def test_evaluate_tags_special_always_in_object_tags(self):
self.assert_evaluate_equal(True, ['tag', 'always'], ['random'], [])
def test_evaluate_tags_tag_in_skip_tags_special_always_in_object_tags(self):
self.assert_evaluate_equal(False, ['tag', 'always'], ['random'], ['tag'])
def test_evaluate_tags_special_always_in_skip_tags_and_always_in_tags(self):
self.assert_evaluate_equal(False, ['tag', 'always'], [], ['always'])
def test_evaluate_tags_special_tagged_in_only_tags_and_object_tagged(self):
self.assert_evaluate_equal(True, ['tag'], ['tagged'], [])
def test_evaluate_tags_special_tagged_in_only_tags_and_object_untagged(self):
self.assert_evaluate_equal(False, [], ['tagged'], [])
def test_evaluate_tags_special_tagged_in_skip_tags_and_object_tagged(self):
self.assert_evaluate_equal(False, ['tag'], [], ['tagged'])
def test_evaluate_tags_special_tagged_in_skip_tags_and_object_untagged(self):
self.assert_evaluate_equal(True, [], [], ['tagged'])
def test_evaluate_tags_special_untagged_in_only_tags_and_object_tagged(self):
self.assert_evaluate_equal(False, ['tag'], ['untagged'], [])
def test_evaluate_tags_special_untagged_in_only_tags_and_object_untagged(self):
self.assert_evaluate_equal(True, [], ['untagged'], [])
def test_evaluate_tags_special_untagged_in_skip_tags_and_object_tagged(self):
self.assert_evaluate_equal(True, ['tag'], [], ['untagged'])
def test_evaluate_tags_special_untagged_in_skip_tags_and_object_untagged(self):
self.assert_evaluate_equal(False, [], [], ['untagged'])
def test_evaluate_tags_special_all_in_only_tags(self):
self.assert_evaluate_equal(True, ['tag'], ['all'], ['untagged'])
def test_evaluate_tags_special_all_in_skip_tags(self):
    # 'all' in skip_tags skips every object, even one matched by only_tags.
    self.assert_evaluate_equal(False, ['tag'], ['tag'], ['all'])
def test_evaluate_tags_special_all_in_only_tags_and_special_all_in_skip_tags(self):
    # When 'all' appears on both sides, skip_tags wins.
    self.assert_evaluate_equal(False, ['tag'], ['all'], ['all'])
def test_evaluate_tags_special_all_in_skip_tags_and_always_in_object_tags(self):
    # An object tagged 'always' survives a blanket skip of 'all'.
    self.assert_evaluate_equal(True, ['tag', 'always'], [], ['all'])
def test_evaluate_tags_special_all_in_skip_tags_and_special_always_in_skip_tags_and_always_in_object_tags(self):
    # Explicitly skipping 'always' in addition to 'all' skips even an
    # always-tagged object.
    self.assert_evaluate_equal(False, ['tag', 'always'], [], ['all', 'always'])
def test_evaluate_tags_accepts_lists(self):
    # The object's tags may be supplied as a list.
    self.assert_evaluate_equal(True, ['tag1', 'tag2'], ['tag2'], [])
def test_evaluate_tags_accepts_strings(self):
    # The object's tags may also be a single comma-separated string.
    self.assert_evaluate_equal(True, 'tag1,tag2', ['tag2'], [])
def test_evaluate_tags_with_repeated_tags(self):
    # Duplicate tags on the object do not change the skip decision.
    self.assert_evaluate_equal(False, ['tag', 'tag'], [], ['tag'])
| gpl-3.0 |
rooshilp/CMPUT410W15-project | testenv/lib/python2.7/site-packages/django/conf/locale/de_CH/formats.py | 82 | 1451 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
from __future__ import unicode_literals
# Display formats (Django date-format syntax).
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j. F Y H:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i:s'
FIRST_DAY_OF_WEEK = 1  # Monday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%d.%m.%Y', '%d.%m.%y',  # '25.10.2006', '25.10.06'
    # Month-name forms are intentionally disabled:
    # '%d. %B %Y', '%d. %b. %Y',  # '25. October 2006', '25. Oct. 2006'
)
DATETIME_INPUT_FORMATS = (
    '%d.%m.%Y %H:%M:%S',     # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M:%S.%f',  # '25.10.2006 14:30:59.000200'
    '%d.%m.%Y %H:%M',        # '25.10.2006 14:30'
    '%d.%m.%Y',              # '25.10.2006'
)

# these are the separators for non-monetary numbers. For monetary numbers,
# the DECIMAL_SEPARATOR is a . (decimal point) and the THOUSAND_SEPARATOR is a
# ' (single quote).
# For details, please refer to http://www.bk.admin.ch/dokumentation/sprachen/04915/05016/index.html?lang=de
# (in German) and the documentation
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0'  # non-breaking space
NUMBER_GROUPING = 3
| gpl-2.0 |
up1/robotframework-selenium2library | src/Selenium2Library/keywords/_logging.py | 65 | 1206 | import os
import sys
from robot.variables import GLOBAL_VARIABLES
from robot.api import logger
from keywordgroup import KeywordGroup
class _LoggingKeywords(KeywordGroup):
    """Internal logging helpers shared by the Selenium2Library keyword groups."""

    # Private

    def _debug(self, message):
        # Forward to Robot Framework's debug-level logger.
        logger.debug(message)

    def _get_log_dir(self):
        # Prefer the directory of the active log file; fall back to the
        # configured output directory when file logging is disabled ('NONE').
        logfile = GLOBAL_VARIABLES['${LOG FILE}']
        if logfile == 'NONE':
            return GLOBAL_VARIABLES['${OUTPUTDIR}']
        return os.path.dirname(logfile)

    def _html(self, message):
        # html=True, also_console=False
        logger.info(message, True, False)

    def _info(self, message):
        logger.info(message)

    def _log(self, message, level='INFO'):
        # Dispatch on the case-insensitive level name; unrecognised levels
        # are silently ignored, matching the original behaviour.
        dispatch = {
            'INFO': self._info,
            'DEBUG': self._debug,
            'WARN': self._warn,
            'HTML': self._html,
        }
        handler = dispatch.get(level.upper())
        if handler is not None:
            handler(message)

    def _log_list(self, items, what='item'):
        # Log a numbered listing preceded by a count summary; returns the
        # items unchanged so callers can chain on the result.
        count = len(items)
        plural = '' if count == 1 else 's'
        lines = ['Altogether %d %s%s.' % (count, what, plural)]
        lines.extend('%d: %s' % (position + 1, item)
                     for position, item in enumerate(items))
        self._info('\n'.join(lines))
        return items

    def _warn(self, message):
        logger.warn(message)
blue-yonder/turbodbc | python/turbodbc/cursor.py | 1 | 15413 | from itertools import islice
from collections import OrderedDict
from turbodbc_intern import make_row_based_result_set, make_parameter_set
from .exceptions import translate_exceptions, InterfaceError, Error
# Error messages raised when an optional compiled extension module
# (NumPy or Apache Arrow support) is not available in this installation.
_NO_NUMPY_SUPPORT_MSG = "This installation of turbodbc does not support NumPy extensions. " \
                        "Please install the `numpy` package. If you have built turbodbc from source, " \
                        "you may also need to reinstall turbodbc to compile the extensions."
_NO_ARROW_SUPPORT_MSG = "This installation of turbodbc does not support Apache Arrow extensions. " \
                        "Please install the `pyarrow` package. If you have built turbodbc from source, " \
                        "you may also need to reinstall turbodbc to compile the extensions."
def _has_numpy_support():
try:
import turbodbc_numpy_support
return True
except ImportError:
return False
def _has_arrow_support():
try:
import turbodbc_arrow_support
return True
except ImportError:
return False
def _make_masked_arrays(result_batch):
from numpy.ma import MaskedArray
from numpy import object_
masked_arrays = []
for data, mask in result_batch:
if isinstance(data, list):
masked_arrays.append(MaskedArray(data=data, mask=mask, dtype=object_))
else:
masked_arrays.append(MaskedArray(data=data, mask=mask))
return masked_arrays
def _assert_numpy_column_preconditions(columns):
from numpy.ma import MaskedArray
from numpy import ndarray
n_columns = len(columns)
for index, column in enumerate(columns, start=1):
if type(column) not in [MaskedArray, ndarray]:
raise InterfaceError("Bad type for column {} of {}. Only numpy.ndarray and numpy.ma.MaskedArrays are supported".format(index, n_columns))
if column.ndim != 1:
raise InterfaceError("Column {} of {} is not one-dimensional".format(index, n_columns))
if not column.flags.c_contiguous:
raise InterfaceError("Column {} of {} is not contiguous".format(index, n_columns))
lengths = [len(column) for column in columns]
all_same_length = all(l == lengths[0] for l in lengths)
if not all_same_length:
raise InterfaceError("All columns must have the same length, got lengths {}".format(lengths))
class Cursor(object):
    """
    This class allows you to send SQL commands and queries to a database and retrieve
    associated result sets.

    Instances wrap a native cursor implementation (``impl``) provided by the
    compiled ``turbodbc_intern`` module and expose the PEP-249 cursor API plus
    turbodbc-specific NumPy/Arrow extensions.
    """

    def __init__(self, impl):
        # impl: native cursor object from turbodbc_intern.
        self.impl = impl
        # Row-based view of the current result set, or None when no query
        # has produced results yet.
        self.result_set = None
        # PEP-249: number of affected/returned rows; -1 means "unknown".
        self.rowcount = -1
        # PEP-249: default batch size for fetchmany().
        self.arraysize = 1

    def __iter__(self):
        # The cursor iterates over the rows of its active result set.
        return self

    def __next__(self):
        element = self.fetchone()
        if element is None:
            # End of result set.
            raise StopIteration
        else:
            return element

    def _assert_valid(self):
        """Raise InterfaceError if the cursor has already been closed."""
        if self.impl is None:
            raise InterfaceError("Cursor already closed")

    def _assert_valid_result_set(self):
        """Raise InterfaceError if no query has produced a result set."""
        if self.result_set is None:
            raise InterfaceError("No active result set")

    @property
    def description(self):
        """
        Retrieve a description of the columns in the current result set

        :return: A tuple of seven elements. Only some elements are meaningful:\n
                 *   Element #0 is the name of the column
                 *   Element #1 is the type code of the column
                 *   Element #6 is true if the column may contain ``NULL`` values

                 Returns ``None`` when there is no active result set.
        """
        if self.result_set:
            info = self.result_set.get_column_info()
            return [(c.name, c.type_code(), None, None, None, None, c.supports_null_values) for c in info]
        else:
            return None

    def _execute(self):
        """Run the prepared statement and (re)initialize the result set state."""
        self.impl.execute()
        self.rowcount = self.impl.get_row_count()
        cpp_result_set = self.impl.get_result_set()
        # Statements without results (e.g. INSERT) yield no result set.
        if cpp_result_set:
            self.result_set = make_row_based_result_set(cpp_result_set)
        else:
            self.result_set = None
        return self

    @translate_exceptions
    def execute(self, sql, parameters=None):
        """
        Execute an SQL command or query

        :param sql: A (unicode) string that contains the SQL command or query. If you would like to
               use parameters, please use a question mark ``?`` at the location where the
               parameter shall be inserted.
        :param parameters: An iterable of parameter values. The number of values must match
               the number of parameters in the SQL string.
        :return: The ``Cursor`` object to allow chaining of operations.
        """
        # Reset rowcount up front so a failed execution does not report
        # stale information from a previous statement.
        self.rowcount = -1
        self._assert_valid()
        self.impl.prepare(sql)
        if parameters:
            buffer = make_parameter_set(self.impl)
            buffer.add_set(parameters)
            buffer.flush()
        return self._execute()

    @translate_exceptions
    def executemany(self, sql, parameters=None):
        """
        Execute an SQL command or query with multiple parameter sets passed in a row-wise fashion.
        This function is part of PEP-249.

        :param sql: A (unicode) string that contains the SQL command or query. If you would like to
               use parameters, please use a question mark ``?`` at the location where the
               parameter shall be inserted.
        :param parameters: An iterable of iterable of parameter values. The outer iterable represents
               separate parameter sets. The inner iterable contains parameter values for a given
               parameter set. The number of values of each set must match the number of parameters
               in the SQL string.
        :return: The ``Cursor`` object to allow chaining of operations.
        """
        self.rowcount = -1
        self._assert_valid()
        self.impl.prepare(sql)

        if parameters:
            buffer = make_parameter_set(self.impl)
            for parameter_set in parameters:
                buffer.add_set(parameter_set)
            buffer.flush()

        return self._execute()

    @translate_exceptions
    def executemanycolumns(self, sql, columns):
        """
        Execute an SQL command or query with multiple parameter sets that are passed in
        a column-wise fashion as opposed to the row-wise parameters in ``executemany()``.
        This function is a turbodbc-specific extension to PEP-249.

        :param sql: A (unicode) string that contains the SQL command or query. If you would like to
               use parameters, please use a question mark ``?`` at the location where the
               parameter shall be inserted.
        :param columns: An iterable of NumPy MaskedArrays (or a ``pyarrow.Table``).
               The arrays represent the columnar parameter data.
        :return: The ``Cursor`` object to allow chaining of operations.
        """
        self.rowcount = -1
        self._assert_valid()
        self.impl.prepare(sql)

        if _has_arrow_support():
            import pyarrow as pa

            def _num_chunks(c):
                if not isinstance(c, pa.ChunkedArray):
                    # pyarrow < 0.15 wrapped the chunked data in Column.data.
                    c = c.data
                return c.num_chunks

            if isinstance(columns, pa.Table):
                from turbodbc_arrow_support import set_arrow_parameters

                for column in columns.itercolumns():
                    if _num_chunks(column) != 1:
                        raise NotImplementedError("Chunked Arrays are "
                                                  "not yet supported")

                set_arrow_parameters(self.impl, columns)
                return self._execute()

        # Workaround to give users a better error message without a need
        # to import pyarrow
        if columns.__class__.__module__.startswith('pyarrow'):
            raise Error(_NO_ARROW_SUPPORT_MSG)

        if not _has_numpy_support():
            raise Error(_NO_NUMPY_SUPPORT_MSG)

        _assert_numpy_column_preconditions(columns)

        from numpy.ma import MaskedArray
        from turbodbc_numpy_support import set_numpy_parameters
        # Decompose each column into (data, mask, dtype-name) triples; plain
        # ndarrays get a scalar False mask (no values are NULL).
        split_arrays = []
        for column in columns:
            if isinstance(column, MaskedArray):
                split_arrays.append((column.data, column.mask, str(column.dtype)))
            else:
                split_arrays.append((column, False, str(column.dtype)))
        set_numpy_parameters(self.impl, split_arrays)
        return self._execute()

    @translate_exceptions
    def fetchone(self):
        """
        Returns a single row of a result set. Requires an active result set on the database
        generated with ``execute()`` or ``executemany()``.

        :return: Returns ``None`` when no more rows are available in the result set
        """
        self._assert_valid_result_set()
        result = self.result_set.fetch_row()
        # The native layer signals exhaustion with an empty row.
        if len(result) == 0:
            return None
        else:
            return result

    @translate_exceptions
    def fetchall(self):
        """
        Fetches a list of all rows in the active result set generated with ``execute()`` or
        ``executemany()``.

        :return: A list of rows
        """
        return [row for row in self]

    @translate_exceptions
    def fetchmany(self, size=None):
        """
        Fetches a batch of rows in the active result set generated with ``execute()`` or
        ``executemany()``.

        :param size: Controls how many rows are returned. The default ``None`` means that
               the value of Cursor.arraysize is used.
        :return: A list of rows
        """
        if size is None:
            size = self.arraysize

        if (size <= 0):
            raise InterfaceError("Invalid arraysize {} for fetchmany()".format(size))

        return [row for row in islice(self, size)]

    def fetchallnumpy(self):
        """
        Fetches all rows in the active result set generated with ``execute()`` or
        ``executemany()``.

        :return: An ``OrderedDict`` of *columns*, where the keys of the dictionary
                 are the column names. The columns are of NumPy's ``MaskedArray``
                 type, where the optimal data type for each result set column is
                 chosen automatically.
        """
        from numpy.ma import concatenate
        # Materialize all batches first (this also validates the result set),
        # then concatenate column-wise.
        batches = list(self._numpy_batch_generator())
        column_names = [description[0] for description in self.description]
        return OrderedDict(zip(column_names, [concatenate(column) for column in zip(*batches)]))

    def fetchnumpybatches(self):
        """
        Returns an iterator over all rows in the active result set generated with ``execute()`` or
        ``executemany()``.

        :return: An iterator you can use to iterate over batches of rows of the result set. Each
                 batch consists of an ``OrderedDict`` of NumPy ``MaskedArray`` instances. See
                 ``fetchallnumpy()`` for details.
        """
        batchgen = self._numpy_batch_generator()
        # NOTE(review): this method is itself a generator, so nothing below
        # runs until first iteration; if no result set is active at that
        # point, self.description is None and the comprehension raises
        # TypeError instead of InterfaceError — confirm whether intended.
        column_names = [description[0] for description in self.description]
        for next_batch in batchgen:
            yield OrderedDict(zip(column_names, next_batch))

    def _numpy_batch_generator(self):
        """Yield result-set batches as lists of NumPy ``MaskedArray`` columns."""
        self._assert_valid_result_set()
        if not _has_numpy_support():
            raise Error(_NO_NUMPY_SUPPORT_MSG)

        from turbodbc_numpy_support import make_numpy_result_set
        numpy_result_set = make_numpy_result_set(self.impl.get_result_set())
        first_run = True
        while True:
            result_batch = _make_masked_arrays(numpy_result_set.fetch_next_batch())
            is_empty_batch = (len(result_batch[0]) == 0)
            if is_empty_batch and not first_run:
                return # Let us return a typed result set at least once
            first_run = False
            yield result_batch

    def fetcharrowbatches(self, strings_as_dictionary=False, adaptive_integers=False):
        """
        Fetches rows in the active result set generated with ``execute()`` or
        ``executemany()`` as an iterable of arrow tables.

        :param strings_as_dictionary: If true, fetch string columns as
               dictionary[string] instead of a plain string column.
        :param adaptive_integers: If true, instead of the integer type returned
               by the database (driver), this produce integer columns with the
               smallest possible integer type in which all values can be
               stored. Be aware that here the type depends on the resulting
               data.
        :return: generator of ``pyarrow.Table``
        """
        self._assert_valid_result_set()
        if _has_arrow_support():
            from turbodbc_arrow_support import make_arrow_result_set
            rs = make_arrow_result_set(
                self.impl.get_result_set(),
                strings_as_dictionary,
                adaptive_integers)
            first_run = True
            while True:
                table = rs.fetch_next_batch()
                is_empty_batch = (len(table) == 0)
                if is_empty_batch and not first_run:
                    return # Let us return a typed result set at least once
                first_run = False
                yield table
        else:
            raise Error(_NO_ARROW_SUPPORT_MSG)

    def fetchallarrow(self, strings_as_dictionary=False, adaptive_integers=False):
        """
        Fetches all rows in the active result set generated with ``execute()`` or
        ``executemany()``.

        :param strings_as_dictionary: If true, fetch string columns as
               dictionary[string] instead of a plain string column.
        :param adaptive_integers: If true, instead of the integer type returned
               by the database (driver), this produce integer columns with the
               smallest possible integer type in which all values can be
               stored. Be aware that here the type depends on the resulting
               data.
        :return: ``pyarrow.Table``
        """
        self._assert_valid_result_set()
        if _has_arrow_support():
            from turbodbc_arrow_support import make_arrow_result_set
            return make_arrow_result_set(
                self.impl.get_result_set(),
                strings_as_dictionary,
                adaptive_integers).fetch_all()
        else:
            raise Error(_NO_ARROW_SUPPORT_MSG)

    def close(self):
        """
        Close the cursor.
        """
        self.result_set = None
        if self.impl is not None:
            # Release native resources; subsequent use raises InterfaceError.
            self.impl._reset()
        self.impl = None

    def setinputsizes(self, sizes):
        """
        Has no effect since turbodbc automatically picks appropriate
        return types and sizes. Method exists since PEP-249 requires it.
        """
        pass

    def setoutputsize(self, size, column=None):
        """
        Has no effect since turbodbc automatically picks appropriate
        input types and sizes. Method exists since PEP-249 requires it.
        """
        pass

    def __enter__(self):
        """
        Conformance to PEP-343
        """
        return self

    def __exit__(self, type, value, traceback):
        """
        Conformance to PEP-343
        """
        return self.close()
| mit |
HeyItsJono/Pythonista | Script_Downloader.uipack.py | 1 | 9068 | # -*- coding: utf-8 -*-
###############################################################################
# This is a self-extracting UI application package for Script_Downloader.
# Run this script once to extract the packaged application.
# The files will be extracted to Script_Downloader.py and Script_Downloader.pyui.
# Make sure that these files do not exist yet.
# To update from an older version, move or delete the old files first.
# After extracting, the application can be found at Script_Downloader.py.
# This bundle can be deleted after extraction.
###############################################################################
# Packaged using PackUI by dgelessus
# https://github.com/dgelessus/pythonista-scripts/blob/master/UI/PackUI.py
###############################################################################
import console, os.path
NAME = "Script_Downloader"
PYFILE = """# coding: utf-8
import ui
import urllib2
import clipboard
from console import hud_alert
import console
import sys
import os
def parse_name(url):
n = len(url)-1
name = ''
try:
assert r'/' in url
except AssertionError:
return url
for char in url:
if url[n] != r'/':
name = url[n] + name
n += -1
else:
break
return name
def parse_extension(name):
n = len(name)-1
extension = ''
try:
assert r'.' in name
except AssertionError:
return extension
for char in name:
if name[n] != r'.':
extension = name[n] + extension
n += -1
else:
break
return extension
def download_tapped(sender):
'@type sender: ui.Button'
console.clear()
urlfield = sender.superview['urlfield']
filenamefield = sender.superview['filenamefield']
extensionfield = sender.superview['extensionfield']
extensioncontrol = sender.superview['extensioncontrol']
if extensioncontrol.selected_index == 0:
extension = '.py'
elif extensioncontrol.selected_index == 1:
extension = '.pyui'
elif extensioncontrol.selected_index == 2:
if extensionfield.text != '':
if not '.' in extensionfield.text:
extension = '.' + extensionfield.text
else:
extension = extensionfield.text
else:
extension = ''
filename = filenamefield.text + extension
filenum = 1
while os.path.isfile(filename) is True:
filename = filenamefield.text + ' ({})'.format(str(filenum)) + extension
filenum += 1
hud_alert('Downloading...')
try:
console.show_activity()
url = urllib2.urlopen(urlfield.text).read()
except (ValueError, urllib2.URLError):
hud_alert('URL not valid', icon = 'error')
sys.exit()
hud_alert("Saving...")
try:
with open(filename, "w") as out_file:
out_file.write(url)
out_file.close()
except IOError:
os.makedirs(os.path.dirname(filename))
with open(filename, "w") as out_file:
out_file.write(url)
out_file.close()
console.hide_activity()
hud_alert("Saved!")
def paste_tapped(sender):
'@type sender: ui.Button'
urlfield = sender.superview['urlfield']
filenamefield = sender.superview['filenamefield']
extensionfield = sender.superview['extensionfield']
extensioncontrol = sender.superview['extensioncontrol']
urlfield.text = unicode(clipboard.get())
name = parse_name(urlfield.text)
extension = parse_extension(name)
name = name[:(len(name) - (len(extension) + 1))]
filenamefield.text = name
if extension == 'py':
extensioncontrol.selected_index = 0
extensionfield.text = ''
elif extension == 'pyui':
extensioncontrol.selected_index = 1
extensionfield.text = ''
else:
extensioncontrol.selected_index = 2
extensionfield.text = extension
def clear_tapped(sender):
'@type sender: ui.Button'
urlfield = sender.superview['urlfield']
filenamefield = sender.superview['filenamefield']
extensionfield = sender.superview['extensionfield']
if sender.name == 'clearurl':
urlfield.text = ''
elif sender.name == 'clearname':
filenamefield.text = ''
elif sender.name == 'clearextension':
extensionfield.text = ''
v = ui.load_view('Script_Downloader')
if ui.get_screen_size()[1] >= 768:
# iPad
v.present('popover')
else:
# iPhone
v.present(orientations=['portrait'])
"""
PYUIFILE = """[{"class":"View","attributes":{"name":"Script Downloader","background_color":"RGBA(1.000000,1.000000,1.000000,1.000000)","tint_color":"RGBA(0.336735,0.663623,0.785714,1.000000)","enabled":true,"border_color":"RGBA(0.000000,0.000000,0.000000,1.000000)","flex":""},"frame":"{{0, 0}, {541, 377}}","nodes":[{"class":"TextField","attributes":{"alignment":"center","autocorrection_type":"no","font_size":17,"border_color":"RGBA(0.000000,0.000000,0.000000,1.000000)","enabled":true,"flex":"","placeholder":"Paste Raw URL Here","text_color":"RGBA(0.000000,0.000000,0.000000,1.000000)","secure":false,"name":"urlfield","border_style":3,"uuid":"5360AD96-456E-4A90-83AE-BB1A6A3E908A","spellchecking_type":"no"},"frame":"{{6, 6}, {434, 83.5}}","nodes":[]},{"class":"SegmentedControl","attributes":{"name":"extensioncontrol","border_color":"RGBA(0.000000,0.000000,0.000000,1.000000)","uuid":"4612E98D-1847-4E28-83AD-60C4938A08BF","enabled":true,"segments":".py|.pyui|Other","flex":"LR"},"frame":"{{7, 189}, {264, 83.5}}","nodes":[]},{"class":"Button","attributes":{"background_color":"RGBA(0.928571,0.928571,0.928571,1.000000)","border_color":"RGBA(0.862245,0.897359,0.928571,1.000000)","font_size":15,"title":"Download","enabled":true,"flex":"","action":"download_tapped","font_bold":false,"name":"Downloadbutton","border_width":1,"uuid":"E21C39A0-F201-412C-A8A2-64EE8A5776F6","corner_radius":1},"frame":"{{7, 280.5}, {528, 88.5}}","nodes":[]},{"class":"TextField","attributes":{"alignment":"center","autocorrection_type":"no","font_size":17,"border_color":"RGBA(0.000000,0.000000,0.000000,1.000000)","enabled":true,"flex":"","placeholder":"Enter filename sans extension (Path optional)","text_color":"RGBA(0.000000,0.000000,0.000000,1.000000)","secure":false,"name":"filenamefield","border_style":3,"uuid":"930ECD80-962E-4E57-BD9B-C8B994516A88","spellchecking_type":"no"},"frame":"{{7, 97.5}, {528, 
83.5}}","nodes":[]},{"class":"TextField","attributes":{"alignment":"center","autocorrection_type":"no","font_size":17,"border_color":"RGBA(0.000000,0.000000,0.000000,1.000000)","enabled":true,"flex":"","placeholder":"\\"Other\\" file extension","text_color":"RGBA(0.000000,0.000000,0.000000,1.000000)","name":"extensionfield","border_style":3,"spellchecking_type":"no","uuid":"D3AABF4D-D6E2-4F9B-B75A-A285635B56ED"},"frame":"{{279, 189}, {256, 83.5}}","nodes":[]},{"class":"Button","attributes":{"background_color":"RGBA(0.928571,0.928571,0.928571,1.000000)","image_name":"ionicons-clipboard-32","border_color":"RGBA(0.862245,0.898061,0.928571,1.000000)","font_size":15,"title":"","enabled":true,"flex":"","action":"paste_tapped","font_bold":false,"name":"clipboard","border_width":1,"uuid":"135B3DD8-0A64-44F7-A818-86F79D9D5ACA","corner_radius":1},"frame":"{{448, 6}, {86, 83.5}}","nodes":[]},{"class":"Button","attributes":{"background_color":"RGBA(1.000000,1.000000,1.000000,1.000000)","image_name":"ionicons-ios7-close-24","border_color":"RGBA(0.000000,0.000000,0.000000,1.000000)","font_size":15,"title":"","enabled":true,"flex":"","tint_color":"RGBA(0.428571,0.428571,0.428571,1.000000)","action":"clear_tapped","font_bold":false,"alpha":0.5000000000000001,"name":"clearurl","uuid":"971A849B-E460-4E09-A049-5BBDE73FD5E6"},"frame":"{{406, 32}, {33, 30}}","nodes":[]},{"class":"Button","attributes":{"background_color":"RGBA(1.000000,1.000000,1.000000,1.000000)","image_name":"ionicons-ios7-close-24","border_color":"RGBA(0.000000,0.000000,0.000000,1.000000)","font_size":15,"title":"","enabled":true,"flex":"","tint_color":"RGBA(0.428571,0.428571,0.428571,1.000000)","action":"clear_tapped","font_bold":false,"alpha":0.5000000000000001,"name":"clearname","uuid":"5FF007B4-4F6B-4613-B801-9E654177E401"},"frame":"{{500.5, 122}, {33.5, 
33}}","nodes":[]},{"class":"Button","attributes":{"background_color":"RGBA(1.000000,1.000000,1.000000,1.000000)","image_name":"ionicons-ios7-close-24","border_color":"RGBA(0.000000,0.000000,0.000000,1.000000)","font_size":15,"title":"","enabled":true,"flex":"","tint_color":"RGBA(0.428571,0.428571,0.428571,1.000000)","action":"clear_tapped","font_bold":false,"alpha":0.5000000000000001,"name":"clearextension","uuid":"DCA9375E-1982-48BC-B466-151C38720FEC"},"frame":"{{500, 214}, {33.5, 32.5}}","nodes":[]}]}]"""
def fix_quotes_out(s):
    """Undo the escaping applied when the payload strings were packed.

    Escaped triple quotes (backslash-quote runs) become plain triple quotes,
    and doubled backslashes collapse back to single backslashes.
    """
    unescaped = s.replace('\\"\\"\\"', '"""')
    return unescaped.replace('\\\\', '\\')
def main():
    """Extract the packaged .py and .pyui files next to this script.

    Refuses to overwrite existing files; the user must move or delete an
    older installation first.
    """
    py_target = NAME + ".py"
    pyui_target = NAME + ".pyui"
    if os.path.exists(py_target):
        console.alert("Failed to Extract", NAME + ".py already exists.")
        return
    if os.path.exists(pyui_target):
        console.alert("Failed to Extract", NAME + ".pyui already exists.")
        return
    # Write both payloads with their packing-time escaping undone.
    with open(py_target, "w") as f:
        f.write(fix_quotes_out(PYFILE))
    with open(pyui_target, "w") as f:
        f.write(fix_quotes_out(PYUIFILE))
    msg = NAME + ".py and " + NAME + ".pyui were successfully extracted!"
    console.alert("Extraction Successful", msg, "OK", hide_cancel_button=True)

if __name__ == "__main__":
    main()
| mit |
albertyw/sublime-settings | Package Control/package_control/commands/existing_packages_command.py | 11 | 2110 | import os
import re
import sublime
from ..package_manager import PackageManager
class ExistingPackagesCommand():

    """
    Allows listing installed packages and their current version
    """

    def __init__(self):
        self.manager = PackageManager()

    def make_package_list(self, action=''):
        """
        Returns a list of installed packages suitable for displaying in the
        quick panel.

        :param action:
            An action to display at the beginning of the third element of the
            list returned for each package

        :return:
            A list of lists, each containing three strings:
              0 - package name
              1 - package description
              2 - [action] installed version; package url
        """
        prefix = action + ' ' if action else action

        entries = []
        for name in sorted(self.manager.list_packages(), key=lambda s: s.lower()):
            metadata = self.manager.get_metadata(name)

            description = metadata.get('description') or 'No description provided'
            version_label = self._installed_version(name, metadata.get('version'))

            url = metadata.get('url')
            url_suffix = '; ' + re.sub('^https?://', '', url) if url else ''

            entries.append([name, description, prefix + version_label + url_suffix])
        return entries

    def _installed_version(self, package, version):
        # Packages tracked by a VCS usually carry no version metadata, so
        # report the repository type instead.
        if not version:
            package_dir = os.path.join(sublime.packages_path(), package)
            if os.path.exists(os.path.join(package_dir, '.git')):
                return 'git repository'
            if os.path.exists(os.path.join(package_dir, '.hg')):
                return 'hg repository'
            return 'unknown version'
        return 'v' + version
| mit |
jalexvig/tensorflow | tensorflow/python/kernel_tests/pool_test.py | 24 | 14407 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for unified pooling functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def pool_direct_single_axis(
    input,  # pylint: disable=redefined-builtin
    axis,
    window_size,
    pooling_type,
    padding,
    dilation_rate,
    stride):
  """Numpy implementation of pooling along a single axis.

  This is intended for testing only, and therefore isn't particularly
  efficient.  See pool_direct below for the meaning of the arguments.

  Args:
    input: numpy array.
    axis: axis along which to perform pooling.
    window_size: int >= 1.  Size of pooling window within axis.
    pooling_type: either "MAX" or "AVG".
    padding: either "SAME" or "VALID".
    dilation_rate: int >= 1.  Dilation factor for window, i.e. stride at
      which to sample input.
    stride: int >= 1.  Stride at which to generate output.

  Returns:
    pooling output array with the same rank as `input`.

  Raises:
    ValueError: if arguments are invalid.
  """
  effective_window = (window_size - 1) * dilation_rate + 1
  axis_len = input.shape[axis]
  if padding == "SAME":
    out_len = int(math.ceil(axis_len / stride))
    pad_total = max(0, (out_len - 1) * stride + effective_window - axis_len)
    pad_before = pad_total // 2
  elif padding == "VALID":
    out_len = int(math.ceil((axis_len - effective_window + 1) / stride))
    pad_before = 0
  else:
    raise ValueError("Unsupported padding type: %r" % (padding,))

  out_shape = input.shape[:axis] + (out_len,) + input.shape[axis + 1:]
  output = np.zeros(out_shape, input.dtype)

  leading = tuple(np.s_[:] for _ in range(axis))
  reducers = {"MAX": np.max, "AVG": np.mean}
  if pooling_type not in reducers:
    raise ValueError("Unsupported pooling type: %r" % (pooling_type,))
  reduce_fn = reducers[pooling_type]

  for out_pos in range(out_len):
    start = out_pos * stride - pad_before
    stop = min(start + effective_window, axis_len)
    if start < 0:
      # Window samples falling into the virtual left padding are dropped by
      # advancing the start to the next dilated sample position.
      start += dilation_rate
    window = np.s_[start:stop:dilation_rate]
    output[leading + (out_pos,)] = reduce_fn(
        input[leading + (window,)], axis=axis)
  return output
def pool_direct(
    input,  # pylint: disable=redefined-builtin
    window_shape,
    pooling_type,
    padding,  # pylint: disable=redefined-builtin
    dilation_rate,
    strides,
    data_format=None):
  """Numpy implementation of pooling.

  This is intended for testing only, and therefore isn't particularly
  efficient.  See tensorflow.nn.pool.

  Args:
    input: numpy array of rank N+2.
    window_shape: Sequence of N ints >= 1.
    pooling_type: either "MAX" or "AVG".
    padding: either "SAME" or "VALID".
    dilation_rate: Sequence of N ints >= 1.
    strides: Sequence of N ints >= 1.
    data_format: If specified and starts with "NC", indicates that second
      dimension, rather than the last dimension, specifies the channel.

  Returns:
    pooling output array of rank N+2.

  Raises:
    ValueError: if arguments are invalid.
  """
  # "NC*" layouts keep channels in dimension 1, so spatial axes begin at 2;
  # otherwise (channels-last) they begin at 1.
  if data_format is not None and data_format.startswith("NC"):
    first_spatial_axis = 2
  else:
    first_spatial_axis = 1
  result = input
  for dim, size in enumerate(window_shape):
    result = pool_direct_single_axis(
        input=result,
        axis=first_spatial_axis + dim,
        window_size=size,
        pooling_type=pooling_type,
        padding=padding,
        dilation_rate=dilation_rate[dim],
        stride=strides[dim])
  return result
class PoolingTest(test.TestCase):
def _test(self, input_shape, **kwargs):
  """Compare tf.nn.pool against the direct NumPy reference implementation."""
  # Use negative numbers to make sure there isn't any zero padding getting
  # used.
  x = -np.arange(
      np.prod(input_shape), dtype=np.float32).reshape(input_shape) - 1
  y1 = pool_direct(input=x, **kwargs)
  y2 = nn_ops.pool(input=x, **kwargs)
  self.assertAllClose(y1, y2.eval(), rtol=1e-2, atol=1e-2)
def testPoolSimple(self):
  # Smoke test: a strided 1x3 window over a rank-4 input, for every
  # combination of padding and pooling type.
  with self.test_session(use_gpu=test.is_gpu_available()):
    for padding in ["SAME", "VALID"]:
      for pooling_type in ["MAX", "AVG"]:
        self._test(
            input_shape=[1, 1, 10, 1],
            window_shape=[1, 3],
            padding=padding,
            pooling_type=pooling_type,
            dilation_rate=[1, 1],
            strides=[1, 2])
def testPool1D(self):
  # Sweep 1-D pooling over paddings, pooling types, input sizes, window
  # sizes, dilation rates and strides.
  with self.test_session(use_gpu=test.is_gpu_available()):
    for padding in ["SAME", "VALID"]:
      for pooling_type in ["MAX", "AVG"]:
        for input_shape in [[2, 9, 2], [2, 10, 2]]:
          for window_shape in [[1], [2], [3]]:
            if padding != "SAME":
              # Dilation rates > 1 are only exercised without SAME padding.
              for dilation_rate in [[1], [2], [3]]:
                self._test(
                    input_shape=input_shape,
                    window_shape=window_shape,
                    padding=padding,
                    pooling_type=pooling_type,
                    dilation_rate=dilation_rate,
                    strides=[1])
            for strides in [[1], [2], [3]]:
              # Strides larger than the window are not supported.
              if np.any(np.array(strides) > window_shape):
                continue
              self._test(
                  input_shape=input_shape,
                  window_shape=window_shape,
                  padding=padding,
                  pooling_type=pooling_type,
                  dilation_rate=[1],
                  strides=strides)
def testPool2D(self):
with self.test_session(use_gpu=test.is_gpu_available()):
for padding in ["SAME", "VALID"]:
for pooling_type in ["MAX", "AVG"]:
for input_shape in [[2, 9, 10, 2], [2, 10, 9, 2]]:
for window_shape in [[1, 1], [2, 1], [2, 3]]:
if padding != "SAME":
for dilation_rate in [[1, 1], [2, 1], [1, 2], [2, 3]]:
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=dilation_rate,
strides=[1, 1])
for strides in [[1, 1], [2, 1], [1, 2], [2, 3]]:
if np.any(np.array(strides) > window_shape):
continue
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=[1, 1],
strides=strides)
def testPool3D(self):
with self.test_session(use_gpu=test.is_gpu_available()):
for padding in ["SAME", "VALID"]:
for pooling_type in ["MAX", "AVG"]:
for input_shape in [[2, 9, 10, 11, 2], [2, 10, 9, 11, 2]]:
for window_shape in [[1, 1, 1], [2, 1, 2], [2, 3, 2]]:
if padding != "SAME":
for dilation_rate in [[1, 1, 1], [2, 1, 2], [1, 2, 2],
[2, 3, 3]]:
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=dilation_rate,
strides=[1, 1, 1])
for strides in [[1, 1, 1], [2, 1, 2], [1, 2, 2], [2, 3, 3]]:
if np.any(np.array(strides) > window_shape):
continue
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=[1, 1, 1],
strides=strides)
def testPoolNC(self):
if test.is_gpu_available(cuda_only=True):
# "NC*" format is currently only supported on CUDA.
with self.test_session(use_gpu=True):
for padding in ["SAME", "VALID"]:
self._test(
input_shape=[2, 2, 9],
window_shape=[2],
padding=padding,
pooling_type="MAX",
strides=[1],
dilation_rate=[1],
data_format="NCW")
self._test(
input_shape=[2, 2, 9],
window_shape=[2],
padding=padding,
pooling_type="MAX",
strides=[2],
dilation_rate=[1],
data_format="NCW")
self._test(
input_shape=[2, 2, 7, 9],
window_shape=[2, 2],
padding=padding,
pooling_type="MAX",
strides=[1, 2],
dilation_rate=[1, 1],
data_format="NCHW")
self._test(
input_shape=[2, 2, 7, 5, 3],
window_shape=[2, 2, 2],
padding=padding,
pooling_type="MAX",
strides=[1, 2, 1],
dilation_rate=[1, 1, 1],
data_format="NCDHW")
self._test(
input_shape=[2, 2, 7, 9],
window_shape=[2, 2],
padding="VALID",
pooling_type="MAX",
strides=[1, 1],
dilation_rate=[2, 2],
data_format="NCHW")
def _test_gradient(self, input_shape, **kwargs):
x_val = -np.arange(
np.prod(input_shape), dtype=np.float32).reshape(input_shape) - 1
x = constant_op.constant(x_val, name="x", dtype=dtypes.float32)
output = nn_ops.pool(input=x, **kwargs)
y_shape = output.get_shape().as_list()
err = gradient_checker.compute_gradient_error(
[x], [input_shape], output, y_shape, x_init_value=[x_val])
err_tolerance = 1e-2
self.assertLess(err, err_tolerance)
def testGradient1D(self):
with self.test_session(use_gpu=test.is_gpu_available()):
for padding in ["SAME", "VALID"]:
for pooling_type in ["AVG", "MAX"]:
for input_shape in [[2, 5, 2], [1, 4, 1]]:
for window_shape in [[1], [2]]:
if padding != "SAME":
for dilation_rate in [[1], [2]]:
self._test_gradient(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=dilation_rate,
strides=[1])
for strides in [[1], [2]]:
if np.any(np.array(strides) > window_shape):
continue
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=[1],
strides=strides)
def testGradient2D(self):
with self.test_session(use_gpu=test.is_gpu_available()):
for padding in ["SAME", "VALID"]:
for pooling_type in ["AVG", "MAX"]:
for input_shape in [[2, 4, 5, 2], [1, 5, 4, 1]]:
for window_shape in [[1, 1], [2, 1], [2, 2]]:
if padding != "SAME":
for dilation_rate in [[1, 1], [2, 1], [2, 2]]:
self._test_gradient(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=dilation_rate,
strides=[1, 1])
for strides in [[1, 1], [2, 1], [1, 2], [2, 2]]:
if np.any(np.array(strides) > window_shape):
continue
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=[1, 1],
strides=strides)
def testGradient3D(self):
with self.test_session(use_gpu=test.is_gpu_available()):
for padding in ["SAME", "VALID"]:
for pooling_type in ["AVG", "MAX"]:
for input_shape in [[1, 3, 5, 4, 1], [1, 5, 4, 3, 1]]:
for window_shape in [[1, 1, 1], [2, 1, 2], [2, 2, 2]]:
if padding != "SAME":
for dilation_rate in [[1, 1, 1], [2, 1, 2], [2, 2, 2]]:
self._test_gradient(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=dilation_rate,
strides=[1, 1, 1])
for strides in [[1, 1, 1], [2, 1, 2], [2, 2, 2]]:
if np.any(np.array(strides) > window_shape):
continue
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=[1, 1, 1],
strides=strides)
# Run the TensorFlow test runner when this file is executed as a script.
if __name__ == "__main__":
  test.main()
import os
import pygame
import sys
from diamond_game.model.models import Board
import Queue
def main():
    """Bounce a sprite around a 320x240 pygame window until the user quits."""
    pygame.init()
    win_w, win_h = 320, 240
    velocity = [2, 2]
    background = (0, 0, 0)
    surface = pygame.display.set_mode((win_w, win_h))
    sprite = pygame.image.load("test.png")
    rect = sprite.get_rect()
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
        rect = rect.move(velocity)
        # Reverse the relevant velocity component on contact with an edge.
        if rect.left < 0 or rect.right > win_w:
            velocity[0] = -velocity[0]
        if rect.top < 0 or rect.bottom > win_h:
            velocity[1] = -velocity[1]
        surface.fill(background)
        surface.blit(sprite, rect)
        pygame.display.flip()
# Module-level demo queue consumed by geta() below.
# Queue.Queue(0) means an unbounded queue in Python 2.
a = Queue.Queue(0)
a.put(1)
a.put(1)
a.put(1)
a.put(1)
def geta():
    """Return the next item from the module-level queue, or None when empty."""
    if a.empty():
        return None
    return a.get()
# Ad-hoc manual test script (Python 2 syntax).  Exercises the Board model,
# sound-file discovery, the demo queue, and some print behavior.
if __name__ == "__main__":
    x = 5
    for i in range(5):
        print 1
    board = Board()
    board.make_board()
    board.init_board()
    board.print_board()
    print -x
    # List the bundled sound assets relative to the working directory.
    for file in os.listdir("sounds"):
        fullname = os.path.join('sounds', file)
        print fullname
    # Drain the module-level queue via geta() until it returns None.
    event = geta()
    print event
    while event is not None:
        print event
        event = geta()
    print 5/2
    li = [1,2,3,4]
    # NOTE(review): index 234231 is far out of range for a 4-element list,
    # so this line raises IndexError -- presumably a deliberate crash test;
    # confirm before "fixing".
    print li[234231]
    li = []
    # main()
#
# turtle.py: a Tkinter based turtle graphics module for Python
# Version 1.0.1 - 24. 9. 2009
#
# Copyright (C) 2006 - 2010 Gregor Lingl
# email: glingl@aon.at
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
"""
Turtle graphics is a popular way for introducing programming to
kids. It was part of the original Logo programming language developed
by Wally Feurzig and Seymour Papert in 1966.
Imagine a robotic turtle starting at (0, 0) in the x-y plane. Give it
the command turtle.forward(15), and it moves (on-screen!) 15 pixels in
the direction it is facing, drawing a line as it moves. Give it the
command turtle.left(25), and it rotates in-place 25 degrees clockwise.
By combining together these and similar commands, intricate shapes and
pictures can easily be drawn.
----- turtle.py
This module is an extended reimplementation of turtle.py from the
Python standard distribution up to Python 2.5. (See: http://www.python.org)
It tries to keep the merits of turtle.py and to be (nearly) 100%
compatible with it. This means in the first place to enable the
learning programmer to use all the commands, classes and methods
interactively when using the module from within IDLE run with
the -n switch.
Roughly it has the following features added:
- Better animation of the turtle movements, especially of turning the
turtle. So the turtles can more easily be used as a visual feedback
instrument by the (beginning) programmer.
- Different turtle shapes, gif-images as turtle shapes, user defined
and user controllable turtle shapes, among them compound
(multicolored) shapes. Turtle shapes can be stretched and tilted, which
makes turtles very versatile geometrical objects.
- Fine control over turtle movement and screen updates via delay(),
and enhanced tracer() and speed() methods.
- Aliases for the most commonly used commands, like fd for forward etc.,
following the early Logo traditions. This reduces the boring work of
typing long sequences of commands, which often occur in a natural way
when kids try to program fancy pictures on their first encounter with
turtle graphics.
- Turtles now have an undo()-method with configurable undo-buffer.
- Some simple commands/methods for creating event driven programs
(mouse-, key-, timer-events). Especially useful for programming games.
- A scrollable Canvas class. The default scrollable Canvas can be
extended interactively as needed while playing around with the turtle(s).
- A TurtleScreen class with methods controlling background color or
background image, window and canvas size and other properties of the
TurtleScreen.
- There is a method, setworldcoordinates(), to install a user defined
coordinate-system for the TurtleScreen.
- The implementation uses a 2-vector class named Vec2D, derived from tuple.
This class is public, so it can be imported by the application programmer,
which makes certain types of computations very natural and compact.
- Appearance of the TurtleScreen and the Turtles at startup/import can be
configured by means of a turtle.cfg configuration file.
The default configuration mimics the appearance of the old turtle module.
- If configured appropriately the module reads in docstrings from a docstring
dictionary in some different language, supplied separately and replaces
the English ones by those read in. There is a utility function
write_docstringdict() to write a dictionary with the original (English)
docstrings to disc, so it can serve as a template for translations.
Behind the scenes there are some features included with possible
  extensions in mind. These will be commented and documented elsewhere.
"""
# Module version string (informational only).
_ver = "turtle 1.0b1 - for Python 2.6 - 30. 5. 2008, 18:08"
#print _ver
import Tkinter as TK
import types
import math
import time
import os
from os.path import isfile, split, join
from copy import deepcopy
from math import * ## for compatibility with old turtle module
# Name groups below are concatenated into __all__: the public classes,
# the screen-level functions, the turtle-level functions, utilities, and
# the math names re-exported for old-turtle-module compatibility.
_tg_classes = ['ScrolledCanvas', 'TurtleScreen', 'Screen',
               'RawTurtle', 'Turtle', 'RawPen', 'Pen', 'Shape', 'Vec2D']
_tg_screen_functions = ['addshape', 'bgcolor', 'bgpic', 'bye',
        'clearscreen', 'colormode', 'delay', 'exitonclick', 'getcanvas',
        'getshapes', 'listen', 'mode', 'onkey', 'onscreenclick', 'ontimer',
        'register_shape', 'resetscreen', 'screensize', 'setup',
        'setworldcoordinates', 'title', 'tracer', 'turtles', 'update',
        'window_height', 'window_width']
_tg_turtle_functions = ['back', 'backward', 'begin_fill', 'begin_poly', 'bk',
        'circle', 'clear', 'clearstamp', 'clearstamps', 'clone', 'color',
        'degrees', 'distance', 'dot', 'down', 'end_fill', 'end_poly', 'fd',
        'fill', 'fillcolor', 'forward', 'get_poly', 'getpen', 'getscreen',
        'getturtle', 'goto', 'heading', 'hideturtle', 'home', 'ht', 'isdown',
        'isvisible', 'left', 'lt', 'onclick', 'ondrag', 'onrelease', 'pd',
        'pen', 'pencolor', 'pendown', 'pensize', 'penup', 'pos', 'position',
        'pu', 'radians', 'right', 'reset', 'resizemode', 'rt',
        'seth', 'setheading', 'setpos', 'setposition', 'settiltangle',
        'setundobuffer', 'setx', 'sety', 'shape', 'shapesize', 'showturtle',
        'speed', 'st', 'stamp', 'tilt', 'tiltangle', 'towards', 'tracer',
        'turtlesize', 'undo', 'undobufferentries', 'up', 'width',
        'window_height', 'window_width', 'write', 'xcor', 'ycor']
_tg_utilities = ['write_docstringdict', 'done', 'mainloop']
_math_functions = ['acos', 'asin', 'atan', 'atan2', 'ceil', 'cos', 'cosh',
        'e', 'exp', 'fabs', 'floor', 'fmod', 'frexp', 'hypot', 'ldexp', 'log',
        'log10', 'modf', 'pi', 'pow', 'sin', 'sinh', 'sqrt', 'tan', 'tanh']
__all__ = (_tg_classes + _tg_screen_functions + _tg_turtle_functions +
           _tg_utilities + _math_functions)
# Short-form command names kept for early-Logo-style programs.
_alias_list = ['addshape', 'backward', 'bk', 'fd', 'ht', 'lt', 'pd', 'pos',
               'pu', 'rt', 'seth', 'setpos', 'setposition', 'st',
               'turtlesize', 'up', 'width']
# Default configuration; may be overridden by turtle.cfg (see readconfig()).
_CFG = {"width" : 0.5,               # Screen
        "height" : 0.75,
        "canvwidth" : 400,
        "canvheight": 300,
        "leftright": None,
        "topbottom": None,
        "mode": "standard",          # TurtleScreen
        "colormode": 1.0,
        "delay": 10,
        "undobuffersize": 1000,      # RawTurtle
        "shape": "classic",
        "pencolor" : "black",
        "fillcolor" : "black",
        "resizemode" : "noresize",
        "visible" : True,
        "language": "english",        # docstrings
        "exampleturtle": "turtle",
        "examplescreen": "screen",
        "title": "Python Turtle Graphics",
        "using_IDLE": False
       }
##print "cwd:", os.getcwd()
##print "__file__:", __file__
##
##def show(dictionary):
## print "=========================="
## for key in sorted(dictionary.keys()):
## print key, ":", dictionary[key]
## print "=========================="
## print
def config_dict(filename):
    """Convert content of config-file into dictionary.

    Each non-blank, non-comment line must have the form 'key = value'.
    Values spelled 'True'/'False'/'None'/''/"" are evaluated to the
    corresponding Python object, numeric-looking values are converted to
    int or float, everything else is kept as a string.
    """
    # 'with' guarantees the file is closed even if reading raises
    # (the original left the handle open on error).
    with open(filename, "r") as f:
        cfglines = f.readlines()
    cfgdict = {}
    for line in cfglines:
        line = line.strip()
        if not line or line.startswith("#"):
            continue
        try:
            key, value = line.split("=")
        except ValueError:
            # Narrowed from a bare 'except': only a line with zero or more
            # than one '=' makes the 2-tuple unpacking fail.
            print("Bad line in config-file %s:\n%s" % (filename, line))
            continue
        key = key.strip()
        value = value.strip()
        if value in ["True", "False", "None", "''", '""']:
            # Whitelisted literals only, so eval is safe here.
            value = eval(value)
        else:
            try:
                if "." in value:
                    value = float(value)
                else:
                    value = int(value)
            except ValueError:
                pass  # value need not be converted
        cfgdict[key] = value
    return cfgdict
def readconfig(cfgdict):
    """Read config-files and update the global configuration accordingly.

    A turtle.cfg in the current working directory takes precedence over the
    one located next to turtle.py.  If the local file contains an
    'importconfig' entry, say 'myway', the file turtle_myway.cfg is looked
    up in the import directory instead of turtle.cfg.  Without any config
    file, the built-in defaults remain in effect.
    """
    default_cfg = "turtle.cfg"
    local_dict = {}
    import_dict = {}
    # 1. Config file in the current working directory (highest priority).
    if isfile(default_cfg):
        local_dict = config_dict(default_cfg)
        if "importconfig" in local_dict:
            default_cfg = "turtle_%s.cfg" % local_dict["importconfig"]
    # 2. Config file next to turtle.py itself.
    try:
        head, tail = split(__file__)
        cfg_file2 = join(head, default_cfg)
    except:
        # __file__ may be undefined (e.g. frozen interpreter).
        cfg_file2 = ""
    if isfile(cfg_file2):
        import_dict = config_dict(cfg_file2)
    # Apply import-directory settings first, then let the local file win.
    _CFG.update(import_dict)
    _CFG.update(local_dict)
# Load user configuration at import time; on any failure the built-in
# defaults in _CFG remain in effect.
try:
    readconfig(_CFG)
except:
    print "No configfile read, reason unknown"
class Vec2D(tuple):
    """An immutable 2D vector, implemented as a tuple subclass.

    Helper class for turtle graphics; may also be useful in user programs.
    Because a Vec2D *is* a tuple, it unpacks and compares like one.

    For vectors a, b and a number k:
        a + b   vector addition
        a - b   vector subtraction
        a * b   inner (dot) product
        k * a   and a * k   scalar multiplication
        abs(a)  Euclidean length
        a.rotate(angle)  counterclockwise rotation by angle degrees
    """
    def __new__(cls, x, y):
        # Vectors are immutable: build the underlying tuple directly.
        return tuple.__new__(cls, (x, y))
    def __add__(self, other):
        ax, ay = self
        bx, by = other
        return Vec2D(ax + bx, ay + by)
    def __mul__(self, other):
        if isinstance(other, Vec2D):
            # vector * vector -> dot product (a plain number)
            return self[0] * other[0] + self[1] * other[1]
        # vector * scalar -> scaled vector
        return Vec2D(self[0] * other, self[1] * other)
    def __rmul__(self, other):
        # scalar * vector; anything else implicitly yields None,
        # matching the original behavior.
        if isinstance(other, (int, float)):
            return Vec2D(self[0] * other, self[1] * other)
    def __sub__(self, other):
        ax, ay = self
        bx, by = other
        return Vec2D(ax - bx, ay - by)
    def __neg__(self):
        return Vec2D(-self[0], -self[1])
    def __abs__(self):
        # Euclidean length
        return (self[0] ** 2 + self[1] ** 2) ** 0.5
    def rotate(self, angle):
        """Rotate self counterclockwise by angle degrees."""
        rad = angle * math.pi / 180.0
        c, s = math.cos(rad), math.sin(rad)
        # Express the rotation via self and its perpendicular vector.
        perp = Vec2D(-self[1], self[0])
        return Vec2D(self[0] * c + perp[0] * s, self[1] * c + perp[1] * s)
    def __getnewargs__(self):
        # Needed so copy/pickle can reconstruct the tuple subclass.
        return (self[0], self[1])
    def __repr__(self):
        return "(%.2f,%.2f)" % self
##############################################################################
### From here up to line : Tkinter - Interface for turtle.py ###
### May be replaced by an interface to some different graphics toolkit ###
##############################################################################
## helper functions for Scrolled Canvas, to forward Canvas-methods
## to ScrolledCanvas class
def __methodDict(cls, _dict):
    """helper function for Scrolled Canvas

    Collect the plain functions defined on cls and all of its base classes
    into _dict.  Bases are processed first (in reverse order), so functions
    defined on cls itself overwrite inherited ones of the same name.
    """
    baseList = list(cls.__bases__)
    baseList.reverse()
    for _super in baseList:
        __methodDict(_super, _dict)
    for key, value in cls.__dict__.items():
        if type(value) == types.FunctionType:
            _dict[key] = value
def __methods(cls):
    """helper function for Scrolled Canvas

    Return the names of all methods defined on cls or inherited by it.
    """
    _dict = {}
    __methodDict(cls, _dict)
    return _dict.keys()
# Source-code template for the forwarding methods generated by
# __forwardmethods: each generated method delegates all arguments to
# self.<attribute>.<method>(...).
__stringBody = (
    'def %(method)s(self, *args, **kw): return ' +
    'self.%(attribute)s.%(method)s(*args, **kw)')
def __forwardmethods(fromClass, toClass, toPart, exclude = ()):
    """Helper functions for Scrolled Canvas, used to forward
    ScrolledCanvas-methods to Tkinter.Canvas class.

    For every public method of toClass that fromClass does not already
    define (and that is not listed in exclude), compile a forwarding method
    from the __stringBody template and install it on fromClass.  toPart is
    the attribute name (a string) holding the delegate instance.
    """
    _dict = {}
    __methodDict(toClass, _dict)
    # Drop private/dunder names ...
    for ex in _dict.keys():
        if ex[:1] == '_' or ex[-1:] == '_':
            del _dict[ex]
    # ... explicitly excluded names ...
    for ex in exclude:
        if ex in _dict:
            del _dict[ex]
    # ... and names fromClass already provides.
    for ex in __methods(fromClass):
        if ex in _dict:
            del _dict[ex]
    for method, func in _dict.items():
        d = {'method': method, 'func': func}
        if type(toPart) == types.StringType:
            execString = \
            __stringBody % {'method' : method, 'attribute' : toPart}
        # Compile the forwarding method in namespace d, then attach it.
        exec execString in d
        fromClass.__dict__[method] = d[method]
class ScrolledCanvas(TK.Frame):
    """Modeled after the scrolled canvas class from Grayson's Tkinter book.

    Used as the default canvas, which pops up automatically when
    using turtle graphics functions or the Turtle class.  Wraps a TK.Canvas
    plus horizontal and vertical scrollbars that appear only when the
    canvas is larger than the visible window.
    """
    def __init__(self, master, width=500, height=350,
                                          canvwidth=600, canvheight=500):
        TK.Frame.__init__(self, master, width=width, height=height)
        self._rootwindow = self.winfo_toplevel()
        self.width, self.height = width, height
        self.canvwidth, self.canvheight = canvwidth, canvheight
        self.bg = "white"
        self._canvas = TK.Canvas(master, width=width, height=height,
                                 bg=self.bg, relief=TK.SUNKEN, borderwidth=2)
        self.hscroll = TK.Scrollbar(master, command=self._canvas.xview,
                                    orient=TK.HORIZONTAL)
        self.vscroll = TK.Scrollbar(master, command=self._canvas.yview)
        self._canvas.configure(xscrollcommand=self.hscroll.set,
                               yscrollcommand=self.vscroll.set)
        # Let the canvas cell absorb all extra space when resizing.
        self.rowconfigure(0, weight=1, minsize=0)
        self.columnconfigure(0, weight=1, minsize=0)
        self._canvas.grid(padx=1, in_ = self, pady=1, row=0,
                column=0, rowspan=1, columnspan=1, sticky='news')
        self.vscroll.grid(padx=1, in_ = self, pady=1, row=0,
                column=1, rowspan=1, columnspan=1, sticky='news')
        self.hscroll.grid(padx=1, in_ = self, pady=1, row=1,
                column=0, rowspan=1, columnspan=1, sticky='news')
        self.reset()
        self._rootwindow.bind('<Configure>', self.onResize)
    def reset(self, canvwidth=None, canvheight=None, bg = None):
        """Adjust canvas and scrollbars according to given canvas size."""
        if canvwidth:
            self.canvwidth = canvwidth
        if canvheight:
            self.canvheight = canvheight
        if bg:
            self.bg = bg
        # Scrollregion is centered on (0, 0) -- turtle's coordinate origin.
        self._canvas.config(bg=bg,
                        scrollregion=(-self.canvwidth//2, -self.canvheight//2,
                                       self.canvwidth//2, self.canvheight//2))
        self._canvas.xview_moveto(0.5*(self.canvwidth - self.width + 30) /
                                                               self.canvwidth)
        self._canvas.yview_moveto(0.5*(self.canvheight- self.height + 30) /
                                                              self.canvheight)
        self.adjustScrolls()
    def adjustScrolls(self):
        """ Adjust scrollbars according to window- and canvas-size.

        Scrollbars are shown only if the canvas is larger than the window.
        """
        cwidth = self._canvas.winfo_width()
        cheight = self._canvas.winfo_height()
        self._canvas.xview_moveto(0.5*(self.canvwidth-cwidth)/self.canvwidth)
        self._canvas.yview_moveto(0.5*(self.canvheight-cheight)/self.canvheight)
        if cwidth < self.canvwidth or cheight < self.canvheight:
            self.hscroll.grid(padx=1, in_ = self, pady=1, row=1,
                              column=0, rowspan=1, columnspan=1, sticky='news')
            self.vscroll.grid(padx=1, in_ = self, pady=1, row=0,
                              column=1, rowspan=1, columnspan=1, sticky='news')
        else:
            self.hscroll.grid_forget()
            self.vscroll.grid_forget()
    def onResize(self, event):
        """self-explanatory"""
        self.adjustScrolls()
    def bbox(self, *args):
        """ 'forward' method, which canvas itself has inherited...
        """
        return self._canvas.bbox(*args)
    def cget(self, *args, **kwargs):
        """ 'forward' method, which canvas itself has inherited...
        """
        return self._canvas.cget(*args, **kwargs)
    def config(self, *args, **kwargs):
        """ 'forward' method, which canvas itself has inherited...
        """
        self._canvas.config(*args, **kwargs)
    def bind(self, *args, **kwargs):
        """ 'forward' method, which canvas itself has inherited...
        """
        self._canvas.bind(*args, **kwargs)
    def unbind(self, *args, **kwargs):
        """ 'forward' method, which canvas itself has inherited...
        """
        self._canvas.unbind(*args, **kwargs)
    def focus_force(self):
        """ 'forward' method, which canvas itself has inherited...
        """
        self._canvas.focus_force()
__forwardmethods(ScrolledCanvas, TK.Canvas, '_canvas')
class _Root(TK.Tk):
    """Root class for Screen based on Tkinter.

    Thin wrapper around the Tk root window that hosts the ScrolledCanvas
    and exposes a few convenience accessors.
    """
    def __init__(self):
        TK.Tk.__init__(self)
    def setupcanvas(self, width, height, cwidth, cheight):
        # Create the drawing canvas and make it fill the window.
        self._canvas = ScrolledCanvas(self, width, height, cwidth, cheight)
        self._canvas.pack(expand=1, fill="both")
    def _getcanvas(self):
        return self._canvas
    def set_geometry(self, width, height, startx, starty):
        # "%dx%d%+d%+d" -> e.g. "640x480+50+50" (size plus signed offsets).
        self.geometry("%dx%d%+d%+d"%(width, height, startx, starty))
    def ondestroy(self, destroy):
        # Invoke 'destroy' when the user closes the window.
        self.wm_protocol("WM_DELETE_WINDOW", destroy)
    def win_width(self):
        return self.winfo_screenwidth()
    def win_height(self):
        return self.winfo_screenheight()
Canvas = TK.Canvas
class TurtleScreenBase(object):
    """Provide the basic graphics functionality.
       Interface between Tkinter and turtle.py.

       To port turtle.py to some different graphics toolkit
       a corresponding TurtleScreenBase class has to be implemented.

       Convention used throughout: turtle coordinates are y-up, the Tk
       canvas is y-down, so y values are negated (and scaled by
       self.yscale) whenever they cross this boundary.
    """
    @staticmethod
    def _blankimage():
        """return a blank image object
        """
        img = TK.PhotoImage(width=1, height=1)
        img.blank()
        return img
    @staticmethod
    def _image(filename):
        """return an image object containing the
        imagedata from a gif-file named filename.
        """
        return TK.PhotoImage(file=filename)
    def __init__(self, cv):
        self.cv = cv
        # Determine the drawing area size from whichever canvas type we got.
        if isinstance(cv, ScrolledCanvas):
            w = self.cv.canvwidth
            h = self.cv.canvheight
        else:  # expected: ordinary TK.Canvas
            w = int(self.cv.cget("width"))
            h = int(self.cv.cget("height"))
            self.cv.config(scrollregion = (-w//2, -h//2, w//2, h//2 ))
        self.canvwidth = w
        self.canvheight = h
        self.xscale = self.yscale = 1.0
    def _createpoly(self):
        """Create an invisible polygon item on canvas self.cv)
        """
        return self.cv.create_polygon((0, 0, 0, 0, 0, 0), fill="", outline="")
    def _drawpoly(self, polyitem, coordlist, fill=None,
                  outline=None, width=None, top=False):
        """Configure polygonitem polyitem according to provided
        arguments:
        coordlist is sequence of coordinates
        fill is filling color
        outline is outline color
        top is a boolean value, which specifies if polyitem
        will be put on top of the canvas' displaylist so it
        will not be covered by other items.
        """
        cl = []
        for x, y in coordlist:
            # negate y: turtle coordinates are y-up, canvas is y-down
            cl.append(x * self.xscale)
            cl.append(-y * self.yscale)
        self.cv.coords(polyitem, *cl)
        if fill is not None:
            self.cv.itemconfigure(polyitem, fill=fill)
        if outline is not None:
            self.cv.itemconfigure(polyitem, outline=outline)
        if width is not None:
            self.cv.itemconfigure(polyitem, width=width)
        if top:
            self.cv.tag_raise(polyitem)
    def _createline(self):
        """Create an invisible line item on canvas self.cv)
        """
        return self.cv.create_line(0, 0, 0, 0, fill="", width=2,
                                   capstyle = TK.ROUND)
    def _drawline(self, lineitem, coordlist=None,
                  fill=None, width=None, top=False):
        """Configure lineitem according to provided arguments:
        coordlist is sequence of coordinates
        fill is drawing color
        width is width of drawn line.
        top is a boolean value, which specifies if polyitem
        will be put on top of the canvas' displaylist so it
        will not be covered by other items.
        """
        if coordlist is not None:
            cl = []
            for x, y in coordlist:
                cl.append(x * self.xscale)
                cl.append(-y * self.yscale)
            self.cv.coords(lineitem, *cl)
        if fill is not None:
            self.cv.itemconfigure(lineitem, fill=fill)
        if width is not None:
            self.cv.itemconfigure(lineitem, width=width)
        if top:
            self.cv.tag_raise(lineitem)
    def _delete(self, item):
        """Delete graphics item from canvas.
        If item is"all" delete all graphics items.
        """
        self.cv.delete(item)
    def _update(self):
        """Redraw graphics items on canvas
        """
        self.cv.update()
    def _delay(self, delay):
        """Delay subsequent canvas actions for delay ms."""
        self.cv.after(delay)
    def _iscolorstring(self, color):
        """Check if the string color is a legal Tkinter color string.
        """
        try:
            rgb = self.cv.winfo_rgb(color)
            ok = True
        except TK.TclError:
            ok = False
        return ok
    def _bgcolor(self, color=None):
        """Set canvas' backgroundcolor if color is not None,
        else return backgroundcolor."""
        if color is not None:
            self.cv.config(bg = color)
            self._update()
        else:
            return self.cv.cget("bg")
    def _write(self, pos, txt, align, font, pencolor):
        """Write txt at pos in canvas with specified font
        and color.
        Return text item and x-coord of right bottom corner
        of text's bounding box."""
        x, y = pos
        x = x * self.xscale
        y = y * self.yscale
        # Map the requested alignment onto a Tk anchor position.
        anchor = {"left":"sw", "center":"s", "right":"se" }
        item = self.cv.create_text(x-1, -y, text = txt, anchor = anchor[align],
                                        fill = pencolor, font = font)
        x0, y0, x1, y1 = self.cv.bbox(item)
        self.cv.update()
        return item, x1-1
##    def _dot(self, pos, size, color):
##        """may be implemented for some other graphics toolkit"""
    def _onclick(self, item, fun, num=1, add=None):
        """Bind fun to mouse-click event on turtle.
        fun must be a function with two arguments, the coordinates
        of the clicked point on the canvas.
        num, the number of the mouse-button defaults to 1

        Passing fun=None removes the existing binding.
        """
        if fun is None:
            self.cv.tag_unbind(item, "<Button-%s>" % num)
        else:
            def eventfun(event):
                # Convert window pixel coordinates back to turtle coordinates.
                x, y = (self.cv.canvasx(event.x)/self.xscale,
                        -self.cv.canvasy(event.y)/self.yscale)
                fun(x, y)
            self.cv.tag_bind(item, "<Button-%s>" % num, eventfun, add)
    def _onrelease(self, item, fun, num=1, add=None):
        """Bind fun to mouse-button-release event on turtle.
        fun must be a function with two arguments, the coordinates
        of the point on the canvas where mouse button is released.
        num, the number of the mouse-button defaults to 1

        If a turtle is clicked, first _onclick-event will be performed,
        then _onscreensclick-event.
        """
        if fun is None:
            self.cv.tag_unbind(item, "<Button%s-ButtonRelease>" % num)
        else:
            def eventfun(event):
                x, y = (self.cv.canvasx(event.x)/self.xscale,
                        -self.cv.canvasy(event.y)/self.yscale)
                fun(x, y)
            self.cv.tag_bind(item, "<Button%s-ButtonRelease>" % num,
                             eventfun, add)
    def _ondrag(self, item, fun, num=1, add=None):
        """Bind fun to mouse-move-event (with pressed mouse button) on turtle.
        fun must be a function with two arguments, the coordinates of the
        actual mouse position on the canvas.
        num, the number of the mouse-button defaults to 1

        Every sequence of mouse-move-events on a turtle is preceded by a
        mouse-click event on that turtle.
        """
        if fun is None:
            self.cv.tag_unbind(item, "<Button%s-Motion>" % num)
        else:
            def eventfun(event):
                try:
                    x, y = (self.cv.canvasx(event.x)/self.xscale,
                           -self.cv.canvasy(event.y)/self.yscale)
                    fun(x, y)
                except:
                    # NOTE(review): silently ignores errors raised by fun
                    # during dragging -- presumably to keep the event loop
                    # alive; confirm before narrowing.
                    pass
            self.cv.tag_bind(item, "<Button%s-Motion>" % num, eventfun, add)
    def _onscreenclick(self, fun, num=1, add=None):
        """Bind fun to mouse-click event on canvas.
        fun must be a function with two arguments, the coordinates
        of the clicked point on the canvas.
        num, the number of the mouse-button defaults to 1

        If a turtle is clicked, first _onclick-event will be performed,
        then _onscreensclick-event.
        """
        if fun is None:
            self.cv.unbind("<Button-%s>" % num)
        else:
            def eventfun(event):
                x, y = (self.cv.canvasx(event.x)/self.xscale,
                        -self.cv.canvasy(event.y)/self.yscale)
                fun(x, y)
            self.cv.bind("<Button-%s>" % num, eventfun, add)
    def _onkey(self, fun, key):
        """Bind fun to key-release event of key.
        Canvas must have focus. See method listen
        """
        if fun is None:
            self.cv.unbind("<KeyRelease-%s>" % key, None)
        else:
            def eventfun(event):
                fun()
            self.cv.bind("<KeyRelease-%s>" % key, eventfun)
    def _listen(self):
        """Set focus on canvas (in order to collect key-events)
        """
        self.cv.focus_force()
    def _ontimer(self, fun, t):
        """Install a timer, which calls fun after t milliseconds.
        """
        if t == 0:
            self.cv.after_idle(fun)
        else:
            self.cv.after(t, fun)
    def _createimage(self, image):
        """Create and return image item on canvas.
        """
        return self.cv.create_image(0, 0, image=image)
    def _drawimage(self, item, (x, y), image):
        """Configure image item as to draw image object
        at position (x,y) on canvas)
        """
        self.cv.coords(item, (x * self.xscale, -y * self.yscale))
        self.cv.itemconfig(item, image=image)
    def _setbgpic(self, item, image):
        """Configure image item as to draw image object
        at center of canvas. Set item to the first item
        in the displaylist, so it will be drawn below
        any other item ."""
        self.cv.itemconfig(item, image=image)
        self.cv.tag_lower(item)
    def _type(self, item):
        """Return 'line' or 'polygon' or 'image' depending on
        type of item.
        """
        return self.cv.type(item)
    def _pointlist(self, item):
        """returns list of coordinate-pairs of points of item
        Example (for insiders):
        >>> from turtle import *
        >>> getscreen()._pointlist(getturtle().turtle._item)
        [(0.0, 9.9999999999999982), (0.0, -9.9999999999999982),
        (9.9999999999999982, 0.0)]
        >>> """
        cl = self.cv.coords(item)
        pl = [(cl[i], -cl[i+1]) for i in range(0, len(cl), 2)]
        return pl
    def _setscrollregion(self, srx1, sry1, srx2, sry2):
        # Set the visible scroll region of the canvas directly.
        self.cv.config(scrollregion=(srx1, sry1, srx2, sry2))
    def _rescale(self, xscalefactor, yscalefactor):
        # Multiply the coordinates of every canvas item by the given
        # factors; used when world coordinates change.
        items = self.cv.find_all()
        for item in items:
            coordinates = self.cv.coords(item)
            newcoordlist = []
            while coordinates:
                x, y = coordinates[:2]
                newcoordlist.append(x * xscalefactor)
                newcoordlist.append(y * yscalefactor)
                coordinates = coordinates[2:]
            self.cv.coords(item, *newcoordlist)
    def _resize(self, canvwidth=None, canvheight=None, bg=None):
        """Resize the canvas the turtles are drawing on. Does
        not alter the drawing window.
        """
        # needs amendment
        if not isinstance(self.cv, ScrolledCanvas):
            return self.canvwidth, self.canvheight
        if canvwidth is canvheight is bg is None:
            return self.cv.canvwidth, self.cv.canvheight
        if canvwidth is not None:
            self.canvwidth = canvwidth
        if canvheight is not None:
            self.canvheight = canvheight
        self.cv.reset(canvwidth, canvheight, bg)
    def _window_size(self):
        """ Return the width and height of the turtle window.
        """
        width = self.cv.winfo_width()
        if width <= 1:  # the window isn't managed by a geometry manager
            width = self.cv['width']
        height = self.cv.winfo_height()
        if height <= 1: # the window isn't managed by a geometry manager
            height = self.cv['height']
        return width, height
##############################################################################
### End of Tkinter - interface ###
##############################################################################
class Terminator(Exception):
    """Raised to abort a running turtle graphics script.

    TurtleScreen.update raises this when _RUNNING becomes False.
    Main purpose: use in the Demo-Viewer turtle.Demo.py.
    """
class TurtleGraphicsError(Exception):
    """Exception for errors specific to turtle graphics."""
class Shape(object):
    """Data structure modeling turtle shapes.

    _type is one of "polygon", "image", "compound".
    _data depends on _type: a polygon tuple, an image object, or a list
    of components built up with addcomponent().
    """
    def __init__(self, type_, data=None):
        if type_ == "polygon":
            # Normalize polygons to tuples so they are immutable.
            if isinstance(data, list):
                data = tuple(data)
        elif type_ == "image":
            # Load gif files from disk; any other data is assumed to be a
            # PhotoImage object already.
            if isinstance(data, str) and \
                    data.lower().endswith(".gif") and isfile(data):
                data = TurtleScreen._image(data)
        elif type_ == "compound":
            data = []
        else:
            raise TurtleGraphicsError("There is no shape type %s" % type_)
        self._type = type_
        self._data = data
    def addcomponent(self, poly, fill, outline=None):
        """Add a component to a shape of type compound.

        Arguments:
        poly    -- a polygon, i.e. a tuple of number pairs
        fill    -- fill color of the component
        outline -- outline color (defaults to the fill color)

        Example:
        >>> poly = ((0,0),(10,-5),(0,10),(-10,-5))
        >>> s = Shape("compound")
        >>> s.addcomponent(poly, "red", "blue")
        ### .. add more components and then use register_shape()
        """
        if self._type != "compound":
            raise TurtleGraphicsError("Cannot add component to %s Shape"
                                                            % self._type)
        if outline is None:
            outline = fill
        self._data.append([poly, fill, outline])
class Tbuffer(object):
    """Ring buffer used as undobuffer for RawTurtle objects."""
    def __init__(self, bufsize=10):
        self.bufsize = bufsize
        self.buffer = [[None]] * bufsize
        self.ptr = -1
        self.cumulate = False

    def reset(self, bufsize=None):
        """Empty the buffer; optionally give it a new size."""
        if bufsize is None:
            for idx in range(self.bufsize):
                self.buffer[idx] = [None]
        else:
            self.bufsize = bufsize
            self.buffer = [[None]] * bufsize
        self.ptr = -1

    def push(self, item):
        """Store item; in cumulate mode append it to the current entry."""
        if self.bufsize <= 0:
            return
        if self.cumulate:
            self.buffer[self.ptr].append(item)
        else:
            self.ptr = (self.ptr + 1) % self.bufsize
            self.buffer[self.ptr] = item

    def pop(self):
        """Return the current entry and step backwards.

        Empty slots yield [None]; an empty (size 0) buffer yields None.
        """
        if self.bufsize <= 0:
            return None
        item = self.buffer[self.ptr]
        if item is None:
            return None
        self.buffer[self.ptr] = [None]
        self.ptr = (self.ptr - 1) % self.bufsize
        return item

    def nr_of_items(self):
        """Number of occupied slots."""
        return self.bufsize - self.buffer.count([None])

    def __repr__(self):
        return "%s %s" % (self.buffer, self.ptr)
class TurtleScreen(TurtleScreenBase):
    """Provides screen oriented methods like setbg etc.

    Only relies upon the methods of TurtleScreenBase and NOT
    upon components of the underlying graphics toolkit -
    which is Tkinter in this case.
    """
    # _STANDARD_DELAY = 5
    # Class-wide run flag; _incrementudc raises Terminator once it is False.
    _RUNNING = True

    def __init__(self, cv, mode=_CFG["mode"],
                 colormode=_CFG["colormode"], delay=_CFG["delay"]):
        # Built-in turtle shapes keyed by name; all but "blank" are
        # polygons given as tuples of (x, y) vertex pairs.
        self._shapes = {
                   "arrow" : Shape("polygon", ((-10,0), (10,0), (0,10))),
                  "turtle" : Shape("polygon", ((0,16), (-2,14), (-1,10), (-4,7),
                              (-7,9), (-9,8), (-6,5), (-7,1), (-5,-3), (-8,-6),
                              (-6,-8), (-4,-5), (0,-7), (4,-5), (6,-8), (8,-6),
                              (5,-3), (7,1), (6,5), (9,8), (7,9), (4,7), (1,10),
                              (2,14))),
                  "circle" : Shape("polygon", ((10,0), (9.51,3.09), (8.09,5.88),
                              (5.88,8.09), (3.09,9.51), (0,10), (-3.09,9.51),
                              (-5.88,8.09), (-8.09,5.88), (-9.51,3.09), (-10,0),
                              (-9.51,-3.09), (-8.09,-5.88), (-5.88,-8.09),
                              (-3.09,-9.51), (-0.00,-10.00), (3.09,-9.51),
                              (5.88,-8.09), (8.09,-5.88), (9.51,-3.09))),
                  "square" : Shape("polygon", ((10,-10), (10,10), (-10,10),
                              (-10,-10))),
                "triangle" : Shape("polygon", ((10,-5.77), (0,11.55),
                              (-10,-5.77))),
                  "classic": Shape("polygon", ((0,0),(-5,-9),(0,-7),(5,-9))),
                   "blank" : Shape("image", self._blankimage())
                  }
        self._bgpics = {"nopic" : ""}   # cache of loaded background images
        TurtleScreenBase.__init__(self, cv)
        self._mode = mode
        self._delayvalue = delay
        # NOTE(review): the colormode *argument* is ignored here; the value
        # is always taken from _CFG -- confirm whether that is intended.
        self._colormode = _CFG["colormode"]
        self._keys = []                 # keys with active onkey bindings
        self.clear()
def clear(self):
    """Delete all drawings and all turtles from the TurtleScreen.

    Reset empty TurtleScreen to its initial state: white background,
    no backgroundimage, no eventbindings and tracing on.

    No argument.

    Example (for a TurtleScreen instance named screen):
    screen.clear()

    Note: this method is not available as function.
    """
    self._delayvalue = _CFG["delay"]
    self._colormode = _CFG["colormode"]
    self._delete("all")
    self._bgpic = self._createimage("")
    self._bgpicname = "nopic"
    self._tracing = 1               # animation on
    self._updatecounter = 0
    self._turtles = []
    self.bgcolor("white")
    # remove all mouse-button and key event bindings
    for btn in 1, 2, 3:
        self.onclick(None, btn)
    for key in self._keys[:]:       # iterate a copy: onkey mutates _keys
        self.onkey(None, key)
    # NOTE(review): resets Turtle._pen -- presumably the module-level
    # anonymous turtle; confirm against the Turtle class definition.
    Turtle._pen = None
def mode(self, mode=None):
    """Set turtle-mode ('standard', 'logo' or 'world') and perform reset;
    with no argument return the current mode.

    'standard': compatible with turtle.py; turtle heads east, angles
                grow counterclockwise.
    'logo':     compatible with most Logo-Turtle-Graphics; turtle heads
                north, angles grow clockwise.
    'world':    userdefined 'worldcoordinates'. *Attention*: in this mode
                angles appear distorted if x/y unit-ratio doesn't equal 1.

    Examples:
    >>> mode('logo')   # resets turtle heading to north
    >>> mode()
    'logo'
    """
    if mode is None:
        return self._mode
    mode = mode.lower()
    if mode not in ("standard", "logo", "world"):
        raise TurtleGraphicsError("No turtle-graphics-mode %s" % mode)
    self._mode = mode
    if mode in ("standard", "logo"):
        # fixed, centered coordinate system
        self._setscrollregion(-self.canvwidth//2, -self.canvheight//2,
                               self.canvwidth//2, self.canvheight//2)
        self.xscale = self.yscale = 1.0
    self.reset()
def setworldcoordinates(self, llx, lly, urx, ury):
    """Set up a user defined coordinate-system.

    Arguments:
    llx -- a number, x-coordinate of lower left corner of canvas
    lly -- a number, y-coordinate of lower left corner of canvas
    urx -- a number, x-coordinate of upper right corner of canvas
    ury -- a number, y-coordinate of upper right corner of canvas

    Set up user coordinate-system and switch to mode 'world' if necessary.
    This performs a screen.reset. If mode 'world' is already active,
    all drawings are redrawn according to the new coordinates.

    But ATTENTION: in user-defined coordinate systems angles may appear
    distorted. (see Screen.mode())

    Example (for a TurtleScreen instance named screen):
    >>> screen.setworldcoordinates(-10,-0.5,50,1.5)
    >>> for _ in range(36):
            left(10)
            forward(0.5)
    """
    if self.mode() != "world":
        self.mode("world")
    # float() guards against integer division in the scale factors below
    xspan = float(urx - llx)
    yspan = float(ury - lly)
    wx, wy = self._window_size()
    self.screensize(wx-20, wy-20)
    oldxscale, oldyscale = self.xscale, self.yscale
    self.xscale = self.canvwidth / xspan
    self.yscale = self.canvheight / yspan
    # scrollregion in canvas pixels corresponding to the user rectangle
    srx1 = llx * self.xscale
    sry1 = -ury * self.yscale
    srx2 = self.canvwidth + srx1
    sry2 = self.canvheight + sry1
    self._setscrollregion(srx1, sry1, srx2, sry2)
    # redraw all existing items at the new scale
    self._rescale(self.xscale/oldxscale, self.yscale/oldyscale)
    self.update()
def register_shape(self, name, shape=None):
    """Adds a turtle shape to TurtleScreen's shapelist.

    Three call forms:
    (1) name is the name of a gif-file, shape is None: installs the
        corresponding image shape.  !! Image-shapes DO NOT rotate when
        turning the turtle, so they do not display its heading !!
    (2) name is an arbitrary string, shape a tuple of coordinate pairs:
        installs the corresponding polygon shape.
    (3) name is an arbitrary string, shape a (compound) Shape object:
        installs the corresponding compound shape.

    To use a shape, issue the command shape(shapename).

    Example (for a TurtleScreen instance named screen):
    >>> screen.register_shape("triangle", ((5,-3),(0,5),(-5,-3)))
    """
    if shape is None:
        # (1) no explicit shape: name must be a gif file
        if not name.lower().endswith(".gif"):
            raise TurtleGraphicsError("Bad arguments for register_shape.\n"
                                      + "Use help(register_shape)" )
        shape = Shape("image", self._image(name))
    elif isinstance(shape, tuple):
        # (2) coordinate pairs make a polygon shape
        shape = Shape("polygon", shape)
    # (3) otherwise shape is assumed to be a Shape instance
    self._shapes[name] = shape
# print "shape added:" , self._shapes
def _colorstr(self, color):
"""Return color string corresponding to args.
Argument may be a string or a tuple of three
numbers corresponding to actual colormode,
i.e. in the range 0<=n<=colormode.
If the argument doesn't represent a color,
an error is raised.
"""
if len(color) == 1:
color = color[0]
if isinstance(color, str):
if self._iscolorstring(color) or color == "":
return color
else:
raise TurtleGraphicsError("bad color string: %s" % str(color))
try:
r, g, b = color
except:
raise TurtleGraphicsError("bad color arguments: %s" % str(color))
if self._colormode == 1.0:
r, g, b = [round(255.0*x) for x in (r, g, b)]
if not ((0 <= r <= 255) and (0 <= g <= 255) and (0 <= b <= 255)):
raise TurtleGraphicsError("bad color sequence: %s" % str(color))
return "#%02x%02x%02x" % (r, g, b)
def _color(self, cstr):
if not cstr.startswith("#"):
return cstr
if len(cstr) == 7:
cl = [int(cstr[i:i+2], 16) for i in (1, 3, 5)]
elif len(cstr) == 4:
cl = [16*int(cstr[h], 16) for h in cstr[1:]]
else:
raise TurtleGraphicsError("bad colorstring: %s" % cstr)
return tuple([c * self._colormode/255 for c in cl])
def colormode(self, cmode=None):
    """Return the colormode or set it to 1.0 or 255.

    cmode -- one of the values 1.0 or 255; r, g, b values of
    colortriples have to be in range 0..cmode.  Other values are
    silently ignored.

    Example (for a TurtleScreen instance named screen):
    >>> screen.colormode(255)
    >>> turtle.pencolor(240,160,80)
    """
    if cmode is None:
        return self._colormode
    if cmode == 1.0:
        self._colormode = 1.0
    elif cmode == 255:
        self._colormode = 255
def reset(self):
    """Reset all Turtles on the Screen to their initial state.

    Example (for a TurtleScreen instance named screen):
    >>> screen.reset()
    """
    for t in self._turtles:
        t._setmode(self._mode)
        t.reset()
def turtles(self):
    """Return the list of turtles on the screen.

    Example (for a TurtleScreen instance named screen):
    >>> screen.turtles()
    [<turtle.Turtle object at 0x00E11FB0>]
    """
    return self._turtles
def bgcolor(self, *args):
    """Set or return the backgroundcolor of the TurtleScreen.

    args (if given): a colorstring, three numbers in the range
    0..colormode, or a 3-tuple of such numbers.

    Example (for a TurtleScreen instance named screen):
    >>> screen.bgcolor("orange")
    >>> screen.bgcolor()
    'orange'
    """
    query = self._colorstr(args) if args else None
    result = self._bgcolor(query)
    if result is not None:
        result = self._color(result)
    return result
def tracer(self, n=None, delay=None):
    """Turn turtle animation on/off and set delay for update drawings.

    n -- nonnegative integer; only each n-th regular screen update is
         really performed (can be used to accelerate the drawing of
         complex graphics).  n=None queries the current value.
    delay -- nonnegative integer, new delay value (see delay()).
    """
    if n is None:
        return self._tracing
    self._tracing = int(n)
    self._updatecounter = 0
    if delay is not None:
        self._delayvalue = int(delay)
    if self._tracing:
        # tracing switched on: refresh the screen immediately
        self.update()
def delay(self, delay=None):
    """Return or set the drawing delay in milliseconds.

    delay -- positive integer, or None to query.
    """
    if delay is not None:
        self._delayvalue = int(delay)
        return
    return self._delayvalue
def _incrementudc(self):
    """Increment update counter."""
    if not TurtleScreen._RUNNING:
        # bugfix: was 'TurtleScreen._RUNNNING = True' (three N's), which
        # created a brand-new attribute instead of re-arming _RUNNING,
        # so the flag stayed False and Terminator was raised forever.
        TurtleScreen._RUNNING = True
        raise Terminator
    if self._tracing > 0:
        self._updatecounter += 1
        self._updatecounter %= self._tracing
def update(self):
    """Perform a TurtleScreen update: redraw all turtles and items."""
    saved = self._tracing
    self._tracing = True          # force drawing while we refresh
    for t in self.turtles():
        t._update_data()
        t._drawturtle()
    self._tracing = saved
    self._update()
def window_width(self):
    """Return the width of the turtle window.

    Example (for a TurtleScreen instance named screen):
    >>> screen.window_width()
    640
    """
    width, _ = self._window_size()
    return width
def window_height(self):
    """Return the height of the turtle window.

    Example (for a TurtleScreen instance named screen):
    >>> screen.window_height()
    480
    """
    _, height = self._window_size()
    return height
def getcanvas(self):
    """Return the Canvas of this TurtleScreen.

    Example (for a Screen instance named screen):
    >>> cv = screen.getcanvas()
    """
    return self.cv
def getshapes(self):
    """Return a sorted list of names of all currently available
    turtle shapes.

    Example (for a TurtleScreen instance named screen):
    >>> screen.getshapes()
    ['arrow', 'blank', 'circle', ... , 'turtle']
    """
    return sorted(self._shapes)
def onclick(self, fun, btn=1, add=None):
    """Bind fun to a mouse-click event on the canvas.

    fun -- a function taking the coordinates of the clicked point,
           or None to remove the binding.
    btn -- the number of the mouse-button, defaults to 1.
    """
    self._onscreenclick(fun, btn, add)
def onkey(self, fun, key):
    """Bind fun to the key-release event of key.

    fun -- a function with no arguments, or None to remove the binding.
    key -- a string: key (e.g. "a") or key-symbol (e.g. "space").

    In order to be able to register key-events, the TurtleScreen
    must have focus (see listen()).
    """
    # keep self._keys in sync with the active bindings
    if fun is None and key in self._keys:
        self._keys.remove(key)
    elif fun is not None and key not in self._keys:
        self._keys.append(key)
    self._onkey(fun, key)
def listen(self, xdummy=None, ydummy=None):
    """Set focus on the TurtleScreen in order to collect key-events.

    The dummy arguments are provided so that listen can be passed
    directly to the onclick method.
    """
    self._listen()
def ontimer(self, fun, t=0):
    """Install a timer which calls fun after t milliseconds.

    fun -- a function with no arguments.
    t -- a number >= 0.
    """
    self._ontimer(fun, t)
def bgpic(self, picname=None):
    """Set the background image, or return the current image's name.

    picname -- filename of a gif-file, "nopic" (removes the image),
               or None (query).

    Example (for a TurtleScreen instance named screen):
    >>> screen.bgpic("landscape.gif")
    >>> screen.bgpic()
    'landscape.gif'
    """
    if picname is None:
        return self._bgpicname
    if picname not in self._bgpics:
        # load and cache the image on first use
        self._bgpics[picname] = self._image(picname)
    self._setbgpic(self._bgpic, self._bgpics[picname])
    self._bgpicname = picname
def screensize(self, canvwidth=None, canvheight=None, bg=None):
    """Resize the canvas the turtles are drawing on (not the window).

    canvwidth, canvheight -- positive integers, new size in pixels
    bg -- colorstring or color-tuple, new backgroundcolor
    With no arguments, return the current (canvaswidth, canvasheight).

    Hidden parts of the canvas can then be observed with the scrollbars.
    """
    return self._resize(canvwidth, canvheight, bg)
# screen-level aliases (kept for backward compatibility / convenience)
onscreenclick = onclick
resetscreen = reset
clearscreen = clear
addshape = register_shape
class TNavigator(object):
    """Navigation part of the RawTurtle.
    Implements methods for turtle movement.
    """
    # unit start-orientation vector for each screen mode
    START_ORIENTATION = {
        "standard": Vec2D(1.0, 0.0),
        "world"   : Vec2D(1.0, 0.0),
        "logo"    : Vec2D(0.0, 1.0) }
    DEFAULT_MODE = "standard"
    DEFAULT_ANGLEOFFSET = 0
    DEFAULT_ANGLEORIENT = 1

    def __init__(self, mode=DEFAULT_MODE):
        self._angleOffset = self.DEFAULT_ANGLEOFFSET
        self._angleOrient = self.DEFAULT_ANGLEORIENT
        self._mode = mode
        self.undobuffer = None
        self.degrees()          # establishes _fullcircle / _degreesPerAU
        self._mode = None       # force _setmode below to run its full body
        self._setmode(mode)
        TNavigator.reset(self)
def reset(self):
    """reset turtle to its initial values

    Will be overwritten by parent class
    """
    self._position = Vec2D(0.0, 0.0)
    # heading depends on the active mode (east for standard/world,
    # north for logo)
    self._orient = TNavigator.START_ORIENTATION[self._mode]
def _setmode(self, mode=None):
"""Set turtle-mode to 'standard', 'world' or 'logo'.
"""
if mode is None:
return self._mode
if mode not in ["standard", "logo", "world"]:
return
self._mode = mode
if mode in ["standard", "world"]:
self._angleOffset = 0
self._angleOrient = 1
else: # mode == "logo":
self._angleOffset = self._fullcircle/4.
self._angleOrient = -1
def _setDegreesPerAU(self, fullcircle):
"""Helper function for degrees() and radians()"""
self._fullcircle = fullcircle
self._degreesPerAU = 360/fullcircle
if self._mode == "standard":
self._angleOffset = 0
else:
self._angleOffset = fullcircle/4.
def degrees(self, fullcircle=360.0):
    """Set angle measurement units to degrees, i.e. set the number of
    'degrees' for a full circle.  Default value is 360 degrees.

    To switch to grad (also known as gon or gradian; 1/100-th of a
    right angle):
    >>> turtle.degrees(400.0)
    """
    self._setDegreesPerAU(fullcircle)
def radians(self):
    """Set the angle measurement units to radians
    (i.e. 2*pi angle units per full circle)."""
    self._setDegreesPerAU(2*math.pi)
def _go(self, distance):
"""move turtle forward by specified distance"""
ende = self._position + self._orient * distance
self._goto(ende)
def _rotate(self, angle):
"""Turn turtle counterclockwise by specified angle if angle > 0."""
angle *= self._degreesPerAU
self._orient = self._orient.rotate(angle)
def _goto(self, end):
"""move turtle to position end."""
self._position = end
def forward(self, distance):
    """Move the turtle forward by the specified distance, in the
    direction the turtle is headed.

    Aliases: forward | fd
    distance -- a number (integer or float); negative values move
    backwards.

    Example (for a Turtle instance named turtle):
    >>> turtle.forward(25)
    """
    self._go(distance)
def back(self, distance):
    """Move the turtle backward by distance, opposite to the direction
    the turtle is headed.  The turtle's heading is not changed.

    Aliases: back | backward | bk
    distance -- a number

    Example (for a Turtle instance named turtle):
    >>> turtle.backward(30)
    """
    self._go(-distance)
def right(self, angle):
    """Turn turtle right by angle units.

    Aliases: right | rt
    angle -- a number (units are by default degrees, but can be set
    via the degrees() and radians() functions; orientation depends
    on mode).

    Example (for a Turtle instance named turtle):
    >>> turtle.right(45)
    """
    self._rotate(-angle)
def left(self, angle):
    """Turn turtle left by angle units.

    Aliases: left | lt
    angle -- a number (units are by default degrees, but can be set
    via the degrees() and radians() functions; orientation depends
    on mode).

    Example (for a Turtle instance named turtle):
    >>> turtle.left(45)
    """
    self._rotate(angle)
def pos(self):
    """Return the turtle's current location (x,y) as a Vec2D-vector.

    Aliases: pos | position
    """
    return self._position
def xcor(self):
    """Return the turtle's x coordinate."""
    x, _ = self._position
    return x
def ycor(self):
    """Return the turtle's y coordinate."""
    _, y = self._position
    return y
def goto(self, x, y=None):
    """Move turtle to an absolute position.  If the pen is down, a line
    is drawn; the turtle's orientation does not change.

    Aliases: setpos | setposition | goto
    call: goto(x, y)        # two coordinates
    --or: goto((x, y))      # a pair (tuple) of coordinates
    --or: goto(vec)         # e.g. as returned by pos()

    Example (for a Turtle instance named turtle):
    >>> turtle.setpos(60,30)
    >>> turtle.pos()
    (60.00,30.00)
    """
    if y is not None:
        self._goto(Vec2D(x, y))
    else:
        # x is already a pair/vector of coordinates
        self._goto(Vec2D(*x))
def home(self):
    """Move turtle to the origin (0,0) and set its heading to its
    start-orientation (which depends on mode).

    Example (for a Turtle instance named turtle):
    >>> turtle.home()
    """
    self.goto(0, 0)
    self.setheading(0)
def setx(self, x):
    """Set the turtle's first coordinate to x; the second coordinate
    remains unchanged.

    x -- a number (integer or float)

    Example (for a Turtle instance named turtle):
    >>> turtle.setx(10)
    """
    self._goto(Vec2D(x, self._position[1]))
def sety(self, y):
    """Set the turtle's second coordinate to y; the first coordinate
    remains unchanged.

    y -- a number (integer or float)

    Example (for a Turtle instance named turtle):
    >>> turtle.sety(-10)
    """
    self._goto(Vec2D(self._position[0], y))
def distance(self, x, y=None):
    """Return the distance from the turtle to (x,y) in turtle step units.

    Arguments:
    x -- a number or a pair/vector of numbers or a turtle instance
    y -- a number or None

    call: distance(x, y)         # two coordinates
    --or: distance((x, y))       # a pair (tuple) of coordinates
    --or: distance(vec)          # e.g. as returned by pos()
    --or: distance(mypen)        # where mypen is another turtle

    Example (for a Turtle instance named turtle):
    >>> turtle.pos()
    (0.00, 0.00)
    >>> turtle.distance(30,40)
    50.0
    >>> pen = Turtle()
    >>> pen.forward(77)
    >>> turtle.distance(pen)
    77.0
    """
    if y is not None:
        pos = Vec2D(x, y)
    # not elif: a Vec2D/tuple/turtle first argument wins over y
    if isinstance(x, Vec2D):
        pos = x
    elif isinstance(x, tuple):
        pos = Vec2D(*x)
    elif isinstance(x, TNavigator):
        pos = x._position
    return abs(pos - self._position)
def towards(self, x, y=None):
    """Return the angle of the line from the turtle's position to (x, y).

    Arguments:
    x -- a number or a pair/vector of numbers or a turtle instance
    y -- a number or None

    call: towards(x, y)         # two coordinates
    --or: towards((x, y))       # a pair (tuple) of coordinates
    --or: towards(vec)          # e.g. as returned by pos()
    --or: towards(mypen)        # where mypen is another turtle

    Return the angle, between the line from turtle-position to position
    specified by x, y and the turtle's start orientation. (Depends on
    modes - "standard" or "logo")

    Example (for a Turtle instance named turtle):
    >>> turtle.pos()
    (10.00, 10.00)
    >>> turtle.towards(0,0)
    225.0
    """
    if y is not None:
        pos = Vec2D(x, y)
    # not elif: a Vec2D/tuple/turtle first argument wins over y
    if isinstance(x, Vec2D):
        pos = x
    elif isinstance(x, tuple):
        pos = Vec2D(*x)
    elif isinstance(x, TNavigator):
        pos = x._position
    x, y = pos - self._position
    # absolute angle in degrees, rounded to kill float noise
    result = round(math.atan2(y, x)*180.0/math.pi, 10) % 360.0
    result /= self._degreesPerAU
    # translate into the current mode's angle convention
    return (self._angleOffset + self._angleOrient*result) % self._fullcircle
def heading(self):
    """Return the turtle's current heading in the current angle units.

    Example (for a Turtle instance named turtle):
    >>> turtle.left(67)
    >>> turtle.heading()
    67.0
    """
    x, y = self._orient
    # absolute angle in degrees, rounded to kill float noise
    angle = round(math.atan2(y, x)*180.0/math.pi, 10) % 360.0
    angle /= self._degreesPerAU
    # translate into the current mode's angle convention
    return (self._angleOffset + self._angleOrient*angle) % self._fullcircle
def setheading(self, to_angle):
    """Set the orientation of the turtle to to_angle.

    Aliases: setheading | seth
    to_angle -- a number (integer or float)

    Common directions in degrees:
     standard - mode:          logo-mode:
    -------------------|--------------------
       0 - east                0 - north
      90 - north              90 - east
     180 - west              180 - south
     270 - south             270 - west
    """
    delta = (to_angle - self.heading())*self._angleOrient
    full = self._fullcircle
    # normalize into [-full/2, full/2) so the turtle takes the short way
    delta = (delta+full/2.)%full - full/2.
    self._rotate(delta)
def circle(self, radius, extent = None, steps = None):
    """ Draw a circle with given radius.

    Arguments:
    radius -- a number
    extent (optional) -- a number
    steps (optional) -- an integer

    Draw a circle with given radius. The center is radius units left
    of the turtle; extent - an angle - determines which part of the
    circle is drawn. If extent is not given, draw the entire circle.
    If extent is not a full circle, one endpoint of the arc is the
    current pen position. Draw the arc in counterclockwise direction
    if radius is positive, otherwise in clockwise direction. Finally
    the direction of the turtle is changed by the amount of extent.

    As the circle is approximated by an inscribed regular polygon,
    steps determines the number of steps to use. If not given,
    it will be calculated automatically. May be used to draw regular
    polygons.

    call: circle(radius)                  # full circle
    --or: circle(radius, extent)          # arc
    --or: circle(radius, extent, steps)
    --or: circle(radius, steps=6)         # 6-sided polygon

    Example (for a Turtle instance named turtle):
    >>> turtle.circle(50)
    >>> turtle.circle(120, 180)  # semicircle
    """
    if self.undobuffer:
        # record the whole circle as a single compound undo entry
        self.undobuffer.push(["seq"])
        self.undobuffer.cumulate = True
    speed = self.speed()
    if extent is None:
        extent = self._fullcircle
    if steps is None:
        # heuristic: more steps for larger radii and larger arcs
        frac = abs(extent)/self._fullcircle
        steps = 1+int(min(11+abs(radius)/6.0, 59.0)*frac)
    w = 1.0 * extent / steps        # turn per polygon step (angle units)
    w2 = 0.5 * w                    # half-turn entering/leaving the arc
    # chord length of one polygon segment
    l = 2.0 * radius * math.sin(w2*math.pi/180.0*self._degreesPerAU)
    if radius < 0:
        l, w, w2 = -l, -w, -w2      # negative radius: clockwise
    tr = self.tracer()
    dl = self._delay()
    # speed 0: suppress per-step animation via the tracer instead
    if speed == 0:
        self.tracer(0, 0)
    else:
        self.speed(0)
    self._rotate(w2)
    for i in range(steps):
        self.speed(speed)
        self._go(l)
        self.speed(0)
        self._rotate(w)
    self._rotate(-w2)
    if speed == 0:
        self.tracer(tr, dl)         # restore tracer state
    self.speed(speed)
    if self.undobuffer:
        self.undobuffer.cumulate = False
## three dummy methods to be implemented by child class:
## (TPen/TurtleScreen-aware subclasses override these; circle() above
## relies on them being callable here.)

def speed(self, s=0):
    """dummy method - to be overwritten by child class"""
def tracer(self, a=None, b=None):
    """dummy method - to be overwritten by child class"""
def _delay(self, n=None):
    """dummy method - to be overwritten by child class"""
# traditional short and alternative names for the movement methods
fd = forward
bk = back
backward = back
rt = right
lt = left
position = pos
setpos = goto
setposition = goto
seth = setheading
class TPen(object):
    """Drawing part of the RawTurtle.
    Implements drawing properties.
    """
    def __init__(self, resizemode=_CFG["resizemode"]):
        self._resizemode = resizemode # or "user" or "noresize"
        self.undobuffer = None
        # initialize all pen attributes to their configured defaults
        TPen._reset(self)
def _reset(self, pencolor=_CFG["pencolor"],
           fillcolor=_CFG["fillcolor"]):
    """Restore all pen attributes to their default values."""
    self._pensize = 1
    self._shown = True
    self._pencolor = pencolor
    self._fillcolor = fillcolor
    self._drawing = True            # pen is down
    self._speed = 3
    self._stretchfactor = (1, 1)    # shape scaling used in "user" resizemode
    self._tilt = 0
    self._outlinewidth = 1
    ### self.screen = None # to override by child class
def resizemode(self, rmode=None):
    """Set resizemode to "auto", "user" or "noresize"; with no argument
    return the current resizemode.

    - "auto": the turtle's appearance follows the value of pensize.
    - "user": the appearance follows stretchfactor and outlinewidth,
      which are set by shapesize() (shapesize with arguments calls
      resizemode("user")).
    - "noresize": no adaption of the turtle's appearance takes place.

    Example (for a Turtle instance named turtle):
    >>> turtle.resizemode("noresize")
    >>> turtle.resizemode()
    'noresize'
    """
    if rmode is None:
        return self._resizemode
    rmode = rmode.lower()
    # unknown modes are silently ignored
    if rmode in ("auto", "user", "noresize"):
        self.pen(resizemode=rmode)
def pensize(self, width=None):
    """Set or return the line thickness.

    Aliases: pensize | width
    width -- positive number, or None to query.

    If resizemode is "auto" and the turtleshape is a polygon, that
    polygon is drawn with the same line thickness.

    Example (for a Turtle instance named turtle):
    >>> turtle.pensize(10)
    """
    if width is None:
        return self._pensize
    self.pen(pensize=width)
def penup(self):
    """Pull the pen up -- no drawing when moving.

    Aliases: penup | pu | up
    """
    if self._drawing:
        self.pen(pendown=False)
def pendown(self):
    """Pull the pen down -- drawing when moving.

    Aliases: pendown | pd | down
    """
    if not self._drawing:
        self.pen(pendown=True)
def isdown(self):
    """Return True if pen is down, False if it's up.

    Example (for a Turtle instance named turtle):
    >>> turtle.penup()
    >>> turtle.isdown()
    False
    """
    return self._drawing
def speed(self, speed=None):
    """Return or set the turtle's speed.

    speed -- an integer in the range 0..10, a speedstring, or None
    (query).  Speedstrings map to speedvalues as follows:
        'fastest' : 0
        'fast'    : 10
        'normal'  : 6
        'slow'    : 3
        'slowest' : 1
    Values greater than 10 or smaller than 0.5 are mapped to 0.
    Attention: speed = 0 means *no* animation takes place;
    forward/back make the turtle jump, left/right turn instantly.
    """
    if speed is None:
        return self._speed
    mapping = {'fastest':0, 'fast':10, 'normal':6, 'slow':3, 'slowest':1 }
    if speed in mapping:
        speed = mapping[speed]
    elif not 0.5 < speed < 10.5:
        speed = 0
    else:
        speed = int(round(speed))
    self.pen(speed=speed)
def color(self, *args):
    """Return or set the pencolor and fillcolor.

    Arguments:
    Several input formats are allowed.
    They use 0, 1, 2, or 3 arguments as follows:

    color()
        Return the current pencolor and the current fillcolor
        as a pair of color specification strings as are returned
        by pencolor and fillcolor.
    color(colorstring), color((r,g,b)), color(r,g,b)
        inputs as in pencolor, set both, fillcolor and pencolor,
        to the given value.
    color(colorstring1, colorstring2),
    color((r1,g1,b1), (r2,g2,b2))
        equivalent to pencolor(colorstring1) and fillcolor(colorstring2)
        and analogously, if the other input format is used.

    If turtleshape is a polygon, outline and interior of that polygon
    is drawn with the newly set colors.
    For more info see: pencolor, fillcolor

    Example (for a Turtle instance named turtle):
    >>> turtle.color('red', 'green')
    >>> turtle.color()
    ('red', 'green')
    >>> colormode(255)
    >>> color((40, 80, 120), (160, 200, 240))
    >>> color()
    ('#285078', '#a0c8f0')
    """
    if not args:
        return self._color(self._pencolor), self._color(self._fillcolor)
    nargs = len(args)
    if nargs == 1:
        # One spec sets both colors.
        pcolor = fcolor = args[0]
    elif nargs == 2:
        pcolor, fcolor = args
    elif nargs == 3:
        # Three numbers form one (r, g, b) triple for both colors.
        pcolor = fcolor = args
    pcolor = self._colorstr(pcolor)
    fcolor = self._colorstr(fcolor)
    self.pen(pencolor=pcolor, fillcolor=fcolor)
def pencolor(self, *args):
    """ Return or set the pencolor.

    Arguments:
    Four input formats are allowed:
      - pencolor()
        Return the current pencolor as color specification string,
        possibly in hex-number format (see example).
        May be used as input to another color/pencolor/fillcolor call.
      - pencolor(colorstring)
        s is a Tk color specification string, such as "red" or "yellow"
      - pencolor((r, g, b))
        *a tuple* of r, g, and b, which represent, an RGB color,
        and each of r, g, and b are in the range 0..colormode,
        where colormode is either 1.0 or 255
      - pencolor(r, g, b)
        r, g, and b represent an RGB color, and each of r, g, and b
        are in the range 0..colormode

    If turtleshape is a polygon, the outline of that polygon is drawn
    with the newly set pencolor.

    Example (for a Turtle instance named turtle):
    >>> turtle.pencolor('brown')
    >>> tup = (0.2, 0.8, 0.55)
    >>> turtle.pencolor(tup)
    >>> turtle.pencolor()
    '#33cc8c'
    """
    if not args:
        return self._color(self._pencolor)
    newcolor = self._colorstr(args)
    # Avoid a pen() call (and undo entry) when nothing changes.
    if newcolor != self._pencolor:
        self.pen(pencolor=newcolor)
def fillcolor(self, *args):
    """ Return or set the fillcolor.

    Arguments:
    Four input formats are allowed:
      - fillcolor()
        Return the current fillcolor as color specification string,
        possibly in hex-number format (see example).
        May be used as input to another color/pencolor/fillcolor call.
      - fillcolor(colorstring)
        s is a Tk color specification string, such as "red" or "yellow"
      - fillcolor((r, g, b))
        *a tuple* of r, g, and b, which represent, an RGB color,
        and each of r, g, and b are in the range 0..colormode,
        where colormode is either 1.0 or 255
      - fillcolor(r, g, b)
        r, g, and b represent an RGB color, and each of r, g, and b
        are in the range 0..colormode

    If turtleshape is a polygon, the interior of that polygon is drawn
    with the newly set fillcolor.

    Example (for a Turtle instance named turtle):
    >>> turtle.fillcolor('violet')
    >>> col = turtle.pencolor()
    >>> turtle.fillcolor(col)
    >>> turtle.fillcolor(0, .5, 0)
    """
    if not args:
        return self._color(self._fillcolor)
    newcolor = self._colorstr(args)
    # Avoid a pen() call (and undo entry) when nothing changes.
    if newcolor != self._fillcolor:
        self.pen(fillcolor=newcolor)
def showturtle(self):
    """Makes the turtle visible.

    Aliases: showturtle | st

    No argument.

    Example (for a Turtle instance named turtle):
    >>> turtle.hideturtle()
    >>> turtle.showturtle()
    """
    # Route through pen() so visibility is undoable and triggers redraw.
    self.pen(shown=True)
def hideturtle(self):
    """Makes the turtle invisible.

    Aliases: hideturtle | ht

    No argument.

    It's a good idea to do this while you're in the
    middle of a complicated drawing, because hiding
    the turtle speeds up the drawing observably.

    Example (for a Turtle instance named turtle):
    >>> turtle.hideturtle()
    """
    # Route through pen() so visibility is undoable and triggers redraw.
    self.pen(shown=False)
def isvisible(self):
    """Return True if the Turtle is shown, False if it's hidden.

    No argument.

    Example (for a Turtle instance named turtle):
    >>> turtle.hideturtle()
    >>> print turtle.isvisible()
    False
    """
    # The flag is maintained by pen()/showturtle()/hideturtle().
    return self._shown
def pen(self, pen=None, **pendict):
    """Return or set the pen's attributes.

    Arguments:
        pen -- a dictionary with some or all of the below listed keys.
        **pendict -- one or more keyword-arguments with the below
                     listed keys as keywords.

    Return or set the pen's attributes in a 'pen-dictionary'
    with the following key/value pairs:
       "shown"      :   True/False
       "pendown"    :   True/False
       "pencolor"   :   color-string or color-tuple
       "fillcolor"  :   color-string or color-tuple
       "pensize"    :   positive number
       "speed"      :   number in range 0..10
       "resizemode" :   "auto" or "user" or "noresize"
       "stretchfactor": (positive number, positive number)
       "outline"    :   positive number
       "tilt"       :   number

    This dictionary can be used as argument for a subsequent
    pen()-call to restore the former pen-state. Moreover one
    or more of these attributes can be provided as keyword-arguments.
    This can be used to set several pen attributes in one statement.

    Examples (for a Turtle instance named turtle):
    >>> turtle.pen(fillcolor="black", pencolor="red", pensize=10)
    >>> turtle.pen()
    {'pensize': 10, 'shown': True, 'resizemode': 'auto', 'outline': 1,
    'pencolor': 'red', 'pendown': True, 'fillcolor': 'black',
    'stretchfactor': (1,1), 'speed': 3}
    >>> penstate=turtle.pen()
    >>> turtle.color("yellow","")
    >>> turtle.penup()
    >>> turtle.pen()
    {'pensize': 10, 'shown': True, 'resizemode': 'auto', 'outline': 1,
    'pencolor': 'yellow', 'pendown': False, 'fillcolor': '',
    'stretchfactor': (1,1), 'speed': 3}
    >>> turtle.pen(penstate, fillcolor="green")
    >>> turtle.pen()
    {'pensize': 10, 'shown': True, 'resizemode': 'auto', 'outline': 1,
    'pencolor': 'red', 'pendown': True, 'fillcolor': 'green',
    'stretchfactor': (1,1), 'speed': 3}
    """
    # Snapshot of the complete current pen state.  Returned verbatim for
    # a no-argument call and used below to build the undo record.
    _pd = {"shown"         : self._shown,
           "pendown"       : self._drawing,
           "pencolor"      : self._pencolor,
           "fillcolor"     : self._fillcolor,
           "pensize"       : self._pensize,
           "speed"         : self._speed,
           "resizemode"    : self._resizemode,
           "stretchfactor" : self._stretchfactor,
           "outline"       : self._outlinewidth,
           "tilt"          : self._tilt
          }
    if not (pen or pendict):
        return _pd
    # Merge the positional dict (if any) with the keyword arguments;
    # keywords win on conflict since they are applied by update().
    if isinstance(pen, dict):
        p = pen
    else:
        p = {}
    p.update(pendict)
    # Record the *previous* values of every attribute about to change,
    # so undo() can restore them.
    _p_buf = {}
    for key in p:
        _p_buf[key] = _pd[key]
    if self.undobuffer:
        self.undobuffer.push(("pen", _p_buf))
    # Changing pendown state, pencolor or pensize ends the line item
    # currently being drawn and starts a new one.
    newLine = False
    if "pendown" in p:
        if self._drawing != p["pendown"]:
            newLine = True
    if "pencolor" in p:
        # Color tuples are normalized to Tk color strings first.
        if isinstance(p["pencolor"], tuple):
            p["pencolor"] = self._colorstr((p["pencolor"],))
        if self._pencolor != p["pencolor"]:
            newLine = True
    if "pensize" in p:
        if self._pensize != p["pensize"]:
            newLine = True
    if newLine:
        self._newLine()
    # Now commit all requested attribute changes.
    if "pendown" in p:
        self._drawing = p["pendown"]
    if "pencolor" in p:
        self._pencolor = p["pencolor"]
    if "pensize" in p:
        self._pensize = p["pensize"]
    if "fillcolor" in p:
        if isinstance(p["fillcolor"], tuple):
            p["fillcolor"] = self._colorstr((p["fillcolor"],))
        self._fillcolor = p["fillcolor"]
    if "speed" in p:
        self._speed = p["speed"]
    if "resizemode" in p:
        self._resizemode = p["resizemode"]
    if "stretchfactor" in p:
        sf = p["stretchfactor"]
        # A single number means uniform stretch in both directions.
        if isinstance(sf, (int, float)):
            sf = (sf, sf)
        self._stretchfactor = sf
    if "outline" in p:
        self._outlinewidth = p["outline"]
    if "shown" in p:
        self._shown = p["shown"]
    if "tilt" in p:
        self._tilt = p["tilt"]
    self._update()
## three dummy methods to be implemented by child class:
# (RawTurtle overrides these; TPen can be used stand-alone without them
# doing anything.)

def _newLine(self, usePos = True):
    """dummy method - to be overwritten by child class"""

def _update(self, count=True, forced=False):
    """dummy method - to be overwritten by child class"""

def _color(self, args):
    """dummy method - to be overwritten by child class"""

def _colorstr(self, args):
    """dummy method - to be overwritten by child class"""
# Short method aliases, kept for backward compatibility with earlier
# versions of the turtle module.
width = pensize
up = penup
pu = penup
pd = pendown
down = pendown
st = showturtle
ht = hideturtle
class _TurtleImage(object):
    """Helper class: Datatype to store Turtle attributes
    """

    def __init__(self, screen, shapeIndex):
        # screen: the TurtleScreen whose canvas holds this turtle's items.
        self.screen = screen
        # _type caches the shape type ("polygon", "image" or "compound");
        # None means no canvas item has been created yet.
        self._type = None
        self._setshape(shapeIndex)

    def _setshape(self, shapeIndex):
        # Switch to the shape named shapeIndex, creating or reusing the
        # underlying canvas item(s) as needed.
        screen = self.screen # RawTurtle.screens[self.screenIndex]
        self.shapeIndex = shapeIndex
        # Same type as before: the existing canvas item(s) can be reused,
        # only the shape data (drawn later) changes.
        if self._type == "polygon" == screen._shapes[shapeIndex]._type:
            return
        if self._type == "image" == screen._shapes[shapeIndex]._type:
            return
        # Type changed: delete the old canvas item(s) ...
        if self._type in ["image", "polygon"]:
            screen._delete(self._item)
        elif self._type == "compound":
            # compound shapes own one canvas item per component
            for item in self._item:
                screen._delete(item)
        # ... and create fresh one(s) matching the new type.
        self._type = screen._shapes[shapeIndex]._type
        if self._type == "polygon":
            self._item = screen._createpoly()
        elif self._type == "image":
            self._item = screen._createimage(screen._shapes["blank"]._data)
        elif self._type == "compound":
            self._item = [screen._createpoly() for item in
                          screen._shapes[shapeIndex]._data]
class RawTurtle(TPen, TNavigator):
"""Animation part of the RawTurtle.
Puts RawTurtle upon a TurtleScreen and provides tools for
its animation.
"""
screens = []
def __init__(self, canvas=None,
             shape=_CFG["shape"],
             undobuffersize=_CFG["undobuffersize"],
             visible=_CFG["visible"]):
    """Create a RawTurtle and put it onto the given canvas/screen.

    Arguments:
    canvas -- a _Screen, TurtleScreen, ScrolledCanvas or Canvas instance
    shape -- name of the turtle's initial shape (see shape())
    undobuffersize -- capacity of the turtle's undo buffer
    visible -- initial visibility of the turtle

    Raises TurtleGraphicsError if canvas is none of the accepted types.
    """
    if isinstance(canvas, _Screen):
        self.screen = canvas
    elif isinstance(canvas, TurtleScreen):
        # Remember every TurtleScreen so that later turtles created on
        # the same raw canvas can find and reuse it.
        if canvas not in RawTurtle.screens:
            RawTurtle.screens.append(canvas)
        self.screen = canvas
    elif isinstance(canvas, (ScrolledCanvas, Canvas)):
        # Reuse an existing TurtleScreen wrapping this canvas, if any;
        # otherwise create (and register) a new one.
        for screen in RawTurtle.screens:
            if screen.cv == canvas:
                self.screen = screen
                break
        else:
            self.screen = TurtleScreen(canvas)
            RawTurtle.screens.append(self.screen)
    else:
        # Bug fix: error message read "bad cavas argument".
        raise TurtleGraphicsError("bad canvas argument %s" % canvas)
    screen = self.screen
    TNavigator.__init__(self, screen.mode())
    TPen.__init__(self)
    screen._turtles.append(self)
    # Canvas item used for the line segment currently being animated.
    self.drawingLineItem = screen._createline()
    self.turtle = _TurtleImage(screen, shape)
    self._poly = None
    self._creatingPoly = False
    self._fillitem = self._fillpath = None
    self._shown = visible
    self._hidden_from_screen = False
    # Canvas item and point list for the line currently being drawn.
    self.currentLineItem = screen._createline()
    self.currentLine = [self._position]
    self.items = [self.currentLineItem]
    self.stampItems = []
    self._undobuffersize = undobuffersize
    self.undobuffer = Tbuffer(undobuffersize)
    self._update()
def reset(self):
    """Delete the turtle's drawings and restore its default values.

    No argument.

    Delete the turtle's drawings from the screen, re-center the turtle
    and set variables to the default values.

    Example (for a Turtle instance named turtle):
    >>> turtle.position()
    (0.00,-22.00)
    >>> turtle.heading()
    100.0
    >>> turtle.reset()
    >>> turtle.position()
    (0.00,0.00)
    >>> turtle.heading()
    0.0
    """
    TNavigator.reset(self)    # reset position and heading
    TPen._reset(self)         # reset pen attributes
    self._clear()             # remove this turtle's drawings and stamps
    self._drawturtle()
    self._update()
def setundobuffer(self, size):
    """Set or disable undobuffer.

    Argument:
    size -- an integer or None

    If size is an integer an empty undobuffer of given size is installed.
    Size gives the maximum number of turtle-actions that can be undone
    by the undo() function.
    If size is None, no undobuffer is present.

    Example (for a Turtle instance named turtle):
    >>> turtle.setundobuffer(42)
    """
    # None disables undo entirely; any integer installs a fresh buffer.
    self.undobuffer = None if size is None else Tbuffer(size)
def undobufferentries(self):
    """Return count of entries in the undobuffer.

    No argument.

    Example (for a Turtle instance named turtle):
    >>> while undobufferentries():
            undo()
    """
    buf = self.undobuffer
    # With undo disabled (buffer is None) there is nothing to undo.
    return 0 if buf is None else buf.nr_of_items()
def _clear(self):
    """Delete all of pen's drawings"""
    self._fillitem = self._fillpath = None
    # Remove every canvas item this turtle has created so far.
    for item in self.items:
        self.screen._delete(item)
    self.currentLineItem = self.screen._createline()
    self.currentLine = []
    if self._drawing:
        # A pen that is down immediately starts a new line at the
        # current position.
        self.currentLine.append(self._position)
    self.items = [self.currentLineItem]
    self.clearstamps()
    # Start over with a fresh, empty undo buffer.
    self.setundobuffer(self._undobuffersize)
def clear(self):
    """Delete the turtle's drawings from the screen. Do not move turtle.

    No arguments.

    Delete the turtle's drawings from the screen. Do not move turtle.
    State and position of the turtle as well as drawings of other
    turtles are not affected.

    Examples (for a Turtle instance named turtle):
    >>> turtle.clear()
    """
    # Remove this turtle's items, then refresh the screen.
    self._clear()
    self._update()
def _update_data(self):
self.screen._incrementudc()
if self.screen._updatecounter != 0:
return
if len(self.currentLine)>1:
self.screen._drawline(self.currentLineItem, self.currentLine,
self._pencolor, self._pensize)
def _update(self):
    """Perform a Turtle-data update.
    """
    screen = self.screen
    if screen._tracing == 0:
        # Animation switched off: nothing is drawn until tracer() is
        # turned back on.
        return
    elif screen._tracing == 1:
        # Normal tracing: draw pending line data plus the turtle, then
        # refresh the screen and wait the configured delay.
        self._update_data()
        self._drawturtle()
        screen._update()                  # TurtleScreenBase
        screen._delay(screen._delayvalue) # TurtleScreenBase
    else:
        # Batched tracing (tracer(n), n > 1): _update_data increments a
        # counter; only when it wraps to 0 are all turtles redrawn.
        self._update_data()
        if screen._updatecounter == 0:
            for t in screen.turtles():
                t._drawturtle()
            screen._update()
def tracer(self, flag=None, delay=None):
    """Turns turtle animation on/off and set delay for update drawings.

    Optional arguments:
    n -- nonnegative  integer
    delay -- nonnegative  integer

    If n is given, only each n-th regular screen update is really performed.
    (Can be used to accelerate the drawing of complex graphics.)
    Second arguments sets delay value (see RawTurtle.delay())

    Example (for a Turtle instance named turtle):
    >>> turtle.tracer(8, 25)
    >>> dist = 2
    >>> for i in range(200):
            turtle.fd(dist)
            turtle.rt(90)
            dist += 2
    """
    # Tracing is a screen-wide setting; simply delegate.
    return self.screen.tracer(flag, delay)
def _color(self, args):
    # Delegate color conversion (internal -> user representation)
    # to the screen, which knows the current colormode.
    return self.screen._color(args)

def _colorstr(self, args):
    # Delegate conversion of user color arguments into a Tk color
    # string to the screen.
    return self.screen._colorstr(args)
def _cc(self, args):
"""Convert colortriples to hexstrings.
"""
if isinstance(args, str):
return args
try:
r, g, b = args
except:
raise TurtleGraphicsError("bad color arguments: %s" % str(args))
if self.screen._colormode == 1.0:
r, g, b = [round(255.0*x) for x in (r, g, b)]
if not ((0 <= r <= 255) and (0 <= g <= 255) and (0 <= b <= 255)):
raise TurtleGraphicsError("bad color sequence: %s" % str(args))
return "#%02x%02x%02x" % (r, g, b)
def clone(self):
    """Create and return a clone of the turtle.

    No argument.

    Create and return a clone of the turtle with same position, heading
    and turtle properties.

    Example (for a Turtle instance named mick):
    mick = Turtle()
    joe = mick.clone()
    """
    screen = self.screen
    self._newLine(self._drawing)

    # The screen and the _TurtleImage hold Tk resources that deepcopy
    # cannot handle; detach them, copy, then restore.
    turtle = self.turtle
    self.screen = None
    self.turtle = None  # to make self deepcopy-able

    q = deepcopy(self)

    self.screen = screen
    self.turtle = turtle

    q.screen = screen
    # The clone needs its own image item(s) on the canvas.
    q.turtle = _TurtleImage(screen, self.turtle.shapeIndex)

    screen._turtles.append(q)
    ttype = screen._shapes[self.turtle.shapeIndex]._type
    if ttype == "polygon":
        q.turtle._item = screen._createpoly()
    elif ttype == "image":
        q.turtle._item = screen._createimage(screen._shapes["blank"]._data)
    elif ttype == "compound":
        q.turtle._item = [screen._createpoly() for item in
                          screen._shapes[self.turtle.shapeIndex]._data]
    q.currentLineItem = screen._createline()
    q._update()
    return q
def shape(self, name=None):
    """Set turtle shape to shape with given name / return current shapename.

    Optional argument:
    name -- a string, which is a valid shapename

    Set turtle shape to shape with given name or, if name is not given,
    return name of current shape.
    Shape with name must exist in the TurtleScreen's shape dictionary.
    Initially there are the following polygon shapes:
    'arrow', 'turtle', 'circle', 'square', 'triangle', 'classic'.
    To learn about how to deal with shapes see Screen-method register_shape.

    Example (for a Turtle instance named turtle):
    >>> turtle.shape()
    'arrow'
    >>> turtle.shape("turtle")
    >>> turtle.shape()
    'turtle'
    """
    if name is None:
        return self.turtle.shapeIndex
    # Reject names the screen does not know about.
    if name not in self.screen.getshapes():
        raise TurtleGraphicsError("There is no shape named %s" % name)
    self.turtle._setshape(name)
    self._update()
def shapesize(self, stretch_wid=None, stretch_len=None, outline=None):
    """Set/return turtle's stretchfactors/outline. Set resizemode to "user".

    Optional arguments:
       stretch_wid : positive number
       stretch_len : positive number
       outline  : positive number

    Return or set the pen's attributes x/y-stretchfactors and/or outline.
    Set resizemode to "user".
    If and only if resizemode is set to "user", the turtle will be displayed
    stretched according to its stretchfactors:
    stretch_wid is stretchfactor perpendicular to orientation
    stretch_len is stretchfactor in direction of turtles orientation.
    outline determines the width of the shapes's outline.

    Examples (for a Turtle instance named turtle):
    >>> turtle.resizemode("user")
    >>> turtle.shapesize(5, 5, 12)
    >>> turtle.shapesize(outline=8)
    """
    # No arguments at all: report current values.
    if stretch_wid is None and stretch_len is None and outline is None:
        wid, length = self._stretchfactor
        return wid, length, self._outlinewidth
    # Fill in omitted stretch components from the current state;
    # a single width also serves as the length.
    if stretch_wid is not None:
        if stretch_len is None:
            factors = (stretch_wid, stretch_wid)
        else:
            factors = (stretch_wid, stretch_len)
    elif stretch_len is not None:
        factors = (self._stretchfactor[0], stretch_len)
    else:
        factors = self._stretchfactor
    if outline is None:
        outline = self._outlinewidth
    self.pen(resizemode="user",
             stretchfactor=factors, outline=outline)
def settiltangle(self, angle):
    """Rotate the turtleshape to point in the specified direction

    Optional argument:
    angle -- number

    Rotate the turtleshape to point in the direction specified by angle,
    regardless of its current tilt-angle. DO NOT change the turtle's
    heading (direction of movement).

    Examples (for a Turtle instance named turtle):
    >>> turtle.shape("circle")
    >>> turtle.shapesize(5,2)
    >>> turtle.settiltangle(45)
    >>> stamp()
    >>> turtle.fd(50)
    >>> turtle.settiltangle(-45)
    >>> stamp()
    >>> turtle.fd(50)
    """
    # Convert the user angle (in the current angle units/orientation)
    # into internal radians, normalized to [0, 2*pi).
    rad = -angle * self._degreesPerAU * self._angleOrient
    rad = (rad * math.pi / 180.0) % (2*math.pi)
    self.pen(resizemode="user", tilt=rad)
def tiltangle(self):
    """Return the current tilt-angle.

    No argument.

    Return the current tilt-angle, i. e. the angle between the
    orientation of the turtleshape and the heading of the turtle
    (its direction of movement).

    Examples (for a Turtle instance named turtle):
    >>> turtle.shape("circle")
    >>> turtle.shapesize(5,2)
    >>> turtle.tilt(45)
    >>> turtle.tiltangle()
    >>>
    """
    # Convert the internally stored radians back into the user's
    # angle units and orientation, normalized to a full circle.
    degrees = -self._tilt * (180.0/math.pi) * self._angleOrient
    return (degrees / self._degreesPerAU) % self._fullcircle
def tilt(self, angle):
    """Rotate the turtleshape by angle.

    Argument:
    angle - a number

    Rotate the turtleshape by angle from its current tilt-angle,
    but do NOT change the turtle's heading (direction of movement).

    Examples (for a Turtle instance named turtle):
    >>> turtle.shape("circle")
    >>> turtle.shapesize(5,2)
    >>> turtle.tilt(30)
    >>> turtle.fd(50)
    >>> turtle.tilt(30)
    >>> turtle.fd(50)
    """
    # Relative tilt = current absolute tilt plus the given offset.
    self.settiltangle(self.tiltangle() + angle)
def _polytrafo(self, poly):
    """Computes transformed polygon shapes from a shape
    according to current position and heading.
    """
    screen = self.screen
    p0, p1 = self._position
    e0, e1 = self._orient
    # Correct the heading vector for unequal x/y screen scales, then
    # renormalize it to unit length.
    e = Vec2D(e0, e1 * screen.yscale / screen.xscale)
    e0, e1 = (1.0 / abs(e)) * e
    # Rotate each shape point into the heading's frame and translate it
    # to the turtle's position, dividing by the screen scales to get
    # canvas-independent coordinates.
    return [(p0+(e1*x+e0*y)/screen.xscale, p1+(-e0*x+e1*y)/screen.yscale)
            for (x, y) in poly]
def _drawturtle(self):
    """Manages the correct rendering of the turtle with respect to
    its shape, resizemode, stretch and tilt etc."""
    screen = self.screen
    shape = screen._shapes[self.turtle.shapeIndex]
    ttype = shape._type
    titem = self.turtle._item
    if self._shown and screen._updatecounter == 0 and screen._tracing > 0:
        # Turtle is visible and a real redraw is due.
        self._hidden_from_screen = False
        tshape = shape._data
        if ttype == "polygon":
            if self._resizemode == "noresize":
                w = 1
                shape = tshape
            else:
                if self._resizemode == "auto":
                    # Shape grows with the pen: stretch and outline
                    # width are derived from pensize.
                    lx = ly = max(1, self._pensize/5.0)
                    w = self._pensize
                    tiltangle = 0
                elif self._resizemode == "user":
                    lx, ly = self._stretchfactor
                    w = self._outlinewidth
                    tiltangle = self._tilt
                shape = [(lx*x, ly*y) for (x, y) in tshape]
                # Rotate the stretched shape by the tilt angle.
                t0, t1 = math.sin(tiltangle), math.cos(tiltangle)
                shape = [(t1*x+t0*y, -t0*x+t1*y) for (x, y) in shape]
            # Place the shape at the current position/heading.
            shape = self._polytrafo(shape)
            fc, oc = self._fillcolor, self._pencolor
            screen._drawpoly(titem, shape, fill=fc, outline=oc,
                                                  width=w, top=True)
        elif ttype == "image":
            screen._drawimage(titem, self._position, tshape)
        elif ttype == "compound":
            # Each component polygon carries its own fill/outline colors.
            lx, ly = self._stretchfactor
            w = self._outlinewidth
            for item, (poly, fc, oc) in zip(titem, tshape):
                poly = [(lx*x, ly*y) for (x, y) in poly]
                poly = self._polytrafo(poly)
                screen._drawpoly(item, poly, fill=self._cc(fc),
                                 outline=self._cc(oc), width=w, top=True)
    else:
        # Turtle hidden (or redraw suppressed): blank the canvas items
        # once, then remember that state to avoid redundant work.
        if self._hidden_from_screen:
            return
        if ttype == "polygon":
            screen._drawpoly(titem, ((0, 0), (0, 0), (0, 0)), "", "")
        elif ttype == "image":
            screen._drawimage(titem, self._position,
                                      screen._shapes["blank"]._data)
        elif ttype == "compound":
            for item in titem:
                screen._drawpoly(item, ((0, 0), (0, 0), (0, 0)), "", "")
        self._hidden_from_screen = True
############################## stamp stuff ###############################
def stamp(self):
    """Stamp a copy of the turtleshape onto the canvas and return its id.

    No argument.

    Stamp a copy of the turtle shape onto the canvas at the current
    turtle position. Return a stamp_id for that stamp, which can be
    used to delete it by calling clearstamp(stamp_id).

    Example (for a Turtle instance named turtle):
    >>> turtle.color("blue")
    >>> turtle.stamp()
    13
    >>> turtle.fd(50)
    """
    # The drawing below mirrors _drawturtle(), but targets freshly
    # created canvas item(s) that persist independently of the turtle.
    screen = self.screen
    shape = screen._shapes[self.turtle.shapeIndex]
    ttype = shape._type
    tshape = shape._data
    if ttype == "polygon":
        stitem = screen._createpoly()
        if self._resizemode == "noresize":
            w = 1
            shape = tshape
        else:
            if self._resizemode == "auto":
                # Shape grows with the pen (cf. _drawturtle).
                lx = ly = max(1, self._pensize/5.0)
                w = self._pensize
                tiltangle = 0
            elif self._resizemode == "user":
                lx, ly = self._stretchfactor
                w = self._outlinewidth
                tiltangle = self._tilt
            shape = [(lx*x, ly*y) for (x, y) in tshape]
            # Rotate the stretched shape by the tilt angle.
            t0, t1 = math.sin(tiltangle), math.cos(tiltangle)
            shape = [(t1*x+t0*y, -t0*x+t1*y) for (x, y) in shape]
        shape = self._polytrafo(shape)
        fc, oc = self._fillcolor, self._pencolor
        screen._drawpoly(stitem, shape, fill=fc, outline=oc,
                                              width=w, top=True)
    elif ttype == "image":
        stitem = screen._createimage("")
        screen._drawimage(stitem, self._position, tshape)
    elif ttype == "compound":
        # One canvas item per component; the tuple of items is the id.
        stitem = []
        for element in tshape:
            item = screen._createpoly()
            stitem.append(item)
        stitem = tuple(stitem)
        lx, ly = self._stretchfactor
        w = self._outlinewidth
        for item, (poly, fc, oc) in zip(stitem, tshape):
            poly = [(lx*x, ly*y) for (x, y) in poly]
            poly = self._polytrafo(poly)
            screen._drawpoly(item, poly, fill=self._cc(fc),
                             outline=self._cc(oc), width=w, top=True)
    # Remember the stamp for clearstamp(s)() and make it undoable.
    self.stampItems.append(stitem)
    self.undobuffer.push(("stamp", stitem))
    return stitem
def _clearstamp(self, stampid):
    """does the work for clearstamp() and clearstamps()
    """
    if stampid in self.stampItems:
        if isinstance(stampid, tuple):
            # Compound stamps consist of several canvas items.
            for subitem in stampid:
                self.screen._delete(subitem)
        else:
            self.screen._delete(stampid)
        self.stampItems.remove(stampid)
    # Delete stampitem from undobuffer if necessary
    # if clearstamp is called directly.
    item = ("stamp", stampid)
    buf = self.undobuffer
    if item not in buf.buffer:
        return
    index = buf.buffer.index(item)
    buf.buffer.remove(item)
    if index <= buf.ptr:
        buf.ptr = (buf.ptr - 1) % buf.bufsize
    # Keep the ring-buffer length constant by inserting a no-op entry.
    buf.buffer.insert((buf.ptr+1)%buf.bufsize, [None])
def clearstamp(self, stampid):
    """Delete stamp with given stampid

    Argument:
    stampid - an integer, must be return value of previous stamp() call.

    Example (for a Turtle instance named turtle):
    >>> turtle.color("blue")
    >>> astamp = turtle.stamp()
    >>> turtle.fd(50)
    >>> turtle.clearstamp(astamp)
    """
    # _clearstamp removes canvas items and undo entries; then refresh.
    self._clearstamp(stampid)
    self._update()
def clearstamps(self, n=None):
    """Delete all or first/last n of turtle's stamps.

    Optional argument:
    n -- an integer

    If n is None, delete all of pen's stamps,
    else if n > 0 delete first n stamps
    else if n < 0 delete last n stamps.

    Example (for a Turtle instance named turtle):
    >>> for i in range(8):
            turtle.stamp(); turtle.fd(30)
    ...
    >>> turtle.clearstamps(2)
    >>> turtle.clearstamps(-2)
    >>> turtle.clearstamps()
    """
    # Work on a copy: _clearstamp mutates self.stampItems while we loop.
    if n is None:
        doomed = self.stampItems[:]
    elif n >= 0:
        doomed = self.stampItems[:n]
    else:
        doomed = self.stampItems[n:]
    for stampid in doomed:
        self._clearstamp(stampid)
    self._update()
def _goto(self, end):
    """Move the pen to the point end, thereby drawing a line
    if pen is down. All other methodes for turtle movement depend
    on this one.
    """
    ## version with undo support
    # Pen state snapshot stored with the undo entry, so _undogoto can
    # redraw the reversed movement faithfully.
    go_modes = ( self._drawing,
                 self._pencolor,
                 self._pensize,
                 isinstance(self._fillpath, list))
    screen = self.screen
    undo_entry = ("go", self._position, end, go_modes,
                  (self.currentLineItem,
                  self.currentLine[:],
                  screen._pointlist(self.currentLineItem),
                  self.items[:])
                  )
    if self.undobuffer:
        self.undobuffer.push(undo_entry)
    start = self._position
    if self._speed and screen._tracing == 1:
        # Animated move: draw the segment in nhops intermediate steps.
        # The hop count shrinks as speed grows.
        diff = (end-start)
        diffsq = (diff[0]*screen.xscale)**2 + (diff[1]*screen.yscale)**2
        nhops = 1+int((diffsq**0.5)/(3*(1.1**self._speed)*self._speed))
        delta = diff * (1.0/nhops)
        for n in range(1, nhops):
            if n == 1:
                top = True
            else:
                top = False
            self._position = start + delta * n
            if self._drawing:
                screen._drawline(self.drawingLineItem,
                                 (start, self._position),
                                 self._pencolor, self._pensize, top)
            self._update()
        if self._drawing:
            # Reset the temporary animation line to a degenerate segment.
            screen._drawline(self.drawingLineItem, ((0, 0), (0, 0)),
                             fill="", width=self._pensize)
    # Turtle now at end,
    if self._drawing: # now update currentLine
        self.currentLine.append(end)
    if isinstance(self._fillpath, list):
        self._fillpath.append(end)
    ###### vererbung!!!!!!!!!!!!!!!!!!!!!!
    self._position = end
    if self._creatingPoly:
        self._poly.append(end)
    if len(self.currentLine) > 42: # 42! answer to the ultimate question
                                   # of life, the universe and everything
        # Overlong line items slow down _drawline; start a fresh one.
        self._newLine()
    self._update() #count=True)
def _undogoto(self, entry):
    """Reverse a _goto. Used for undo()
    """
    # entry layout matches the undo_entry pushed by _goto().
    old, new, go_modes, coodata = entry
    drawing, pc, ps, filling = go_modes
    cLI, cL, pl, items = coodata
    screen = self.screen
    if abs(self._position - new) > 0.5:
        # Sanity check: the turtle should currently be at 'new'.
        print "undogoto: HALLO-DA-STIMMT-WAS-NICHT!"
    # restore former situation
    self.currentLineItem = cLI
    self.currentLine = cL

    if pl == [(0, 0), (0, 0)]:
        # Degenerate point list marks an empty line item; draw invisibly.
        usepc = ""
    else:
        usepc = pc
    screen._drawline(cLI, pl, fill=usepc, width=ps)

    # Remove line items created after the recorded snapshot.
    todelete = [i for i in self.items if (i not in items) and
                (screen._type(i) == "line")]
    for i in todelete:
        screen._delete(i)
        self.items.remove(i)

    start = old
    if self._speed and screen._tracing == 1:
        # Animate the reversed movement, mirroring _goto()'s hop logic.
        diff = old - new
        diffsq = (diff[0]*screen.xscale)**2 + (diff[1]*screen.yscale)**2
        nhops = 1+int((diffsq**0.5)/(3*(1.1**self._speed)*self._speed))
        delta = diff * (1.0/nhops)
        for n in range(1, nhops):
            if n == 1:
                top = True
            else:
                top = False
            self._position = new + delta * n
            if drawing:
                screen._drawline(self.drawingLineItem,
                                 (start, self._position),
                                 pc, ps, top)
            self._update()
        if drawing:
            # Reset the temporary animation line.
            screen._drawline(self.drawingLineItem, ((0, 0), (0, 0)),
                             fill="", width=ps)
    # Turtle now at position old,
    self._position = old
    ## if undo is done during creating a polygon, the last vertex
    ## will be deleted. if the polygon is entirely deleted,
    ## creatingPoly will be set to False.
    ## Polygons created before the last one will not be affected by undo()
    if self._creatingPoly:
        if len(self._poly) > 0:
            self._poly.pop()
        if self._poly == []:
            self._creatingPoly = False
            self._poly = None
    if filling:
        if self._fillpath == []:
            # Should not happen: fill path emptied out during undo.
            self._fillpath = None
            print "Unwahrscheinlich in _undogoto!"
        elif self._fillpath is not None:
            self._fillpath.pop()
    self._update() #count=True)
def _rotate(self, angle):
    """Turns pen clockwise by angle.
    """
    if self.undobuffer:
        self.undobuffer.push(("rot", angle, self._degreesPerAU))
    # Convert from the user's angle units to degrees.
    angle *= self._degreesPerAU
    neworient = self._orient.rotate(angle)
    tracing = self.screen._tracing
    if tracing == 1 and self._speed > 0:
        # Animate the turn in small increments; step size grows with speed.
        anglevel = 3.0 * self._speed
        steps = 1 + int(abs(angle)/anglevel)
        delta = 1.0*angle/steps
        for _ in range(steps):
            self._orient = self._orient.rotate(delta)
            self._update()
    # Set the exact final orientation to avoid accumulated rounding error.
    self._orient = neworient
    self._update()
def _newLine(self, usePos=True):
"""Closes current line item and starts a new one.
Remark: if current line became too long, animation
performance (via _drawline) slowed down considerably.
"""
if len(self.currentLine) > 1:
self.screen._drawline(self.currentLineItem, self.currentLine,
self._pencolor, self._pensize)
self.currentLineItem = self.screen._createline()
self.items.append(self.currentLineItem)
else:
self.screen._drawline(self.currentLineItem, top=True)
self.currentLine = []
if usePos:
self.currentLine = [self._position]
def fill(self, flag=None):
    """Call fill(True) before drawing a shape to fill, fill(False) when done.

    Optional argument:
    flag -- True/False (or 1/0 respectively)

    Call fill(True) before drawing the shape you want to fill,
    and  fill(False) when done.
    When used without argument: return fillstate (True if filling,
    False else)

    Example (for a Turtle instance named turtle):
    >>> turtle.fill(True)
    >>> turtle.forward(100)
    >>> turtle.left(90)
    >>> turtle.forward(100)
    >>> turtle.left(90)
    >>> turtle.forward(100)
    >>> turtle.left(90)
    >>> turtle.forward(100)
    >>> turtle.fill(False)
    """
    # A list in _fillpath means "currently collecting fill vertices".
    filling = isinstance(self._fillpath, list)
    if flag is None:
        return filling
    screen = self.screen
    # entry1/entry2 become undo records for finishing and/or starting
    # a fill; either may remain empty.
    entry1 = entry2 = ()
    if filling:
        if len(self._fillpath) > 2:
            # Finish the pending fill: draw the collected polygon.
            self.screen._drawpoly(self._fillitem, self._fillpath,
                                  fill=self._fillcolor)
            entry1 = ("dofill", self._fillitem)
    if flag:
        # Start a new fill from the current position.
        self._fillitem = self.screen._createpoly()
        self.items.append(self._fillitem)
        self._fillpath = [self._position]
        entry2 = ("beginfill", self._fillitem) # , self._fillpath)
        self._newLine()
    else:
        self._fillitem = self._fillpath = None
    if self.undobuffer:
        # Push a single entry, or a "seq" pair when a fill was both
        # finished and started in this call.
        if entry1 == ():
            if entry2 != ():
                self.undobuffer.push(entry2)
        else:
            if entry2 == ():
                self.undobuffer.push(entry1)
            else:
                self.undobuffer.push(["seq", entry1, entry2])
    self._update()
def begin_fill(self):
    """Called just before drawing a shape to be filled.

    No argument.

    Example (for a Turtle instance named turtle):
    >>> turtle.begin_fill()
    >>> turtle.forward(100)
    >>> turtle.left(90)
    >>> turtle.forward(100)
    >>> turtle.left(90)
    >>> turtle.forward(100)
    >>> turtle.left(90)
    >>> turtle.forward(100)
    >>> turtle.end_fill()
    """
    # Thin convenience wrapper around fill().
    self.fill(True)
def end_fill(self):
    """Fill the shape drawn after the call begin_fill().

    No argument.

    Example (for a Turtle instance named turtle):
    >>> turtle.begin_fill()
    >>> turtle.forward(100)
    >>> turtle.left(90)
    >>> turtle.forward(100)
    >>> turtle.left(90)
    >>> turtle.forward(100)
    >>> turtle.left(90)
    >>> turtle.forward(100)
    >>> turtle.end_fill()
    """
    # Thin convenience wrapper around fill().
    self.fill(False)
def dot(self, size=None, *color):
    """Draw a dot with diameter size, using color.

    Optional arguments:
    size -- an integer >= 1 (if given)
    color -- a colorstring or a numeric color tuple

    Draw a circular dot with diameter size, using color.
    If size is not given, the maximum of pensize+4 and 2*pensize is used.

    Example (for a Turtle instance named turtle):
    >>> turtle.dot()
    >>> turtle.fd(50); turtle.dot(20, "blue"); turtle.fd(50)
    """
    #print "dot-1:", size, color
    # Untangle the flexible call signatures: dot(), dot(size),
    # dot(color...), dot(size, color...).
    if not color:
        if isinstance(size, (str, tuple)):
            # First positional arg is actually a color spec.
            color = self._colorstr(size)
            size = self._pensize + max(self._pensize, 4)
        else:
            color = self._pencolor
            if not size:
                size = self._pensize + max(self._pensize, 4)
    else:
        if size is None:
            size = self._pensize + max(self._pensize, 4)
        color = self._colorstr(color)
    #print "dot-2:", size, color
    if hasattr(self.screen, "_dot"):
        # Screen supports native dots: draw one item and make it undoable.
        item = self.screen._dot(self._position, size, color)
        #print "dot:", size, color, "item:", item
        self.items.append(item)
        if self.undobuffer:
            self.undobuffer.push(("dot", item))
    else:
        # Fallback: emulate the dot with a zero-length, fat pen stroke.
        # Pen state is saved and restored; all sub-actions are cumulated
        # into one undo entry.
        pen = self.pen()
        if self.undobuffer:
            self.undobuffer.push(["seq"])
            self.undobuffer.cumulate = True
        try:
            if self.resizemode() == 'auto':
                self.ht()
            self.pendown()
            self.pensize(size)
            self.pencolor(color)
            self.forward(0)
        finally:
            self.pen(pen)
        if self.undobuffer:
            self.undobuffer.cumulate = False
def _write(self, txt, align, font):
"""Performs the writing for write()
"""
item, end = self.screen._write(self._position, txt, align, font,
self._pencolor)
self.items.append(item)
if self.undobuffer:
self.undobuffer.push(("wri", item))
return end
def write(self, arg, move=False, align="left", font=("Arial", 8, "normal")):
    """Write text at the current turtle position.

    Arguments:
    arg -- info, which is to be written to the TurtleScreen
    move (optional) -- True/False
    align (optional) -- one of the strings "left", "center" or "right"
    font (optional) -- a triple (fontname, fontsize, fonttype)

    Write text - the string representation of arg - at the current
    turtle position according to align ("left", "center" or "right")
    and with the given font.
    If move is True, the pen is moved to the bottom-right corner
    of the text. By default, move is False.

    Example (for a Turtle instance named turtle):
    >>> turtle.write('Home = ', True, align="center")
    >>> turtle.write((0,0), True)
    """
    # Group the write (and the optional move) into one undo entry.
    undobuf = self.undobuffer
    if undobuf:
        undobuf.push(["seq"])
        undobuf.cumulate = True
    end = self._write(str(arg), align.lower(), font)
    if move:
        x, y = self.pos()
        self.setpos(end, y)
    if undobuf:
        undobuf.cumulate = False
def begin_poly(self):
    """Start recording the vertices of a polygon.

    No argument.

    The current turtle position becomes the first vertex of the
    polygon being recorded.

    Example (for a Turtle instance named turtle):
    >>> turtle.begin_poly()
    """
    self._creatingPoly = True
    self._poly = [self._position]
def end_poly(self):
    """Stop recording the vertices of a polygon.

    No argument.

    The current turtle position is the last point of the polygon;
    it will be connected with the first point.

    Example (for a Turtle instance named turtle):
    >>> turtle.end_poly()
    """
    self._creatingPoly = False
def get_poly(self):
    """Return the last recorded polygon.

    No argument.

    Example (for a Turtle instance named turtle):
    >>> p = turtle.get_poly()
    >>> turtle.register_shape("myFavouriteShape", p)
    """
    # When no polygon has been recorded yet, None is returned.
    if self._poly is None:
        return None
    return tuple(self._poly)
def getscreen(self):
    """Return the TurtleScreen object the turtle is drawing on.

    No argument.

    TurtleScreen methods can subsequently be called for that object.

    Example (for a Turtle instance named turtle):
    >>> ts = turtle.getscreen()
    >>> ts
    <turtle.TurtleScreen object at 0x0106B770>
    >>> ts.bgcolor("pink")
    """
    drawing_surface = self.screen
    return drawing_surface
def getturtle(self):
    """Return the Turtleobject itself.

    No argument.

    Only reasonable use: as a function to return the 'anonymous turtle':

    Example:
    >>> pet = getturtle()
    >>> pet.fd(50)
    >>> pet
    <turtle.Turtle object at 0x0187D810>
    >>> turtles()
    [<turtle.Turtle object at 0x0187D810>]
    """
    return self
# getpen is kept as a backward-compatible alias for getturtle.
getpen = getturtle
################################################################
### screen oriented methods recurring to methods of TurtleScreen
################################################################
def window_width(self):
    """Return the width of the turtle window.

    No argument.

    Example (for a TurtleScreen instance named screen):
    >>> screen.window_width()
    640
    """
    width, _height = self.screen._window_size()
    return width
def window_height(self):
    """Return the height of the turtle window.

    No argument.

    Example (for a TurtleScreen instance named screen):
    >>> screen.window_height()
    480
    """
    _width, height = self.screen._window_size()
    return height
def _delay(self, delay=None):
    """Set (or, with no argument, query) the animation delay.

    Thin wrapper that forwards to the screen's delay() method; the
    delay value determines the speed of the turtle animation.
    """
    return self.screen.delay(delay)
##### event binding methods #####
def onclick(self, fun, btn=1, add=None):
    """Bind fun to mouse-click event on this turtle on canvas.

    Arguments:
    fun -- a function with two arguments, to which will be assigned
           the coordinates of the clicked point on the canvas.
    btn -- number of the mouse-button, defaults to 1 (left mouse button).
    add -- True or False. If True, new binding will be added, otherwise
           it will replace a former binding.

    Example for the anonymous turtle, i. e. the procedural way:
    >>> def turn(x, y):
    ...     left(360)
    >>> onclick(turn)  # Now clicking into the turtle will turn it.
    >>> onclick(None)  # event-binding will be removed
    """
    # Bind on this turtle's own canvas item, then refresh the display.
    self.screen._onclick(self.turtle._item, fun, btn, add)
    self._update()
def onrelease(self, fun, btn=1, add=None):
    """Bind fun to mouse-button-release event on this turtle on canvas.

    Arguments:
    fun -- a function with two arguments, to which will be assigned
           the coordinates of the clicked point on the canvas.
    btn -- number of the mouse-button, defaults to 1 (left mouse button).
    add -- True or False. If True, new binding will be added, otherwise
           it will replace a former binding.

    Example (for a MyTurtle instance named joe):
    >>> class MyTurtle(Turtle):
    ...     def glow(self, x, y):
    ...         self.fillcolor("red")
    ...     def unglow(self, x, y):
    ...         self.fillcolor("")
    >>> joe = MyTurtle()
    >>> joe.onclick(joe.glow)
    >>> joe.onrelease(joe.unglow)
    ### clicking on joe turns fillcolor red,
    ### unclicking turns it to transparent.
    """
    # Bind on this turtle's own canvas item, then refresh the display.
    self.screen._onrelease(self.turtle._item, fun, btn, add)
    self._update()
def ondrag(self, fun, btn=1, add=None):
    """Bind fun to mouse-move event on this turtle on canvas.

    Arguments:
    fun -- a function with two arguments, to which will be assigned
           the coordinates of the clicked point on the canvas.
    btn -- number of the mouse-button, defaults to 1 (left mouse button).
    add -- True or False. If True, new binding will be added, otherwise
           it will replace a former binding.

    Every sequence of mouse-move-events on a turtle is preceded by a
    mouse-click event on that turtle.

    Example (for a Turtle instance named turtle):
    >>> turtle.ondrag(turtle.goto)
    ### Subsequently clicking and dragging a Turtle will
    ### move it across the screen thereby producing handdrawings
    ### (if pen is down).
    """
    self.screen._ondrag(self.turtle._item, fun, btn, add)
def _undo(self, action, data):
    """Does the main part of the work for undo().

    Dispatches on the action tag that undoable operations pushed onto
    the undobuffer and reverts the corresponding drawing operation.
    """
    if self.undobuffer is None:
        return
    if action == "rot":
        # Revert a rotation; degPAU records the degrees-per-angle-unit
        # in effect when the rotation was pushed, so the undo works even
        # if the angle mode changed in the meantime.
        angle, degPAU = data
        self._rotate(-angle*degPAU/self._degreesPerAU)
        dummy = self.undobuffer.pop()
    elif action == "stamp":
        stitem = data[0]
        self.clearstamp(stitem)
    elif action == "go":
        self._undogoto(data)
    elif action in ["wri", "dot"]:
        # Writing and dots are undone by deleting their canvas item.
        item = data[0]
        self.screen._delete(item)
        self.items.remove(item)
    elif action == "dofill":
        # Make the fill polygon invisible by collapsing it to a
        # degenerate, colorless triangle.
        item = data[0]
        self.screen._drawpoly(item, ((0, 0),(0, 0),(0, 0)),
                              fill="", outline="")
    elif action == "beginfill":
        # Abort an in-progress fill: forget the fill path and remove
        # its canvas item.
        item = data[0]
        self._fillitem = self._fillpath = None
        self.screen._delete(item)
        self.items.remove(item)
    elif action == "pen":
        TPen.pen(self, data[0])
        self.undobuffer.pop()
def undo(self):
    """undo (repeatedly) the last turtle action.

    No argument.

    Number of available undo actions is determined by the size of
    the undobuffer.

    Example (for a Turtle instance named turtle):
    >>> for i in range(4):
    ...     turtle.fd(50); turtle.lt(80)
    >>> for i in range(8):
    ...     turtle.undo()
    """
    if self.undobuffer is None:
        return
    entry = self.undobuffer.pop()
    action, data = entry[0], entry[1:]
    if action == "seq":
        # Compound entry: undo its sub-entries in reverse order.
        while data:
            sub = data.pop()
            self._undo(sub[0], sub[1:])
    else:
        self._undo(action, data)
# Backward-compatible aliases: turtlesize is the historical name of
# shapesize, RawPen the historical name of RawTurtle.
turtlesize = shapesize
RawPen = RawTurtle
### Screen - Singleton ########################
def Screen():
    """Return the singleton screen object.

    If none exists at the moment, create a new one and return it,
    else return the existing one.
    """
    screen = Turtle._screen
    if screen is None:
        screen = Turtle._screen = _Screen()
    return screen
class _Screen(TurtleScreen):
    """Singleton TurtleScreen subclass bound to a Tkinter root window."""

    # Class-level state shared by the single instance: one root window,
    # one canvas, one window title.
    _root = None
    _canvas = None
    _title = _CFG["title"]

    def __init__(self):
        # XXX there is no need for this code to be conditional,
        # as there will be only a single _Screen instance, anyway
        # XXX actually, the turtle demo is injecting root window,
        # so perhaps the conditional creation of a root should be
        # preserved (perhaps by passing it as an optional parameter)
        if _Screen._root is None:
            _Screen._root = self._root = _Root()
            self._root.title(_Screen._title)
            self._root.ondestroy(self._destroy)
        if _Screen._canvas is None:
            # Read the window/canvas geometry from the configuration.
            width = _CFG["width"]
            height = _CFG["height"]
            canvwidth = _CFG["canvwidth"]
            canvheight = _CFG["canvheight"]
            leftright = _CFG["leftright"]
            topbottom = _CFG["topbottom"]
            self._root.setupcanvas(width, height, canvwidth, canvheight)
            _Screen._canvas = self._root._getcanvas()
            TurtleScreen.__init__(self, _Screen._canvas)
            self.setup(width, height, leftright, topbottom)

    def setup(self, width=_CFG["width"], height=_CFG["height"],
              startx=_CFG["leftright"], starty=_CFG["topbottom"]):
        """ Set the size and position of the main window.

        Arguments:
        width: as integer a size in pixels, as float a fraction of the screen.
          Default is 50% of screen.
        height: as integer the height in pixels, as float a fraction of the
          screen. Default is 75% of screen.
        startx: if positive, starting position in pixels from the left
          edge of the screen, if negative from the right edge
          Default, startx=None is to center window horizontally.
        starty: if positive, starting position in pixels from the top
          edge of the screen, if negative from the bottom edge
          Default, starty=None is to center window vertically.

        Examples (for a Screen instance named screen):
        >>> screen.setup (width=200, height=200, startx=0, starty=0)

        sets window to 200x200 pixels, in upper left of screen

        >>> screen.setup(width=.75, height=0.5, startx=None, starty=None)

        sets window to 75% of screen by 50% of screen and centers
        """
        # Roots injected by embedders may not support geometry changes.
        if not hasattr(self._root, "set_geometry"):
            return
        sw = self._root.win_width()
        sh = self._root.win_height()
        # Float sizes in [0, 1] are interpreted as screen fractions.
        if isinstance(width, float) and 0 <= width <= 1:
            width = sw*width
        if startx is None:
            startx = (sw - width) / 2
        if isinstance(height, float) and 0 <= height <= 1:
            height = sh*height
        if starty is None:
            starty = (sh - height) / 2
        self._root.set_geometry(width, height, startx, starty)
        self.update()

    def title(self, titlestring):
        """Set title of turtle-window

        Argument:
        titlestring -- a string, to appear in the titlebar of the
                       turtle graphics window.

        This is a method of Screen-class. Not available for TurtleScreen-
        objects.

        Example (for a Screen instance named screen):
        >>> screen.title("Welcome to the turtle-zoo!")
        """
        if _Screen._root is not None:
            _Screen._root.title(titlestring)
        # Remember the title so a future root window picks it up.
        _Screen._title = titlestring

    def _destroy(self):
        # Tear down the singleton state so a fresh Screen() can be built.
        root = self._root
        if root is _Screen._root:
            Turtle._pen = None
            Turtle._screen = None
            _Screen._root = None
            _Screen._canvas = None
        TurtleScreen._RUNNING = True
        root.destroy()

    def bye(self):
        """Shut the turtlegraphics window.

        Example (for a TurtleScreen instance named screen):
        >>> screen.bye()
        """
        self._destroy()

    def exitonclick(self):
        """Go into mainloop until the mouse is clicked.

        No arguments.

        Bind bye() method to mouseclick on TurtleScreen.
        If "using_IDLE" - value in configuration dictionary is False
        (default value), enter mainloop.
        If IDLE with -n switch (no subprocess) is used, this value should be
        set to True in turtle.cfg. In this case IDLE's mainloop
        is active also for the client script.

        This is a method of the Screen-class and not available for
        TurtleScreen instances.

        Example (for a Screen instance named screen):
        >>> screen.exitonclick()
        """
        def exitGracefully(x, y):
            """Screen.bye() with two dummy-parameters"""
            self.bye()
        self.onclick(exitGracefully)
        if _CFG["using_IDLE"]:
            # IDLE's own mainloop is already running; do not start another.
            return
        try:
            mainloop()
        except AttributeError:
            # NOTE(review): presumably raised during interpreter shutdown;
            # exit quietly in that case.
            exit(0)
class Turtle(RawTurtle):
    """RawTurtle auto-creating (scrolled) canvas.

    When a Turtle object is created or a function derived from some
    Turtle method is called a TurtleScreen object is automatically created.
    """
    # Class-level singletons: the 'anonymous' turtle used by the
    # procedural interface, and the shared screen.
    _pen = None
    _screen = None

    def __init__(self,
                 shape=_CFG["shape"],
                 undobuffersize=_CFG["undobuffersize"],
                 visible=_CFG["visible"]):
        # Lazily create the singleton screen on first construction.
        if Turtle._screen is None:
            Turtle._screen = Screen()
        RawTurtle.__init__(self, Turtle._screen,
                           shape=shape,
                           undobuffersize=undobuffersize,
                           visible=visible)

# Pen is a backward-compatible alias for Turtle.
Pen = Turtle
def _getpen():
    """Create the 'anonymous' turtle if not already present."""
    pen = Turtle._pen
    if pen is None:
        pen = Turtle._pen = Turtle()
    return pen
def _getscreen():
    """Create a TurtleScreen if not already present."""
    screen = Turtle._screen
    if screen is None:
        screen = Turtle._screen = Screen()
    return screen
def write_docstringdict(filename="turtle_docstringdict"):
    """Create and write docstring-dictionary to file.

    Optional argument:
    filename -- a string, used as filename
                default value is turtle_docstringdict

    Has to be called explicitly, (not used by the turtle-graphics classes).
    The docstring dictionary will be written to the Python script
    <filename>.py. It is intended to serve as a template for translation
    of the docstrings into different languages.
    """
    docsdict = {}
    # Collect the docstrings of all screen- and turtle-functions.
    for methodname in _tg_screen_functions:
        key = "_Screen."+methodname
        docsdict[key] = eval(key).__doc__
    for methodname in _tg_turtle_functions:
        key = "Turtle."+methodname
        docsdict[key] = eval(key).__doc__
    # Aliases are skipped; entries are written in sorted order.
    keys = sorted([x for x in docsdict.keys()
                   if x.split('.')[1] not in _alias_list])
    # Use a context manager so the file is closed even if writing fails
    # (the original open()/close() pair leaked the handle on error).
    with open("%s.py" % filename, "w") as f:
        f.write('docsdict = {\n\n')
        for key in keys[:-1]:
            f.write('%s :\n' % repr(key))
            f.write('        """%s\n""",\n\n' % docsdict[key])
        # The last entry gets no trailing comma.
        key = keys[-1]
        f.write('%s :\n' % repr(key))
        f.write('        """%s\n"""\n\n' % docsdict[key])
        f.write("}\n")
def read_docstrings(lang):
    """Read in docstrings from lang-specific docstring dictionary.

    Transfer docstrings, translated to lang, from a dictionary-file
    to the methods of classes Screen and Turtle and - in revised form -
    to the corresponding functions.
    """
    # The translation module is expected to be importable as
    # turtle_docstringdict_<lang> and to define a dict named docsdict.
    modname = "turtle_docstringdict_%(language)s" % {'language':lang.lower()}
    module = __import__(modname)
    docsdict = module.docsdict
    for key in docsdict:
        try:
            # key looks like "Turtle.forward"; im_func is the underlying
            # function object of the unbound method (Python 2).
            eval(key).im_func.__doc__ = docsdict[key]
        except:
            # NOTE(review): bare except deliberately makes this
            # best-effort -- a bad entry only produces a warning.
            print "Bad docstring-entry: %s" % key
# At import time, try to install translated docstrings for the configured
# language; failures are reported but never fatal (Python 2 print syntax).
_LANGUAGE = _CFG["language"]
try:
    if _LANGUAGE != "english":
        read_docstrings(_LANGUAGE)
except ImportError:
    print "Cannot find docsdict for", _LANGUAGE
except:
    print ("Unknown Error when trying to import %s-docstring-dictionary" %
           _LANGUAGE)
def getmethparlist(ob):
    "Get strings describing the arguments for the given object"
    # Returns a pair of strings: a parameter list suitable for a def
    # statement (including defaults and */** markers) and an argument
    # list suitable for a call. Both are empty on failure.
    argText1 = argText2 = ""
    # bit of a hack for methods - turn it into a function
    # but we drop the "self" param.
    if type(ob)==types.MethodType:
        fob = ob.im_func
        argOffset = 1
    else:
        fob = ob
        argOffset = 0
    # Try and build one for Python defined functions
    if type(fob) in [types.FunctionType, types.LambdaType]:
        try:
            counter = fob.func_code.co_argcount
            items2 = list(fob.func_code.co_varnames[argOffset:counter])
            realArgs = fob.func_code.co_varnames[argOffset:counter]
            defaults = fob.func_defaults or []
            # Right-align defaults against the parameter names.
            defaults = list(map(lambda name: "=%s" % repr(name), defaults))
            defaults = [""] * (len(realArgs)-len(defaults)) + defaults
            items1 = map(lambda arg, dflt: arg+dflt, realArgs, defaults)
            # co_flags bit 0x4 signals *args, bit 0x8 signals **kwargs.
            if fob.func_code.co_flags & 0x4:
                items1.append("*"+fob.func_code.co_varnames[counter])
                items2.append("*"+fob.func_code.co_varnames[counter])
                counter += 1
            if fob.func_code.co_flags & 0x8:
                items1.append("**"+fob.func_code.co_varnames[counter])
                items2.append("**"+fob.func_code.co_varnames[counter])
            argText1 = ", ".join(items1)
            argText1 = "(%s)" % argText1
            argText2 = ", ".join(items2)
            argText2 = "(%s)" % argText2
        except:
            # NOTE(review): bare except makes introspection best-effort;
            # callers treat empty strings as "could not determine".
            pass
    return argText1, argText2
def _turtle_docrevise(docstr):
    """To reduce docstrings from RawTurtle class for functions.

    Strips the example-turtle prefix (e.g. "turtle.") and the
    "(for a Turtle instance named ...)" phrase so method docstrings
    read naturally as function docstrings.
    """
    import re
    if docstr is None:
        return None
    example_name = _CFG["exampleturtle"]
    revised = docstr.replace("%s." % example_name, "")
    pattern = re.compile(r' \(.+ %s\):' % example_name)
    return pattern.sub(":", revised)
def _screen_docrevise(docstr):
    """To reduce docstrings from TurtleScreen class for functions.

    Strips the example-screen prefix (e.g. "screen.") and the
    "(for a TurtleScreen instance named ...)" phrase so method
    docstrings read naturally as function docstrings.
    """
    import re
    if docstr is None:
        return None
    example_name = _CFG["examplescreen"]
    revised = docstr.replace("%s." % example_name, "")
    pattern = re.compile(r' \(.+ %s\):' % example_name)
    return pattern.sub(":", revised)
## The following mechanism makes all methods of RawTurtle and Turtle available
## as functions. So we can enhance, change, add, delete methods to these
## classes and do not need to change anything here.

# For every screen method, generate a module-level function that forwards
# to the singleton screen (Python 2 exec/print statement syntax).
for methodname in _tg_screen_functions:
    pl1, pl2 = getmethparlist(eval('_Screen.' + methodname))
    if pl1 == "":
        # Parameter list could not be determined; skip with a warning.
        print ">>>>>>", pl1, pl2
        continue
    defstr = ("def %(key)s%(pl1)s: return _getscreen().%(key)s%(pl2)s" %
              {'key':methodname, 'pl1':pl1, 'pl2':pl2})
    exec defstr
    eval(methodname).__doc__ = _screen_docrevise(eval('_Screen.'+methodname).__doc__)

# Likewise for every turtle method, forwarding to the anonymous turtle.
for methodname in _tg_turtle_functions:
    pl1, pl2 = getmethparlist(eval('Turtle.' + methodname))
    if pl1 == "":
        print ">>>>>>", pl1, pl2
        continue
    defstr = ("def %(key)s%(pl1)s: return _getpen().%(key)s%(pl2)s" %
              {'key':methodname, 'pl1':pl1, 'pl2':pl2})
    exec defstr
    eval(methodname).__doc__ = _turtle_docrevise(eval('Turtle.'+methodname).__doc__)

# done is an alias for mainloop (old turtle.py compatibility).
done = mainloop = TK.mainloop
del pl1, pl2, defstr
if __name__ == "__main__":
    def switchpen():
        # Toggle the pen: lift it if it is down, lower it otherwise.
        if isdown():
            pu()
        else:
            pd()

    def demo1():
        """Demo of old turtle.py - module"""
        reset()
        tracer(True)
        up()
        backward(100)
        down()
        # draw 3 squares; the last filled
        width(3)
        for i in range(3):
            if i == 2:
                fill(1)
            for _ in range(4):
                forward(20)
                left(90)
            if i == 2:
                color("maroon")
                fill(0)
            up()
            forward(30)
            down()
        width(1)
        color("black")
        # move out of the way
        tracer(False)
        up()
        right(90)
        forward(100)
        right(90)
        forward(100)
        right(180)
        down()
        # some text
        write("startstart", 1)
        write("start", 1)
        color("red")
        # staircase
        for i in range(5):
            forward(20)
            left(90)
            forward(20)
            right(90)
        # filled staircase
        tracer(True)
        fill(1)
        for i in range(5):
            forward(20)
            left(90)
            forward(20)
            right(90)
        fill(0)
        # more text

    def demo2():
        """Demo of some new features."""
        # Spiral of alternating pen-up/pen-down arcs, then undo it all.
        speed(1)
        st()
        pensize(3)
        setheading(towards(0, 0))
        radius = distance(0, 0)/2.0
        rt(90)
        for _ in range(18):
            switchpen()
            circle(radius, 10)
        write("wait a moment...")
        while undobufferentries():
            undo()
        reset()
        lt(90)
        # Growing, color-shifting filled triangles.
        colormode(255)
        laenge = 10
        pencolor("green")
        pensize(3)
        lt(180)
        for i in range(-2, 16):
            if i > 0:
                begin_fill()
                fillcolor(255-15*i, 0, 15*i)
            for _ in range(3):
                fd(laenge)
                lt(120)
            laenge += 10
            lt(15)
            speed((speed()+1)%12)
        end_fill()
        # A filled four-arc "flower" shape.
        lt(120)
        pu()
        fd(70)
        rt(30)
        pd()
        color("red","yellow")
        speed(0)
        fill(1)
        for _ in range(4):
            circle(50, 90)
            rt(90)
            fd(30)
            rt(90)
        fill(0)
        lt(90)
        pu()
        fd(30)
        pd()
        shape("turtle")
        # Chase scene: one turtle pursues another until caught.
        tri = getturtle()
        tri.resizemode("auto")
        turtle = Turtle()
        turtle.resizemode("auto")
        turtle.shape("turtle")
        turtle.reset()
        turtle.left(90)
        turtle.speed(0)
        turtle.up()
        turtle.goto(280, 40)
        turtle.lt(30)
        turtle.down()
        turtle.speed(6)
        turtle.color("blue","orange")
        turtle.pensize(2)
        tri.speed(6)
        setheading(towards(turtle))
        count = 1
        while tri.distance(turtle) > 4:
            turtle.fd(3.5)
            turtle.lt(0.6)
            tri.setheading(tri.towards(turtle))
            tri.fd(4)
            if count % 20 == 0:
                turtle.stamp()
                tri.stamp()
                switchpen()
            count += 1
        tri.write("CAUGHT! ", font=("Arial", 16, "bold"), align="right")
        tri.pencolor("black")
        tri.pencolor("red")

        def baba(xdummy, ydummy):
            # Click handler: clear everything and close the window.
            clearscreen()
            bye()

        time.sleep(2)

        # Undo the whole chase, then invite the user to end the demo.
        while undobufferentries():
            tri.undo()
            turtle.undo()
        tri.fd(50)
        tri.write(" Click me!", font = ("Courier", 12, "bold") )
        tri.onclick(baba, 1)

    demo1()
    demo2()
    exitonclick()
| bsd-2-clause |
Medigate/cutiuta-server | cutiuta-server/env/lib/python3.4/site-packages/django/db/models/sql/subqueries.py | 48 | 8006 | """
Query subclasses which provide extra functionality beyond simple data retrieval.
"""
from django.core.exceptions import FieldError
from django.db import connections
from django.db.models.query_utils import Q
from django.db.models.sql.constants import (
CURSOR, GET_ITERATOR_CHUNK_SIZE, NO_RESULTS,
)
from django.db.models.sql.query import Query
from django.utils import six
__all__ = ['DeleteQuery', 'UpdateQuery', 'InsertQuery', 'AggregateQuery']
class DeleteQuery(Query):
    """
    Delete queries are done through this class, since they are more constrained
    than general queries.
    """
    compiler = 'SQLDeleteCompiler'

    def do_query(self, table, where, using):
        # Restrict the query to a single table and execute the DELETE,
        # returning the number of rows removed (0 when no cursor).
        self.tables = [table]
        self.where = where
        cursor = self.get_compiler(using).execute_sql(CURSOR)
        return cursor.rowcount if cursor else 0

    def delete_batch(self, pk_list, using, field=None):
        """
        Set up and execute delete queries for all the objects in pk_list.

        More than one physical query may be executed if there are a
        lot of values in pk_list.
        """
        # number of objects deleted
        num_deleted = 0
        if not field:
            field = self.get_meta().pk
        # Chunk the pk list so each DELETE's IN-clause stays bounded.
        for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
            self.where = self.where_class()
            self.add_q(Q(
                **{field.attname + '__in': pk_list[offset:offset + GET_ITERATOR_CHUNK_SIZE]}))
            num_deleted += self.do_query(self.get_meta().db_table, self.where, using=using)
        return num_deleted

    def delete_qs(self, query, using):
        """
        Delete the queryset in one SQL query (if possible). For simple queries
        this is done by copying the query.query.where to self.query, for
        complex queries by using subquery.
        """
        innerq = query.query
        # Make sure the inner query has at least one table in use.
        innerq.get_initial_alias()
        # The same for our new query.
        self.get_initial_alias()
        innerq_used_tables = [t for t in innerq.tables
                              if innerq.alias_refcount[t]]
        if not innerq_used_tables or innerq_used_tables == self.tables:
            # There is only the base table in use in the query.
            self.where = innerq.where
        else:
            pk = query.model._meta.pk
            if not connections[using].features.update_can_self_select:
                # We can't do the delete using subquery.
                # Fall back to fetching the pks and deleting in batches.
                values = list(query.values_list('pk', flat=True))
                if not values:
                    return 0
                return self.delete_batch(values, using)
            else:
                # Turn the inner query into a pk-only subquery.
                innerq.clear_select_clause()
                innerq.select = [
                    pk.get_col(self.get_initial_alias())
                ]
                values = innerq
            self.where = self.where_class()
            self.add_q(Q(pk__in=values))
        cursor = self.get_compiler(using).execute_sql(CURSOR)
        return cursor.rowcount if cursor else 0
class UpdateQuery(Query):
    """
    Represents an "update" SQL query.
    """
    compiler = 'SQLUpdateCompiler'

    def __init__(self, *args, **kwargs):
        super(UpdateQuery, self).__init__(*args, **kwargs)
        self._setup_query()

    def _setup_query(self):
        """
        Runs on initialization and after cloning. Any attributes that would
        normally be set in __init__ should go in here, instead, so that they
        are also set up after a clone() call.
        """
        # (field, model, value) triples to update on this model's table.
        self.values = []
        # pks restricting related updates; None means unrestricted.
        self.related_ids = None
        if not hasattr(self, 'related_updates'):
            # Maps ancestor model -> list of (field, None, value) updates.
            self.related_updates = {}

    def clone(self, klass=None, **kwargs):
        # Copy related_updates so clones do not share the mutable dict.
        return super(UpdateQuery, self).clone(klass, related_updates=self.related_updates.copy(), **kwargs)

    def update_batch(self, pk_list, values, using):
        # Apply the same value updates to the given pks, chunked so each
        # UPDATE's IN-clause stays bounded.
        self.add_update_values(values)
        for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
            self.where = self.where_class()
            self.add_q(Q(pk__in=pk_list[offset: offset + GET_ITERATOR_CHUNK_SIZE]))
            self.get_compiler(using).execute_sql(NO_RESULTS)

    def add_update_values(self, values):
        """
        Convert a dictionary of field name to value mappings into an update
        query. This is the entry point for the public update() method on
        querysets.
        """
        values_seq = []
        for name, val in six.iteritems(values):
            field = self.get_meta().get_field(name)
            direct = not (field.auto_created and not field.concrete) or not field.concrete
            model = field.model._meta.concrete_model
            if not direct or (field.is_relation and field.many_to_many):
                raise FieldError(
                    'Cannot update model field %r (only non-relations and '
                    'foreign keys permitted).' % field
                )
            if model is not self.get_meta().model:
                # Field lives on an ancestor model; defer to a separate
                # per-ancestor update query.
                self.add_related_update(model, field, val)
                continue
            values_seq.append((field, model, val))
        return self.add_update_fields(values_seq)

    def add_update_fields(self, values_seq):
        """
        Append a sequence of (field, model, value) triples to the internal list
        that will be used to generate the UPDATE query. Might be more usefully
        called add_update_targets() to hint at the extra information here.
        """
        self.values.extend(values_seq)

    def add_related_update(self, model, field, value):
        """
        Adds (name, value) to an update query for an ancestor model.

        Updates are coalesced so that we only run one update query per ancestor.
        """
        self.related_updates.setdefault(model, []).append((field, None, value))

    def get_related_updates(self):
        """
        Returns a list of query objects: one for each update required to an
        ancestor model. Each query will have the same filtering conditions as
        the current query but will only update a single table.
        """
        if not self.related_updates:
            return []
        result = []
        for model, values in six.iteritems(self.related_updates):
            query = UpdateQuery(model)
            query.values = values
            if self.related_ids is not None:
                query.add_filter(('pk__in', self.related_ids))
            result.append(query)
        return result
class InsertQuery(Query):
    compiler = 'SQLInsertCompiler'

    def __init__(self, *args, **kwargs):
        super(InsertQuery, self).__init__(*args, **kwargs)
        # Fields to insert and the model instances providing the values.
        self.fields = []
        self.objs = []

    def clone(self, klass=None, **kwargs):
        # NOTE(review): self.raw is only set by insert_values(); cloning
        # before that call would raise AttributeError -- confirm callers
        # always populate the query first.
        extras = {
            'fields': self.fields[:],
            'objs': self.objs[:],
            'raw': self.raw,
        }
        extras.update(kwargs)
        return super(InsertQuery, self).clone(klass, **extras)

    def insert_values(self, fields, objs, raw=False):
        """
        Set up the insert query from the 'insert_values' dictionary. The
        dictionary gives the model field names and their target values.

        If 'raw_values' is True, the values in the 'insert_values' dictionary
        are inserted directly into the query, rather than passed as SQL
        parameters. This provides a way to insert NULL and DEFAULT keywords
        into the query, for example.
        """
        self.fields = fields
        self.objs = objs
        self.raw = raw
class AggregateQuery(Query):
    """
    An AggregateQuery takes another query as a parameter to the FROM
    clause and only selects the elements in the provided list.
    """
    compiler = 'SQLAggregateCompiler'

    def add_subquery(self, query, using):
        # Compile the inner query once and keep its SQL and parameters;
        # the compiler splices them into the outer FROM clause.
        self.subquery, self.sub_params = query.get_compiler(using).as_sql(
            with_col_aliases=True,
            subquery=True,
        )
| gpl-3.0 |
sandipbgt/nodeshot | nodeshot/core/api/views.py | 5 | 1434 | from django.core.urlresolvers import NoReverseMatch
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.reverse import reverse
from .urls import urlpatterns
@api_view(('GET',))
def root_endpoint(request, format=None):
    """
    List of all the available resources of this RESTful API.
    """
    endpoints = []
    # loop over url modules
    for urlmodule in urlpatterns:
        # skip plain url patterns that are not included urlconf modules
        if not hasattr(urlmodule, 'urlconf_module'):
            continue
        # loop over urls of that module
        for url in urlmodule.urlconf_module.urlpatterns:
            # TODO: configurable skip url in settings
            # skip unnamed urls (url.name is None would crash .replace()
            # below) and the api-docs url
            if url.name is None or url.name in ['django.swagger.resources.view']:
                continue
            # try adding url to list of urls to show
            try:
                endpoints.append({
                    'name': url.name.replace('api_', ''),
                    'url': reverse(url.name, request=request, format=format)
                })
            # urls of object details will fail silently (eg: /nodes/<slug>/)
            except NoReverseMatch:
                pass
    return Response(endpoints)
| gpl-3.0 |
sugartom/tensorflow-alien | tensorflow/contrib/distributions/python/ops/mixture.py | 18 | 16672 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Mixture distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import categorical
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
class Mixture(distribution.Distribution):
"""Mixture distribution.
The `Mixture` object implements batched mixture distributions.
The mixture model is defined by a `Categorical` distribution (the mixture)
and a python list of `Distribution` objects.
Methods supported include `log_prob`, `prob`, `mean`, `sample`, and
`entropy_lower_bound`.
"""
def __init__(self,
             cat,
             components,
             validate_args=False,
             allow_nan_stats=True,
             name="Mixture"):
  """Initialize a Mixture distribution.

  A `Mixture` is defined by a `Categorical` (`cat`, representing the
  mixture probabilities) and a list of `Distribution` objects
  all having matching dtype, batch shape, event shape, and continuity
  properties (the components).

  The `num_classes` of `cat` must be possible to infer at graph construction
  time and match `len(components)`.

  Args:
    cat: A `Categorical` distribution instance, representing the probabilities
        of `distributions`.
    components: A list or tuple of `Distribution` instances.
      Each instance must have the same type, be defined on the same domain,
      and have matching `event_shape` and `batch_shape`.
    validate_args: Python `bool`, default `False`. If `True`, raise a runtime
      error if batch or event ranks are inconsistent between cat and any of
      the distributions. This is only checked if the ranks cannot be
      determined statically at graph construction time.
    allow_nan_stats: Boolean, default `True`. If `False`, raise an
     exception if a statistic (e.g. mean/mode/etc...) is undefined for any
      batch member. If `True`, batch members with valid parameters leading to
      undefined statistics will return NaN for this statistic.
    name: A name for this distribution (optional).

  Raises:
    TypeError: If cat is not a `Categorical`, or `components` is not
      a list or tuple, or the elements of `components` are not
      instances of `Distribution`, or do not have matching `dtype`.
    ValueError: If `components` is an empty list or tuple, or its
      elements do not have a statically known event rank.
      If `cat.num_classes` cannot be inferred at graph creation time,
      or the constant value of `cat.num_classes` is not equal to
      `len(components)`, or all `components` and `cat` do not have
      matching static batch shapes, or all components do not
      have matching static event shapes.
  """
  parameters = locals()
  # --- Static (graph-construction-time) validation. ---
  if not isinstance(cat, categorical.Categorical):
    raise TypeError("cat must be a Categorical distribution, but saw: %s" %
                    cat)
  if not components:
    raise ValueError("components must be a non-empty list or tuple")
  if not isinstance(components, (list, tuple)):
    raise TypeError("components must be a list or tuple, but saw: %s" %
                    components)
  if not all(isinstance(c, distribution.Distribution) for c in components):
    raise TypeError(
        "all entries in components must be Distribution instances"
        " but saw: %s" % components)

  dtype = components[0].dtype
  if not all(d.dtype == dtype for d in components):
    raise TypeError("All components must have the same dtype, but saw "
                    "dtypes: %s" % [(d.name, d.dtype) for d in components])
  # Merge static shapes across all components; merge_with raises if any
  # pair of shapes is incompatible.
  static_event_shape = components[0].event_shape
  static_batch_shape = cat.batch_shape
  for d in components:
    static_event_shape = static_event_shape.merge_with(d.event_shape)
    static_batch_shape = static_batch_shape.merge_with(d.batch_shape)
  if static_event_shape.ndims is None:
    raise ValueError(
        "Expected to know rank(event_shape) from components, but "
        "none of the components provide a static number of ndims")

  # Ensure that all batch and event ndims are consistent.
  with ops.name_scope(name, values=[cat.logits]):
    num_components = cat.event_size
    static_num_components = tensor_util.constant_value(num_components)
    if static_num_components is None:
      raise ValueError(
          "Could not infer number of classes from cat and unable "
          "to compare this value to the number of components passed in.")
    # Possibly convert from numpy 0-D array.
    static_num_components = int(static_num_components)
    if static_num_components != len(components):
      raise ValueError("cat.num_classes != len(components): %d vs. %d" %
                       (static_num_components, len(components)))

    cat_batch_shape = cat.batch_shape_tensor()
    cat_batch_rank = array_ops.size(cat_batch_shape)
    if validate_args:
      # Build runtime assertions checking each component's batch rank and
      # shape against cat's; they gate the distribution's ops.
      batch_shapes = [d.batch_shape_tensor() for d in components]
      batch_ranks = [array_ops.size(bs) for bs in batch_shapes]
      check_message = ("components[%d] batch shape must match cat "
                       "batch shape")
      self._assertions = [
          check_ops.assert_equal(
              cat_batch_rank, batch_ranks[di], message=check_message % di)
          for di in range(len(components))
      ]
      self._assertions += [
          check_ops.assert_equal(
              cat_batch_shape, batch_shapes[di], message=check_message % di)
          for di in range(len(components))
      ]
    else:
      self._assertions = []

    self._cat = cat
    self._components = list(components)
    self._num_components = static_num_components
    self._static_event_shape = static_event_shape
    self._static_batch_shape = static_batch_shape

    # We let the Mixture distribution access _graph_parents since its arguably
    # more like a baseclass.
    graph_parents = self._cat._graph_parents  # pylint: disable=protected-access
    for c in self._components:
      graph_parents += c._graph_parents  # pylint: disable=protected-access

    super(Mixture, self).__init__(
        dtype=dtype,
        reparameterization_type=distribution.NOT_REPARAMETERIZED,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        parameters=parameters,
        graph_parents=graph_parents,
        name=name)
  @property
  def cat(self):
    """The `Categorical` distribution over mixture-component indices."""
    return self._cat
  @property
  def components(self):
    """The list of component distributions making up the mixture."""
    return self._components
  @property
  def num_components(self):
    """Static number of mixture components (a Python `int`)."""
    return self._num_components
  def _batch_shape_tensor(self):
    # Batch shape is delegated to the categorical mixing distribution.
    return self._cat.batch_shape_tensor()
  def _batch_shape(self):
    # Static batch shape derived/validated at construction time.
    return self._static_batch_shape
  def _event_shape_tensor(self):
    # Components were checked for consistent event ndims at construction,
    # so the first component's event shape stands in for all of them.
    return self._components[0].event_shape_tensor()
  def _event_shape(self):
    # Static event shape derived/validated at construction time.
    return self._static_event_shape
  def _mean(self):
    """Mixture mean: sum_k cat_prob[k] * components[k].mean()."""
    with ops.control_dependencies(self._assertions):
      distribution_means = [d.mean() for d in self.components]
      cat_probs = self._cat_probs(log_probs=False)
      # This was checked to not be None at construction time.
      static_event_rank = self.event_shape.ndims
      # Expand the rank of x up to static_event_rank times so that
      # broadcasting works correctly.
      def expand(x):
        expanded_x = x
        for _ in range(static_event_rank):
          expanded_x = array_ops.expand_dims(expanded_x, -1)
        return expanded_x
      # Append one size-1 axis per event dim so the batch-shaped mixture
      # weights broadcast against the (batch + event)-shaped component means.
      cat_probs = [expand(c_p) for c_p in cat_probs]
      partial_means = [
          c_p * m for (c_p, m) in zip(cat_probs, distribution_means)
      ]
      # These should all be the same shape by virtue of matching
      # batch_shape and event_shape.
      return math_ops.add_n(partial_means)
  def _log_prob(self, x):
    """log p(x) = logsumexp_k(log cat_prob[k] + components[k].log_prob(x))."""
    with ops.control_dependencies(self._assertions):
      x = ops.convert_to_tensor(x, name="x")
      distribution_log_probs = [d.log_prob(x) for d in self.components]
      cat_log_probs = self._cat_probs(log_probs=True)
      final_log_probs = [
          cat_lp + d_lp
          for (cat_lp, d_lp) in zip(cat_log_probs, distribution_log_probs)
      ]
      # Stack per-component terms on a new leading axis, then reduce it with
      # logsumexp for numerical stability.
      concat_log_probs = array_ops.stack(final_log_probs, 0)
      log_sum_exp = math_ops.reduce_logsumexp(concat_log_probs, [0])
      return log_sum_exp
  def _prob(self, x):
    # Computed via log space for numerical stability.
    return math_ops.exp(self._log_prob(x))
  def _sample_n(self, n, seed=None):
    """Draws `n` samples.
    Strategy: sample component indices from `cat`, partition the flat sample
    slots by component, sample each component only as many times as it was
    chosen, then dynamic_stitch the per-component draws back into place.
    """
    with ops.control_dependencies(self._assertions):
      n = ops.convert_to_tensor(n, name="n")
      static_n = tensor_util.constant_value(n)
      n = int(static_n) if static_n is not None else n
      cat_samples = self.cat.sample(n, seed=seed)
      # Prefer fully-static shapes/sizes; fall back to graph ops otherwise.
      static_samples_shape = cat_samples.get_shape()
      if static_samples_shape.is_fully_defined():
        samples_shape = static_samples_shape.as_list()
        samples_size = static_samples_shape.num_elements()
      else:
        samples_shape = array_ops.shape(cat_samples)
        samples_size = array_ops.size(cat_samples)
      static_batch_shape = self.batch_shape
      if static_batch_shape.is_fully_defined():
        batch_shape = static_batch_shape.as_list()
        batch_size = static_batch_shape.num_elements()
      else:
        batch_shape = self.batch_shape_tensor()
        batch_size = math_ops.reduce_prod(batch_shape)
      static_event_shape = self.event_shape
      if static_event_shape.is_fully_defined():
        event_shape = np.array(static_event_shape.as_list(), dtype=np.int32)
      else:
        event_shape = self.event_shape_tensor()
      # Get indices into the raw cat sampling tensor. We will
      # need these to stitch sample values back out after sampling
      # within the component partitions.
      samples_raw_indices = array_ops.reshape(
          math_ops.range(0, samples_size), samples_shape)
      # Partition the raw indices so that we can use
      # dynamic_stitch later to reconstruct the samples from the
      # known partitions.
      partitioned_samples_indices = data_flow_ops.dynamic_partition(
          data=samples_raw_indices,
          partitions=cat_samples,
          num_partitions=self.num_components)
      # Copy the batch indices n times, as we will need to know
      # these to pull out the appropriate rows within the
      # component partitions.
      batch_raw_indices = array_ops.reshape(
          array_ops.tile(math_ops.range(0, batch_size), [n]), samples_shape)
      # Explanation of the dynamic partitioning below:
      #   batch indices are i.e., [0, 1, 0, 1, 0, 1]
      # Suppose partitions are:
      #     [1 1 0 0 1 1]
      # After partitioning, batch indices are cut as:
      #     [batch_indices[x] for x in 2, 3]
      #     [batch_indices[x] for x in 0, 1, 4, 5]
      # i.e.
      #     [1 1] and [0 0 0 0]
      # Now we sample n=2 from part 0 and n=4 from part 1.
      # For part 0 we want samples from batch entries 1, 1 (samples 0, 1),
      # and for part 1 we want samples from batch entries 0, 0, 0, 0
      # (samples 0, 1, 2, 3).
      partitioned_batch_indices = data_flow_ops.dynamic_partition(
          data=batch_raw_indices,
          partitions=cat_samples,
          num_partitions=self.num_components)
      samples_class = [None for _ in range(self.num_components)]
      for c in range(self.num_components):
        n_class = array_ops.size(partitioned_samples_indices[c])
        # Use a fresh derived seed per component so draws are independent.
        seed = distribution_util.gen_new_seed(seed, "mixture")
        samples_class_c = self.components[c].sample(n_class, seed=seed)
        # Pull out the correct batch entries from each index.
        # To do this, we may have to flatten the batch shape.
        # For sample s, batch element b of component c, we get the
        # partitioned batch indices from
        # partitioned_batch_indices[c]; and shift each element by
        # the sample index. The final lookup can be thought of as
        # a matrix gather along locations (s, b) in
        # samples_class_c where the n_class rows correspond to
        # samples within this component and the batch_size columns
        # correspond to batch elements within the component.
        #
        # Thus the lookup index is
        #   lookup[c, i] = batch_size * s[i] + b[c, i]
        # for i = 0 ... n_class[c] - 1.
        lookup_partitioned_batch_indices = (
            batch_size * math_ops.range(n_class) +
            partitioned_batch_indices[c])
        samples_class_c = array_ops.reshape(
            samples_class_c,
            array_ops.concat([[n_class * batch_size], event_shape], 0))
        samples_class_c = array_ops.gather(
            samples_class_c, lookup_partitioned_batch_indices,
            name="samples_class_c_gather")
        samples_class[c] = samples_class_c
      # Stitch back together the samples across the components.
      lhs_flat_ret = data_flow_ops.dynamic_stitch(
          indices=partitioned_samples_indices, data=samples_class)
      # Reshape back to proper sample, batch, and event shape.
      ret = array_ops.reshape(lhs_flat_ret,
                              array_ops.concat([samples_shape,
                                                self.event_shape_tensor()], 0))
      ret.set_shape(
          tensor_shape.TensorShape(static_samples_shape).concatenate(
              self.event_shape))
      return ret
  def entropy_lower_bound(self, name="entropy_lower_bound"):
    r"""A lower bound on the entropy of this mixture model.
    The bound below is not always very tight, and its usefulness depends
    on the mixture probabilities and the components in use.
    A lower bound is useful for ELBO when the `Mixture` is the variational
    distribution:
    \\(
    \log p(x) >= ELBO = \int q(z) \log p(x, z) dz + H[q]
    \\)
    where \\( p \\) is the prior distribution, \\( q \\) is the variational,
    and \\( H[q] \\) is the entropy of \\( q \\). If there is a lower bound
    \\( G[q] \\) such that \\( H[q] \geq G[q] \\) then it can be used in
    place of \\( H[q] \\).
    For a mixture of distributions \\( q(Z) = \sum_i c_i q_i(Z) \\) with
    \\( \sum_i c_i = 1 \\), by the concavity of \\( f(x) = -x \log x \\), a
    simple lower bound is:
    \\(
    \begin{align}
    H[q] & = - \int q(z) \log q(z) dz \\\
       & = - \int (\sum_i c_i q_i(z)) \log(\sum_i c_i q_i(z)) dz \\\
       & \geq - \sum_i c_i \int q_i(z) \log q_i(z) dz \\\
       & = \sum_i c_i H[q_i]
    \end{align}
    \\)
    This is the term we calculate below for \\( G[q] \\).
    Args:
      name: A name for this operation (optional).
    Returns:
      A lower bound on the Mixture's entropy.
    """
    with self._name_scope(name, values=[self.cat.logits]):
      with ops.control_dependencies(self._assertions):
        distribution_entropies = [d.entropy() for d in self.components]
        cat_probs = self._cat_probs(log_probs=False)
        # G[q] = sum_i c_i H[q_i]  (see derivation in the docstring).
        partial_entropies = [
            c_p * m for (c_p, m) in zip(cat_probs, distribution_entropies)
        ]
        # These are all the same shape by virtue of matching batch_shape
        return math_ops.add_n(partial_entropies)
  def _cat_probs(self, log_probs):
    """Get a list of num_components batchwise probabilities.
    Args:
      log_probs: if True, return log-probabilities (via log_softmax);
        otherwise plain probabilities (via softmax).
    """
    which_softmax = nn_ops.log_softmax if log_probs else nn_ops.softmax
    cat_probs = which_softmax(self.cat.logits)
    # Split the rightmost (component) axis into one batch-shaped tensor
    # per mixture component.
    cat_probs = array_ops.unstack(cat_probs, num=self.num_components, axis=-1)
    return cat_probs
| apache-2.0 |
hfaran/CitySportsLeague-Server | src/wlsports/api/team.py | 1 | 7752 | from collections import defaultdict
from random import choice, randint
from tornado_json.exceptions import api_assert, APIError
from tornado_json import schema
from pony.orm import db_session, CommitException, select, commit
from tornado.web import authenticated
from wlsports.db import Team as TeamEntity
from wlsports.db import Player as PlayerEntity
from wlsports.db import Sport as SportEntity
from wlsports.db import Game as GameEntity
from wlsports.handlers import APIHandler
from wlsports.util import invert_dict_nonunique
class Team(APIHandler):
    """Team resource handler: PUT creates a team, GET fetches one by name."""
    @authenticated
    @schema.validate(
        input_schema={
            "type": "object",
            "properties": {
                "usernames": {"type": "array"},
                "name": {"type": "string"},
                "sport": {"enum": ["Basketball", "Soccer"]}
            },
            "required": ["usernames", "name", "sport"]
        },
        output_schema={
            "type": "object",
            "properties": {
                "name": {"type": "string"}
            }
        }
    )
    def put(self):
        """
        PUT to create a team
        * `name`
        * `usernames`: list of players in team (INCLUDING YOURSELF!!)
        * `sport`: One of "Basketball" or "Soccer"
        """
        attrs = dict(self.body)
        with db_session:
            # Team names are unique; refuse to overwrite an existing one.
            if TeamEntity.get(name=attrs['name']):
                raise APIError(
                    409,
                    log_message="Team with name {} already exists!"
                    .format(attrs['name'])
                )
            # Add team mates (every username must resolve to a Player).
            players = []
            for pname in attrs["usernames"]:
                player = PlayerEntity.get(username=pname)
                api_assert(
                    player is not None,
                    400,
                    log_message="No player exists with name {}!".format(pname)
                )
                players.append(player)
            # Get sport
            sport = SportEntity[attrs['sport']]
            # Create team with a zeroed record.
            team = TeamEntity(
                name=attrs['name'],
                users=players,
                sport=sport,
                wins=0,
                losses=0,
                ties=0,
                points_ratio=0.0
            )
        return {'name': team.name}
    @schema.validate(
        output_schema={
            "type": "object",
            "properties": {
                "usernames": {"type": "array"},
                "name": {"type": "string"},
                "sport": {"enum": ["Basketball", "Soccer"]},
                "games": {"type": "array"},
                "wins": {"type": "number"},
                "losses": {"type": "number"},
                "ties": {"type": "number"},
                "points_ratio": {"type": "number"}
            }
        },
    )
    def get(self, name):
        """
        Get team with `name`
        """
        with db_session:
            team = TeamEntity.get(name=name)
            if team is None:
                raise APIError(
                    400,
                    log_message="Team with name {} does not exist!"
                    .format(name)
                )
            team_dict = team.to_dict(with_collections=True)
            # Rename the Pony collection key to the API's field name.
            team_dict["usernames"] = team_dict.pop("users")
            # Order all same-sport teams by ranking score (lower is better)
            # and map team name -> 0-based position.
            rankings = {k: i for i, (k, v) in enumerate(sorted(
                get_team_rankings(team, filter_for_matchmaking=False).items(),
                key=lambda x: x[1]
            ))}
            my_ranking = rankings[team.name]
            # Displayed as "position/total" (1-based).
            # NOTE(review): "ranking" is not declared in output_schema above;
            # confirm tornado_json passes extra keys through.
            team_dict["ranking"] = "{}/{}".format(my_ranking+1, len(rankings))
        return team_dict
class Matchmake(APIHandler):
    """POST: pick a rival for `team_name` and create a Game between them."""
    @authenticated
    @schema.validate(
        input_schema={
            "type": "object",
            "properties": {
                "team_name": {"type": "string"}
            }
        },
        output_schema={
            "type": "object",
            "properties": {
                "game_id": {"type": "number"}
            }
        }
    )
    def post(self):
        """
        Does matchmaking by finding a rival team for the provided `team_name`,
        creates a new game with the two teams and returns the game_id
        for that game
        """
        team_name = self.body['team_name']
        with db_session:
            myteam = TeamEntity.get(name=team_name)
            # Only members of the team may matchmake for it.
            api_assert(
                PlayerEntity[self.get_current_user()] in myteam.users,
                403,
                log_message="You can only matchmake for teams that you are"
                " a part of!"
            )
            overall_rankings = get_team_rankings(myteam)
            myranking = overall_rankings[myteam.name]
            ranking_vals = overall_rankings.values()
            # ranking score -> list of team names with that score.
            rankings_by_ranking = invert_dict_nonunique(overall_rankings)
            who_you_verse_index = None
            # NOTE(review): the print calls, the in-loop `from time import
            # sleep` and sleep(0.4) below look like leftover debug code.
            print(overall_rankings)
            # Retry until `choice` yields a score present in ranking_vals.
            # NOTE(review): if no other team is within 10 ranking points,
            # `choice([])` raises IndexError; and if candidates never match,
            # this loop never terminates -- verify intended behaviour.
            while (who_you_verse_index not in ranking_vals):
                print(who_you_verse_index)
                from time import sleep
                sleep(0.4)
                if len(rankings_by_ranking) == 1:
                    who_you_verse_index = myranking
                else:
                    who_you_verse_index = choice(
                        [rval for rval in ranking_vals
                         if rval != myranking and
                         abs(rval - myranking) <= 10]
                    )
            rival_team_name = rankings_by_ranking[who_you_verse_index][0]
            # If we picked our own score bucket, take the other team in it.
            if rival_team_name == myteam.name:
                rival_team_name = rankings_by_ranking[who_you_verse_index][1]
            rival_team = TeamEntity[rival_team_name]
            # The requesting player hosts and auto-accepts the game.
            game = GameEntity(
                teams=[myteam, rival_team],
                host=PlayerEntity[self.get_current_user()],
                accepted_players=[PlayerEntity[self.get_current_user()]]
            )
            commit()
        return {"game_id": game.id}
def get_team_rankings(myteam, filter_for_matchmaking=True):
    """Compute ranking scores for all teams playing `myteam`'s sport.

    Each team's score is the sum of its index in the win/loss-ratio ordering
    and its index in the points-ratio ordering (both descending), so LOWER
    scores mean better-ranked teams.

    Args:
        myteam: the TeamEntity to rank against its peers.
        filter_for_matchmaking: when True, exclude every team that shares at
            least one player with `myteam` (myteam itself is re-added).

    Returns:
        dict mapping team name -> ranking score.

    Raises:
        APIError: 400 if `myteam` is None.
        Also raises (via api_assert) 409 if there is no eligible rival team.
    """
    # Bug fix: the None check must run before any attribute access; the
    # original read `myteam.name` first, making the check unreachable.
    if myteam is None:
        raise APIError(
            400,
            log_message="Team does not exist!"
        )
    sport_name = myteam.sport.name
    ### Figure out rival team
    # Find teams that are of the same sport and also
    # that they don't contain any players from myteam
    sport_teams = select(team for team in TeamEntity
                         if team.sport.name == sport_name)[:]
    if filter_for_matchmaking:
        myteam_names = [player.username for player in myteam.users]
        sport_teams = [team for team in sport_teams if all(
            player.username not in myteam_names for player in team.users
        )]
    # NOTE(review): when filter_for_matchmaking is False, myteam is already
    # in sport_teams and this append duplicates it; kept for behaviour
    # compatibility with the original -- confirm whether it is intended.
    sport_teams.append(myteam)
    num_teams = len(sport_teams)
    api_assert(
        num_teams > 1,
        409,
        "There are no other teams with all different people!"
    )
    overall_rankings = defaultdict(lambda: 0)
    # Rank by win/loss ratio (losses of 0 count as 1 to avoid dividing by 0).
    teams_wlratio = sorted([
        (team, float(team.wins) / (team.losses or 1))
        for team in sport_teams
    ], key=lambda t: t[1], reverse=True)
    for i, (team, wlratio) in enumerate(teams_wlratio):
        overall_rankings[team.name] += i
    # Rank by points ratio and accumulate into the same score.
    teams_pointsratio = sorted([
        (team, team.points_ratio) for team in sport_teams
    ], key=lambda t: t[1], reverse=True)
    for i, (team, points_ratio) in enumerate(teams_pointsratio):
        overall_rankings[team.name] += i
    return overall_rankings
| mit |
ChristCoin7/christcoin | share/qt/extract_strings_qt.py | 2945 | 1844 | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
# Destination file for the generated Qt string table, and the representation
# of an empty msgid as produced by parse_po (a single '""' source line).
OUT_CPP="src/qt/bitcoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
    """
    Parse 'po' format produced by xgettext.
    Return a list of (msgid,msgstr) tuples, where each tuple element is the
    list of quoted source lines that made up the entry.
    """
    entries = []
    current_id = []
    current_str = []
    reading_id = False
    reading_str = False
    for raw_line in text.split('\n'):
        line = raw_line.rstrip('\r')
        if line.startswith('msgid '):
            # A new message begins; flush the previous one, if complete.
            if reading_str:
                entries.append((current_id, current_str))
                reading_str = False
            reading_id = True
            current_id = [line[6:]]
        elif line.startswith('msgstr '):
            reading_id = False
            reading_str = True
            current_str = [line[7:]]
        elif line.startswith('"'):
            # Continuation line of whichever part is currently being read.
            if reading_id:
                current_id.append(line)
            if reading_str:
                current_str.append(line)
    # Flush the trailing message, if any.
    if reading_str:
        entries.append((current_id, current_str))
    return entries
# Collect all C++ sources/headers, run xgettext over them, and emit the
# extracted _( ) strings as QT_TRANSLATE_NOOP entries for Qt Linguist.
files = glob.glob('src/*.cpp') + glob.glob('src/*.h')
# xgettext -n --keyword=_ $FILES
child = Popen(['xgettext','--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *bitcoin_strings[] = {\n')
# Sort by msgid for a stable, diff-friendly output file.
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
    if msgid != EMPTY:
        f.write('QT_TRANSLATE_NOOP("bitcoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};')
f.close()
| mit |
Ma233/musicbox | NEMbox/ui.py | 1 | 22170 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: omi
# @Date: 2014-08-24 21:51:57
# @Last Modified by: omi
# @Last Modified time: 2015-08-02 20:57:35
'''
网易云音乐 Ui
'''
import re
import curses
import terminalsize
from api import NetEase
import hashlib
from scrollstring import *
from storage import Storage
from config import Config
import logger
import os
import platform
log = logger.getLogger(__name__)
def escape_quote(text):
    """Backslash-escape single quotes and turn double quotes into two singles."""
    escaped = text.replace("'", "\\'")
    return escaped.replace('"', "''")
class Ui:
    def __init__(self):
        """Initialize curses, color pairs, terminal geometry and UI state."""
        self.screen = curses.initscr()
        self.screen.timeout(100) # the screen refresh every 100ms
        # character break buffer
        curses.cbreak()
        self.screen.keypad(1)
        self.netease = NetEase()
        curses.start_color()
        curses.init_pair(1, curses.COLOR_GREEN, curses.COLOR_BLACK)
        curses.init_pair(2, curses.COLOR_CYAN, curses.COLOR_BLACK)
        curses.init_pair(3, curses.COLOR_RED, curses.COLOR_BLACK)
        curses.init_pair(4, curses.COLOR_YELLOW, curses.COLOR_BLACK)
        # term resize handling
        size = terminalsize.get_terminal_size()
        self.x = max(size[0], 10)
        self.y = max(size[1], 25)
        # Menus start at 1/5 of the terminal width; the "->" cursor sits
        # three columns to the left of that.
        self.startcol = int(float(self.x) / 5)
        self.indented_startcol = max(self.startcol - 3, 0)
        self.update_space()
        self.lyric = ""
        self.now_lyric = ""
        self.tlyric = ""
        self.storage = Storage()
        self.config = Config()
        self.newversion = False
    def notify(self, summary, song, album, artist):
        """Show a desktop notification for the current song.
        Uses osascript on macOS and notify-send elsewhere; "disable" as the
        summary suppresses the notification.
        NOTE(review): the command is assembled from song metadata and run via
        os.system; escape_quote only handles quotes, so untrusted metadata
        could inject shell commands. Prefer subprocess with an argument list.
        """
        if summary != "disable":
            cmd = ""
            body = "%s\nin %s by %s" % (song, album, artist)
            if platform.system() == "Darwin":
                content = escape_quote(summary + ': ' + body)
                cmd = '/usr/bin/osascript -e $\'display notification "' + content + '"\''
            else:
                cmd = '/usr/bin/notify-send -a NetEase-MusicBox "%s" "%s"' % (summary, body)
            os.system(cmd)
    def build_playinfo(self, song_name, artist, album_name, quality, start, pause=False):
        """Redraw the now-playing header (lines 1-2) with song info."""
        curses.noecho()
        # refresh top 2 line
        self.screen.move(1, 1)
        self.screen.clrtoeol()
        self.screen.move(2, 1)
        self.screen.clrtoeol()
        if pause:
            self.screen.addstr(1, self.indented_startcol, '_ _ z Z Z ' + quality, curses.color_pair(3))
        else:
            self.screen.addstr(1, self.indented_startcol, '♫ ♪ ♫ ♪ ' + quality, curses.color_pair(3))
        self.screen.addstr(1, min(self.indented_startcol + 18, self.x - 1),
                           song_name + self.space + artist + ' < ' + album_name + ' >',
                           curses.color_pair(4))
        # The following script doesn't work. It is intended to scroll the playinfo
        # Scrollstring works by determining how long since it is created, but
        # playinfo is created everytime the screen refreshes (every 500ms), unlike
        # the menu. Is there a workaround?
        # name = song_name + self.space + artist + ' < ' + album_name + ' >'
        # decides whether to scoll
        # if truelen(name) <= self.x - self.indented_startcol - 18:
        # self.screen.addstr(1, min(self.indented_startcol + 18, self.x-1),
        # name,
        # curses.color_pair(4))
        # else:
        # name = scrollstring(name + ' ', start)
        # self.screen.addstr(1, min(self.indented_startcol + 18, self.x-1),
        # str(name),
        # curses.color_pair(4))
        self.screen.refresh()
    def build_process_bar(self, now_playing, total_length, playing_flag, pause_flag, playing_mode):
        """Redraw the progress bar (line 3) and the current lyric (line 4).
        NOTE(review): `now_playing / total_length` assumes float division --
        if both arguments are ints this truncates on Python 2; confirm the
        caller passes floats.
        """
        if (self.storage.database["player_info"]["idx"] >= len(self.storage.database["player_info"]["player_list"])):
            return
        curses.noecho()
        self.screen.move(3, 1)
        self.screen.clrtoeol()
        self.screen.move(4, 1)
        self.screen.clrtoeol()
        if not playing_flag:
            return
        if total_length <= 0:
            total_length = 1
        if now_playing > total_length or now_playing <= 0:
            now_playing = 0
        # Render a 33-cell bar: '=' for elapsed, '>' at the playhead (hidden
        # while paused), spaces for the remainder.
        process = "["
        for i in range(0, 33):
            if i < now_playing / total_length * 33:
                if (i + 1) > now_playing / total_length * 33:
                    if not pause_flag:
                        process += ">"
                        continue
                process += "="
            else:
                process += " "
        process += "] "
        # Zero-pad elapsed and total mm:ss by hand.
        now_minute = int(now_playing / 60)
        if now_minute > 9:
            now_minute = str(now_minute)
        else:
            now_minute = "0" + str(now_minute)
        now_second = int(now_playing - int(now_playing / 60) * 60)
        if now_second > 9:
            now_second = str(now_second)
        else:
            now_second = "0" + str(now_second)
        total_minute = int(total_length / 60)
        if total_minute > 9:
            total_minute = str(total_minute)
        else:
            total_minute = "0" + str(total_minute)
        total_second = int(total_length - int(total_length / 60) * 60)
        if total_second > 9:
            total_second = str(total_second)
        else:
            total_second = "0" + str(total_second)
        process += "(" + now_minute + ":" + now_second + "/" + total_minute + ":" + total_second + ")"
        # Prefix the bar with the playback-mode label.
        if playing_mode == 0:
            process = "顺序播放 " + process
        elif playing_mode == 1:
            process = "顺序循环 " + process
        elif playing_mode == 2:
            process = "单曲循环 " + process
        elif playing_mode == 3:
            process = "随机播放 " + process
        elif playing_mode == 4:
            process = "随机循环 " + process
        else:
            pass
        self.screen.addstr(3, self.startcol - 2, process, curses.color_pair(1))
        song = self.storage.database["songs"][
            self.storage.database["player_info"]["player_list"][self.storage.database["player_info"]["idx"]]
        ]
        # Find the lyric line whose "mm:ss" timestamp matches the playhead;
        # prepend its translation when available and enabled.
        if 'lyric' not in song.keys() or len(song["lyric"]) <= 0:
            self.now_lyric = "[00:00.00]暂无歌词 ~>_<~ \n"
        else:
            key = now_minute + ":" + now_second
            for line in song["lyric"]:
                if key in line:
                    if 'tlyric' not in song.keys() or len(song["tlyric"]) <= 0:
                        self.now_lyric = line
                    else:
                        self.now_lyric = line
                        for tline in song["tlyric"]:
                            if key in tline and self.config.get_item("translation"):
                                self.now_lyric = tline + " || " + self.now_lyric
        # Strip the [mm:ss.xx] timestamp tags before displaying.
        self.now_lyric = re.sub('\[.*?\]', "", self.now_lyric)
        self.screen.addstr(4, self.startcol - 2, str(self.now_lyric), curses.color_pair(3))
        self.screen.refresh()
    def build_loading(self):
        """Show a transient loading hint while network data is fetched."""
        self.screen.addstr(7, self.startcol, '享受高品质音乐,loading...', curses.color_pair(1))
        self.screen.refresh()
    # start is the timestamp of this function being called
    def build_menu(self, datatype, title, datalist, offset, index, step, start):
        """Render one page of a menu, dispatching on `datatype`.
        Args (assumptions marked for review):
            datatype: one of 'main', 'songs', 'fmsongs', 'artists', 'albums',
                'playlists', 'top_playlists', 'toplists', 'playlist_classes',
                'playlist_class_detail', 'djchannels', 'search', 'help'.
            datalist: list of items; element shape depends on datatype.
            offset: index of the first visible item; index: highlighted item;
            step: page size; start: timestamp used for scrolling long titles.
        """
        # keep playing info in line 1
        curses.noecho()
        self.screen.move(5, 1)
        self.screen.clrtobot()
        self.screen.addstr(5, self.startcol, title, curses.color_pair(1))
        if len(datalist) == 0:
            self.screen.addstr(8, self.startcol, '这里什么都没有 -,-')
        else:
            if datatype == 'main':
                for i in range(offset, min(len(datalist), offset + step)):
                    if i == index:
                        self.screen.addstr(i - offset + 9, self.indented_startcol, '-> ' + str(i) + '. ' + datalist[i],
                                           curses.color_pair(2))
                    else:
                        self.screen.addstr(i - offset + 9, self.startcol, str(i) + '. ' + datalist[i])
            elif datatype == 'songs' or datatype == 'fmsongs':
                iter_range = min(len(datalist), offset + step)
                for i in range(offset, iter_range):
                    # this item is focus
                    if i == index:
                        self.screen.addstr(i - offset + 8, 0, ' ' * self.startcol)
                        lead = '-> ' + str(i) + '. '
                        self.screen.addstr(i - offset + 8, self.indented_startcol, lead, curses.color_pair(2))
                        name = str(datalist[i]['song_name'] + self.space + datalist[i][
                            'artist'] + ' < ' + datalist[i]['album_name'] + ' >')
                        # the length decides whether to scoll
                        if truelen(name) < self.x - self.startcol - 1:
                            self.screen.addstr(i - offset + 8, self.indented_startcol + len(lead),
                                               name,
                                               curses.color_pair(2))
                        else:
                            name = scrollstring(name + ' ', start)
                            self.screen.addstr(i - offset + 8, self.indented_startcol + len(lead),
                                               str(name),
                                               curses.color_pair(2))
                    else:
                        self.screen.addstr(i - offset + 8, 0, ' ' * self.startcol)
                        self.screen.addstr(i - offset + 8, self.startcol,
                                           str(str(i) + '. ' + datalist[i]['song_name'] + self.space + datalist[i][
                                               'artist'] + ' < ' + datalist[i]['album_name'] + ' >')[:int(self.x * 2)])
                # Clear the line after the last visible song entry.
                self.screen.addstr(iter_range - offset + 9, 0, ' ' * self.x)
            elif datatype == 'artists':
                for i in range(offset, min(len(datalist), offset + step)):
                    if i == index:
                        self.screen.addstr(i - offset + 9, self.indented_startcol,
                                           '-> ' + str(i) + '. ' + datalist[i]['artists_name'] + self.space + str(
                                               datalist[i]['alias']), curses.color_pair(2))
                    else:
                        self.screen.addstr(i - offset + 9, self.startcol,
                                           str(i) + '. ' + datalist[i]['artists_name'] + self.space + datalist[i][
                                               'alias'])
            elif datatype == 'albums':
                for i in range(offset, min(len(datalist), offset + step)):
                    if i == index:
                        self.screen.addstr(i - offset + 9, self.indented_startcol,
                                           '-> ' + str(i) + '. ' + datalist[i]['albums_name'] + self.space +
                                           datalist[i][
                                               'artists_name'], curses.color_pair(2))
                    else:
                        self.screen.addstr(i - offset + 9, self.startcol,
                                           str(i) + '. ' + datalist[i]['albums_name'] + self.space + datalist[i][
                                               'artists_name'])
            elif datatype == 'playlists':
                for i in range(offset, min(len(datalist), offset + step)):
                    if i == index:
                        self.screen.addstr(i - offset + 9, self.indented_startcol,
                                           '-> ' + str(i) + '. ' + datalist[i]['title'],
                                           curses.color_pair(2))
                    else:
                        self.screen.addstr(i - offset + 9, self.startcol, str(i) + '. ' + datalist[i]['title'])
            elif datatype == 'top_playlists':
                for i in range(offset, min(len(datalist), offset + step)):
                    if i == index:
                        self.screen.addstr(i - offset + 9, self.indented_startcol,
                                           '-> ' + str(i) + '. ' + datalist[i]['playlists_name'] + self.space +
                                           datalist[i]['creator_name'], curses.color_pair(2))
                    else:
                        self.screen.addstr(i - offset + 9, self.startcol,
                                           str(i) + '. ' + datalist[i]['playlists_name'] + self.space + datalist[i][
                                               'creator_name'])
            elif datatype == 'toplists':
                for i in range(offset, min(len(datalist), offset + step)):
                    if i == index:
                        self.screen.addstr(i - offset + 9, self.indented_startcol, '-> ' + str(i) + '. ' + datalist[i],
                                           curses.color_pair(2))
                    else:
                        self.screen.addstr(i - offset + 9, self.startcol, str(i) + '. ' + datalist[i])
            elif datatype == 'playlist_classes' or datatype == 'playlist_class_detail':
                for i in range(offset, min(len(datalist), offset + step)):
                    if i == index:
                        self.screen.addstr(i - offset + 9, self.indented_startcol, '-> ' + str(i) + '. ' + datalist[i],
                                           curses.color_pair(2))
                    else:
                        self.screen.addstr(i - offset + 9, self.startcol, str(i) + '. ' + datalist[i])
            elif datatype == 'djchannels':
                for i in range(offset, min(len(datalist), offset + step)):
                    if i == index:
                        self.screen.addstr(i - offset + 8, self.indented_startcol,
                                           '-> ' + str(i) + '. ' + datalist[i]['song_name'],
                                           curses.color_pair(2))
                    else:
                        self.screen.addstr(i - offset + 8, self.startcol, str(i) + '. ' + datalist[i]['song_name'])
            elif datatype == 'search':
                # Search type picker blocks for input (timeout disabled).
                self.screen.move(6, 1)
                self.screen.clrtobot()
                self.screen.timeout(-1)
                self.screen.addstr(8, self.startcol, '选择搜索类型:', curses.color_pair(1))
                for i in range(offset, min(len(datalist), offset + step)):
                    if i == index:
                        self.screen.addstr(i - offset + 10, self.indented_startcol,
                                           '-> ' + str(i) + '.' + datalist[i - 1],
                                           curses.color_pair(2))
                    else:
                        self.screen.addstr(i - offset + 10, self.startcol, str(i) + '.' + datalist[i - 1])
                self.screen.timeout(100)
            elif datatype == 'help':
                for i in range(offset, min(len(datalist), offset + step)):
                    if i == index:
                        self.screen.addstr(i - offset + 9, self.indented_startcol,
                                           '-> ' + str(i) + '. \'' + (datalist[i][0].upper() + '\'').ljust(11) +
                                           datalist[i][
                                               1] + ' ' + datalist[i][2], curses.color_pair(2))
                    else:
                        self.screen.addstr(i - offset + 9, self.startcol,
                                           str(i) + '. \'' + (datalist[i][0].upper() + '\'').ljust(11) + datalist[i][
                                               1] + ' ' +
                                           datalist[i][2])
                self.screen.addstr(20, 6, 'NetEase-MusicBox 基于Python,所有版权音乐来源于网易,本地不做任何保存')
                self.screen.addstr(21, 10, '按 [G] 到 Github 了解更多信息,帮助改进,或者Star表示支持~~')
                self.screen.addstr(22, self.startcol, 'Build with love to music by omi')
        self.screen.refresh()
    def build_search(self, stype):
        """Prompt for a query and run a search of the given type.
        Returns a (possibly empty) list of result dicts; the user may abort
        the prompt, which also yields [].
        NOTE(review): the bare `except:` clauses below swallow every error
        from the API, including keyboard interrupts.
        """
        self.screen.timeout(-1)
        netease = self.netease
        if stype == 'songs':
            song_name = self.get_param('搜索歌曲:')
            if song_name == '/return':
                return []
            else:
                try:
                    data = netease.search(song_name, stype=1)
                    song_ids = []
                    if 'songs' in data['result']:
                        # NOTE(review): membership test on a *list* of song
                        # dicts -- presumably always False, so the ids path
                        # below is the one actually taken; confirm.
                        if 'mp3Url' in data['result']['songs']:
                            songs = data['result']['songs']
                        # if search song result do not has mp3Url
                        # send ids to get mp3Url
                        else:
                            for i in range(0, len(data['result']['songs'])):
                                song_ids.append(data['result']['songs'][i]['id'])
                            songs = netease.songs_detail(song_ids)
                        return netease.dig_info(songs, 'songs')
                except:
                    return []
        elif stype == 'artists':
            artist_name = self.get_param('搜索艺术家:')
            if artist_name == '/return':
                return []
            else:
                try:
                    data = netease.search(artist_name, stype=100)
                    if 'artists' in data['result']:
                        artists = data['result']['artists']
                        return netease.dig_info(artists, 'artists')
                except:
                    return []
        elif stype == 'albums':
            albums_name = self.get_param('搜索专辑:')
            if albums_name == '/return':
                return []
            else:
                try:
                    data = netease.search(albums_name, stype=10)
                    if 'albums' in data['result']:
                        albums = data['result']['albums']
                        return netease.dig_info(albums, 'albums')
                except:
                    return []
        elif stype == 'search_playlist':
            search_playlist = self.get_param('搜索网易精选集:')
            if search_playlist == '/return':
                return []
            else:
                try:
                    data = netease.search(search_playlist, stype=1000)
                    if 'playlists' in data['result']:
                        playlists = data['result']['playlists']
                        return netease.dig_info(playlists, 'top_playlists')
                except:
                    return []
        return []
    def build_login(self):
        """Prompt for credentials and attempt login.
        Returns [login_info, [account, md5_password]] on success, -1 if the
        user gives up after a failed attempt.
        NOTE(review): md5-hashing the raw password is what the upstream API
        expects; hashlib.md5 needs bytes on Python 3 -- this assumes Python 2.
        """
        self.build_login_bar()
        local_account = self.get_account()
        local_password = hashlib.md5(self.get_password()).hexdigest()
        login_info = self.netease.login(local_account, local_password)
        account = [local_account, local_password]
        if login_info['code'] != 200:
            x = self.build_login_error()
            if x == ord('1'):
                # User chose "try again" -- recurse into a fresh prompt.
                return self.build_login()
            else:
                return -1
        else:
            return [login_info, account]
    def build_login_bar(self):
        """Draw the login form (account/password labels) and park the cursor."""
        curses.noecho()
        self.screen.move(4, 1)
        self.screen.clrtobot()
        self.screen.addstr(5, self.startcol, '请输入登录信息(支持手机登陆)', curses.color_pair(1))
        self.screen.addstr(8, self.startcol, "账号:", curses.color_pair(1))
        self.screen.addstr(9, self.startcol, "密码:", curses.color_pair(1))
        self.screen.move(8, 24)
        self.screen.refresh()
    def build_login_error(self):
        """Show the login-failure menu and return the key the user pressed."""
        self.screen.move(4, 1)
        self.screen.timeout(-1) # disable the screen timeout
        self.screen.clrtobot()
        self.screen.addstr(8, self.startcol, '艾玛,登录信息好像不对呢 (O_O)#', curses.color_pair(1))
        self.screen.addstr(10, self.startcol, '[1] 再试一次')
        self.screen.addstr(11, self.startcol, '[2] 稍后再试')
        self.screen.addstr(14, self.startcol, '请键入对应数字:', curses.color_pair(2))
        self.screen.refresh()
        x = self.screen.getch()
        self.screen.timeout(100) # restore the screen timeout
        return x
    def get_account(self):
        """Read the account name (echoed) from the login form."""
        self.screen.timeout(-1) # disable the screen timeout
        curses.echo()
        account = self.screen.getstr(8, self.startcol + 6, 60)
        self.screen.timeout(100) # restore the screen timeout
        return account
    def get_password(self):
        """Read the password (echo disabled) from the login form."""
        self.screen.timeout(-1) # disable the screen timeout
        curses.noecho()
        password = self.screen.getstr(9, self.startcol + 6, 60)
        self.screen.timeout(100) # restore the screen timeout
        return password
def get_param(self, prompt_string):
# keep playing info in line 1
curses.echo()
self.screen.move(4, 1)
self.screen.clrtobot()
self.screen.addstr(5, self.startcol, prompt_string, curses.color_pair(1))
self.screen.refresh()
info = self.screen.getstr(10, self.startcol, 60)
if info == '':
return '/return'
elif info.strip() is '':
return self.get_param(prompt_string)
else:
return info
    def update_size(self):
        """Re-read the terminal size and rebuild all layout metrics."""
        # get terminal size
        size = terminalsize.get_terminal_size()
        self.x = max(size[0], 10)
        self.y = max(size[1], 25)
        # update indentation offsets
        curses.resizeterm(self.y, self.x)
        self.startcol = int(float(self.x) / 5)
        self.indented_startcol = max(self.startcol - 3, 0)
        self.update_space()
        self.screen.clear()
        self.screen.refresh()
def update_space(self):
if self.x > 140:
self.space = " - "
elif self.x > 80:
self.space = " - "
else:
self.space = " - "
self.screen.refresh()
| mit |
bearstech/ansible | lib/ansible/plugins/action/dellos6_config.py | 111 | 4194 | # Copyright 2015 Peter Sprygada <psprygada@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import time
import glob
from ansible.plugins.action.dellos6 import ActionModule as _ActionModule
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.utils.vars import merge_hash
PRIVATE_KEYS_RE = re.compile('__.+__')
class ActionModule(_ActionModule):
    """Action plugin for dellos6_config: renders an optional `src` template,
    delegates to the base module, optionally writes a config backup, and
    strips internal `__key__` entries from the result.
    """

    def run(self, tmp=None, task_vars=None):
        """Run the module, handling template rendering and backups."""
        if self._task.args.get('src'):
            try:
                self._handle_template()
            except ValueError as exc:
                # Bug fix: `exc.message` does not exist on Python 3; to_text
                # works on both interpreters.
                return dict(failed=True, msg=to_text(exc))

        result = super(ActionModule, self).run(tmp, task_vars)

        if self._task.args.get('backup') and result.get('__backup__'):
            # User requested backup and no error occurred in module.
            # NOTE: If there is a parameter error, _backup key may not be in results.
            filepath = self._write_backup(task_vars['inventory_hostname'],
                                          result['__backup__'])
            result['backup_path'] = filepath

        # strip out any keys that have two leading and two trailing
        # underscore characters
        # (iterate over a snapshot: deleting from a dict while iterating its
        # live key view raises RuntimeError on Python 3)
        for key in list(result.keys()):
            if PRIVATE_KEYS_RE.match(key):
                del result[key]

        return result

    def _get_working_path(self):
        """Return the role path when running inside a role, else the playbook
        base directory."""
        cwd = self._loader.get_basedir()
        if self._task._role is not None:
            cwd = self._task._role._role_path
        return cwd

    def _write_backup(self, host, contents):
        """Write `contents` to backup/<host>_config.<timestamp>, removing any
        earlier backups for the same host. Returns the new file path."""
        backup_path = self._get_working_path() + '/backup'
        if not os.path.exists(backup_path):
            os.mkdir(backup_path)
        for fn in glob.glob('%s/%s*' % (backup_path, host)):
            os.remove(fn)
        tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
        filename = '%s/%s_config.%s' % (backup_path, host, tstamp)
        # Use a context manager so the handle is closed even on write failure
        # (the original leaked the file object).
        with open(filename, 'w') as f:
            f.write(contents)
        return filename

    def _handle_template(self):
        """Resolve the `src` argument to a file, render it with the templar,
        and replace `src` in the task args with the rendered text.

        Raises:
            ValueError: when the source file cannot be found or read.
        """
        src = self._task.args.get('src')
        working_path = self._get_working_path()

        # Bug fix: the original called urlsplit('src') on the literal string
        # 'src', so URL-style sources were never recognized.
        if os.path.isabs(src) or urlsplit(src).scheme:
            source = src
        else:
            source = self._loader.path_dwim_relative(working_path, 'templates', src)
            if not source:
                source = self._loader.path_dwim_relative(working_path, src)

        if not os.path.exists(source):
            raise ValueError('path specified in src not found')

        try:
            with open(source, 'r') as f:
                template_data = to_text(f.read())
        except IOError:
            # Bug fix: the original returned a dict here, which run()
            # discards; raising ValueError surfaces the failure to run().
            raise ValueError('unable to load src file')

        # Create a template search path in the following order:
        # [working_path, self_role_path, dependent_role_paths, dirname(source)]
        searchpath = [working_path]
        if self._task._role is not None:
            searchpath.append(self._task._role._role_path)
            # Bug fix: the original tested hasattr(self._task, "_block:") --
            # note the stray colon -- so this branch was never taken.
            if hasattr(self._task, "_block"):
                dep_chain = self._task._block.get_dep_chain()
                if dep_chain is not None:
                    for role in dep_chain:
                        searchpath.append(role._role_path)
        searchpath.append(os.path.dirname(source))
        self._templar.environment.loader.searchpath = searchpath
        self._task.args['src'] = self._templar.template(template_data)
| gpl-3.0 |
SDSG-Invenio/invenio | invenio/ext/jinja2hacks.py | 11 | 5293 | # -*- coding: utf-8 -*-
# This file is part of Invenio.
# Copyright (C) 2012, 2013, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Support passing legacy Invenio str objects to Jinja2 templates."""
import warnings
try:
from markupsafe import Markup as jinja2_Markup, escape as jinja2_escape
except ImportError:
from jinja2._markupsafe import Markup as jinja2_Markup, \
escape as jinja2_escape
from invenio.utils.deprecation import RemovedInInvenio22Warning
warnings.warn(
"Jinja2Hacks will be disabled in 2.1 and removed in 2.2. "
"Please convert all strings in Jinja2 templates to unicode.",
RemovedInInvenio22Warning
)
def setup_app(app):
    """Patch Jinja2/MarkupSafe so 8-bit str objects decode as UTF-8.

    Jinja2 requires unicode everywhere, while legacy Invenio passes UTF-8
    encoded ``str`` objects; Jinja2's implicit conversion uses the ascii
    codec and fails.  This swaps Jinja2's ``escape`` function and ``Markup``
    class for UTF-8-aware replacements.

    Jinja2 imports ``escape``/``Markup`` into several of its own modules at
    import time, so simply patching the implementing module is not enough:
    every module that already holds a reference must be patched as well.
    This runtime hack remains the least intrusive fix short of converting
    the whole codebase to unicode.
    """
    import jinja2
    import jinja2.runtime
    import jinja2.utils
    import jinja2.nodes
    import jinja2.filters
    import jinja2.ext
    import jinja2.environment
    import jinja2.compiler

    # Modules holding a reference to the escape function.
    escape_sites = (jinja2, jinja2.runtime, jinja2.utils,
                    jinja2.filters, jinja2.compiler)
    # Modules holding a reference to the Markup class.
    markup_sites = escape_sites + (jinja2.nodes, jinja2.ext,
                                   jinja2.environment)

    # Older Jinja2 releases bundle their own copy of markupsafe; patch it
    # too when present.
    try:
        jinja2._markupsafe.escape = utf8escape
        jinja2._markupsafe.Markup = Markup
    except AttributeError:
        pass

    for module in escape_sites:
        module.escape = utf8escape
    for module in markup_sites:
        module.Markup = Markup

    # Escape/Markup replacement in MarkupSafe library.
    # FIXME causes recursive calls in `Markup.__new__` and `escape`
    # try:
    #    import markupsafe
    #    markupsafe.escape = utf8escape
    #    #markupsafe.Markup = Markup
    # except ImportError:
    #    pass
    return app
def utf8escape(s):
    """Escape *s*, first decoding 8-bit ``str`` input as UTF-8.

    Replacement for MarkupSafe's escape function that tolerates legacy
    UTF-8 encoded ``str`` objects.

    WARNING: Do not use this method. Use jinja2.escape() instead.
    """
    if not isinstance(s, str):
        return jinja2_escape(s)
    warnings.warn("Convert string '{0}' in template to unicode.".format(s),
                  RuntimeWarning, stacklevel=3)
    return jinja2_escape(s.decode('utf8'))
# Keep the replaced function's name intact for introspection.
utf8escape.__name__ = jinja2_escape.__name__
class Markup(jinja2_Markup):
    """Markup replacement that decodes 8-bit strings as UTF-8 by default.

    Only applies when the caller did not specify an encoding.

    WARNING: Do not use this class. Use jinja2.Markup instead.
    """

    def __new__(cls, base=u'', encoding=None, errors='strict'):
        """Default the encoding to utf8 when *base* is an 8-bit str."""
        if isinstance(base, str) and encoding is None:
            warnings.warn(
                "Convert string '{0}' in template to unicode.".format(base),
                RuntimeWarning, stacklevel=3)
            encoding = 'utf8'
        return jinja2_Markup.__new__(cls, base=base, encoding=encoding,
                                     errors=errors)
| gpl-2.0 |
byran/TeamCityDemo | gtest-1.7.0/test/gtest_catch_exceptions_test.py | 2139 | 9901 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's exception catching behavior.
This script invokes gtest_catch_exceptions_test_ and
gtest_catch_exceptions_ex_test_ (programs written with
Google Test) and verifies their output.
"""
__author__ = 'vladl@google.com (Vlad Losev)'
import os
import gtest_test_utils
# Constants.
FLAG_PREFIX = '--gtest_'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
NO_CATCH_EXCEPTIONS_FLAG = FLAG_PREFIX + 'catch_exceptions=0'
FILTER_FLAG = FLAG_PREFIX + 'filter'

# Path to the gtest_catch_exceptions_ex_test_ binary, compiled with
# exceptions enabled.
EX_EXE_PATH = gtest_test_utils.GetTestExecutablePath(
    'gtest_catch_exceptions_ex_test_')

# Path to the gtest_catch_exceptions_test_ binary, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
    'gtest_catch_exceptions_no_ex_test_')

environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar

# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely.  Therefore they are incompatible with
# the premature-exit-file protocol by design.  Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)

# Output of --gtest_list_tests, used to detect platform capabilities.
TEST_LIST = gtest_test_utils.Subprocess(
    [EXE_PATH, LIST_TESTS_FLAG], env=environ).output

# SEH exceptions exist only on Windows; detect support from the test list.
SUPPORTS_SEH_EXCEPTIONS = 'ThrowsSehException' in TEST_LIST

if SUPPORTS_SEH_EXCEPTIONS:
  BINARY_OUTPUT = gtest_test_utils.Subprocess([EXE_PATH], env=environ).output

EX_BINARY_OUTPUT = gtest_test_utils.Subprocess(
    [EX_EXE_PATH], env=environ).output

# The tests.
if SUPPORTS_SEH_EXCEPTIONS:
  # pylint:disable-msg=C6302
  class CatchSehExceptionsTest(gtest_test_utils.TestCase):
    """Tests exception-catching behavior."""

    def TestSehExceptions(self, test_output):
      # Verifies that an SEH exception thrown from every possible location
      # (fixture ctor/dtor, SetUpTestCase/TearDownTestCase, SetUp/TearDown
      # and the test body) was caught and reported in test_output.
      self.assert_('SEH exception with code 0x2a thrown '
                   'in the test fixture\'s constructor'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown '
                   'in the test fixture\'s destructor'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in SetUpTestCase()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in TearDownTestCase()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in SetUp()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in TearDown()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in the test body'
                   in test_output)

    def testCatchesSehExceptionsWithCxxExceptionsEnabled(self):
      self.TestSehExceptions(EX_BINARY_OUTPUT)

    def testCatchesSehExceptionsWithCxxExceptionsDisabled(self):
      self.TestSehExceptions(BINARY_OUTPUT)
class CatchCxxExceptionsTest(gtest_test_utils.TestCase):
  """Tests C++ exception-catching behavior.

  Tests in this test case verify that:
  * C++ exceptions are caught and logged as C++ (not SEH) exceptions
  * Exception thrown affect the remainder of the test work flow in the
    expected manner.
  """

  def testCatchesCxxExceptionsInFixtureConstructor(self):
    self.assert_('C++ exception with description '
                 '"Standard C++ exception" thrown '
                 'in the test fixture\'s constructor'
                 in EX_BINARY_OUTPUT)
    self.assert_('unexpected' not in EX_BINARY_OUTPUT,
                 'This failure belongs in this test only if '
                 '"CxxExceptionInConstructorTest" (no quotes) '
                 'appears on the same line as words "called unexpectedly"')

  # This test is defined only when the target binary lists the destructor
  # test (destructors that throw terminate() on some platforms).
  if ('CxxExceptionInDestructorTest.ThrowsExceptionInDestructor' in
      EX_BINARY_OUTPUT):

    def testCatchesCxxExceptionsInFixtureDestructor(self):
      self.assert_('C++ exception with description '
                   '"Standard C++ exception" thrown '
                   'in the test fixture\'s destructor'
                   in EX_BINARY_OUTPUT)
      self.assert_('CxxExceptionInDestructorTest::TearDownTestCase() '
                   'called as expected.'
                   in EX_BINARY_OUTPUT)

  def testCatchesCxxExceptionsInSetUpTestCase(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in SetUpTestCase()'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInConstructorTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest constructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest::SetUp() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest::TearDown() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest test body '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)

  def testCatchesCxxExceptionsInTearDownTestCase(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in TearDownTestCase()'
                 in EX_BINARY_OUTPUT)

  def testCatchesCxxExceptionsInSetUp(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in SetUp()'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTest::TearDown() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('unexpected' not in EX_BINARY_OUTPUT,
                 'This failure belongs in this test only if '
                 '"CxxExceptionInSetUpTest" (no quotes) '
                 'appears on the same line as words "called unexpectedly"')

  def testCatchesCxxExceptionsInTearDown(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in TearDown()'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTearDownTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTearDownTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)

  def testCatchesCxxExceptionsInTestBody(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in the test body'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTestBodyTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTestBodyTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTestBodyTest::TearDown() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)

  def testCatchesNonStdCxxExceptions(self):
    self.assert_('Unknown C++ exception thrown in the test body'
                 in EX_BINARY_OUTPUT)

  def testUnhandledCxxExceptionsAbortTheProgram(self):
    # Filters out SEH exception tests on Windows. Unhandled SEH exceptions
    # cause tests to show pop-up windows there.
    # FIX: local constant was misspelled FITLER_OUT_SEH_TESTS_FLAG.
    FILTER_OUT_SEH_TESTS_FLAG = FILTER_FLAG + '=-*Seh*'
    # By default, Google Test doesn't catch the exceptions.
    uncaught_exceptions_ex_binary_output = gtest_test_utils.Subprocess(
        [EX_EXE_PATH,
         NO_CATCH_EXCEPTIONS_FLAG,
         FILTER_OUT_SEH_TESTS_FLAG],
        env=environ).output

    self.assert_('Unhandled C++ exception terminating the program'
                 in uncaught_exceptions_ex_binary_output)
    self.assert_('unexpected' not in uncaught_exceptions_ex_binary_output)
if __name__ == '__main__':
  # Script entry point: delegate test discovery/execution to the helper.
  gtest_test_utils.Main()
| mit |
BuildingLink/sentry | src/sentry/runner/commands/cleanup.py | 4 | 6607 | """
sentry.runner.commands.cleanup
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2015 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import click
from datetime import timedelta
from django.utils import timezone
from sentry.runner.decorators import configuration
def get_project(value):
    """Resolve *value* to a project id.

    Accepts either a numeric project id or an ``org/project`` slug pair.
    Returns the integer id, or None when the value cannot be resolved.
    """
    from sentry.models import Project

    if value.isdigit():
        return int(value)
    if '/' not in value:
        return None
    org_slug, project_slug = value.split('/', 1)
    try:
        return Project.objects.get_from_cache(
            organization__slug=org_slug,
            slug=project_slug,
        ).id
    except Project.DoesNotExist:
        return None
@click.command()
@click.option('--days', default=30, show_default=True, help='Numbers of days to truncate on.')
@click.option('--project', help='Limit truncation to only entries from project.')
@click.option('--concurrency', type=int, default=1, show_default=True, help='The number of concurrent workers to run.')
@click.option('--silent', '-q', default=False, is_flag=True, help='Run quietly. No output on success.')
@click.option('--model', '-m', multiple=True)
@configuration
def cleanup(days, project, concurrency, silent, model):
    """Delete a portion of trailing data based on creation date.

    All data that is older than `--days` will be deleted.  The default for
    this is 30 days.  In the default setting all projects will be truncated
    but if you have a specific project you want to limit this to this can be
    done with the `--project` flag which accepts a project ID or a string
    with the form `org/project` where both are slugs.
    """
    # NOTE(review): `concurrency` is accepted but never referenced in this
    # function -- confirm whether it is consumed elsewhere or dead.
    from sentry.app import nodestore
    from sentry.db.deletion import BulkDeleteQuery
    from sentry.models import (
        Event, EventMapping, Group, GroupRuleStatus, GroupTagValue,
        LostPasswordHash, TagValue, GroupEmailThread,
    )

    # Lower-cased model names the user restricted the run to via --model.
    # (The `model` CLI tuple is consumed here; the later loops reuse the
    # name `model` as a loop variable, shadowing the parameter.)
    models = {m.lower() for m in model}

    def is_filtered(model):
        # True when this model should be skipped under the --model filter.
        if not models:
            return False
        return model.lower() not in models

    # these models should be safe to delete without cascades, in order
    BULK_DELETES = (
        (GroupRuleStatus, 'date_added'),
        (GroupTagValue, 'last_seen'),
        (TagValue, 'last_seen'),
        (GroupEmailThread, 'date'),
    )

    # models deleted row-by-row at the end (see execute_generic below)
    GENERIC_DELETES = (
        (Event, 'datetime'),
        (Group, 'last_seen'),
    )

    # Password reset hashes are short-lived: always pruned after 48 hours,
    # independent of --days.
    if not silent:
        click.echo("Removing expired values for LostPasswordHash")

    if is_filtered('LostPasswordHash'):
        if not silent:
            click.echo('>> Skipping LostPasswordHash')
    else:
        LostPasswordHash.objects.filter(
            date_added__lte=timezone.now() - timedelta(hours=48)
        ).delete()

    # Resolve --project to an id; NodeStore cleanup only runs in the
    # unfiltered (all-projects) case.
    project_id = None
    if project:
        click.echo("Bulk NodeStore deletion not available for project selection", err=True)
        project_id = get_project(project)
        if project_id is None:
            click.echo('Error: Project not found', err=True)
            raise click.Abort()
    else:
        if not silent:
            click.echo("Removing old NodeStore values")
        if is_filtered('NodeStore'):
            if not silent:
                click.echo('>> Skipping NodeStore')
        else:
            cutoff = timezone.now() - timedelta(days=days)
            try:
                nodestore.cleanup(cutoff)
            except NotImplementedError:
                click.echo("NodeStore backend does not support cleanup operation", err=True)

    # Fast bulk deletions first (no cascade handling required).
    for model, dtfield in BULK_DELETES:
        if not silent:
            click.echo("Removing {model} for days={days} project={project}".format(
                model=model.__name__,
                days=days,
                project=project or '*',
            ))
        if is_filtered(model.__name__):
            if not silent:
                click.echo('>> Skipping %s' % model.__name__)
        else:
            BulkDeleteQuery(
                model=model,
                dtfield=dtfield,
                days=days,
                project_id=project_id,
            ).execute()

    # EventMapping is fairly expensive and is special cased as it's likely you
    # won't need a reference to an event for nearly as long
    if not silent:
        click.echo("Removing expired values for EventMapping")
    if is_filtered('EventMapping'):
        if not silent:
            click.echo('>> Skipping EventMapping')
    else:
        BulkDeleteQuery(
            model=EventMapping,
            dtfield='date_added',
            days=min(days, 7),
            project_id=project_id,
        ).execute()

    # Clean up FileBlob instances which are no longer used and aren't super
    # recent (as there could be a race between blob creation and reference)
    if not silent:
        click.echo("Cleaning up unused FileBlob references")
    if is_filtered('FileBlob'):
        if not silent:
            click.echo('>> Skipping FileBlob')
    else:
        cleanup_unused_files(silent)

    # Row-by-row deletions last: these honor cascades (execute_generic).
    for model, dtfield in GENERIC_DELETES:
        if not silent:
            click.echo("Removing {model} for days={days} project={project}".format(
                model=model.__name__,
                days=days,
                project=project or '*',
            ))
        if is_filtered(model.__name__):
            if not silent:
                click.echo('>> Skipping %s' % model.__name__)
        else:
            BulkDeleteQuery(
                model=model,
                dtfield=dtfield,
                days=days,
                project_id=project_id,
            ).execute_generic()
def cleanup_unused_files(quiet=False):
    """Delete FileBlob rows (and their underlying files) with no references.

    Only blobs older than one day are considered, which avoids racing with
    a blob that was just created and is about to be referenced.  When
    *quiet* is False a progress bar is shown while iterating.
    """
    from sentry.models import File, FileBlob, FileBlobIndex
    if quiet:
        from sentry.utils.query import RangeQuerySetWrapper
    else:
        from sentry.utils.query import RangeQuerySetWrapperWithProgressBar as RangeQuerySetWrapper

    min_age = timezone.now() - timedelta(days=1)
    candidates = FileBlob.objects.filter(
        timestamp__lte=min_age,
    )
    for candidate in RangeQuerySetWrapper(candidates):
        referenced = (
            FileBlobIndex.objects.filter(blob=candidate).exists()
            or File.objects.filter(blob=candidate).exists()
        )
        if not referenced:
            candidate.delete()
| bsd-3-clause |
datacratic/StarCluster | starcluster/static.py | 1 | 12294 | # Copyright 2009-2014 Justin Riley
#
# This file is part of StarCluster.
#
# StarCluster is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# StarCluster is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with StarCluster. If not, see <http://www.gnu.org/licenses/>.
"""
Module for storing static data structures
"""
import os
import sys
import getpass
import tempfile
def __expand_all(path):
    """Expand '~' and environment variables in *path*."""
    return os.path.expandvars(os.path.expanduser(path))
def __expand_all_in_list(lst):
    """Expand user/env variables in every entry of *lst*, in place.

    Returns the same (mutated) list object.
    """
    lst[:] = [__expand_all(entry) for entry in lst]
    return lst
def __makedirs(path, exit_on_failure=False):
    """Create *path* (including parents) if it does not exist.

    When *exit_on_failure* is True, a path that cannot be created or that
    exists but is not a directory is fatal: an error is written to stderr
    and the process exits with status 1.
    """
    if not os.path.exists(path):
        try:
            os.makedirs(path)
        except OSError:
            if exit_on_failure:
                sys.stderr.write("!!! ERROR - %s *must* be a directory\n" %
                                 path)
                # BUG FIX: the error used to be reported without exiting,
                # so a "fatal" failure let the program continue; honor
                # exit_on_failure like the existing-non-directory branch.
                sys.exit(1)
    elif not os.path.isdir(path) and exit_on_failure:
        sys.stderr.write("!!! ERROR - %s *must* be a directory\n" % path)
        sys.exit(1)
def create_sc_config_dirs():
    # Create the per-user ~/.starcluster tree.  The top-level config dir is
    # mandatory (failure aborts); plugin and log dirs are best-effort.
    __makedirs(STARCLUSTER_CFG_DIR, exit_on_failure=True)
    __makedirs(STARCLUSTER_PLUGIN_DIR)
    __makedirs(STARCLUSTER_LOG_DIR)
VERSION = "0.9999"
PID = os.getpid()

# Prefer /tmp when it exists so temp paths are stable across platforms.
TMP_DIR = tempfile.gettempdir()
if os.path.exists("/tmp"):
    TMP_DIR = "/tmp"

CURRENT_USER = 'unknown_user'
try:
    CURRENT_USER = getpass.getuser()
except Exception:
    # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt.  getpass.getuser() can fail (e.g. no passwd
    # entry); fall back to the placeholder in that case only.
    pass
SSH_TEMPLATE = 'ssh %(opts)s %(user)s@%(host)s'

# Per-user StarCluster configuration layout ($HOME/.starcluster).
STARCLUSTER_CFG_DIR = os.path.join(os.path.expanduser('~'), '.starcluster')
STARCLUSTER_CFG_FILE = os.path.join(STARCLUSTER_CFG_DIR, 'config')
STARCLUSTER_PLUGIN_DIR = os.path.join(STARCLUSTER_CFG_DIR, 'plugins')
STARCLUSTER_LOG_DIR = os.path.join(STARCLUSTER_CFG_DIR, 'logs')

# On-instance receipt written by StarCluster during cluster setup.
STARCLUSTER_RECEIPT_DIR = "/var/run/starcluster"
STARCLUSTER_RECEIPT_FILE = os.path.join(STARCLUSTER_RECEIPT_DIR, "receipt.pkl")

# AWS account id owning the official StarCluster AMIs.
STARCLUSTER_OWNER_ID = 342652561657

# Log/crash file locations (crash reports are PID-suffixed).
DEBUG_FILE = os.path.join(STARCLUSTER_LOG_DIR, 'debug.log')
SSH_DEBUG_FILE = os.path.join(STARCLUSTER_LOG_DIR, 'ssh-debug.log')
AWS_DEBUG_FILE = os.path.join(STARCLUSTER_LOG_DIR, 'aws-debug.log')
CRASH_FILE = os.path.join(STARCLUSTER_LOG_DIR, 'crash-report-%d.txt' % PID)

# StarCluster BASE AMIs (us-east-1)
BASE_AMI_32 = "ami-9bf9c9f2"
BASE_AMI_64 = "ami-3393a45a"
BASE_AMI_HVM = "ami-6b211202"

# Naming scheme for EC2 security groups managed by StarCluster.
SECURITY_GROUP_PREFIX = "@sc-"
SECURITY_GROUP_TEMPLATE = SECURITY_GROUP_PREFIX + "%s"
VOLUME_GROUP_NAME = "volumecreator"
VOLUME_GROUP = SECURITY_GROUP_PREFIX + VOLUME_GROUP_NAME

# Cluster group tag keys
VERSION_TAG = SECURITY_GROUP_PREFIX + 'version'
CORE_TAG = SECURITY_GROUP_PREFIX + 'core'
USER_TAG = SECURITY_GROUP_PREFIX + 'user'
MAX_TAG_LEN = 255

# Internal StarCluster userdata filenames
UD_PLUGINS_FNAME = "_sc_plugins.txt"
UD_VOLUMES_FNAME = "_sc_volumes.txt"
UD_ALIASES_FNAME = "_sc_aliases.txt"

# EC2 instance metadata service endpoint.
INSTANCE_METADATA_URI = "http://169.254.169.254/latest"

# Valid EC2 instance/volume lifecycle states.
INSTANCE_STATES = ['pending', 'running', 'shutting-down',
                   'terminated', 'stopping', 'stopped']
VOLUME_STATUS = ['creating', 'available', 'in-use',
                 'deleting', 'deleted', 'error']
VOLUME_ATTACH_STATUS = ['attaching', 'attached', 'detaching', 'detached']

# Map of EC2 instance type -> CPU architectures it supports.
INSTANCE_TYPES = {
    't1.micro': ['i386', 'x86_64'],
    't2.nano': ['i386', 'x86_64'],
    't2.micro': ['i386', 'x86_64'],
    't2.small': ['i386', 'x86_64'],
    't2.medium': ['i386', 'x86_64'],
    'm1.small': ['i386', 'x86_64'],
    'm1.medium': ['i386', 'x86_64'],
    'm1.large': ['x86_64'],
    'm1.xlarge': ['x86_64'],
    'c1.medium': ['i386', 'x86_64'],
    'c1.xlarge': ['x86_64'],
    'm2.xlarge': ['x86_64'],
    'm2.2xlarge': ['x86_64'],
    'm2.4xlarge': ['x86_64'],
    'm3.medium': ['x86_64'],
    'm3.large': ['x86_64'],
    'm3.xlarge': ['x86_64'],
    'm3.2xlarge': ['x86_64'],
    'm4.large': ['x86_64'],
    'm4.xlarge': ['x86_64'],
    'm4.2xlarge': ['x86_64'],
    'm4.4xlarge': ['x86_64'],
    'm4.10xlarge': ['x86_64'],
    'm4.16xlarge': ['x86_64'],
    'r3.large': ['x86_64'],
    'r3.xlarge': ['x86_64'],
    'r3.2xlarge': ['x86_64'],
    'r3.4xlarge': ['x86_64'],
    'r3.8xlarge': ['x86_64'],
    'x1.32xlarge': ['x86_64'],
    'x1.16xlarge': ['x86_64'],
    'r4.large': ['x86_64'],
    'r4.xlarge': ['x86_64'],
    'r4.2xlarge': ['x86_64'],
    'r4.4xlarge': ['x86_64'],
    'r4.8xlarge': ['x86_64'],
    'r4.16xlarge': ['x86_64'],
    'cc1.4xlarge': ['x86_64'],
    'cc2.8xlarge': ['x86_64'],
    'cg1.4xlarge': ['x86_64'],
    'g2.2xlarge': ['x86_64'],
    'g2.8xlarge': ['x86_64'],
    'p2.xlarge': ['x86_64'],
    'p2.8xlarge': ['x86_64'],
    'p2.16xlarge': ['x86_64'],
    'cr1.8xlarge': ['x86_64'],
    'hi1.4xlarge': ['x86_64'],
    'hs1.8xlarge': ['x86_64'],
    'c3.large': ['x86_64'],
    'c3.xlarge': ['x86_64'],
    'c3.2xlarge': ['x86_64'],
    'c3.4xlarge': ['x86_64'],
    'c3.8xlarge': ['x86_64'],
    'c4.large': ['x86_64'],
    'c4.xlarge': ['x86_64'],
    'c4.2xlarge': ['x86_64'],
    'c4.4xlarge': ['x86_64'],
    'c4.8xlarge': ['x86_64'],
    'i2.xlarge': ['x86_64'],
    'i2.2xlarge': ['x86_64'],
    'i2.4xlarge': ['x86_64'],
    'i2.8xlarge': ['x86_64'],
    'd2.xlarge': ['x86_64'],
    'd2.2xlarge': ['x86_64'],
    'd2.4xlarge': ['x86_64'],
    'd2.8xlarge': ['x86_64']
}

# Instance type families, grouped by capability.
T1_INSTANCE_TYPES = ['t1.micro']
T2_INSTANCE_TYPES = ['t2.nano', 't2.micro', 't2.small', 't2.medium']
SEC_GEN_TYPES = ['m3.medium', 'm3.large', 'm3.xlarge', 'm3.2xlarge']
CLUSTER_COMPUTE_TYPES = ['cc1.4xlarge', 'cc2.8xlarge']
CLUSTER_GPU_TYPES = [
    'p2.xlarge', 'p2.8xlarge', 'p2.16xlarge',
    'g2.2xlarge', 'g2.8xlarge',
    'cg1.4xlarge',
]
CLUSTER_HIMEM_TYPES = ['cr1.8xlarge']
HIMEM_TYPES = ['r3.large', 'r3.xlarge', 'r3.2xlarge', 'r3.4xlarge',
               'r3.8xlarge', 'r4.large', 'r4.xlarge', 'r4.2xlarge',
               'r4.4xlarge', 'r4.8xlarge', 'r4.16xlarge', 'x1.32xlarge',
               'x1.16xlarge']
HI_IO_TYPES = ['hi1.4xlarge']
HI_STORAGE_TYPES = ['hs1.8xlarge']
M3_COMPUTE_TYPES = ['c3.large', 'c3.xlarge', 'c3.2xlarge', 'c3.4xlarge',
                    'c3.8xlarge']
M4_COMPUTE_TYPES = ['c4.large', 'c4.xlarge', 'c4.2xlarge', 'c4.4xlarge',
                    'c4.8xlarge', 'm4.large', 'm4.xlarge', 'm4.2xlarge',
                    'm4.4xlarge', 'm4.10xlarge', 'm4.16xlarge']
I2_STORAGE_TYPES = ['i2.xlarge', 'i2.2xlarge', 'i2.4xlarge', 'i2.8xlarge']
DENSE_STORAGE_TYPES = ['d2.xlarge', 'd2.2xlarge', 'd2.4xlarge', 'd2.8xlarge']

# Types that require (or also support) HVM virtualization.
HVM_ONLY_TYPES = (CLUSTER_COMPUTE_TYPES + CLUSTER_GPU_TYPES +
                  CLUSTER_HIMEM_TYPES + I2_STORAGE_TYPES + HIMEM_TYPES +
                  T2_INSTANCE_TYPES + DENSE_STORAGE_TYPES + M4_COMPUTE_TYPES)
HVM_TYPES = (HVM_ONLY_TYPES + HI_IO_TYPES + HI_STORAGE_TYPES + SEC_GEN_TYPES +
             M3_COMPUTE_TYPES)
EBS_ONLY_TYPES = T1_INSTANCE_TYPES + T2_INSTANCE_TYPES

# Always make sure these match instances listed here:
# http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html
# StarCluster additionally adds cc1.4xlarge to the list - EC2 is slowly
# migrating folks away from this type in favor of cc2.8xlarge but the type
# still works for some older accounts.
PLACEMENT_GROUP_TYPES = (M3_COMPUTE_TYPES + HVM_ONLY_TYPES +
                         HI_IO_TYPES + HI_STORAGE_TYPES)
# T2 instances are HVM_ONLY_TYPES however they're not compatible with placement
# groups so remove them from the list
for itype in T2_INSTANCE_TYPES:
    PLACEMENT_GROUP_TYPES.remove(itype)

# Only add a region to this list after testing that you can create and delete a
# placement group there.
PLACEMENT_GROUP_REGIONS = ['us-east-1', 'us-west-2', 'eu-west-1',
                           'ap-northeast-1', 'ap-southeast-1',
                           'ap-southeast-2']

PROTOCOLS = ['tcp', 'udp', 'icmp']

WORLD_CIDRIP = '0.0.0.0/0'

DEFAULT_SSH_PORT = 22

AVAILABLE_SHELLS = {
    "bash": True,
    "zsh": True,
    "csh": True,
    "ksh": True,
    "tcsh": True,
}

# Config schema tables.  Each entry maps a setting name to a tuple of:
# (type, required?, default, allowed options, post-processing callback).
GLOBAL_SETTINGS = {
    # setting, type, required?, default, options, callback
    'default_template': (str, False, None, None, None),
    'enable_experimental': (bool, False, False, None, None),
    'refresh_interval': (int, False, 30, None, None),
    'web_browser': (str, False, None, None, None),
    'include': (list, False, [], None, None),
}

AWS_SETTINGS = {
    'aws_access_key_id': (str, False, None, None, None),
    'aws_secret_access_key': (str, False, None, None, None),
    'aws_user_id': (str, False, None, None, None),
    'ec2_cert': (str, False, None, None, __expand_all),
    'ec2_private_key': (str, False, None, None, __expand_all),
    'aws_port': (int, False, None, None, None),
    'aws_ec2_path': (str, False, '/', None, None),
    'aws_s3_path': (str, False, '/', None, None),
    'aws_is_secure': (bool, False, True, None, None),
    'aws_region_name': (str, False, None, None, None),
    'aws_region_host': (str, False, None, None, None),
    'aws_s3_host': (str, False, None, None, None),
    'aws_proxy': (str, False, None, None, None),
    'aws_proxy_port': (int, False, None, None, None),
    'aws_proxy_user': (str, False, None, None, None),
    'aws_proxy_pass': (str, False, None, None, None),
    'aws_validate_certs': (bool, False, True, None, None),
}

KEY_SETTINGS = {
    'key_location': (str, True, None, None, __expand_all),
}

EBS_VOLUME_SETTINGS = {
    'volume_id': (str, True, None, None, None),
    'device': (str, False, None, None, None),
    'partition': (int, False, None, None, None),
    'mount_path': (str, True, None, None, None),
}

PLUGIN_SETTINGS = {
    'setup_class': (str, True, None, None, None),
}

PERMISSION_SETTINGS = {
    # either you're specifying an ip-based rule
    'ip_protocol': (str, False, 'tcp', PROTOCOLS, None),
    'from_port': (int, True, None, None, None),
    'to_port': (int, True, None, None, None),
    'cidr_ip': (str, False, '0.0.0.0/0', None, None),
    # or you're allowing full access to another security group
    # skip this for now...these two options are mutually exclusive to
    # the four settings above and source_group is less commonly
    # used. address this when someone requests it.
    # 'source_group': (str, False, None),
    # 'source_group_owner': (int, False, None),
}

CLUSTER_SETTINGS = {
    'spot_bid': (float, False, None, None, None),
    'cluster_size': (int, True, None, None, None),
    'cluster_user': (str, False, 'sgeadmin', None, None),
    'cluster_shell': (str, False, 'bash', AVAILABLE_SHELLS.keys(), None),
    'public_ips': (bool, False, None, None, None),
    'master_image_id': (str, False, None, None, None),
    'master_instance_type': (str, False, None, INSTANCE_TYPES.keys(), None),
    'node_image_id': (str, False, None, None, None),
    'node_instance_type': (list, False, [], None, None),
    'node_instance_array': (list, False, [], None, None),
    'availability_zone': (str, False, None, None, None),
    'keyname': (str, True, None, None, None),
    'extends': (str, False, None, None, None),
    'volumes': (list, False, [], None, None),
    'plugins': (list, False, [], None, None),
    'permissions': (list, False, [], None, None),
    'userdata_scripts': (list, False, [], None, __expand_all_in_list),
    'disable_queue': (bool, False, False, None, None),
    'force_spot_master': (bool, False, False, None, None),
    'disable_cloudinit': (bool, False, False, None, None),
    'dns_prefix': (bool, False, False, None, None),
    'dns_suffix': (bool, False, False, None, None),
    'subnet_ids': (list, False, [], None, None),
    'impaired_threshold_sec': (int, False, 120, None, None)
}

NODE_SETTINGS = {
    'size': (int, False, 0, None, None),
    'spot_bid': (float, False, None, None, None),
    'image_id': (str, True, None, None, None),
    'instance_type': (str, True, None, INSTANCE_TYPES.keys(), None),
    'selection_factor': (float, False, 1, None, None)
}

MASTER_CFG_FILE = '/etc/starcluster'  # vanilla improvements
| gpl-3.0 |
steventhan/learning-journal | learning_journal/scripts/initializedb.py | 1 | 1080 | import os
import sys
import transaction
from pyramid.paster import (
get_appsettings,
setup_logging,
)
from pyramid.scripts.common import parse_vars
from ..models.meta import Base
from ..models import (
get_engine,
get_session_factory,
get_tm_session,
)
from ..models import Entry
def usage(argv):
    """Print a usage message naming this script, then exit with status 1."""
    script_name = os.path.basename(argv[0])
    message = ('usage: %s <config_uri> [var=value]\n'
               '(example: "%s development.ini")' % (script_name, script_name))
    print(message)
    sys.exit(1)
def main(argv=sys.argv):
    """Create the database schema and seed it with one sample Entry.

    Usage: <script> <config_uri> [var=value ...]

    Raises:
        KeyError: if the DATABASE_URL environment variable is not set.
    """
    if len(argv) < 2:
        usage(argv)  # prints usage and exits with status 1
    config_uri = argv[1]
    options = parse_vars(argv[2:])
    setup_logging(config_uri)
    settings = get_appsettings(config_uri, options=options)
    # Override the config file's sqlalchemy.url with the value from the
    # environment (e.g. for hosted deployments); DATABASE_URL must be set.
    settings["sqlalchemy.url"] = os.environ["DATABASE_URL"]
    engine = get_engine(settings)
    # Create all tables known to the declarative Base metadata.
    Base.metadata.create_all(engine)
    session_factory = get_session_factory(engine)
    # transaction.manager commits on clean exit from the with-block.
    with transaction.manager:
        dbsession = get_tm_session(session_factory, transaction.manager)
        model = Entry(title='Test', body='<h1>Test</h1>')
        dbsession.add(model)
| mit |
hef/samba | examples/scripts/vfs/media_harmony/trigger_avid_update.py | 44 | 3225 | #!/usr/bin/python
import os, socket, sys, stat
######################################################################
##
## trigger_avid_update.py for media_harmony VFS module.
##
## Copyright (C) Andrew Klaassen 2012.
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
######################################################################
#
# Change avid_shares and ip_prefix as appropriate for your network.
#
# UNC paths of the Avid media shares to check.  Each entry resolves to
# '\\mediaharmony01\projectN\' once the backslash escapes are applied;
# the trailing backslash is part of the path.
avid_shares = (
    '\\\\mediaharmony01\\project1\\',
    '\\\\mediaharmony01\\project2\\',
    '\\\\mediaharmony01\\project3\\',
)
# Only local IP addresses starting with this prefix are used to identify
# this workstation (see the my_ips filter below).
ip_prefix = '192.168.1.'
if __name__ == "__main__":

    # Identify this workstation: first local IP matching ip_prefix plus the
    # Windows username.  Both become part of the trigger-file name below.
    my_ips = [ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if ip[:len(ip_prefix)] == ip_prefix]
    if not my_ips:
        print 'No IP address found. Aborting.'
        dummy = raw_input("\nPress Enter to finish: ")
        sys.exit()
    my_ip = my_ips[0]

    my_name = os.environ.get('USERNAME')

    for avid_share in avid_shares:
        # Collect the Avid media directories on this share: the legacy
        # 'OMFI MediaFiles' dir plus every subdirectory of
        # 'Avid MediaFiles/MXF'.
        media_dirs = []
        omfi_dir = os.path.join(avid_share, 'OMFI MediaFiles')
        if os.path.exists(omfi_dir):
            media_dirs.append(omfi_dir)
        mxf_root = os.path.join(avid_share, 'Avid MediaFiles', 'MXF')
        if os.path.exists(mxf_root):
            mxf_children = os.listdir(mxf_root)
            for child in mxf_children:
                fullpath = os.path.join(mxf_root, child)
                if os.path.isdir(fullpath):
                    media_dirs.append(fullpath)

        for media_dir in media_dirs:
            print '\nChecking %s...' % media_dir
            # Per-client trigger file '<media_dir>_<ip>_<user>' whose mtime
            # is read by the media_harmony VFS module (see header comment).
            fakepath = '%s_%s_%s' % (media_dir, my_ip, my_name)
            print '...fakepath: %s' % fakepath
            # Compare the Avid media database's mtime against every other
            # file in the directory to decide whether a re-index is needed.
            db = os.path.join(media_dir, 'msmMMOB.mdb')
            print '...Checking for %s' % db
            if os.path.exists(db):
                print '......found %s.' % db
                db_mtime = os.stat(db)[stat.ST_MTIME]
                newer_file = False
                for child in os.listdir(media_dir):
                    # The database files themselves don't count as new media.
                    if child == 'msmMMOB.mdb' or child == 'msmFMID.pmr':
                        continue
                    child_mtime = os.stat(os.path.join(media_dir, child))[stat.ST_MTIME]
                    if child_mtime > db_mtime:
                        print '......found newer file %s' % child
                        newer_file = True
                        break
            else:
                # No database at all: treat as needing a re-index.
                print '......no %s.' % db
                newer_file = True

            if newer_file:
                # os.utime(path, None) sets the mtime to the current time.
                utime = None # Sets to current time.
                print '...Setting fake mtime to NOW. Will trigger re-index.'
            else:
                mtime = os.stat(media_dir)[stat.ST_MTIME]
                utime = (mtime, mtime)
                print '...Setting fake mtime to media_dir mtime. No re-index.'
            if not os.path.exists(fakepath):
                # Create via a temp file and rename so the trigger file only
                # appears once it already carries the intended mtime.
                tmp_fakepath = '%s.tmp' % fakepath
                open(tmp_fakepath, 'a').close()
                os.utime(tmp_fakepath, utime)
                os.rename(tmp_fakepath, fakepath)
            else:
                os.utime(fakepath, utime)

    dummy = raw_input("\nPress Enter to finish: ")
| gpl-3.0 |
CapOM/ChromiumGStreamerBackend | tools/telemetry/third_party/gsutilz/third_party/boto/boto/dynamodb/layer1.py | 153 | 24057 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import time
from binascii import crc32
import boto
from boto.connection import AWSAuthConnection
from boto.exception import DynamoDBResponseError
from boto.provider import Provider
from boto.dynamodb import exceptions as dynamodb_exceptions
from boto.compat import json
class Layer1(AWSAuthConnection):
    """
    This is the lowest-level interface to DynamoDB. Methods at this
    layer map directly to API requests and parameters to the methods
    are either simple, scalar values or they are the Python equivalent
    of the JSON input as defined in the DynamoDB Developer's Guide.

    All responses are direct decoding of the JSON response bodies to
    Python data structures via the json or simplejson modules.

    :ivar throughput_exceeded_events: An integer variable that
        keeps a running total of the number of ThroughputExceeded
        responses this connection has received from Amazon DynamoDB.
    """

    DefaultRegionName = 'us-east-1'
    """The default region name for DynamoDB API."""

    ServiceName = 'DynamoDB'
    """The name of the Service"""

    Version = '20111205'
    """DynamoDB API version."""

    ThruputError = "ProvisionedThroughputExceededException"
    """The error response returned when provisioned throughput is exceeded"""

    SessionExpiredError = 'com.amazon.coral.service#ExpiredTokenException'
    """The error response returned when session token has expired"""

    ConditionalCheckFailedError = 'ConditionalCheckFailedException'
    """The error response returned when a conditional check fails"""

    ValidationError = 'ValidationException'
    """The error response returned when an item is invalid in some way"""

    ResponseError = DynamoDBResponseError

    NumberRetries = 10
    """The number of times an error is retried."""

    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                 is_secure=True, port=None, proxy=None, proxy_port=None,
                 debug=0, security_token=None, region=None,
                 validate_certs=True, validate_checksums=True, profile_name=None):
        if not region:
            # Fall back to the region named in the [DynamoDB] section of the
            # boto config (or the class default) and resolve it to a
            # RegionInfo object.
            region_name = boto.config.get('DynamoDB', 'region',
                                          self.DefaultRegionName)
            for reg in boto.dynamodb.regions():
                if reg.name == region_name:
                    region = reg
                    break
        self.region = region
        super(Layer1, self).__init__(self.region.endpoint,
                                     aws_access_key_id,
                                     aws_secret_access_key,
                                     is_secure, port, proxy, proxy_port,
                                     debug=debug, security_token=security_token,
                                     validate_certs=validate_certs,
                                     profile_name=profile_name)
        self.throughput_exceeded_events = 0
        # Checksum validation may also be switched off via the boto config
        # ([DynamoDB] validate_checksums); the config wins over the argument.
        self._validate_checksums = boto.config.getbool(
            'DynamoDB', 'validate_checksums', validate_checksums)

    def _get_session_token(self):
        # Re-create the credential provider (fetching a fresh session token)
        # and push it into the auth handler.
        self.provider = Provider(self._provider_type)
        self._auth_handler.update_provider(self.provider)

    def _required_auth_capability(self):
        # DynamoDB requests must be signed with Signature Version 4.
        return ['hmac-v4']

    def make_request(self, action, body='', object_hook=None):
        """
        Send one JSON request to DynamoDB and return the decoded response.

        :raises: ``DynamoDBExpiredTokenError`` if the security token expires.
        """
        # X-Amz-Target selects the API operation, e.g.
        # 'DynamoDB_20111205.ListTables'.
        headers = {'X-Amz-Target': '%s_%s.%s' % (self.ServiceName,
                                                 self.Version, action),
                   'Host': self.region.endpoint,
                   'Content-Type': 'application/x-amz-json-1.0',
                   'Content-Length': str(len(body))}
        http_request = self.build_base_http_request('POST', '/', '/',
                                                    {}, headers, body, None)
        start = time.time()
        # _retry_handler (below) decides per-response whether to retry.
        response = self._mexe(http_request, sender=None,
                              override_num_retries=self.NumberRetries,
                              retry_handler=self._retry_handler)
        elapsed = (time.time() - start) * 1000
        request_id = response.getheader('x-amzn-RequestId')
        boto.log.debug('RequestId: %s' % request_id)
        boto.perflog.debug('%s: id=%s time=%sms',
                           headers['X-Amz-Target'], request_id, int(elapsed))
        response_body = response.read().decode('utf-8')
        boto.log.debug(response_body)
        return json.loads(response_body, object_hook=object_hook)

    def _retry_handler(self, response, i, next_sleep):
        # Called by _mexe for every response.  Returning a
        # (msg, attempt, sleep) tuple requests another attempt; returning
        # None accepts the response as-is.
        status = None
        if response.status == 400:
            response_body = response.read().decode('utf-8')
            boto.log.debug(response_body)
            data = json.loads(response_body)
            # NOTE(review): assumes every 400 body carries a '__type' key;
            # 'in data.get("__type")' would raise TypeError otherwise --
            # confirm against the service's error format.
            if self.ThruputError in data.get('__type'):
                # Throttled: back off exponentially and retry, raising only
                # after the full retry budget is spent.
                self.throughput_exceeded_events += 1
                msg = "%s, retry attempt %s" % (self.ThruputError, i)
                next_sleep = self._exponential_time(i)
                i += 1
                status = (msg, i, next_sleep)
                if i == self.NumberRetries:
                    # If this was our last retry attempt, raise
                    # a specific error saying that the throughput
                    # was exceeded.
                    raise dynamodb_exceptions.DynamoDBThroughputExceededError(
                        response.status, response.reason, data)
            elif self.SessionExpiredError in data.get('__type'):
                # Refresh the session token and retry with a reset budget.
                msg = 'Renewing Session Token'
                self._get_session_token()
                status = (msg, i + self.num_retries - 1, 0)
            elif self.ConditionalCheckFailedError in data.get('__type'):
                raise dynamodb_exceptions.DynamoDBConditionalCheckFailedError(
                    response.status, response.reason, data)
            elif self.ValidationError in data.get('__type'):
                raise dynamodb_exceptions.DynamoDBValidationError(
                    response.status, response.reason, data)
            else:
                raise self.ResponseError(response.status, response.reason,
                                         data)
        expected_crc32 = response.getheader('x-amz-crc32')
        if self._validate_checksums and expected_crc32 is not None:
            # NOTE(review): the body is read twice here; this relies on
            # boto's HTTPResponse caching repeated no-arg read() calls --
            # with a plain httplib response the second read would be empty.
            # Confirm the response object used here is boto's wrapper.
            boto.log.debug('Validating crc32 checksum for body: %s',
                           response.read().decode('utf-8'))
            actual_crc32 = crc32(response.read()) & 0xffffffff
            expected_crc32 = int(expected_crc32)
            if actual_crc32 != expected_crc32:
                msg = ("The calculated checksum %s did not match the expected "
                       "checksum %s" % (actual_crc32, expected_crc32))
                # Checksum mismatch: retry with exponential backoff.
                status = (msg, i + 1, self._exponential_time(i))
        return status

    def _exponential_time(self, i):
        # Capped exponential backoff: 0 for the first attempt, then
        # min(0.05 * 2**i, Boto max_retry_delay [default 60]) seconds.
        if i == 0:
            next_sleep = 0
        else:
            next_sleep = min(0.05 * (2 ** i),
                             boto.config.get('Boto', 'max_retry_delay', 60))
        return next_sleep

    def list_tables(self, limit=None, start_table=None):
        """
        Returns a dictionary of results. The dictionary contains
        a **TableNames** key whose value is a list of the table names.
        The dictionary could also contain a **LastEvaluatedTableName**
        key whose value would be the last table name returned if
        the complete list of table names was not returned. This
        value would then be passed as the ``start_table`` parameter on
        a subsequent call to this method.

        :type limit: int
        :param limit: The maximum number of tables to return.

        :type start_table: str
        :param start_table: The name of the table that starts the
            list. If you ran a previous list_tables and not
            all results were returned, the response dict would
            include a LastEvaluatedTableName attribute. Use
            that value here to continue the listing.
        """
        data = {}
        if limit:
            data['Limit'] = limit
        if start_table:
            data['ExclusiveStartTableName'] = start_table
        json_input = json.dumps(data)
        return self.make_request('ListTables', json_input)

    def describe_table(self, table_name):
        """
        Returns information about the table including current
        state of the table, primary key schema and when the
        table was created.

        :type table_name: str
        :param table_name: The name of the table to describe.
        """
        data = {'TableName': table_name}
        json_input = json.dumps(data)
        return self.make_request('DescribeTable', json_input)

    def create_table(self, table_name, schema, provisioned_throughput):
        """
        Add a new table to your account. The table name must be unique
        among those associated with the account issuing the request.
        This request triggers an asynchronous workflow to begin creating
        the table. When the workflow is complete, the state of the
        table will be ACTIVE.

        :type table_name: str
        :param table_name: The name of the table to create.

        :type schema: dict
        :param schema: A Python version of the KeySchema data structure
            as defined by DynamoDB

        :type provisioned_throughput: dict
        :param provisioned_throughput: A Python version of the
            ProvisionedThroughput data structure defined by
            DynamoDB.
        """
        data = {'TableName': table_name,
                'KeySchema': schema,
                'ProvisionedThroughput': provisioned_throughput}
        json_input = json.dumps(data)
        response_dict = self.make_request('CreateTable', json_input)
        return response_dict

    def update_table(self, table_name, provisioned_throughput):
        """
        Updates the provisioned throughput for a given table.

        :type table_name: str
        :param table_name: The name of the table to update.

        :type provisioned_throughput: dict
        :param provisioned_throughput: A Python version of the
            ProvisionedThroughput data structure defined by
            DynamoDB.
        """
        data = {'TableName': table_name,
                'ProvisionedThroughput': provisioned_throughput}
        json_input = json.dumps(data)
        return self.make_request('UpdateTable', json_input)

    def delete_table(self, table_name):
        """
        Deletes the table and all of it's data. After this request
        the table will be in the DELETING state until DynamoDB
        completes the delete operation.

        :type table_name: str
        :param table_name: The name of the table to delete.
        """
        data = {'TableName': table_name}
        json_input = json.dumps(data)
        return self.make_request('DeleteTable', json_input)

    def get_item(self, table_name, key, attributes_to_get=None,
                 consistent_read=False, object_hook=None):
        """
        Return a set of attributes for an item that matches
        the supplied key.

        :type table_name: str
        :param table_name: The name of the table containing the item.

        :type key: dict
        :param key: A Python version of the Key data structure
            defined by DynamoDB.

        :type attributes_to_get: list
        :param attributes_to_get: A list of attribute names.
            If supplied, only the specified attribute names will
            be returned. Otherwise, all attributes will be returned.

        :type consistent_read: bool
        :param consistent_read: If True, a consistent read
            request is issued. Otherwise, an eventually consistent
            request is issued.

        :raises DynamoDBKeyNotFoundError: if the response contains no
            'Item' element (the requested key does not exist).
        """
        data = {'TableName': table_name,
                'Key': key}
        if attributes_to_get:
            data['AttributesToGet'] = attributes_to_get
        if consistent_read:
            data['ConsistentRead'] = True
        json_input = json.dumps(data)
        response = self.make_request('GetItem', json_input,
                                     object_hook=object_hook)
        if 'Item' not in response:
            raise dynamodb_exceptions.DynamoDBKeyNotFoundError(
                "Key does not exist."
            )
        return response

    def batch_get_item(self, request_items, object_hook=None):
        """
        Return a set of attributes for a multiple items in
        multiple tables using their primary keys.

        :type request_items: dict
        :param request_items: A Python version of the RequestItems
            data structure defined by DynamoDB.
        """
        # If the list is empty, return empty response
        if not request_items:
            return {}
        data = {'RequestItems': request_items}
        json_input = json.dumps(data)
        return self.make_request('BatchGetItem', json_input,
                                 object_hook=object_hook)

    def batch_write_item(self, request_items, object_hook=None):
        """
        This operation enables you to put or delete several items
        across multiple tables in a single API call.

        :type request_items: dict
        :param request_items: A Python version of the RequestItems
            data structure defined by DynamoDB.
        """
        data = {'RequestItems': request_items}
        json_input = json.dumps(data)
        return self.make_request('BatchWriteItem', json_input,
                                 object_hook=object_hook)

    def put_item(self, table_name, item,
                 expected=None, return_values=None,
                 object_hook=None):
        """
        Create a new item or replace an old item with a new
        item (including all attributes). If an item already
        exists in the specified table with the same primary
        key, the new item will completely replace the old item.
        You can perform a conditional put by specifying an
        expected rule.

        :type table_name: str
        :param table_name: The name of the table in which to put the item.

        :type item: dict
        :param item: A Python version of the Item data structure
            defined by DynamoDB.

        :type expected: dict
        :param expected: A Python version of the Expected
            data structure defined by DynamoDB.

        :type return_values: str
        :param return_values: Controls the return of attribute
            name-value pairs before then were changed. Possible
            values are: None or 'ALL_OLD'. If 'ALL_OLD' is
            specified and the item is overwritten, the content
            of the old item is returned.
        """
        data = {'TableName': table_name,
                'Item': item}
        if expected:
            data['Expected'] = expected
        if return_values:
            data['ReturnValues'] = return_values
        json_input = json.dumps(data)
        return self.make_request('PutItem', json_input,
                                 object_hook=object_hook)

    def update_item(self, table_name, key, attribute_updates,
                    expected=None, return_values=None,
                    object_hook=None):
        """
        Edits an existing item's attributes. You can perform a conditional
        update (insert a new attribute name-value pair if it doesn't exist,
        or replace an existing name-value pair if it has certain expected
        attribute values).

        :type table_name: str
        :param table_name: The name of the table.

        :type key: dict
        :param key: A Python version of the Key data structure
            defined by DynamoDB which identifies the item to be updated.

        :type attribute_updates: dict
        :param attribute_updates: A Python version of the AttributeUpdates
            data structure defined by DynamoDB.

        :type expected: dict
        :param expected: A Python version of the Expected
            data structure defined by DynamoDB.

        :type return_values: str
        :param return_values: Controls the return of attribute
            name-value pairs before then were changed. Possible
            values are: None or 'ALL_OLD'. If 'ALL_OLD' is
            specified and the item is overwritten, the content
            of the old item is returned.
        """
        data = {'TableName': table_name,
                'Key': key,
                'AttributeUpdates': attribute_updates}
        if expected:
            data['Expected'] = expected
        if return_values:
            data['ReturnValues'] = return_values
        json_input = json.dumps(data)
        return self.make_request('UpdateItem', json_input,
                                 object_hook=object_hook)

    def delete_item(self, table_name, key,
                    expected=None, return_values=None,
                    object_hook=None):
        """
        Delete an item and all of it's attributes by primary key.
        You can perform a conditional delete by specifying an
        expected rule.

        :type table_name: str
        :param table_name: The name of the table containing the item.

        :type key: dict
        :param key: A Python version of the Key data structure
            defined by DynamoDB.

        :type expected: dict
        :param expected: A Python version of the Expected
            data structure defined by DynamoDB.

        :type return_values: str
        :param return_values: Controls the return of attribute
            name-value pairs before then were changed. Possible
            values are: None or 'ALL_OLD'. If 'ALL_OLD' is
            specified and the item is overwritten, the content
            of the old item is returned.
        """
        data = {'TableName': table_name,
                'Key': key}
        if expected:
            data['Expected'] = expected
        if return_values:
            data['ReturnValues'] = return_values
        json_input = json.dumps(data)
        return self.make_request('DeleteItem', json_input,
                                 object_hook=object_hook)

    def query(self, table_name, hash_key_value, range_key_conditions=None,
              attributes_to_get=None, limit=None, consistent_read=False,
              scan_index_forward=True, exclusive_start_key=None,
              object_hook=None, count=False):
        """
        Perform a query of DynamoDB. This version is currently punting
        and expecting you to provide a full and correct JSON body
        which is passed as is to DynamoDB.

        :type table_name: str
        :param table_name: The name of the table to query.

        :type hash_key_value: dict
        :param hash_key_value: A DynamoDB-style HashKeyValue.

        :type range_key_conditions: dict
        :param range_key_conditions: A Python version of the
            RangeKeyConditions data structure.

        :type attributes_to_get: list
        :param attributes_to_get: A list of attribute names.
            If supplied, only the specified attribute names will
            be returned. Otherwise, all attributes will be returned.

        :type limit: int
        :param limit: The maximum number of items to return.

        :type count: bool
        :param count: If True, Amazon DynamoDB returns a total
            number of items for the Query operation, even if the
            operation has no matching items for the assigned filter.

        :type consistent_read: bool
        :param consistent_read: If True, a consistent read
            request is issued. Otherwise, an eventually consistent
            request is issued.

        :type scan_index_forward: bool
        :param scan_index_forward: Specified forward or backward
            traversal of the index. Default is forward (True).

        :type exclusive_start_key: list or tuple
        :param exclusive_start_key: Primary key of the item from
            which to continue an earlier query. This would be
            provided as the LastEvaluatedKey in that query.
        """
        data = {'TableName': table_name,
                'HashKeyValue': hash_key_value}
        if range_key_conditions:
            data['RangeKeyCondition'] = range_key_conditions
        if attributes_to_get:
            data['AttributesToGet'] = attributes_to_get
        if limit:
            data['Limit'] = limit
        if count:
            data['Count'] = True
        if consistent_read:
            data['ConsistentRead'] = True
        # ScanIndexForward is always sent explicitly, True or False.
        if scan_index_forward:
            data['ScanIndexForward'] = True
        else:
            data['ScanIndexForward'] = False
        if exclusive_start_key:
            data['ExclusiveStartKey'] = exclusive_start_key
        json_input = json.dumps(data)
        return self.make_request('Query', json_input,
                                 object_hook=object_hook)

    def scan(self, table_name, scan_filter=None,
             attributes_to_get=None, limit=None,
             exclusive_start_key=None, object_hook=None, count=False):
        """
        Perform a scan of DynamoDB. This version is currently punting
        and expecting you to provide a full and correct JSON body
        which is passed as is to DynamoDB.

        :type table_name: str
        :param table_name: The name of the table to scan.

        :type scan_filter: dict
        :param scan_filter: A Python version of the
            ScanFilter data structure.

        :type attributes_to_get: list
        :param attributes_to_get: A list of attribute names.
            If supplied, only the specified attribute names will
            be returned. Otherwise, all attributes will be returned.

        :type limit: int
        :param limit: The maximum number of items to evaluate.

        :type count: bool
        :param count: If True, Amazon DynamoDB returns a total
            number of items for the Scan operation, even if the
            operation has no matching items for the assigned filter.

        :type exclusive_start_key: list or tuple
        :param exclusive_start_key: Primary key of the item from
            which to continue an earlier query. This would be
            provided as the LastEvaluatedKey in that query.
        """
        data = {'TableName': table_name}
        if scan_filter:
            data['ScanFilter'] = scan_filter
        if attributes_to_get:
            data['AttributesToGet'] = attributes_to_get
        if limit:
            data['Limit'] = limit
        if count:
            data['Count'] = True
        if exclusive_start_key:
            data['ExclusiveStartKey'] = exclusive_start_key
        json_input = json.dumps(data)
        return self.make_request('Scan', json_input, object_hook=object_hook)
| bsd-3-clause |
sloanyang/depends | third_party/gsutil/gslib/addlhelp/versioning.py | 51 | 10207 | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gslib.help_provider import HELP_NAME
from gslib.help_provider import HELP_NAME_ALIASES
from gslib.help_provider import HELP_ONE_LINE_SUMMARY
from gslib.help_provider import HelpProvider
from gslib.help_provider import HELP_TEXT
from gslib.help_provider import HelpType
from gslib.help_provider import HELP_TYPE
# Help text shown by 'gsutil help versioning'.  Fixed typos in the original
# text: "that that" -> "that", "have have" -> "have",
# "version-specfic" -> "version-specific".
_detailed_help_text = ("""
<B>OVERVIEW</B>
  Versioning-enabled buckets maintain an archive of objects, providing a way to
  un-delete data that you accidentally deleted, or to retrieve older versions of
  your data. You can turn versioning on or off for a bucket at any time. Turning
  versioning off leaves existing object versions in place, and simply causes the
  bucket to stop accumulating new object versions. In this case, if you upload
  to an existing object the current version is overwritten instead of creating
  a new version.

  Regardless of whether you have enabled versioning on a bucket, every object
  has two associated positive integer fields:

    - the generation, which is updated when the content of an object is
      overwritten.
    - the meta-generation, which identifies the metadata generation. It starts
      at 1; is updated every time the metadata (e.g., ACL or Content-Type) for a
      given content generation is updated; and gets reset when the generation
      number changes.

  Of these two integers, only the generation is used when working with versioned
  data. Both generation and meta-generation can be used with concurrency control
  (discussed in a later section).

  To work with object versioning in gsutil, you can use a flavor of storage URIs
  that embed the object generation, which we refer to as version-specific URIs.
  For example, the version-less object URI:

    gs://bucket/object

  might have two versions, with these version-specific URIs:

    gs://bucket/object#1360383693690000
    gs://bucket/object#1360383802725000

  The following sections discuss how to work with versioning and concurrency
  control.

<B>OBJECT VERSIONING</B>
  You can view, enable, and disable object versioning on a bucket using
  the getversioning and setversioning commands. For example:

    gsutil setversioning on gs://bucket

  will enable versioning for the named bucket. See 'gsutil help getversioning'
  and 'gsutil help setversioning' for additional details.

  To see all object versions in a versioning-enabled bucket along with
  their generation.meta-generation information, use gsutil ls -a:

    gsutil ls -a gs://bucket

  You can also specify particular objects for which you want to find the
  version-specific URI(s), or you can use wildcards:

    gsutil ls -a gs://bucket/object1 gs://bucket/images/*.jpg

  The generation values form a monotonically increasing sequence as you create
  additional object versions. Because of this, the latest object version is
  always the last one listed in the gsutil ls output for a particular object.
  For example, if a bucket contains these three versions of gs://bucket/object:

    gs://bucket/object#1360035307075000
    gs://bucket/object#1360101007329000
    gs://bucket/object#1360102216114000

  then gs://bucket/object#1360102216114000 is the latest version and
  gs://bucket/object#1360035307075000 is the oldest available version.

  If you specify version-less URIs with gsutil, you will operate on the
  latest not-deleted version of an object, for example:

    gsutil cp gs://bucket/object ./dir

  or

    gsutil rm gs://bucket/object

  To operate on a specific object version, use a version-specific URI.
  For example, suppose the output of the above gsutil ls -a command is:

    gs://bucket/object#1360035307075000
    gs://bucket/object#1360101007329000

  In this case, the command:

    gsutil cp gs://bucket/object#1360035307075000 ./dir

  will retrieve the second most recent version of the object.

  Note that version-specific URIs cannot be the target of the gsutil cp
  command (trying to do so will result in an error), because writing to a
  versioned object always creates a new version.

  If an object has been deleted, it will not show up in a normal gsutil ls
  listing (i.e., ls without the -a option). You can restore a deleted object by
  running gsutil ls -a to find the available versions, and then copying one of
  the version-specific URIs to the version-less URI, for example:

    gsutil cp gs://bucket/object#1360101007329000 gs://bucket/object

  Note that when you do this it creates a new object version, which will incur
  additional charges. You can get rid of the extra copy by deleting the older
  version-specific object:

    gsutil rm gs://bucket/object#1360101007329000

  Or you can combine the two steps by using the gsutil mv command:

    gsutil mv gs://bucket/object#1360101007329000 gs://bucket/object

  If you want to remove all versions of an object use the gsutil rm -a option:

    gsutil rm -a gs://bucket/object

  Note that there is no limit to the number of older versions of an object you
  will create if you continue to upload to the same object in a versioning-
  enabled bucket. It is your responsibility to delete versions beyond the ones
  you want to retain.

<B>CONCURRENCY CONTROL</B>
  If you are building an application using Google Cloud Storage, you may need to
  be careful about concurrency control. Normally gsutil itself isn't used for
  this purpose, but it's possible to write scripts around gsutil that perform
  concurrency control.

  For example, suppose you want to implement a "rolling update" system using
  gsutil, where a periodic job computes some data and uploads it to the cloud.
  On each run, the job starts with the data that it computed from last run, and
  computes a new value. To make this system robust, you need to have multiple
  machines on which the job can run, which raises the possibility that two
  simultaneous runs could attempt to update an object at the same time. This
  leads to the following potential race condition:

    - job 1 computes the new value to be written
    - job 2 computes the new value to be written
    - job 2 writes the new value
    - job 1 writes the new value

  In this case, the value that job 1 read is no longer current by the time
  it goes to write the updated object, and writing at this point would result
  in stale (or, depending on the application, corrupt) data.

  To prevent this, you can find the version-specific name of the object that was
  created, and then use the information contained in that URI to specify an
  x-goog-if-generation-match header on a subsequent gsutil cp command. You can
  do this in two steps. First, use the gsutil cp -v option at upload time to get
  the version-specific name of the object that was created, for example:

    gsutil cp -v file gs://bucket/object

  might output:

    Created: gs://bucket/object#1360432179236000

  You can extract the generation value from this object and then construct a
  subsequent gsutil command like this:

    gsutil -h x-goog-if-generation-match:1360432179236000 cp newfile \\
      gs://bucket/object

  This command requests Google Cloud Storage to attempt to upload newfile
  but to fail the request if the generation of newfile that is live at the
  time of the upload does not match that specified.

  If the command you use updates object metadata, you will need to find the
  current meta_generation for an object. To do this, use the gsutil ls -a and
  -l options. For example, the command:

    gsutil ls -l -a gs://bucket/object

  will output something like:

    64  2013-02-12T19:59:13  gs://bucket/object#1360699153986000  meta_generation=3
    1521  2013-02-13T02:04:08  gs://bucket/object#1360721048778000  meta_generation=2

  Given this information, you could use the following command to request setting
  the ACL on the older version of the object, such that the command will fail
  unless that is the current version of the data+metadata:

    gsutil -h x-goog-if-generation-match:1360699153986000 -h \\
      x-goog-if-metageneration-match:3 setacl public-read \\
      gs://bucket/object#1360699153986000

  Without adding these headers, the update would simply overwrite the existing
  ACL. Note that in contrast, the gsutil chacl command uses these headers
  automatically, because it performs a read-modify-write cycle in order to edit
  ACLs.

  If you want to experiment with how generations and metagenerations work, try
  the following. First, upload an object; then use gsutil ls -l -a to list all
  versions of the object, along with each version's meta_generation; then re-
  upload the object and repeat the gsutil ls -l -a. You should see two object
  versions, each with meta_generation=1. Now try setting the ACL, and rerun the
  gsutil ls -l -a. You should see the most recent object generation now has
  meta_generation=2.

<B>FOR MORE INFORMATION</B>
  For more details on how to use versioning and preconditions, see
  https://developers.google.com/storage/docs/object-versioning
""")
class CommandOptions(HelpProvider):
    """Additional help about object versioning."""
    # Registration metadata consumed by gsutil's help framework; the keys
    # (HELP_NAME etc.) are constants imported from the help framework module.
    help_spec = {
        # Name of command or auxiliary help info for which this help applies.
        HELP_NAME : 'versioning',
        # List of help name aliases.
        HELP_NAME_ALIASES : ['concurrency', 'concurrency control', 'versioning',
                             'versions'],
        # Type of help:
        HELP_TYPE : HelpType.ADDITIONAL_HELP,
        # One line summary of this help.
        HELP_ONE_LINE_SUMMARY : 'Working with object versions; concurrency control',
        # The full help text.
        HELP_TEXT : _detailed_help_text,
    }
| gpl-2.0 |
stevehof/CouchPotatoServer | libs/pyutil/cache.py | 106 | 27000 | # Copyright (c) 2002-2010 Zooko "Zooko" Wilcox-O'Hearn
"""
This module offers three implementations of an LRUCache, which is a dict that
drops items according to a Least-Recently-Used policy if the dict exceeds a
fixed maximum size.
Warning: if -O optimizations are not turned on then LRUCache performs
extensive self-analysis in every function call, which can take minutes
and minutes for a large cache. Turn on -O, or comment out ``assert self._assert_invariants()``
"""
import operator
from assertutil import _assert, precondition
from humanreadable import hr
class LRUCache:
"""
An efficient least-recently-used cache. It keeps an LRU queue, and when
the number of items in the cache reaches maxsize, it removes the least
recently used item.
"Looking" at an item, key, or value such as with "has_key()" makes that
item become the most recently used item.
You can also use "refresh()" to explicitly make an item become the most
recently used item.
Adding an item that is already in the dict *does* make it the most-
recently-used item although it does not change the state of the dict
itself.
See also SmallLRUCache (below), which is faster in some cases.
"""
class ItemIterator:
def __init__(self, c):
self.c = c
self.i = c.d[c.hs][2]
def __iter__(self):
return self
def next(self):
if self.i is self.c.ts:
raise StopIteration
k = self.i
precondition(self.c.d.has_key(k), "The iterated LRUCache doesn't have the next key. Most likely this is because someone altered the contents of the LRUCache while the iteration was in progress.", k, self.c)
(v, p, n,) = self.c.d[k]
self.i = n
return (k, v,)
class KeyIterator:
def __init__(self, c):
self.c = c
self.i = c.d[c.hs][2]
def __iter__(self):
return self
def next(self):
if self.i is self.c.ts:
raise StopIteration
k = self.i
precondition(self.c.d.has_key(k), "The iterated LRUCache doesn't have the next key. Most likely this is because someone altered the contents of the LRUCache while the iteration was in progress.", k, self.c)
(v, p, n,) = self.c.d[k]
self.i = n
return k
class ValIterator:
def __init__(self, c):
self.c = c
self.i = c.d[c.hs][2]
def __iter__(self):
return self
def next(self):
if self.i is self.c.ts:
raise StopIteration
precondition(self.c.d.has_key(self.i), "The iterated LRUCache doesn't have the next key. Most likely this is because someone altered the contents of the LRUCache while the iteration was in progress.", self.i, self.c)
(v, p, n,) = self.c.d[self.i]
self.i = n
return v
class Sentinel:
def __init__(self, msg):
self.msg = msg
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, self.msg,)
def __init__(self, initialdata={}, maxsize=128):
precondition(maxsize > 0)
self.m = maxsize+2 # The +2 is for the head and tail nodes.
self.d = {} # k: k, v: [v, prev, next,] # the dict
self.hs = LRUCache.Sentinel("hs")
self.ts = LRUCache.Sentinel("ts")
self.d[self.hs] = [None, self.hs, self.ts,] # This allows us to use sentinels as normal nodes.
self.d[self.ts] = [None, self.hs, self.ts,] # This allows us to use sentinels as normal nodes.
self.update(initialdata)
assert self._assert_invariants()
def __repr_n__(self, n=None):
s = ["{",]
try:
iter = self.iteritems()
x = iter.next()
s.append(str(x[0])); s.append(": "); s.append(str(x[1]))
i = 1
while (n is None) or (i < n):
x = iter.next()
s.append(", "); s.append(str(x[0])); s.append(": "); s.append(str(x[1]))
except StopIteration:
pass
s.append("}")
return ''.join(s)
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, self.__repr_n__(),)
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.__repr_n__(16),)
def _assert_invariants(self):
_assert(len(self.d) <= self.m, "Size is required to be <= maxsize.", len(self.d), self.m)
_assert((len(self.d) > 2) == (self.d[self.hs][2] is not self.ts) == (self.d[self.ts][1] is not self.hs), "Head and tail point to something other than each other if and only if there is at least one element in the dictionary.", self.hs, self.ts, len(self.d))
foundprevsentinel = 0
foundnextsentinel = 0
for (k, (v, p, n,)) in self.d.iteritems():
_assert(v not in (self.hs, self.ts,))
_assert(p is not self.ts, "A reference to the tail sentinel may not appear in prev.", k, v, p, n)
_assert(n is not self.hs, "A reference to the head sentinel may not appear in next.", k, v, p, n)
_assert(p in self.d, "Each prev is required to appear as a key in the dict.", k, v, p, n)
_assert(n in self.d, "Each next is required to appear as a key in the dict.", k, v, p, n)
if p is self.hs:
foundprevsentinel += 1
_assert(foundprevsentinel <= 2, "No more than two references to the head sentinel may appear as a prev.", k, v, p, n)
if n is self.ts:
foundnextsentinel += 1
_assert(foundnextsentinel <= 2, "No more than one reference to the tail sentinel may appear as a next.", k, v, p, n)
_assert(foundprevsentinel == 2, "A reference to the head sentinel is required appear as a prev (plus a self-referential reference).")
_assert(foundnextsentinel == 2, "A reference to the tail sentinel is required appear as a next (plus a self-referential reference).")
count = 0
for (k, v,) in self.iteritems():
_assert(k not in (self.hs, self.ts,))
count += 1
_assert(count == len(self.d)-2, count, len(self.d)) # -2 for the sentinels
return True
def freshen(self, k, strictkey=False):
assert self._assert_invariants()
if not self.d.has_key(k):
if strictkey:
raise KeyError, k
return
node = self.d[k]
# relink
self.d[node[1]][2] = node[2]
self.d[node[2]][1] = node[1]
# move to front
hnode = self.d[self.hs]
node[1] = self.hs
node[2] = hnode[2]
hnode[2] = k
self.d[node[2]][1] = k
assert self._assert_invariants()
def iteritems(self):
return LRUCache.ItemIterator(self)
def itervalues(self):
return LRUCache.ValIterator(self)
def iterkeys(self):
return self.__iter__()
def __iter__(self):
return LRUCache.KeyIterator(self)
def __getitem__(self, key, default=None, strictkey=True):
node = self.d.get(key)
if not node:
if strictkey:
raise KeyError, key
return default
self.freshen(key)
return node[0]
def __setitem__(self, k, v=None):
assert self._assert_invariants()
node = self.d.get(k)
if node:
node[0] = v
self.freshen(k)
return
if len(self.d) == self.m:
# If this insert is going to increase the size of the cache to
# bigger than maxsize.
self.pop()
hnode = self.d[self.hs]
n = hnode[2]
self.d[k] = [v, self.hs, n,]
hnode[2] = k
self.d[n][1] = k
assert self._assert_invariants()
return v
def __delitem__(self, key, default=None, strictkey=True):
"""
@param strictkey: True if you want a KeyError in the case that
key is not there, False if you want a reference to default
in the case that key is not there
@param default: the object to return if key is not there; This
is ignored if strictkey.
@return: the value removed or default if there is not item by
that key and strictkey is False
"""
assert self._assert_invariants()
if self.d.has_key(key):
node = self.d[key]
# relink
self.d[node[1]][2] = node[2]
self.d[node[2]][1] = node[1]
del self.d[key]
assert self._assert_invariants()
return node[0]
elif strictkey:
assert self._assert_invariants()
raise KeyError, key
else:
assert self._assert_invariants()
return default
def has_key(self, key):
assert self._assert_invariants()
if self.d.has_key(key):
self.freshen(key)
assert self._assert_invariants()
return True
else:
assert self._assert_invariants()
return False
def clear(self):
assert self._assert_invariants()
self.d.clear()
self.d[self.hs] = [None, self.hs, self.ts,] # This allows us to use sentinels as normal nodes.
self.d[self.ts] = [None, self.hs, self.ts,] # This allows us to use sentinels as normal nodes.
assert self._assert_invariants()
def update(self, otherdict):
"""
@return: self
"""
assert self._assert_invariants()
if len(otherdict) >= (self.m-2): # -2 for the sentinel nodes
# optimization
self.clear()
assert self._assert_invariants()
i = otherdict.iteritems()
try:
while len(self.d) < self.m:
(k, v,) = i.next()
assert self._assert_invariants()
self[k] = v
assert self._assert_invariants()
return self
except StopIteration:
_assert(False, "Internal error -- this should never have happened since the while loop should have terminated first.")
return self
for (k, v,) in otherdict.iteritems():
assert self._assert_invariants()
self[k] = v
assert self._assert_invariants()
def pop(self):
assert self._assert_invariants()
if len(self.d) < 2: # the +2 is for the sentinels
raise KeyError, 'popitem(): dictionary is empty'
k = self.d[self.ts][1]
self.remove(k)
assert self._assert_invariants()
return k
def popitem(self):
assert self._assert_invariants()
if len(self.d) < 2: # the +2 is for the sentinels
raise KeyError, 'popitem(): dictionary is empty'
k = self.d[self.ts][1]
val = self.remove(k)
assert self._assert_invariants()
return (k, val,)
def keys_unsorted(self):
assert self._assert_invariants()
t = self.d.copy()
del t[self.hs]
del t[self.ts]
assert self._assert_invariants()
return t.keys()
def keys(self):
res = [None] * len(self)
i = 0
for k in self.iterkeys():
res[i] = k
i += 1
return res
def values_unsorted(self):
assert self._assert_invariants()
t = self.d.copy()
del t[self.hs]
del t[self.ts]
assert self._assert_invariants()
return map(operator.__getitem__, t.values(), [0]*len(t))
def values(self):
res = [None] * len(self)
i = 0
for v in self.itervalues():
res[i] = v
i += 1
return res
def items(self):
res = [None] * len(self)
i = 0
for it in self.iteritems():
res[i] = it
i += 1
return res
def __len__(self):
return len(self.d) - 2
def insert(self, key, val=None):
assert self._assert_invariants()
result = self.__setitem__(key, val)
assert self._assert_invariants()
return result
def setdefault(self, key, default=None):
assert self._assert_invariants()
if not self.has_key(key):
self[key] = default
assert self._assert_invariants()
return self[key]
def get(self, key, default=None):
return self.__getitem__(key, default, strictkey=False)
def remove(self, key, default=None, strictkey=True):
assert self._assert_invariants()
result = self.__delitem__(key, default, strictkey)
assert self._assert_invariants()
return result
class SmallLRUCache(dict):
"""
SmallLRUCache is faster than LRUCache for small sets. How small? That
depends on your machine and which operations you use most often. Use
performance profiling to determine whether the cache class that you are
using makes any difference to the performance of your program, and if it
does, then run "quick_bench()" in test/test_cache.py to see which cache
implementation is faster for the size of your datasets.
A simple least-recently-used cache. It keeps an LRU queue, and
when the number of items in the cache reaches maxsize, it removes
the least recently used item.
"Looking" at an item or a key such as with "has_key()" makes that
item become the most recently used item.
You can also use "refresh()" to explicitly make an item become the most
recently used item.
Adding an item that is already in the dict *does* make it the
most- recently-used item although it does not change the state of
the dict itself.
"""
class ItemIterator:
def __init__(self, c):
self.c = c
self.i = 0
def __iter__(self):
return self
def next(self):
precondition(self.i <= len(self.c._lru), "The iterated SmallLRUCache doesn't have this many elements. Most likely this is because someone altered the contents of the LRUCache while the iteration was in progress.", self.i, self.c)
precondition(dict.has_key(self.c, self.c._lru[self.i]), "The iterated SmallLRUCache doesn't have this key. Most likely this is because someone altered the contents of the LRUCache while the iteration was in progress.", self.i, self.c._lru[self.i], self.c)
if self.i == len(self.c._lru):
raise StopIteration
k = self.i
self.i += 1
return (k, dict.__getitem__(self.c, k),)
class KeyIterator:
def __init__(self, c):
self.c = c
self.i = 0
def __iter__(self):
return self
def next(self):
precondition(self.i <= len(self.c._lru), "The iterated SmallLRUCache doesn't have this many elements. Most likely this is because someone altered the contents of the LRUCache while the iteration was in progress.", self.i, self.c)
precondition(dict.has_key(self.c, self.c._lru[self.i]), "The iterated SmallLRUCache doesn't have this key. Most likely this is because someone altered the contents of the LRUCache while the iteration was in progress.", self.i, self.c._lru[self.i], self.c)
if self.i == len(self.c._lru):
raise StopIteration
k = self.i
self.i += 1
return k
class ValueIterator:
def __init__(self, c):
self.c = c
self.i = 0
def __iter__(self):
return self
def next(self):
precondition(self.i <= len(self.c._lru), "The iterated SmallLRUCache doesn't have this many elements. Most likely this is because someone altered the contents of the LRUCache while the iteration was in progress.", self.i, self.c)
precondition(dict.has_key(self.c, self.c._lru[self.i]), "The iterated SmallLRUCache doesn't have this key. Most likely this is because someone altered the contents of the LRUCache while the iteration was in progress.", self.i, self.c._lru[self.i], self.c)
if self.i == len(self.c._lru):
raise StopIteration
k = self.i
self.i += 1
return dict.__getitem__(self.c, k)
def __init__(self, initialdata={}, maxsize=128):
dict.__init__(self, initialdata)
self._lru = initialdata.keys() # contains keys
self._maxsize = maxsize
over = len(self) - self._maxsize
if over > 0:
map(dict.__delitem__, [self]*over, self._lru[:over])
del self._lru[:over]
assert self._assert_invariants()
def _assert_invariants(self):
_assert(len(self._lru) <= self._maxsize, "Size is required to be <= maxsize.")
_assert(len(filter(lambda x: dict.has_key(self, x), self._lru)) == len(self._lru), "Each key in self._lru is required to be in dict.", filter(lambda x: not dict.has_key(self, x), self._lru), len(self._lru), self._lru, len(self), self)
_assert(len(filter(lambda x: x in self._lru, self.keys())) == len(self), "Each key in dict is required to be in self._lru.", filter(lambda x: x not in self._lru, self.keys()), len(self._lru), self._lru, len(self), self)
_assert(len(self._lru) == len(self), "internal consistency", filter(lambda x: x not in self.keys(), self._lru), len(self._lru), self._lru, len(self), self)
_assert(len(self._lru) <= self._maxsize, "internal consistency", len(self._lru), self._lru, self._maxsize)
return True
def insert(self, key, item=None):
assert self._assert_invariants()
result = self.__setitem__(key, item)
assert self._assert_invariants()
return result
def setdefault(self, key, default=None):
assert self._assert_invariants()
if not self.has_key(key):
self[key] = default
assert self._assert_invariants()
return self[key]
def __setitem__(self, key, item=None):
assert self._assert_invariants()
if dict.has_key(self, key):
self._lru.remove(key)
else:
if len(self._lru) == self._maxsize:
# If this insert is going to increase the size of the cache to bigger than maxsize:
killkey = self._lru.pop(0)
dict.__delitem__(self, killkey)
dict.__setitem__(self, key, item)
self._lru.append(key)
assert self._assert_invariants()
return item
def remove(self, key, default=None, strictkey=True):
assert self._assert_invariants()
result = self.__delitem__(key, default, strictkey)
assert self._assert_invariants()
return result
def __delitem__(self, key, default=None, strictkey=True):
"""
@param strictkey: True if you want a KeyError in the case that
key is not there, False if you want a reference to default
in the case that key is not there
@param default: the object to return if key is not there; This
is ignored if strictkey.
@return: the object removed or default if there is not item by
that key and strictkey is False
"""
assert self._assert_invariants()
if dict.has_key(self, key):
val = dict.__getitem__(self, key)
dict.__delitem__(self, key)
self._lru.remove(key)
assert self._assert_invariants()
return val
elif strictkey:
assert self._assert_invariants()
raise KeyError, key
else:
assert self._assert_invariants()
return default
def clear(self):
assert self._assert_invariants()
dict.clear(self)
self._lru = []
assert self._assert_invariants()
def update(self, otherdict):
"""
@return: self
"""
assert self._assert_invariants()
if len(otherdict) > self._maxsize:
# Handling this special case here makes it possible to implement the
# other more common cases faster below.
dict.clear(self)
self._lru = []
if self._maxsize > (len(otherdict) - self._maxsize):
dict.update(self, otherdict)
while len(self) > self._maxsize:
dict.popitem(self)
else:
for k, v, in otherdict.iteritems():
if len(self) == self._maxsize:
break
dict.__setitem__(self, k, v)
self._lru = dict.keys(self)
assert self._assert_invariants()
return self
for k in otherdict.iterkeys():
if dict.has_key(self, k):
self._lru.remove(k)
self._lru.extend(otherdict.keys())
dict.update(self, otherdict)
over = len(self) - self._maxsize
if over > 0:
map(dict.__delitem__, [self]*over, self._lru[:over])
del self._lru[:over]
assert self._assert_invariants()
return self
def has_key(self, key):
assert self._assert_invariants()
if dict.has_key(self, key):
assert key in self._lru, "key: %s, self._lru: %s" % tuple(map(hr, (key, self._lru,)))
self._lru.remove(key)
self._lru.append(key)
assert self._assert_invariants()
return True
else:
assert self._assert_invariants()
return False
def refresh(self, key, strictkey=True):
"""
@param strictkey: raise a KeyError exception if key isn't present
"""
assert self._assert_invariants()
if not dict.has_key(self, key):
if strictkey:
raise KeyError, key
return
self._lru.remove(key)
self._lru.append(key)
def popitem(self):
if not self._lru:
raise KeyError, 'popitem(): dictionary is empty'
k = self._lru[-1]
obj = self.remove(k)
return (k, obj,)
class LinkedListLRUCache:
    """
    This is slower and less featureful than LRUCache. It is included
    here for comparison purposes.

    Implementation of a length-limited O(1) LRU queue.
    Built for and used by PyPE:
    http://pype.sourceforge.net
    original Copyright 2003 Josiah Carlson.
    useful methods and _assert_invariant added by Zooko for testing and benchmarking purposes

    Implementation note: self.d maps key -> Node; the Nodes form a
    doubly-linked list with self.first at the least-recently-used end and
    self.last at the most-recently-used end.
    """
    class Node:
        # A doubly-linked-list node; 'me' holds the (key, value) pair.
        def __init__(self, prev, me):
            self.prev = prev
            self.me = me
            self.next = None
    def __init__(self, initialdata={}, maxsize=128):
        self._maxsize = max(maxsize, 1)
        self.d = {}          # key -> Node
        self.first = None    # least-recently-used node
        self.last = None     # most-recently-used node
        for key, value in initialdata.iteritems():
            self[key] = value
    def clear(self):
        self.d = {}
        self.first = None
        self.last = None
    def update(self, otherdict):
        for (k, v,) in otherdict.iteritems():
            self[k] = v
    def setdefault(self, key, default=None):
        if not self.has_key(key):
            self[key] = default
        return self[key]
    def _assert_invariants(self):
        # Local helpers walk the linked list directly so the invariants are
        # checked against the list itself rather than against self.d.
        def lliterkeys(self):
            cur = self.first
            while cur != None:
                cur2 = cur.next
                yield cur.me[0]
                cur = cur2
        def lllen(self):
            # Ugh.
            acc = 0
            for x in lliterkeys(self):
                acc += 1
            return acc
        def llhaskey(self, key):
            # Ugh.
            for x in lliterkeys(self):
                if x is key:
                    return True
            return False
        for k in lliterkeys(self):
            _assert(self.d.has_key(k), "Each key in the linked list is required to be in the dict.", k)
        for k in self.d.iterkeys():
            _assert(llhaskey(self, k), "Each key in the dict is required to be in the linked list.", k)
        _assert(lllen(self) == len(self.d), "internal consistency", self, self.d)
        _assert(len(self.d) <= self._maxsize, "Size is required to be <= maxsize.")
        return True
    def __contains__(self, obj):
        return obj in self.d
    def has_key(self, key):
        return self.__contains__(key)
    def __getitem__(self, obj):
        # Re-inserting the pair freshens it (moves it to the recent end).
        a = self.d[obj].me
        self[a[0]] = a[1]
        return a[1]
    def get(self, key, default=None, strictkey=False):
        if not self.has_key(key) and strictkey:
            raise KeyError, key
        if self.has_key(key):
            return self.__getitem__(key)
        else:
            return default
    def __setitem__(self, obj, val):
        if obj in self.d:
            del self[obj]
        # Append a new node at the most-recently-used end.
        nobj = self.Node(self.last, (obj, val))
        if self.first is None:
            self.first = nobj
        if self.last:
            self.last.next = nobj
        self.last = nobj
        self.d[obj] = nobj
        if len(self.d) > self._maxsize:
            # Evict the least-recently-used node (self.first).
            if self.first == self.last:
                # NOTE(review): this branch drops the list pointers without
                # purging self.d -- presumably unreachable when maxsize >= 1;
                # confirm before relying on it.
                self.first = None
                self.last = None
                return
            a = self.first
            a.next.prev = None
            self.first = a.next
            a.next = None
            del self.d[a.me[0]]
            del a
    def insert(self, key, item=None):
        return self.__setitem__(key, item)
    def __delitem__(self, obj, default=None, strictkey=True):
        # Unlink the node from its neighbours (or from first/last if it is
        # at an end), then drop it from the dict.
        if self.d.has_key(obj):
            nobj = self.d[obj]
            if nobj.prev:
                nobj.prev.next = nobj.next
            else:
                self.first = nobj.next
            if nobj.next:
                nobj.next.prev = nobj.prev
            else:
                self.last = nobj.prev
            val = self.d[obj]
            del self.d[obj]
            return val.me[1]
        elif strictkey:
            raise KeyError, obj
        else:
            return default
    def remove(self, obj, default=None, strictkey=True):
        return self.__delitem__(obj, default=default, strictkey=strictkey)
    def __iter__(self):
        # Yields VALUES (not keys), from least- to most-recently used.
        cur = self.first
        while cur != None:
            cur2 = cur.next
            yield cur.me[1]
            cur = cur2
    def iteritems(self):
        # Yields (key, value) pairs from least- to most-recently used.
        cur = self.first
        while cur != None:
            cur2 = cur.next
            yield cur.me
            cur = cur2
    def iterkeys(self):
        return iter(self.d)
    def itervalues(self):
        for i,j in self.iteritems():
            yield j
    def values(self):
        l = []
        for v in self.itervalues():
            l.append(v)
        return l
    def keys(self):
        return self.d.keys()
    def __len__(self):
        return self.d.__len__()
    def popitem(self):
        # Removes the MOST recently used pair and returns it.
        i = self.last.me
        obj = self.remove(i[0])
        return obj
| gpl-3.0 |
gerard-geer/LUMA | Server/requesthandler.py | 1 | 20376 | #module: requesthandler.py
# LUMA copyright (C) Gerard Geer 2014-2015
from datetime import datetime
from lightmanager import LightManager
from aliasmanager import AliasManager
from clientmanager import ClientManager
from json import loads
from uuid import uuid4
from singleton import Singleton
@Singleton
class RequestHandler(object):
"""
A Singleton that tidies up all "onRequest" behaviours.
Slots:
_lm (LightManager): Private. A LightManager singleton for use in
parsing lights in requests.
_am (AliasManager): Private. An AliasManager singleton used to convert
client names to client IPs.
_cm (ClientManager) Private. A ClientManager singleton used to
communicate with clients.
"""
__slots__ = ('_lm', '_am', '_cm')
def __init__(self):
    """
    Fetches the shared LightManager, AliasManager and ClientManager
    singletons used by every request handler method.
    """
    self._lm = LightManager.Instance()
    self._am = AliasManager.Instance()
    self._cm = ClientManager.Instance()
def load(self):
    """
    Loads the alias and light manager configurations from file.

    Parameters:
        None.

    Returns:
        (bool, bool) Where the first term is the success in loading
        the Alias Manager and the second the Light Manager.

    Preconditions:
        None.

    Postconditions:
        The Light and Alias Managers configurations are attempted to
        be loaded.
    """
    # Load the aliases first, then the lights, and report both results.
    aliases_loaded = self._am.load()
    lights_loaded = self._lm.load()
    return (aliases_loaded, lights_loaded)
def _sanitizeLightQuery(self, req):
    """
    Sanitizes a light query: the request must be a dict carrying the
    keys 'uuid' and 'query', each mapped to a string.

    Parameters:
        req (JSON): The Dictionary that contains the request.

    Returns:
        True if the light query was valid, false otherwise.

    Preconditions:
        None.
    """
    # Reject anything that isn't a dictionary outright.
    if not isinstance(req, dict):
        print('Not a dictionary.')
        return False
    # Every required key must be present, and every required value must
    # be a string (str or unicode under Python 2).
    for field in ['uuid', 'query']:
        if field not in req.keys():
            print(field + ' not in req.keys()')
            return False
    for field in ['uuid', 'query']:
        value = req[field]
        if not isinstance(value, str) and \
           not isinstance(value, unicode):
            print(field + ' not string. Type: '+str(type(value)))
            return False
    # Everything checked out.
    return True
def _sanitizeStateQuery(self, req):
    """
    Sanitizes a state query: the request must be a dict carrying the
    keys 'uuid' and 'id', each mapped to a string.

    Parameters:
        req (JSON): The Dictionary that contains the request.

    Returns:
        True if the light query was valid, false otherwise.

    Preconditions:
        None.

    Postconditions:
        None.
    """
    # Reject anything that isn't a dictionary outright.
    if not isinstance(req, dict):
        print('Not a dictionary.')
        return False
    # Every required key must be present, and every required value must
    # be a string (str or unicode under Python 2).
    for field in ['uuid', 'id']:
        if field not in req.keys():
            print(field + ' not in req.keys()')
            return False
    for field in ['uuid', 'id']:
        value = req[field]
        if not isinstance(value, str) and \
           not isinstance(value, unicode):
            print(field + ' not string. Type: '+str(type(value)))
            return False
    # Everything checked out.
    return True
def _sanitizeAddQuery(self, req):
    """
    Sanitizes a light adding query. This makes sure that the query is a
    JSON Dictionary, then that it has the required keys, and the data
    types of those keys' values are correct.

    Parameters:
        req (JSON): The Dictionary that contains the request.

    Returns:
        True if the light query was valid, false otherwise.

    Preconditions:
        None.

    Postconditions:
        None.
    """
    # Make sure the request is a Dictionary.
    if not isinstance(req, dict):
        print('Not a dictionary.')
        return False
    # Make sure all required keys are present.
    for key in ['name', 'client', 'address', 'permitted',
                'exists', 'id', 'r_c', 'g_c', 'b_c']:
        if key not in req.keys():
            print(key + ' not in req.keys()')
            return False
    # The string-valued fields (str or unicode under Python 2).
    for field in ['name', 'client', 'address']:
        if not isinstance(req[field], str) and \
           not isinstance(req[field], unicode):
            print(field + ' is not string. Type: '+str(type(req[field])))
            return False
    if not isinstance(req['permitted'], list):
        print('permitted is not a list. Type: '+str(type(req['permitted'])))
        return False
    if not isinstance(req['exists'], bool):
        print('exists is not a boolean. Type: '+str(type(req['exists'])))
        return False
    # 'id' only has to be a string when the light already exists.
    if req['exists'] and \
       not isinstance(req['id'], str) and \
       not isinstance(req['id'], unicode):
        # Bug fix: this previously printed req['string'], a key that never
        # exists, so a non-string id raised KeyError instead of returning
        # False.
        print('id is not a string. Type: '+str(type(req['id'])))
        return False
    # The three channel pins must be integers.
    for field in ['r_c', 'g_c', 'b_c']:
        if not isinstance(req[field], int):
            print(field + ' is not an integer. Type: '+str(type(req[field])))
            return False
    # Finally after all that checks out we can return True.
    return True
def _sanitizeStateUpdate(self, req):
    """
    Sanitizes a state update request: the request must be a dict with a
    string 'uuid' and a list 'lights'.

    Parameters:
        req (JSON): The Dictionary that contains the request.

    Returns:
        True if the light query was valid, false otherwise.

    Preconditions:
        None.

    Postconditions:
        None.
    """
    if not isinstance(req, dict):
        return False
    # Both keys have to be present.
    if 'uuid' not in req.keys():
        return False
    if 'lights' not in req.keys():
        return False
    # 'uuid' must be a string (str or unicode under Python 2).
    uuid_is_string = isinstance(req['uuid'], str) or \
                     isinstance(req['uuid'], unicode)
    if not uuid_is_string:
        return False
    # 'lights' must be a list.
    return isinstance(req['lights'], list)
def lightQuery(self, req):
    """
    Handles a query for light instances.

    Parameters:
        req (JSON String): The JSON String that describes the request.

    Returns:
        A dictionary containing the response to the request.

    Preconditions:
        The request be a valid JSON object for this request type.

    Postconditions:
        None.
    """
    # Try to decode the JSON.
    try:
        if isinstance(req, unicode) or isinstance(req, str):
            req = loads(req)
    except:
        print(' Could not decode JSON of request.')
        return {'lights':[]}
    # If the request was invalid, we need to transparently return
    # nothing.
    if not self._sanitizeLightQuery(req):
        print(' Request did not pass sanitation.')
        return {'lights':[]}
    # Print the query.
    # Long queries are truncated to 71 characters for log readability.
    printedQuery = req['query']
    if len(printedQuery) > 71:
        printedQuery = printedQuery[:68]+'...'
    print(' Query: '+printedQuery)
    # Create a place to store the query results.
    requested = []
    # Get the subset of all allowed lights.
    allowed = self._lm.getAllowedSubset(req['uuid'])
    # Gets possible aliases should the query be an IP address.
    possible = self._am.getPossibleAliases(req['query'])
    # If the user just sends us nothing, we just send all that's possible.
    if len(req['query']) == 0:
        requested.extend(allowed)
    else:
        # Case-insensitive substring match against name, id and client;
        # only lights that miss all three are checked against the aliases.
        for light in allowed:
            if req['query'].lower() in light['name'].lower() or \
               req['query'].lower() in light['id'].lower() or \
               req['query'].lower() in light['client'].lower():
                requested.append({'id':light['id'], \
                                  'name':light['name'], \
                                  'client':light['client']})
            else:
                # NOTE(review): a light whose client matches several
                # aliases is appended once per matching alias -- confirm
                # duplicate entries are acceptable to the interface.
                for alias in possible:
                    if alias in light['client']:
                        requested.append({'id':light['id'], \
                                          'name':light['name'], \
                                          'client':light['client']})
    print(' Query returned '+str(len(requested))+' lights.')
    return {'lights':requested}
def stateQuery(self, req):
    """
    Handles a request for a light's state.

    Parameters:
        req (JSON String): The JSON String that describes the request.

    Returns:
        A dictionary containing the response to the request.

    Preconditions:
        The request be a valid JSON object for this request type.

    Postconditions:
        The state of the lights supplied is updated, if they exist.
    """
    # Try to decode the JSON.
    try:
        if isinstance(req, unicode) or isinstance(req, str):
            req = loads(req)
    except:
        print(' Could not decode JSON of request.')
        return {'success': False,
                'message': 'Invalid query.',
                'id': None}
    # Sanitize the request.
    if not self._sanitizeStateQuery(req):
        print(' Request did not pass sanitation.')
        return {'success': False,
                'message': 'Invalid query.',
                'id': None}
    # Get the light.
    light = self._lm.getLight(req['id'])
    print(' By UUID: '+req['uuid'])
    if light == None:
        print(' Light does not exist on server.')
        return {'success': False,
                'message': 'Light does not exist on server.',
                'id': req['id']}
    print(' For: '+str(light['id'])+' ('+str(light['name'])+')')
    # Check to see if the user can access the light.
    if not self._lm.isAllowed(req['uuid'], req['id']):
        print(' User tried to access forbidden light.')
        return {'success': False,
                'message': 'User not allowed to access light.',
                'id': req['id']}
    # Try to parlay an address from the client alias. If we can't,
    # that's another problem.
    address = self._am.getAddress(light['client'])
    if address == None:
        print(' Unrecognized client name/alias.')
        return {'success': False,
                'message': 'Client alias not recognized.',
                'id': req['id'],
                'client': light['client']}
    # If we can, well, that's good. Forward a 'status' request to the
    # client that owns the light.
    print(' To: '+address+' ('+light['client']+')')
    res = self._cm.sendRequest(address, 'status', req['id'])
    # Now if we were unable to connect to the client we have to adapt.
    if res['type'] == 'error':
        print(' Could not connect to client. '+res['message'])
        return {'success': False,
                'message': 'Could not connect to client. Error: '+res['message'],
                'id': req['id'],
                'client': light['client']}
    else:
        # NOTE(review): the success response carries no 'id' key unless
        # the client's data payload supplies one -- confirm the interface
        # tolerates that.
        resp = {'success': res['type'] == 'status',
                'message': res['message'],
                'client': light['client']}
        # Append the keys from the client's response's data to our response
        # being sent back to the interface.
        resp.update(res['data'])
        return resp
def lightUpdate(self, req):
    """
    Handles a request to update the state of one or more lights.

    Parameters:
        req (dict): Already-parsed request; must contain 'uuid' and a
            'lights' list of per-light state dictionaries (each with at
            least 'id', 'name' and 'client' keys).

    Returns:
        A dictionary with the per-light results under 'lights' (each
        submitted dict is annotated in place with 'success'/'message'),
        plus an overall 'success' flag and 'message'.

    Preconditions:
        The request be a valid JSON object for this request type.

    Postconditions:
        The state of the lights supplied is updated, if they exist.
    """
    # Since the light update request is already JSON, we don't
    # need to worry about parsing it -- just sanitize the structure.
    if not self._sanitizeStateUpdate(req):
        print(' Could not decode JSON request.')
        return {'lights':None,
                'success': False,
                'message': 'Request poorly formed.'}
    print(' By UUID: '+req['uuid'])
    # Create a list to store our updated states in.
    updated = []
    # Go through each submitted state and try to abide.  Failures are
    # recorded per light; they do not abort the whole batch.
    for submitted in req['lights']:
        print(' Updating light: '+submitted['id']+' ('+submitted['name']+'):')
        # Validate the light's structure/values first.
        validationError = self._cm.validateLight(submitted)
        # If it fails validation, we have to reject it and move on.
        if validationError:
            print(' Light failed validation: '+validationError)
            submitted['success'] = False
            submitted['message'] = validationError
            updated.append(submitted)
            continue
        # At this point we have a valid light. Now we have to
        # get our own copy of it from the light manager.
        serverVersion = self._lm.getLight(submitted['id'])
        # If we don't have a record of the light, reject it.
        if not serverVersion:
            print(' Light not in server records.')
            submitted['success'] = False
            submitted['message'] = 'Light not in server records.'
            updated.append(submitted)
            continue
        # If the client doesn't match our record, reject the update to
        # keep lights from being silently moved between clients.
        if serverVersion['client'] != submitted['client']:
            print(' Client does not match server records.')
            submitted['success'] = False
            submitted['message'] = 'Client does not match server records.'
            updated.append(submitted)
            continue
        # Finally we can start making headway. Resolve the client alias
        # to an address the update can be sent to.
        addr = self._am.getAddress(submitted['client'])
        # Unknown alias: reject this light.
        if not addr:
            print(' Client not recognized.')
            submitted['success'] = False
            submitted['message'] = 'Client not recognized.'
            updated.append(submitted)
            continue
        # Now that we have a valid light and a valid address, let's
        # send the update.
        print(' To: '+str(addr)+' ('+str(submitted['client'])+')')
        clientRes = self._cm.sendRequest(addr, 'change', submitted)
        # If that action errors out, we have to pass it up the ladder too.
        if clientRes['type'] == 'error':
            print(' Error in client interaction: '+clientRes['message'])
            submitted['success'] = False
            submitted['message'] = clientRes['message']
            updated.append(submitted)
            continue
        # At this point we should have finally had a successful update.
        print(' Light successfully updated.')
        submitted['success'] = True
        submitted['message'] = clientRes['message']
        updated.append(submitted)
    print(' All requested lights handled.')
    # Overall success is reported even if individual lights failed; the
    # per-light 'success' flags carry the detail.
    return {'lights': updated,
            'success': True,
            'message': None}
def addQuery(self, req):
    """
    Handles a query for adding a Light.

    Parameters:
        req (JSON String or dict): The JSON String (or pre-parsed dict)
            that describes the request.  Expected keys: 'name', 'client',
            'address', 'exists', 'permitted', 'r_c'/'g_c'/'b_c' pin
            numbers, and 'id' when 'exists' is true.

    Returns:
        A dictionary containing the response to the request
        ({'success': bool, 'message': str or None}).

    Preconditions:
        The request be a valid JSON object for this request type.

    Postconditions:
        None.
    """
    # Try to decode the JSON.
    # NOTE(review): Python 2 code (`unicode`).  The bare `except:` also
    # swallows KeyboardInterrupt/SystemExit; narrowing it would be safer,
    # but the exact exceptions raised by `loads` (imported at file top)
    # are not visible here -- TODO confirm and narrow.
    try:
        if isinstance(req, unicode) or isinstance(req, str):
            req = loads(req)
    except:
        print(' Could not decode JSON of request.')
        return {'success':False, 'message':'Could not decode JSON of request.'}
    # If the request was invalid, we need to transparently return
    # nothing.
    if not self._sanitizeAddQuery(req):
        print(' Request did not pass sanitation.')
        return {'success':False, 'message':'Request did not pass sanitation. '}
    # Print some info about the requested light.
    print(' Name: '+str(req['name']))
    print(' Client: '+str(req['client']))
    print(' Exists: '+str(req['exists']))
    if req['exists']:
        print(' ID: '+str(req['id']))
    print(' # Permitted: '+str(len(req['permitted'])))
    print(' Pins: r={0} g={1} b={2}'.format(req['r_c'],req['g_c'],req['b_c']))
    # Just to make sure we're not adding a light to a rogue client, we
    # make sure we know where it's going: the address supplied in the
    # request must match our alias manager's record.
    addr = self._am.getAddress(req['client'])
    if addr != req['address']:
        print(" Request address '"+str(req['address'])+ \
            "' did not match server record")
        return {'success':False, 'message':"Request address '"+ \
            str(req['address'])+"' did not match server record. "}
    if not req['exists']:
        # Brand-new light: mint a fresh UUID for it.
        freshID = str(uuid4())
        # Need to create the request we're sending to the client.
        cReq = {
            'name': req['name'],
            'id': freshID,
            'r_c': req['r_c'],
            'g_c': req['g_c'],
            'b_c': req['b_c']
        }
        # Send the client our request.
        print(' Adding light to client.')
        res = self._cm.sendRequest(addr, 'add', cReq)
        # If the request errors out, then the light wasn't added to the client
        # and we shan't add it to the server either.
        if res['type'] == 'error':
            print(' Client error: '+res['message'])
            return {'success': False, 'message': res['message']}
        # Finally since the response was good we add the light to the server.
        print(' Adding new light to server.')
        if not self._lm.addLight(freshID, req['name'], req['client'], req['permitted']):
            print(' Could not add light to server.')
            return {'success':False, 'message':' Could not add light to server.'}
    # If the light supposedly already exists, we should check to make sure
    # by asking the client for its status before recording it.
    else:
        print(' Checking if light actually exists.')
        res = self._cm.sendRequest(addr, 'status', req['id'])
        if res['type'] != 'status':
            print(" The '"+str(req['name'])+"' Light doesn't actually exist"+ \
                " on the '"+str(req['client'])+"' client, or the given ID was wrong.")
            return {'success':False,
                    'message':" The '"+str(req['name'])+"' Light doesn't actually exist"+ \
                    " on the '"+str(req['client'])+"' client, or the given ID was wrong."}
        else:
            print(' Adding existing light to server.')
            # NOTE(review): unlike the new-light branch, the return value
            # of addLight is not checked here -- confirm this is intended.
            self._lm.addLight(req['id'], req['name'], req['client'], req['permitted'])
    print(' done.')
    return {'success':True, 'message':None}
def addUUID(self, req):
    """
    Grants a user's UUID access to each of the requested lights.

    Parameters:
        req (dict): Parsed request containing 'uuid' (the user) and
            'lights' (the light IDs to grant access to).

    Returns:
        The light manager's response dictionary for the operation.

    Preconditions:
        The request be a valid JSON object for this request type.

    Postconditions:
        The given UUID is added to the given lights if they exist.
    """
    # The light manager owns the permission records; delegate to it.
    result = self._lm.addUUIDtoSubset(req['uuid'], req['lights'])
    return result
def removeUUID(self, req):
    """
    Revokes a user's UUID from one or more lights.

    Parameters:
        req (dict): Parsed request containing 'uuid' (the user) and
            'lights' (the light IDs to revoke access from).

    Returns:
        The light manager's response dictionary for the operation.

    Preconditions:
        The request be a valid JSON object for this request type.

    Postconditions:
        The given UUID is removed from the given lights, if they exist.
    """
    # Permission records live in the light manager; hand the work off.
    outcome = self._lm.removeUUIDfromSubset(req['uuid'], req['lights'])
    return outcome
def removeLight(self, req):
    """
    Removes a light from the light manager.

    Parameters:
        req (dict): Parsed request containing 'uuid' (requesting user)
            and 'id' (the light to delete).

    Returns:
        A dictionary with 'success' and 'message' describing the outcome.

    Preconditions:
        The request be a valid JSON object for this request type.

    Postconditions:
        The light specified is removed if it existed.
    """
    user = req['uuid']
    light_id = req['id']
    # Only users with access to the light may delete it.
    if self._lm.isAllowed(user, light_id):
        if self._lm.deleteLight(light_id):
            return {'success': True, 'message': None}
        # Permitted, but there was nothing to delete.
        return {'success': False, 'message': 'Light does not exist.'}
    return {'success': False,
            'message': 'User not allowed to access light.'}
def backup(self, req):
    """
    Backs up aliases and lights to timestamped JSON files.

    Parameters:
        req (dict): The request dictionary (its contents are not used).

    Returns:
        A dictionary with 'success' True and 'message' None.

    Preconditions:
        The request be a valid JSON object for this request type.

    Postconditions:
        The light manager and alias manager each write a backup file.
    """
    # Use a single timestamp for both files so the pair is easy to
    # match up later.
    stamp = str(datetime.now())
    light_backup = "REMOTE LIGHT BACKUP " + stamp + ".json"
    alias_backup = "REMOTE ALIAS BACKUP " + stamp + ".json"
    self._lm.save(light_backup)
    self._am.save(alias_backup)
    return {'success': True, 'message': None}
def printInfo(self):
    """
    Prints info about the Request Handler and its managers.

    Parameters:
        None.

    Returns:
        None.

    Preconditions:
        The Request Handler is initialized.

    Postconditions:
        A listing of known clients and lights is printed.
    """
    # Get listings of the clients and lights on the server.
    clients = self._am.getPossibleAliases('')
    lights = self._lm.getLightCatalog()
    # Print those listings.  Reuse the `clients` list fetched above
    # instead of querying the alias manager a second time (the original
    # called getPossibleAliases('') twice).
    print('\n Clients: ('+str(len(clients))+')')
    for client in clients:
        print(' '+client)
    # len(lights) is equivalent to len(lights.keys()) and avoids
    # materializing the key list.
    print('\n Lights: ('+str(len(lights))+')')
    for key in lights:
        print(" %-20s : "%str(key)+str(lights[key]))
| gpl-3.0 |
cjld/adventures_in_opencl | experiments/radix/nv/radix.py | 5 | 9590 | #http://documen.tician.de/pyopencl/
import pyopencl as cl
import numpy as np
import struct
import timing
timings = timing.Timing()
#ctx = cl.create_some_context()
mf = cl.mem_flags
class Radix:
    """GPU radix sort of parallel (key, value) arrays using PyOpenCL.

    Port of the NVIDIA OpenCL "RadixSort" SDK sample: each pass sorts
    ``bit_step`` bits of the key via a block-local sort, per-block digit
    counting, an exclusive prefix scan over the counters, and a global
    reorder.  Kernel sources are read from ``Scan_b.cl`` and
    ``RadixSort.cl`` in the working directory.

    NOTE(review): Python 2 code (print statements, classic integer
    division) -- the `/` divisions below rely on truncating int division.
    """

    def __init__(self, max_elements, cta_size, dtype):
        # Fixed kernel/workgroup constants from the SDK sample.
        self.WARP_SIZE = 32
        self.SCAN_WG_SIZE = 256
        # Scan lengths at or above this use the multi-kernel exclusive
        # scan; shorter ones use the single-workgroup naive scan.
        self.MIN_LARGE_ARRAY_SIZE = 4 * self.SCAN_WG_SIZE
        # Number of key bits consumed per radix pass.
        self.bit_step = 4
        self.cta_size = cta_size
        # Element size in bytes of the key/value dtype (e.g. 4 for uint32).
        self.uintsz = dtype.itemsize
        # First platform / first device; presumably an NVIDIA GPU --
        # TODO confirm on other OpenCL setups.
        plat = cl.get_platforms()[0]
        device = plat.get_devices()[0]
        self.ctx = cl.Context(devices=[device])
        self.queue = cl.CommandQueue(self.ctx, device)
        self.loadProgram()
        # The block-sort kernel handles 4 elements per work-item, so
        # round the block count up when max_elements isn't a multiple.
        if (max_elements % (cta_size * 4)) == 0:
            num_blocks = max_elements / (cta_size * 4)
        else:
            num_blocks = max_elements / (cta_size * 4) + 1
        #print "num_blocks: ", num_blocks
        # Scratch buffers: double-buffered keys/values plus per-block
        # digit counters, their scanned sums, and block base offsets.
        self.d_tempKeys = cl.Buffer(self.ctx, mf.READ_WRITE, size=self.uintsz * max_elements)
        self.d_tempValues = cl.Buffer(self.ctx, mf.READ_WRITE, size=self.uintsz * max_elements)
        self.mCounters = cl.Buffer(self.ctx, mf.READ_WRITE, size=self.uintsz * self.WARP_SIZE * num_blocks)
        self.mCountersSum = cl.Buffer(self.ctx, mf.READ_WRITE, size=self.uintsz * self.WARP_SIZE * num_blocks)
        self.mBlockOffsets = cl.Buffer(self.ctx, mf.READ_WRITE, size=self.uintsz * self.WARP_SIZE * num_blocks)
        # Length of the counter array the scan step must process.
        numscan = max_elements/2/cta_size*16
        #print "numscan", numscan
        if numscan >= self.MIN_LARGE_ARRAY_SIZE:
            #MAX_WORKGROUP_INCLUSIVE_SCAN_SIZE 1024
            # Auxiliary buffer holding one partial sum per 1024 elements
            # for the large-array scan.
            self.scan_buffer = cl.Buffer(self.ctx, mf.READ_WRITE, size = self.uintsz * numscan / 1024)

    def loadProgram(self):
        """Compile the scan and radix-sort OpenCL programs from disk."""
        print "build scan"
        f = open("Scan_b.cl", 'r')
        fstr = "".join(f.readlines())
        self.scan_prg = cl.Program(self.ctx, fstr).build()
        print "build radix"
        f = open("RadixSort.cl", 'r')
        fstr = "".join(f.readlines())
        self.radix_prg = cl.Program(self.ctx, fstr).build()

    @timings("Radix Sort")
    def sort(self, num, keys_np, values_np):
        """Sort the first `num` (key, value) pairs in place.

        keys_np/values_np are numpy arrays; both are copied to the GPU,
        sorted over every `bit_step`-bit digit of the key, read back into
        the same arrays and returned.
        """
        self.keys = cl.Buffer(self.ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=keys_np)
        self.values = cl.Buffer(self.ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=values_np)
        key_bits = keys_np.dtype.itemsize * 8
        #print "numElements", num
        #print "key_bits", key_bits
        #print "bit_step", self.bit_step
        # One pass per bit_step-sized digit, least significant first.
        i = 0
        while key_bits > i*self.bit_step:
            #print "i*bit_step", i*self.bit_step
            self.step(self.bit_step, i*self.bit_step, num);
            i += 1;
        self.queue.finish()
        cl.enqueue_read_buffer(self.queue, self.keys, keys_np).wait()
        cl.enqueue_read_buffer(self.queue, self.values, values_np).wait()
        return keys_np, values_np

    @timings("Radix: step")
    def step(self, nbits, startbit, num):
        """One radix pass: block sort, count, scan counters, reorder."""
        self.blocks(nbits, startbit, num)
        self.queue.finish()
        self.find_offsets(startbit, num)
        self.queue.finish()
        array_length = num/2/self.cta_size*16
        #print "array length in step", array_length
        # Small counter arrays fit a single-workgroup scan; large ones
        # need the three-kernel exclusive scan.
        if array_length < self.MIN_LARGE_ARRAY_SIZE:
            self.naive_scan(num)
        else:
            self.scan(self.mCountersSum, self.mCounters, 1, array_length);
        self.queue.finish()
        #self.naive_scan(num)
        self.reorder(startbit, num)
        self.queue.finish()

    @timings("Radix: blocks")
    def blocks(self, nbits, startbit, num):
        """Sort each block of 4*cta_size elements locally by the digit."""
        totalBlocks = num/4/self.cta_size
        global_size = (self.cta_size*totalBlocks,)
        local_size = (self.cta_size,)
        blocks_args = ( self.keys,
                        self.values,
                        self.d_tempKeys,
                        self.d_tempValues,
                        np.uint32(nbits),
                        np.uint32(startbit),
                        np.uint32(num),
                        np.uint32(totalBlocks),
                        cl.LocalMemory(4*self.cta_size*self.uintsz),
                        cl.LocalMemory(4*self.cta_size*self.uintsz)
                        )
        self.radix_prg.radixSortBlocksKeysValues(self.queue, global_size, local_size, *(blocks_args)).wait()
        #self.radix_prg.radixSortBlocksKeysOnly(self.queue, global_size, local_size, *(blocks_args)).wait()

    @timings("Radix: find offsets")
    def find_offsets(self, startbit, num):
        """Count per-block digit occurrences and record block offsets."""
        totalBlocks = num/2/self.cta_size
        global_size = (self.cta_size*totalBlocks,)
        local_size = (self.cta_size,)
        offsets_args = (    self.d_tempKeys,
                            self.d_tempValues,
                            self.mCounters,
                            self.mBlockOffsets,
                            np.uint32(startbit),
                            np.uint32(num),
                            np.uint32(totalBlocks),
                            cl.LocalMemory(2*self.cta_size*self.uintsz),
                        )
        self.radix_prg.findRadixOffsets(self.queue, global_size, local_size, *(offsets_args)).wait()

    @timings("Radix: naive scan")
    def naive_scan(self, num):
        """Single-workgroup prefix scan of the digit counters."""
        nhist = num/2/self.cta_size*16
        global_size = (nhist,)
        local_size = (nhist,)
        extra_space = nhist / 16 #NUM_BANKS defined as 16 in RadixSort.cpp
        shared_mem_size = self.uintsz * (nhist + extra_space)
        scan_args = (   self.mCountersSum,
                        self.mCounters,
                        np.uint32(nhist),
                        cl.LocalMemory(2*shared_mem_size)
                    )
        self.radix_prg.scanNaive(self.queue, global_size, local_size, *(scan_args)).wait()

    @timings("Radix: scan")
    def scan(self, dst, src, batch_size, array_length):
        """Three-phase exclusive scan for counter arrays too large for
        a single workgroup: local scans, scan of partial sums, then a
        uniform update."""
        self.scan_local1(   dst,
                            src,
                            batch_size * array_length / (4 * self.SCAN_WG_SIZE),
                            4 * self.SCAN_WG_SIZE)
        self.queue.finish()
        self.scan_local2(   dst,
                            src,
                            batch_size,
                            array_length / (4 * self.SCAN_WG_SIZE))
        self.queue.finish()
        self.scan_update(dst, batch_size * array_length / (4 * self.SCAN_WG_SIZE))
        self.queue.finish()

    @timings("Scan: local1")
    def scan_local1(self, dst, src, n, size):
        """Phase 1: exclusive scan inside each 4*SCAN_WG_SIZE chunk."""
        global_size = (n * size / 4,)
        local_size = (self.SCAN_WG_SIZE,)
        scan_args = (   dst,
                        src,
                        cl.LocalMemory(2 * self.SCAN_WG_SIZE * self.uintsz),
                        np.uint32(size)
                    )
        self.scan_prg.scanExclusiveLocal1(self.queue, global_size, local_size, *(scan_args)).wait()

    @timings("Scan: local2")
    def scan_local2(self, dst, src, n, size):
        """Phase 2: scan the per-chunk totals into scan_buffer."""
        elements = n * size
        # Round the global size up to a multiple of the workgroup size.
        dividend = elements
        divisor = self.SCAN_WG_SIZE
        if dividend % divisor == 0:
            global_size = (dividend,)
        else:
            global_size = (dividend - dividend % divisor + divisor,)
        local_size = (self.SCAN_WG_SIZE, )
        scan_args = (   self.scan_buffer,
                        dst,
                        src,
                        cl.LocalMemory(2 * self.SCAN_WG_SIZE * self.uintsz),
                        np.uint32(elements),
                        np.uint32(size)
                    )
        self.scan_prg.scanExclusiveLocal2(self.queue, global_size, local_size, *(scan_args)).wait()

    @timings("Scan: update")
    def scan_update(self, dst, n):
        """Phase 3: add the scanned chunk totals back into each chunk."""
        global_size = (n * self.SCAN_WG_SIZE,)
        local_size = (self.SCAN_WG_SIZE,)
        scan_args = (   dst,
                        self.scan_buffer
                    )
        self.scan_prg.uniformUpdate(self.queue, global_size, local_size, *(scan_args)).wait()

    @timings("Scan: reorder")
    def reorder(self, startbit, num):
        """Scatter keys/values to their globally sorted positions using
        the scanned counters and block offsets."""
        totalBlocks = num/2/self.cta_size
        global_size = (self.cta_size*totalBlocks,)
        local_size = (self.cta_size,)
        reorder_args = (    self.keys,
                            self.values,
                            self.d_tempKeys,
                            self.d_tempValues,
                            self.mBlockOffsets,
                            self.mCountersSum,
                            self.mCounters,
                            np.uint32(startbit),
                            np.uint32(num),
                            np.uint32(totalBlocks),
                            cl.LocalMemory(2*self.cta_size*self.uintsz),
                            cl.LocalMemory(2*self.cta_size*self.uintsz)
                        )
        self.radix_prg.reorderDataKeysValues(self.queue, global_size, local_size, *(reorder_args))
        #self.radix_prg.reorderDataKeysOnly(self.queue, global_size, local_size, *(reorder_args))
if __name__ == "__main__":
    # Demo/benchmark: sort n descending keys and check against numpy.
    n = 1048576*2
    #n = 32768*2
    #n = 16384
    #n = 8192
    hashes = np.ndarray((n,1), dtype=np.uint32)
    indices = np.ndarray((n,1), dtype=np.uint32)
    # Keys are n..1 (worst case: fully reversed); values are 0..n-1 so
    # the permutation applied to the keys can be observed.
    for i in xrange(0,n):
        hashes[i] = n - i
        indices[i] = i
    # Reference result for the correctness check below.
    npsorted = np.sort(hashes,0)
    print "hashes before:", hashes[0:20].T
    print "indices before: ", indices[0:20].T
    radix = Radix(n, 128, hashes.dtype)
    #num_to_sort = 32768
    num_to_sort = n
    hashes, indices = radix.sort(num_to_sort, hashes, indices)
    print "hashes after:", hashes[0:20].T
    print "indices after: ", indices[0:20].T
    # Zero norm means the GPU sort matches numpy's result exactly.
    print np.linalg.norm(hashes - npsorted)
    print timings
print timings
| mit |
safwanrahman/mozillians | vendor-local/lib/python/kombu/transport/filesystem.py | 15 | 5565 | """
kombu.transport.filesystem
==========================
Transport using the file system as the message store.
"""
from __future__ import absolute_import
from Queue import Empty
from anyjson import loads, dumps
import os
import shutil
import time
import uuid
import tempfile
from . import virtual
from kombu.exceptions import StdConnectionError, StdChannelError
from kombu.utils import cached_property
VERSION = (1, 0, 0)
__version__ = ".".join(map(str, VERSION))
# needs win32all to work on Windows
if os.name == 'nt':
    # Windows: emulate flock-style advisory locking via the Win32 API.
    import win32con
    import win32file
    import pywintypes

    LOCK_EX = win32con.LOCKFILE_EXCLUSIVE_LOCK
    # 0 is the default
    LOCK_SH = 0                                     # noqa
    LOCK_NB = win32con.LOCKFILE_FAIL_IMMEDIATELY    # noqa
    __overlapped = pywintypes.OVERLAPPED()

    def lock(file, flags):
        # Lock the region [0, 0xffff0000) -- effectively the whole file.
        hfile = win32file._get_osfhandle(file.fileno())
        win32file.LockFileEx(hfile, flags, 0, 0xffff0000, __overlapped)

    def unlock(file):
        hfile = win32file._get_osfhandle(file.fileno())
        win32file.UnlockFileEx(hfile, 0, 0xffff0000, __overlapped)

elif os.name == 'posix':
    # POSIX: use the native flock(2) advisory lock.
    import fcntl
    from fcntl import LOCK_EX, LOCK_SH, LOCK_NB     # noqa

    def lock(file, flags):                          # noqa
        fcntl.flock(file.fileno(), flags)

    def unlock(file):                               # noqa
        fcntl.flock(file.fileno(), fcntl.LOCK_UN)
else:
    # No locking primitive available on other platforms; refuse to load.
    raise RuntimeError(
        'Filesystem plugin only defined for NT and POSIX platforms')
class Channel(virtual.Channel):
    """Virtual channel that persists messages as ``*.msg`` files on disk.

    Producers serialize payloads to JSON and write them into
    ``data_folder_out``; consumers pick the oldest matching file out of
    ``data_folder_in`` (point both options at the same directory for a
    simple single-host queue).  Writers hold an exclusive file lock (see
    the module-level ``lock``/``unlock``) while writing.
    """

    def _put(self, queue, payload, **kwargs):
        """Put `message` onto `queue`."""
        # Millisecond-timestamp prefix keeps filenames sortable (FIFO
        # consumption order); the uuid4 component avoids collisions.
        filename = '%s_%s.%s.msg' % (int(round(time.time() * 1000)),
                                     uuid.uuid4(), queue)
        filename = os.path.join(self.data_folder_out, filename)

        f = None
        try:
            f = open(filename, 'wb')
            lock(f, LOCK_EX)
            f.write(dumps(payload))
        except (IOError, OSError):
            raise StdChannelError(
                'Filename [%s] could not be placed into folder.' % filename)
        finally:
            # Only unlock/close if open() actually succeeded; the previous
            # code referenced `f` unconditionally here, raising NameError
            # (masking the real StdChannelError) when open() failed.
            if f is not None:
                unlock(f)
                f.close()

    def _get(self, queue):
        """Get next message from `queue`."""
        queue_find = '.' + queue + '.msg'
        folder = os.listdir(self.data_folder_in)
        # Sorted order == timestamp order, so the oldest file wins.
        folder = sorted(folder)
        while len(folder) > 0:
            filename = folder.pop(0)

            # only handle message for the requested queue
            if filename.find(queue_find) < 0:
                continue

            if self.store_processed:
                processed_folder = self.processed_folder
            else:
                processed_folder = tempfile.gettempdir()

            try:
                # move the file to the tmp/processed folder
                shutil.move(os.path.join(self.data_folder_in, filename),
                            processed_folder)
            except IOError:
                pass  # file could be locked, or removed in meantime so ignore

            filename = os.path.join(processed_folder, filename)
            try:
                f = open(filename, 'rb')
                try:
                    payload = f.read()
                finally:
                    # Close the handle even if read() raised, so the file
                    # is never leaked.
                    f.close()
                if not self.store_processed:
                    os.remove(filename)
            except (IOError, OSError):
                raise StdChannelError(
                    'Filename [%s] could not be read from queue.' % filename)

            return loads(payload)

        raise Empty()

    def _purge(self, queue):
        """Remove all messages from `queue`."""
        count = 0
        queue_find = '.' + queue + '.msg'

        folder = os.listdir(self.data_folder_in)
        while len(folder) > 0:
            filename = folder.pop()
            try:
                # only purge messages for the requested queue
                if filename.find(queue_find) < 0:
                    continue

                filename = os.path.join(self.data_folder_in, filename)
                os.remove(filename)

                count += 1
            except OSError:
                # we simply ignore its existence, as it was probably
                # processed by another worker
                pass

        return count

    def _size(self, queue):
        """Return the number of messages in `queue` as an :class:`int`."""
        count = 0
        queue_find = "." + queue + '.msg'

        folder = os.listdir(self.data_folder_in)
        while len(folder) > 0:
            filename = folder.pop()

            # only handle message for the requested queue
            if filename.find(queue_find) < 0:
                continue

            count += 1

        return count

    @property
    def transport_options(self):
        # Options dict the user supplied on the connection.
        return self.connection.client.transport_options

    @cached_property
    def data_folder_in(self):
        # Folder scanned for incoming messages.
        return self.transport_options.get('data_folder_in', 'data_in')

    @cached_property
    def data_folder_out(self):
        # Folder new messages are written into.
        return self.transport_options.get('data_folder_out', 'data_out')

    @cached_property
    def store_processed(self):
        # When true, consumed messages are archived in `processed_folder`
        # instead of the system temp directory.
        return self.transport_options.get('store_processed', False)

    @cached_property
    def processed_folder(self):
        return self.transport_options.get('processed_folder', 'processed')
class Transport(virtual.Transport):
    """Kombu transport that uses the local filesystem as the broker."""

    Channel = Channel

    # No network endpoint exists for this transport; the port is unused.
    default_port = 0
    connection_errors = (StdConnectionError, )
    channel_errors = (StdChannelError, )

    driver_type = 'filesystem'
    driver_name = 'filesystem'

    def driver_version(self):
        # There is no underlying client library to report a version for.
        return 'N/A'
| bsd-3-clause |
sencha/chromium-spacewalk | tools/usb_gadget/hid_gadget_test.py | 54 | 8516 | #!/usr/bin/python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import mock
import hid_constants
import hid_descriptors
import hid_gadget
import usb_constants
# Vendor-defined HID report descriptor used as the test fixture: a single
# application collection with 8-byte buffered Input, Output and Feature
# reports (logical values 0-255).
report_desc = hid_descriptors.ReportDescriptor(
    hid_descriptors.UsagePage(0xFF00),  # Vendor Defined
    hid_descriptors.Usage(0x00),
    hid_descriptors.Collection(
        hid_constants.CollectionType.APPLICATION,
        hid_descriptors.LogicalMinimum(0, force_length=1),
        hid_descriptors.LogicalMaximum(255, force_length=2),
        hid_descriptors.ReportSize(8),
        hid_descriptors.ReportCount(8),
        hid_descriptors.Input(hid_descriptors.Data,
                              hid_descriptors.Variable,
                              hid_descriptors.Absolute,
                              hid_descriptors.BufferedBytes),
        hid_descriptors.Output(hid_descriptors.Data,
                               hid_descriptors.Variable,
                               hid_descriptors.Absolute,
                               hid_descriptors.BufferedBytes),
        hid_descriptors.Feature(hid_descriptors.Data,
                                hid_descriptors.Variable,
                                hid_descriptors.Absolute,
                                hid_descriptors.BufferedBytes)
    )
)

# The same descriptor duplicated under report IDs 1 and 2, to exercise
# gadgets that expose multiple numbered reports.
combo_report_desc = hid_descriptors.ReportDescriptor(
    hid_descriptors.ReportID(1),
    report_desc,
    hid_descriptors.ReportID(2),
    report_desc
)
class HidGadgetTest(unittest.TestCase):
    """Tests for HidGadget's descriptor handling and report transmission."""

    def test_bad_intervals(self):
        # Polling-interval limits differ between full- and high-speed USB.
        with self.assertRaisesRegexp(ValueError, 'Full speed'):
            hid_gadget.HidGadget(report_desc, features={}, interval_ms=50000,
                                 vendor_id=0, product_id=0)
        with self.assertRaisesRegexp(ValueError, 'High speed'):
            hid_gadget.HidGadget(report_desc, features={}, interval_ms=5000,
                                 vendor_id=0, product_id=0)

    def test_get_string_descriptor(self):
        g = hid_gadget.HidGadget(report_desc=report_desc, features={},
                                 vendor_id=0, product_id=0)
        g.AddStringDescriptor(2, 'HID Gadget')
        # GET_DESCRIPTOR for string index 2, language 0x0409 (US English);
        # expect a UTF-16LE string descriptor.
        desc = g.ControlRead(0x80, 6, 0x0302, 0x0409, 255)
        self.assertEquals(desc, '\x16\x03H\0I\0D\0 \0G\0a\0d\0g\0e\0t\0')

    def test_get_report_descriptor(self):
        # Class-specific GET_DESCRIPTOR (type 0x22) returns the raw
        # report descriptor bytes.
        g = hid_gadget.HidGadget(report_desc=report_desc, features={},
                                 vendor_id=0, product_id=0)
        desc = g.ControlRead(0x81, 6, 0x2200, 0, 63)
        self.assertEquals(desc, report_desc)

    def test_set_idle(self):
        # SET_IDLE (0x0A) should be acknowledged.
        g = hid_gadget.HidGadget(report_desc=report_desc, features={},
                                 vendor_id=0, product_id=0)
        self.assertTrue(g.ControlWrite(0x21, 0x0A, 0, 0, ''))

    def test_class_wrong_target(self):
        # Class requests aimed at the device or a non-existent interface
        # must be rejected (None).
        g = hid_gadget.HidGadget(report_desc=report_desc, features={},
                                 vendor_id=0, product_id=0)
        self.assertIsNone(g.ControlRead(0xA0, 0, 0, 0, 0))  # Device
        self.assertIsNone(g.ControlRead(0xA1, 0, 0, 1, 0))  # Interface 1
        self.assertIsNone(g.ControlWrite(0x20, 0, 0, 0, ''))  # Device
        self.assertIsNone(g.ControlWrite(0x21, 0, 0, 1, ''))  # Interface 1

    def test_send_report_zero(self):
        # Report ID 0 is sent without a report-ID prefix byte.
        g = hid_gadget.HidGadget(report_desc=report_desc, features={},
                                 vendor_id=0, product_id=0)
        chip = mock.Mock()
        g.Connected(chip, usb_constants.Speed.HIGH)
        g.SendReport(0, 'Hello world!')
        chip.SendPacket.assert_called_once_with(0x81, 'Hello world!')

    def test_send_multiple_reports(self):
        # Non-zero report IDs are prefixed to each interrupt packet.
        g = hid_gadget.HidGadget(report_desc=report_desc, features={},
                                 vendor_id=0, product_id=0)
        chip = mock.Mock()
        g.Connected(chip, usb_constants.Speed.HIGH)
        g.SendReport(1, 'Hello!')
        g.SendReport(2, 'World!')
        chip.SendPacket.assert_has_calls([
            mock.call(0x81, '\x01Hello!'),
            mock.call(0x81, '\x02World!'),
        ])
class TestFeature(hid_gadget.HidFeature):
    """Stub HidFeature that records set reports and returns canned ones."""

    def SetInputReport(self, data):
        # Remember what was written so tests can assert on it.
        self.input_report = data
        return True

    def SetOutputReport(self, data):
        self.output_report = data
        return True

    def SetFeatureReport(self, data):
        self.feature_report = data
        return True

    def GetInputReport(self):
        return 'Input report.'

    def GetOutputReport(self):
        return 'Output report.'

    def GetFeatureReport(self):
        return 'Feature report.'
class HidFeatureTest(unittest.TestCase):
    """Tests routing GET/SET_REPORT and interrupt traffic to HidFeatures."""

    def test_disconnected(self):
        # Sending before the gadget is connected must fail loudly.
        feature = TestFeature()
        with self.assertRaisesRegexp(RuntimeError, 'not connected'):
            feature.SendReport('Hello world!')

    def test_send_report(self):
        feature = TestFeature()
        g = hid_gadget.HidGadget(report_desc, features={1: feature},
                                 vendor_id=0, product_id=0)
        chip = mock.Mock()
        g.Connected(chip, usb_constants.Speed.HIGH)
        # The feature's report ID (1) is prepended to the packet.
        feature.SendReport('Hello world!')
        chip.SendPacket.assert_called_once_with(0x81, '\x01Hello world!')
        g.Disconnected()

    def test_get_bad_report(self):
        # GET_REPORT for an unregistered report ID (2) returns None.
        feature = TestFeature()
        g = hid_gadget.HidGadget(report_desc, features={1: feature},
                                 vendor_id=0, product_id=0)
        self.assertIsNone(g.ControlRead(0xA1, 1, 0x0102, 0, 8))

    def test_set_bad_report(self):
        # SET_REPORT for an unregistered report ID returns None.
        feature = TestFeature()
        g = hid_gadget.HidGadget(report_desc, features={1: feature},
                                 vendor_id=0, product_id=0)
        self.assertIsNone(g.ControlWrite(0x21, 0x09, 0x0102, 0, 'Hello!'))

    def test_get_input_report(self):
        # wValue 0x01xx selects an Input report; response is truncated to
        # the requested length (8).
        feature = TestFeature()
        g = hid_gadget.HidGadget(report_desc, features={1: feature},
                                 vendor_id=0, product_id=0)
        report = g.ControlRead(0xA1, 1, 0x0101, 0, 8)
        self.assertEquals(report, 'Input re')

    def test_set_input_report(self):
        feature = TestFeature()
        g = hid_gadget.HidGadget(report_desc, features={1: feature},
                                 vendor_id=0, product_id=0)
        self.assertTrue(g.ControlWrite(0x21, 0x09, 0x0101, 0, 'Hello!'))
        self.assertEquals(feature.input_report, 'Hello!')

    def test_get_output_report(self):
        # wValue 0x02xx selects an Output report.
        feature = TestFeature()
        g = hid_gadget.HidGadget(report_desc, features={1: feature},
                                 vendor_id=0, product_id=0)
        report = g.ControlRead(0xA1, 1, 0x0201, 0, 8)
        self.assertEquals(report, 'Output r')

    def test_set_output_report(self):
        feature = TestFeature()
        g = hid_gadget.HidGadget(report_desc, features={1: feature},
                                 vendor_id=0, product_id=0)
        self.assertTrue(g.ControlWrite(0x21, 0x09, 0x0201, 0, 'Hello!'))
        self.assertEquals(feature.output_report, 'Hello!')

    def test_receive_interrupt(self):
        # An OUT interrupt packet with a valid report-ID prefix is routed
        # to the matching feature's SetOutputReport.
        feature = TestFeature()
        g = hid_gadget.HidGadget(report_desc, features={1: feature},
                                 vendor_id=0, product_id=0)
        chip = mock.Mock()
        g.Connected(chip, usb_constants.Speed.HIGH)
        g.ReceivePacket(0x01, '\x01Hello!')
        self.assertFalse(chip.HaltEndpoint.called)
        self.assertEquals(feature.output_report, 'Hello!')

    def test_receive_interrupt_report_zero(self):
        # With report ID 0 there is no prefix byte on the wire.
        feature = TestFeature()
        g = hid_gadget.HidGadget(report_desc, features={0: feature},
                                 vendor_id=0, product_id=0)
        chip = mock.Mock()
        g.Connected(chip, usb_constants.Speed.HIGH)
        g.ReceivePacket(0x01, 'Hello!')
        self.assertFalse(chip.HaltEndpoint.called)
        self.assertEquals(feature.output_report, 'Hello!')

    def test_receive_bad_interrupt(self):
        # A packet with an unknown report ID halts the OUT endpoint.
        feature = TestFeature()
        g = hid_gadget.HidGadget(report_desc, features={1: feature},
                                 vendor_id=0, product_id=0)
        chip = mock.Mock()
        g.Connected(chip, usb_constants.Speed.HIGH)
        g.ReceivePacket(0x01, '\x00Hello!')
        chip.HaltEndpoint.assert_called_once_with(0x01)

    def test_get_feature_report(self):
        # wValue 0x03xx selects a Feature report.
        feature = TestFeature()
        g = hid_gadget.HidGadget(report_desc, features={1: feature},
                                 vendor_id=0, product_id=0)
        report = g.ControlRead(0xA1, 1, 0x0301, 0, 8)
        self.assertEquals(report, 'Feature ')

    def test_set_feature_report(self):
        feature = TestFeature()
        g = hid_gadget.HidGadget(report_desc, features={1: feature},
                                 vendor_id=0, product_id=0)
        self.assertTrue(g.ControlWrite(0x21, 0x09, 0x0301, 0, 'Hello!'))
        self.assertEquals(feature.feature_report, 'Hello!')


if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
StevenBlack/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/main.py | 118 | 5154 | # Copyright (c) 2010 Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# A tool for automating dealing with bugzilla, posting patches, committing patches, etc.
from optparse import make_option
import os
import threading
from webkitpy.common.config.ports import DeprecatedPort
from webkitpy.common.host import Host
from webkitpy.common.net.irc import ircproxy
from webkitpy.common.net.statusserver import StatusServer
from webkitpy.tool.multicommandtool import MultiCommandTool
from webkitpy.tool import commands
class WebKitPatch(MultiCommandTool, Host):
    """Entry point object for the webkit-patch command-line tool.

    Combines MultiCommandTool (sub-command parsing/dispatch) with Host
    (SCM, filesystem, bug-tracker and network access).
    """

    # Options accepted by every sub-command, merged with each command's own.
    global_options = [
        make_option("-v", "--verbose", action="store_true", dest="verbose", default=False, help="enable all logging"),
        make_option("-d", "--directory", action="append", dest="patch_directories", default=[], help="Directory to look at for changed files"),
        make_option("--status-host", action="store", dest="status_host", type="string", help="Hostname (e.g. localhost or commit.webkit.org) where status updates should be posted."),
        make_option("--bot-id", action="store", dest="bot_id", type="string", help="Identifier for this bot (if multiple bots are running for a queue)"),
        make_option("--irc-password", action="store", dest="irc_password", type="string", help="Password to use when communicating via IRC."),
        make_option("--seconds-to-sleep", action="store", default=120, type="int", help="Number of seconds to sleep in the task queue."),
        make_option("--port", action="store", dest="port", default=None, help="Specify a port (e.g., mac, qt, gtk, ...)."),
    ]

    def __init__(self, path):
        MultiCommandTool.__init__(self)
        Host.__init__(self)
        # Path to the script itself (used by commands that re-invoke it).
        self._path = path
        self.status_server = StatusServer()

        # Event used by queue commands to interrupt their sleep.
        self.wakeup_event = threading.Event()
        # IRC proxy is created lazily; see ensure_irc_connected().
        self._irc = None
        self._deprecated_port = None

    def deprecated_port(self):
        return self._deprecated_port

    def path(self):
        return self._path

    def ensure_irc_connected(self, irc_delegate):
        # Lazily connect, since constructing IRCProxy opens the connection.
        if not self._irc:
            self._irc = ircproxy.IRCProxy(irc_delegate)

    def irc(self):
        # We don't automatically construct IRCProxy here because constructing
        # IRCProxy actually connects to IRC.  We want clients to explicitly
        # connect to IRC.
        return self._irc

    def command_completed(self):
        # Tear down the IRC connection (if any) once a command finishes.
        if self._irc:
            self._irc.disconnect()

    def should_show_in_main_help(self, command):
        if not command.show_in_main_help:
            return False
        if command.requires_local_commits:
            # Only advertise local-commit commands when the SCM supports them.
            return self.scm().supports_local_commits()
        return True

    # FIXME: This may be unnecessary since we pass global options to all commands during execute() as well.
    def handle_global_options(self, options):
        self.initialize_scm(options.patch_directories)
        if options.status_host:
            self.status_server.set_host(options.status_host)
        if options.bot_id:
            self.status_server.set_bot_id(options.bot_id)
        if options.irc_password:
            self.irc_password = options.irc_password
        # If options.port is None, we'll get the default port for this platform.
        self._deprecated_port = DeprecatedPort.port(options.port)

    def should_execute_command(self, command):
        # Refuse local-commit commands on checkouts that can't support them.
        if command.requires_local_commits and not self.scm().supports_local_commits():
            failure_reason = "%s requires local commits using %s in %s." % (command.name, self.scm().display_name(), self.scm().checkout_root)
            return (False, failure_reason)
        return (True, None)
| bsd-3-clause |
shiitakeo/Arduino | arduino-core/src/processing/app/i18n/python/requests/packages/urllib3/__init__.py | 309 | 1692 | # urllib3/__init__.py
# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
urllib3 - Thread-safe connection pooling and re-using.
"""
__author__ = 'Andrey Petrov (andrey.petrov@shazow.net)'
__license__ = 'MIT'
__version__ = 'dev'
from .connectionpool import (
HTTPConnectionPool,
HTTPSConnectionPool,
connection_from_url
)
from . import exceptions
from .filepost import encode_multipart_formdata
from .poolmanager import PoolManager, ProxyManager, proxy_from_url
from .response import HTTPResponse
from .util import make_headers, get_host
# Set default logging handler to avoid "No handler found" warnings.
import logging
# logging.NullHandler only exists on Python 2.7+; define a local no-op
# fallback on older interpreters.
try: # Python 2.7+
    from logging import NullHandler
except ImportError:
    class NullHandler(logging.Handler):
        # Discard every record: a library should stay silent unless the
        # application configures logging itself.
        def emit(self, record):
            pass
# Attach the no-op handler so "No handler found" warnings are suppressed
# when the embedding application never configures logging.
logging.getLogger(__name__).addHandler(NullHandler())
def add_stderr_logger(level=logging.DEBUG):
    """
    Helper for quickly adding a StreamHandler to the logger. Useful for
    debugging.

    Returns the handler after adding it.
    """
    # This method needs to be in this __init__.py to get the __name__ correct
    # even if urllib3 is vendored within another package.
    logger = logging.getLogger(__name__)
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
    logger.addHandler(handler)
    logger.setLevel(level)
    # Pass the argument lazily (%-style) instead of pre-formatting with '%':
    # the message is then only built when DEBUG is actually enabled.
    logger.debug('Added an stderr logging handler to logger: %s', __name__)
    return handler
# ... Clean up.
# Drop the NullHandler name from the module namespace: it was only needed
# above to instantiate and register the handler, and is not public API.
del NullHandler
| lgpl-2.1 |
haripradhan/MissionPlanner | Lib/site-packages/scipy/optimize/setup.py | 51 | 2214 | #!"C:\Users\hog\Documents\Visual Studio 2010\Projects\ArdupilotMega\ArdupilotMega\bin\Debug\ipy.exe"
from os.path import join
def configuration(parent_package='',top_path=None):
    """Build the numpy.distutils configuration for scipy.optimize.

    Registers the Fortran/C helper libraries and every extension module in
    the subpackage (minpack, zeros, l-bfgs-b, tnc, cobyla, minpack2, slsqp,
    nnls) plus the tests/benchmarks data directories.
    """
    from numpy.distutils.misc_util import Configuration
    from numpy.distutils.system_info import get_info
    config = Configuration('optimize',parent_package, top_path)

    # MINPACK: Fortran nonlinear least-squares/root finding, wrapped by _minpack.
    config.add_library('minpack',sources=[join('minpack','*f')])
    config.add_extension('_minpack',
                         sources=['_minpackmodule.c'],
                         libraries=['minpack'],
                         depends=["minpack.h","__minpack.h"])

    # Scalar root finders (brentq, brenth, ridder, bisect) live in Zeros/.
    config.add_library('rootfind',
                       sources=[join('Zeros','*.c')],
                       headers=[join('Zeros','zeros.h')])
    config.add_extension('_zeros',
                         sources=['zeros.c'],
                         libraries=['rootfind'])

    # L-BFGS-B needs an optimized LAPACK; get_info() locates one at build time.
    lapack = get_info('lapack_opt')
    sources=['lbfgsb.pyf','routines.f']
    config.add_extension('_lbfgsb',
                         sources=[join('lbfgsb',x) for x in sources],
                         **lapack)

    # Truncated Newton (TNC) bound-constrained minimizer (C implementation).
    sources=['moduleTNC.c','tnc.c']
    config.add_extension('moduleTNC',
                         sources=[join('tnc',x) for x in sources],
                         depends=[join('tnc','tnc.h')])

    # COBYLA: derivative-free constrained optimization (Fortran, f2py .pyf).
    config.add_extension('_cobyla',
                         sources=[join('cobyla',x) for x in ['cobyla.pyf',
                                                             'cobyla2.f',
                                                             'trstlp.f']])

    # MINPACK-2 line search routines used by several solvers.
    sources = ['minpack2.pyf', 'dcsrch.f', 'dcstep.f']
    config.add_extension('minpack2',
                         sources=[join('minpack2',x) for x in sources])

    # SLSQP: sequential least-squares quadratic programming.
    sources = ['slsqp.pyf', 'slsqp_optmz.f']
    config.add_extension('_slsqp', sources=[join('slsqp', x) for x in sources])

    # NNLS: non-negative least squares (Lawson & Hanson Fortran code).
    config.add_extension('_nnls', sources=[join('nnls', x) \
                                          for x in ["nnls.f","nnls.pyf"]])

    config.add_data_dir('tests')
    config.add_data_dir('benchmarks')
    return config
if __name__ == '__main__':
    # Allow building this subpackage standalone: `python setup.py build`.
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| gpl-3.0 |
GodBlessPP/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/encodings/__init__.py | 764 | 5067 | """ Standard "encodings" Package
Standard Python encoding modules are stored in this package
directory.
Codec modules must have names corresponding to normalized encoding
names as defined in the normalize_encoding() function below, e.g.
'utf-8' must be implemented by the module 'utf_8.py'.
Each codec module must export the following interface:
* getregentry() -> codecs.CodecInfo object
The getregentry() API must return a CodecInfo object with encoder, decoder,
incrementalencoder, incrementaldecoder, streamwriter and streamreader
atttributes which adhere to the Python Codec Interface Standard.
In addition, a module may optionally also define the following
APIs which are then used by the package's codec search function:
* getaliases() -> sequence of encoding name strings to use as aliases
Alias names returned by getaliases() must be normalized encoding
names as defined by normalize_encoding().
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import codecs
from . import aliases
# Codec lookup cache: encoding name -> codecs.CodecInfo (None caches misses).
_cache = {}
# Sentinel distinguishing "not cached yet" from a cached None (known miss).
_unknown = '--unknown--'
# fromlist argument forcing __import__ to return the submodule itself.
_import_tail = ['*']
# Mutable alias table shared with (and updated alongside) encodings.aliases.
_aliases = aliases.aliases
class CodecRegistryError(LookupError, SystemError):
    """Raised when a codec module registers a malformed registry entry."""
    pass
def normalize_encoding(encoding):
    """ Normalize an encoding name.

        Normalization works as follows: all non-alphanumeric
        characters except the dot used for Python package names are
        collapsed and replaced with a single underscore, e.g. ' -;#'
        becomes '_'. Leading and trailing underscores are removed.

        Note that encoding names should be ASCII only; if they do use
        non-ASCII characters, these must be Latin-1 compatible.

    """
    if isinstance(encoding, bytes):
        encoding = str(encoding, "ascii")
    pieces = []
    pending_sep = False  # a run of separators is waiting to be emitted
    for ch in encoding:
        if ch.isalnum() or ch == '.':
            # Emit at most one '_' for the pending separator run, and only
            # between kept characters (never leading).
            if pending_sep and pieces:
                pieces.append('_')
            pieces.append(ch)
            pending_sep = False
        else:
            pending_sep = True
    # A trailing separator run is simply dropped.
    return ''.join(pieces)
def search_function(encoding):
    """Codec search function registered with the codecs module.

    Resolves *encoding* to a codecs.CodecInfo by importing the matching
    module from this package. Both hits and misses are cached in _cache;
    returns None when no codec module provides the encoding.
    """
    # Cache lookup
    entry = _cache.get(encoding, _unknown)
    if entry is not _unknown:
        return entry

    # Import the module:
    #
    # First try to find an alias for the normalized encoding
    # name and lookup the module using the aliased name, then try to
    # lookup the module using the standard import scheme, i.e. first
    # try in the encodings package, then at top-level.
    #
    norm_encoding = normalize_encoding(encoding)
    aliased_encoding = _aliases.get(norm_encoding) or \
                       _aliases.get(norm_encoding.replace('.', '_'))
    if aliased_encoding is not None:
        modnames = [aliased_encoding,
                    norm_encoding]
    else:
        modnames = [norm_encoding]
    for modname in modnames:
        # Empty names and dotted names are rejected: a dot would escape the
        # 'encodings' package in the absolute import below.
        if not modname or '.' in modname:
            continue
        try:
            # Import is absolute to prevent the possibly malicious import of a
            # module with side-effects that is not in the 'encodings' package.
            mod = __import__('encodings.' + modname, fromlist=_import_tail,
                             level=0)
        except ImportError:
            pass
        else:
            break
    else:
        # No candidate imported successfully.
        mod = None

    try:
        getregentry = mod.getregentry
    except AttributeError:
        # Not a codec module
        mod = None

    if mod is None:
        # Cache misses
        _cache[encoding] = None
        return None

    # Now ask the module for the registry entry
    entry = getregentry()
    if not isinstance(entry, codecs.CodecInfo):
        # Legacy codec modules return a plain tuple of 4-7 callables/values;
        # validate its shape before converting it to a CodecInfo.
        if not 4 <= len(entry) <= 7:
            raise CodecRegistryError('module "%s" (%s) failed to register'
                                     % (mod.__name__, mod.__file__))
        if not callable(entry[0]) or not callable(entry[1]) or \
           (entry[2] is not None and not callable(entry[2])) or \
           (entry[3] is not None and not callable(entry[3])) or \
           (len(entry) > 4 and entry[4] is not None and not callable(entry[4])) or \
           (len(entry) > 5 and entry[5] is not None and not callable(entry[5])):
            raise CodecRegistryError('incompatible codecs in module "%s" (%s)'
                                     % (mod.__name__, mod.__file__))
        # Pad the tuple to 6 entries and append the codec name (derived from
        # the module name, minus the 'encodings.' prefix) as the 7th field.
        if len(entry)<7 or entry[6] is None:
            entry += (None,)*(6-len(entry)) + (mod.__name__.split(".", 1)[1],)
        entry = codecs.CodecInfo(*entry)

    # Cache the codec registry entry
    _cache[encoding] = entry

    # Register its aliases (without overwriting previously registered
    # aliases)
    try:
        codecaliases = mod.getaliases()
    except AttributeError:
        pass
    else:
        for alias in codecaliases:
            if alias not in _aliases:
                _aliases[alias] = modname

    # Return the registry entry
    return entry
# Register the search_function in the Python codec registry
# (done at import time, so a plain "import encodings" enables codec lookup).
codecs.register(search_function)
| gpl-3.0 |
Shouqun/node-gn | tools/depot_tools/git_retry.py | 2 | 5795 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generic retry wrapper for Git operations.
This is largely DEPRECATED in favor of the Infra Git wrapper:
https://chromium.googlesource.com/infra/infra/+/master/go/src/infra/tools/git
"""
import logging
import optparse
import os
import subprocess
import sys
import threading
import time
from git_common import GIT_EXE, GIT_TRANSIENT_ERRORS_RE
class TeeThread(threading.Thread):
    """Thread that copies every line read from one file object to another,
    tee(1)-style, while also accumulating the complete text in `self.data`.

    `self.data` is None until run() finishes.
    """

    def __init__(self, fd, out_fd, name):
        super(TeeThread, self).__init__(name='git-retry.tee.%s' % (name,))
        self.data = None
        self.fd = fd
        self.out_fd = out_fd

    def run(self):
        pieces = []
        write = self.out_fd.write
        for line in self.fd:
            pieces.append(line)
            write(line)
        self.data = ''.join(pieces)
class GitRetry(object):
    """Callable that runs a Git command, retrying on known transient errors.

    Delays between attempts grow linearly (delay_factor == 0) or
    exponentially (delay * delay_factor**(i-1)); the first attempt is
    never delayed.
    """

    logger = logging.getLogger('git-retry')

    DEFAULT_DELAY_SECS = 3.0
    DEFAULT_RETRY_COUNT = 5

    def __init__(self, retry_count=None, delay=None, delay_factor=None):
        """
        Args:
          retry_count: (int) maximum number of attempts (default 5).
          delay: (float) base delay, in seconds, between attempts; falsy
              values (None/0) disable delays entirely.
          delay_factor: (int) exponential base; 0 selects linear backoff.
        """
        self.retry_count = retry_count or self.DEFAULT_RETRY_COUNT
        self.delay = max(delay, 0) if delay else 0
        self.delay_factor = max(delay_factor, 0) if delay_factor else 0

    def shouldRetry(self, stderr):
        """Return True iff *stderr* matches a known-transient Git error."""
        m = GIT_TRANSIENT_ERRORS_RE.search(stderr)
        if not m:
            return False
        self.logger.info("Encountered known transient error: [%s]",
                         stderr[m.start(): m.end()])
        return True

    @staticmethod
    def execute(*args):
        """Run `git *args`, teeing stderr to sys.stderr.

        Returns:
            (returncode, None, captured_stderr_text)
        """
        args = (GIT_EXE,) + args
        proc = subprocess.Popen(
            args,
            stderr=subprocess.PIPE,
        )
        stderr_tee = TeeThread(proc.stderr, sys.stderr, 'stderr')

        # Start our process. Collect/tee 'stdout' and 'stderr'.
        stderr_tee.start()
        try:
            proc.wait()
        except KeyboardInterrupt:
            proc.kill()
            raise
        finally:
            stderr_tee.join()
        return proc.returncode, None, stderr_tee.data

    def computeDelay(self, iteration):
        """Returns: the delay (in seconds) for a given iteration

        The first iteration has a delay of '0'.

        Args:
          iteration: (int) The iteration index (starting with zero as the first
              iteration)
        """
        if (not self.delay) or (iteration == 0):
            return 0
        if self.delay_factor == 0:
            # Linear delay
            return iteration * self.delay
        # Exponential delay
        return (self.delay_factor ** (iteration - 1)) * self.delay

    def __call__(self, *args):
        """Run `git *args`, retrying transient failures; return the final code."""
        returncode = 0
        # 'range' instead of the Python-2-only 'xrange': iteration behavior
        # is identical and the wrapper stays usable under Python 3.
        for i in range(self.retry_count):
            # If the previous run failed and a delay is configured, delay before the
            # next run.
            delay = self.computeDelay(i)
            if delay > 0:
                self.logger.info("Delaying for [%s second(s)] until next retry", delay)
                time.sleep(delay)

            self.logger.debug("Executing subprocess (%d/%d) with arguments: %s",
                              (i+1), self.retry_count, args)
            returncode, _, stderr = self.execute(*args)

            self.logger.debug("Process terminated with return code: %d", returncode)
            if returncode == 0:
                break

            if not self.shouldRetry(stderr):
                self.logger.error("Process failure was not known to be transient; "
                                  "terminating with return code %d", returncode)
                break
        return returncode
def main(args):
    """Command-line entry point: parse retry flags, then run git with retries.

    Args:
        args: git arguments (plus optional leading retry flags).
    Returns:
        the final git return code.
    """
    # If we're using the Infra Git wrapper, do nothing here.
    # https://chromium.googlesource.com/infra/infra/+/master/go/src/infra/tools/git
    if 'INFRA_GIT_WRAPPER' in os.environ:
        # Remove Git's execution path from PATH so that our call-through re-invokes
        # the Git wrapper.
        # See crbug.com/721450
        env = os.environ.copy()
        git_exec = subprocess.check_output([GIT_EXE, '--exec-path']).strip()
        env['PATH'] = os.pathsep.join([
            elem for elem in env.get('PATH', '').split(os.pathsep)
            if elem != git_exec])
        return subprocess.call([GIT_EXE] + args, env=env)

    parser = optparse.OptionParser()
    # Stop option parsing at the first positional arg so git's own flags
    # are passed through untouched.
    parser.disable_interspersed_args()
    parser.add_option('-v', '--verbose',
                      action='count', default=0,
                      help="Increase verbosity; can be specified multiple times")
    parser.add_option('-c', '--retry-count', metavar='COUNT',
                      type=int, default=GitRetry.DEFAULT_RETRY_COUNT,
                      help="Number of times to retry (default=%default)")
    parser.add_option('-d', '--delay', metavar='SECONDS',
                      type=float, default=GitRetry.DEFAULT_DELAY_SECS,
                      help="Specifies the amount of time (in seconds) to wait "
                           "between successive retries (default=%default). This "
                           "can be zero.")
    parser.add_option('-D', '--delay-factor', metavar='FACTOR',
                      type=int, default=2,
                      help="The exponential factor to apply to delays in between "
                           "successive failures (default=%default). If this is "
                           "zero, delays will increase linearly. Set this to "
                           "one to have a constant (non-increasing) delay.")

    opts, args = parser.parse_args(args)

    # Configure logging verbosity
    if opts.verbose == 0:
        logging.getLogger().setLevel(logging.WARNING)
    elif opts.verbose == 1:
        logging.getLogger().setLevel(logging.INFO)
    else:
        logging.getLogger().setLevel(logging.DEBUG)

    # Execute retries
    retry = GitRetry(
        retry_count=opts.retry_count,
        delay=opts.delay,
        delay_factor=opts.delay_factor,
    )
    return retry(*args)
if __name__ == '__main__':
    # Default to WARNING verbosity; main() raises it via -v flags.
    logging.basicConfig()
    logging.getLogger().setLevel(logging.WARNING)
    try:
        # NOTE(review): arguments start at argv[2] — presumably argv[1] is
        # consumed by the invoking git wrapper; confirm against callers.
        sys.exit(main(sys.argv[2:]))
    except KeyboardInterrupt:
        sys.stderr.write('interrupted\n')
        sys.exit(1)
| mit |
hoosteeno/mozillians | vendor-local/lib/python/unidecode/x088.py | 252 | 4645 | data = (
'Ci ', # 0x00
'Xiang ', # 0x01
'She ', # 0x02
'Luo ', # 0x03
'Qin ', # 0x04
'Ying ', # 0x05
'Chai ', # 0x06
'Li ', # 0x07
'Ze ', # 0x08
'Xuan ', # 0x09
'Lian ', # 0x0a
'Zhu ', # 0x0b
'Ze ', # 0x0c
'Xie ', # 0x0d
'Mang ', # 0x0e
'Xie ', # 0x0f
'Qi ', # 0x10
'Rong ', # 0x11
'Jian ', # 0x12
'Meng ', # 0x13
'Hao ', # 0x14
'Ruan ', # 0x15
'Huo ', # 0x16
'Zhuo ', # 0x17
'Jie ', # 0x18
'Bin ', # 0x19
'He ', # 0x1a
'Mie ', # 0x1b
'Fan ', # 0x1c
'Lei ', # 0x1d
'Jie ', # 0x1e
'La ', # 0x1f
'Mi ', # 0x20
'Li ', # 0x21
'Chun ', # 0x22
'Li ', # 0x23
'Qiu ', # 0x24
'Nie ', # 0x25
'Lu ', # 0x26
'Du ', # 0x27
'Xiao ', # 0x28
'Zhu ', # 0x29
'Long ', # 0x2a
'Li ', # 0x2b
'Long ', # 0x2c
'Feng ', # 0x2d
'Ye ', # 0x2e
'Beng ', # 0x2f
'Shang ', # 0x30
'Gu ', # 0x31
'Juan ', # 0x32
'Ying ', # 0x33
'[?] ', # 0x34
'Xi ', # 0x35
'Can ', # 0x36
'Qu ', # 0x37
'Quan ', # 0x38
'Du ', # 0x39
'Can ', # 0x3a
'Man ', # 0x3b
'Jue ', # 0x3c
'Jie ', # 0x3d
'Zhu ', # 0x3e
'Zha ', # 0x3f
'Xie ', # 0x40
'Huang ', # 0x41
'Niu ', # 0x42
'Pei ', # 0x43
'Nu ', # 0x44
'Xin ', # 0x45
'Zhong ', # 0x46
'Mo ', # 0x47
'Er ', # 0x48
'Ke ', # 0x49
'Mie ', # 0x4a
'Xi ', # 0x4b
'Xing ', # 0x4c
'Yan ', # 0x4d
'Kan ', # 0x4e
'Yuan ', # 0x4f
'[?] ', # 0x50
'Ling ', # 0x51
'Xuan ', # 0x52
'Shu ', # 0x53
'Xian ', # 0x54
'Tong ', # 0x55
'Long ', # 0x56
'Jie ', # 0x57
'Xian ', # 0x58
'Ya ', # 0x59
'Hu ', # 0x5a
'Wei ', # 0x5b
'Dao ', # 0x5c
'Chong ', # 0x5d
'Wei ', # 0x5e
'Dao ', # 0x5f
'Zhun ', # 0x60
'Heng ', # 0x61
'Qu ', # 0x62
'Yi ', # 0x63
'Yi ', # 0x64
'Bu ', # 0x65
'Gan ', # 0x66
'Yu ', # 0x67
'Biao ', # 0x68
'Cha ', # 0x69
'Yi ', # 0x6a
'Shan ', # 0x6b
'Chen ', # 0x6c
'Fu ', # 0x6d
'Gun ', # 0x6e
'Fen ', # 0x6f
'Shuai ', # 0x70
'Jie ', # 0x71
'Na ', # 0x72
'Zhong ', # 0x73
'Dan ', # 0x74
'Ri ', # 0x75
'Zhong ', # 0x76
'Zhong ', # 0x77
'Xie ', # 0x78
'Qi ', # 0x79
'Xie ', # 0x7a
'Ran ', # 0x7b
'Zhi ', # 0x7c
'Ren ', # 0x7d
'Qin ', # 0x7e
'Jin ', # 0x7f
'Jun ', # 0x80
'Yuan ', # 0x81
'Mei ', # 0x82
'Chai ', # 0x83
'Ao ', # 0x84
'Niao ', # 0x85
'Hui ', # 0x86
'Ran ', # 0x87
'Jia ', # 0x88
'Tuo ', # 0x89
'Ling ', # 0x8a
'Dai ', # 0x8b
'Bao ', # 0x8c
'Pao ', # 0x8d
'Yao ', # 0x8e
'Zuo ', # 0x8f
'Bi ', # 0x90
'Shao ', # 0x91
'Tan ', # 0x92
'Ju ', # 0x93
'He ', # 0x94
'Shu ', # 0x95
'Xiu ', # 0x96
'Zhen ', # 0x97
'Yi ', # 0x98
'Pa ', # 0x99
'Bo ', # 0x9a
'Di ', # 0x9b
'Wa ', # 0x9c
'Fu ', # 0x9d
'Gun ', # 0x9e
'Zhi ', # 0x9f
'Zhi ', # 0xa0
'Ran ', # 0xa1
'Pan ', # 0xa2
'Yi ', # 0xa3
'Mao ', # 0xa4
'Tuo ', # 0xa5
'Na ', # 0xa6
'Kou ', # 0xa7
'Xian ', # 0xa8
'Chan ', # 0xa9
'Qu ', # 0xaa
'Bei ', # 0xab
'Gun ', # 0xac
'Xi ', # 0xad
'Ne ', # 0xae
'Bo ', # 0xaf
'Horo ', # 0xb0
'Fu ', # 0xb1
'Yi ', # 0xb2
'Chi ', # 0xb3
'Ku ', # 0xb4
'Ren ', # 0xb5
'Jiang ', # 0xb6
'Jia ', # 0xb7
'Cun ', # 0xb8
'Mo ', # 0xb9
'Jie ', # 0xba
'Er ', # 0xbb
'Luo ', # 0xbc
'Ru ', # 0xbd
'Zhu ', # 0xbe
'Gui ', # 0xbf
'Yin ', # 0xc0
'Cai ', # 0xc1
'Lie ', # 0xc2
'Kamishimo ', # 0xc3
'Yuki ', # 0xc4
'Zhuang ', # 0xc5
'Dang ', # 0xc6
'[?] ', # 0xc7
'Kun ', # 0xc8
'Ken ', # 0xc9
'Niao ', # 0xca
'Shu ', # 0xcb
'Jia ', # 0xcc
'Kun ', # 0xcd
'Cheng ', # 0xce
'Li ', # 0xcf
'Juan ', # 0xd0
'Shen ', # 0xd1
'Pou ', # 0xd2
'Ge ', # 0xd3
'Yi ', # 0xd4
'Yu ', # 0xd5
'Zhen ', # 0xd6
'Liu ', # 0xd7
'Qiu ', # 0xd8
'Qun ', # 0xd9
'Ji ', # 0xda
'Yi ', # 0xdb
'Bu ', # 0xdc
'Zhuang ', # 0xdd
'Shui ', # 0xde
'Sha ', # 0xdf
'Qun ', # 0xe0
'Li ', # 0xe1
'Lian ', # 0xe2
'Lian ', # 0xe3
'Ku ', # 0xe4
'Jian ', # 0xe5
'Fou ', # 0xe6
'Chan ', # 0xe7
'Bi ', # 0xe8
'Gun ', # 0xe9
'Tao ', # 0xea
'Yuan ', # 0xeb
'Ling ', # 0xec
'Chi ', # 0xed
'Chang ', # 0xee
'Chou ', # 0xef
'Duo ', # 0xf0
'Biao ', # 0xf1
'Liang ', # 0xf2
'Chang ', # 0xf3
'Pei ', # 0xf4
'Pei ', # 0xf5
'Fei ', # 0xf6
'Yuan ', # 0xf7
'Luo ', # 0xf8
'Guo ', # 0xf9
'Yan ', # 0xfa
'Du ', # 0xfb
'Xi ', # 0xfc
'Zhi ', # 0xfd
'Ju ', # 0xfe
'Qi ', # 0xff
)
| bsd-3-clause |
oew1v07/scikit-image | doc/examples/plot_convex_hull.py | 4 | 1481 | """
===========
Convex Hull
===========
The convex hull of a binary image is the set of pixels included in the
smallest convex polygon that surround all white pixels in the input.
In this example, we show how the input pixels (white) get filled in by the
convex hull (white and grey).
A good overview of the algorithm is given on `Steve Eddin's blog
<http://blogs.mathworks.com/steve/2011/10/04/binary-image-convex-hull-algorithm-notes/>`__.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage.morphology import convex_hull_image
# Input: a sparse set of white (1) pixels forming a "V" shape; background 0.
image = np.array(
    [[0, 0, 0, 0, 0, 0, 0, 0, 0],
     [0, 0, 0, 0, 1, 0, 0, 0, 0],
     [0, 0, 0, 1, 0, 1, 0, 0, 0],
     [0, 0, 1, 0, 0, 0, 1, 0, 0],
     [0, 1, 0, 0, 0, 0, 0, 1, 0],
     [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=float)

# Keep an untouched copy for side-by-side display.
original_image = np.copy(image)

# Boolean mask of the convex hull; adding 1 makes hull-only pixels grey (1)
# and the original white pixels brightest (2) in the rendered result.
chull = convex_hull_image(image)
image[chull] += 1
# image is now:
#[[ 0.  0.  0.  0.  0.  0.  0.  0.  0.]
# [ 0.  0.  0.  0.  2.  0.  0.  0.  0.]
# [ 0.  0.  0.  2.  1.  2.  0.  0.  0.]
# [ 0.  0.  2.  1.  1.  1.  2.  0.  0.]
# [ 0.  2.  1.  1.  1.  1.  1.  2.  0.]
# [ 0.  0.  0.  0.  0.  0.  0.  0.  0.]]

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 6))

ax1.set_title('Original picture')
ax1.imshow(original_image, cmap=plt.cm.gray, interpolation='nearest')
# Hide axis ticks on both axes.
ax1.set_xticks([]), ax1.set_yticks([])

ax2.set_title('Transformed picture')
ax2.imshow(image, cmap=plt.cm.gray, interpolation='nearest')
ax2.set_xticks([]), ax2.set_yticks([])

plt.show()
| bsd-3-clause |
Cube777/dotgit | tests/test_main.py | 1 | 8238 | import os
from dotgit.__main__ import main
from dotgit.git import Git
class TestMain:
    """End-to-end tests driving dotgit's CLI entry point (main)."""

    def setup_repo(self, tmp_path, flist):
        # Create a fresh home/repo pair, init the repo and write *flist*
        # as its filelist. Returns (home_path, repo_path).
        home = tmp_path / 'home'
        repo = tmp_path / 'repo'
        os.makedirs(home)
        os.makedirs(repo)

        main(args=['init'], cwd=str(repo))

        with open(repo / 'filelist', 'w') as f:
            f.write(flist)

        return home, repo

    def test_init_home(self, tmp_path, caplog):
        # Refusing to init when cwd == home must trip the safety checks.
        home = tmp_path / 'home'
        repo = tmp_path / 'repo'
        os.makedirs(home)
        os.makedirs(repo)

        assert main(args=['init'], cwd=str(home), home=str(home)) != 0
        assert 'safety checks failed' in caplog.text

    def test_init(self, tmp_path, caplog):
        # A fresh init creates the git repo, the filelist and one commit.
        home = tmp_path / 'home'
        repo = tmp_path / 'repo'
        os.makedirs(home)
        os.makedirs(repo)

        assert main(args=['init'], cwd=str(repo), home=str(home)) == 0

        git = Git(str(repo))

        assert (repo / '.git').is_dir()
        assert (repo / 'filelist').is_file()
        assert git.last_commit() == 'Added filelist'
        assert 'existing git repo' not in caplog.text
        assert 'existing filelist' not in caplog.text

    def test_reinit(self, tmp_path, caplog):
        # Re-running init must be idempotent: no second commit, warnings logged.
        home = tmp_path / 'home'
        repo = tmp_path / 'repo'
        os.makedirs(home)
        os.makedirs(repo)

        assert main(args=['init'], cwd=str(repo), home=str(home)) == 0
        assert main(args=['init'], cwd=str(repo), home=str(home)) == 0

        git = Git(str(repo))

        assert (repo / '.git').is_dir()
        assert (repo / 'filelist').is_file()
        assert git.last_commit() == 'Added filelist'
        assert len(git.commits()) == 1
        assert 'existing git repo' in caplog.text
        assert 'existing filelist' in caplog.text

    def test_update_home_norepo(self, tmp_path):
        # update moves the home file into the repo and symlinks it back.
        home, repo = self.setup_repo(tmp_path, 'file')
        open(home / 'file', 'w').close()

        assert main(args=['update'], cwd=str(repo), home=str(home)) == 0

        assert (home / 'file').is_symlink()
        assert repo in (home / 'file').resolve().parents

    def test_update_home_repo(self, tmp_path, monkeypatch):
        # With a conflicting copy in both home and repo, update prompts;
        # answer '0' selects a candidate and the symlink is restored.
        home, repo = self.setup_repo(tmp_path, 'file')
        open(home / 'file', 'w').close()

        assert main(args=['update'], cwd=str(repo), home=str(home)) == 0

        monkeypatch.setattr('builtins.input', lambda p: '0')

        os.remove(home / 'file')
        open(home / 'file', 'w').close()

        assert main(args=['update'], cwd=str(repo), home=str(home)) == 0

        assert (home / 'file').is_symlink()
        assert repo in (home / 'file').resolve().parents

    def test_restore_nohome_repo(self, tmp_path):
        # restore recreates a deleted home symlink from the repo copy.
        home, repo = self.setup_repo(tmp_path, 'file')
        open(home / 'file', 'w').close()

        assert main(args=['update'], cwd=str(repo), home=str(home)) == 0

        assert (home / 'file').is_symlink()
        assert repo in (home / 'file').resolve().parents

        os.remove(home / 'file')

        assert main(args=['restore'], cwd=str(repo), home=str(home)) == 0

        assert (home / 'file').is_symlink()
        assert repo in (home / 'file').resolve().parents

    def test_restore_home_repo(self, tmp_path, monkeypatch):
        # restore over an existing plain home file prompts; 'y' overwrites it.
        home, repo = self.setup_repo(tmp_path, 'file')
        open(home / 'file', 'w').close()

        assert main(args=['update'], cwd=str(repo), home=str(home)) == 0

        monkeypatch.setattr('builtins.input', lambda p: 'y')

        os.remove(home / 'file')
        open(home / 'file', 'w').close()

        assert main(args=['restore'], cwd=str(repo), home=str(home)) == 0

        assert (home / 'file').is_symlink()
        assert repo in (home / 'file').resolve().parents

    def test_restore_hard_nohome_repo(self, tmp_path):
        # restore --hard materializes a real file (not a symlink) with the
        # original contents.
        home, repo = self.setup_repo(tmp_path, 'file')
        data = 'test data'
        with open(home / 'file', 'w') as f:
            f.write(data)

        assert main(args=['update'], cwd=str(repo), home=str(home)) == 0

        assert (home / 'file').is_symlink()
        assert repo in (home / 'file').resolve().parents

        os.remove(home / 'file')
        assert not (home / 'file').exists()

        assert main(args=['restore', '--hard'],
                    cwd=str(repo), home=str(home)) == 0

        assert (home / 'file').exists()
        assert not (home / 'file').is_symlink()
        assert (home / 'file').read_text() == data

    def test_clean(self, tmp_path):
        # clean removes the managed symlinks from home.
        home, repo = self.setup_repo(tmp_path, 'file')
        open(home / 'file', 'w').close()

        assert main(args=['update'], cwd=str(repo), home=str(home)) == 0

        assert (home / 'file').is_symlink()
        assert repo in (home / 'file').resolve().parents

        assert main(args=['clean'], cwd=str(repo), home=str(home)) == 0

        assert not (home / 'file').exists()

    def test_dry_run(self, tmp_path):
        # --dry-run must leave the home file untouched.
        home, repo = self.setup_repo(tmp_path, 'file')
        open(home / 'file', 'w').close()

        assert main(args=['update', '--dry-run'],
                    cwd=str(repo), home=str(home)) == 0

        assert (home / 'file').exists()
        assert not (home / 'file').is_symlink()

    def test_commit_nochanges(self, tmp_path, caplog):
        # commit on a pristine repo is a no-op with a log message.
        home, repo = self.setup_repo(tmp_path, '')

        assert main(args=['commit'], cwd=str(repo), home=str(home)) == 0
        assert 'no changes detected' in caplog.text

    def test_commit_changes(self, tmp_path, caplog):
        # commit after an update records the filelist change.
        home, repo = self.setup_repo(tmp_path, 'file')
        git = Git(str(repo))

        open(home / 'file', 'w').close()
        assert main(args=['update'], cwd=str(repo), home=str(home)) == 0

        assert main(args=['commit'], cwd=str(repo), home=str(home)) == 0
        assert 'not changes detected' not in caplog.text
        assert 'filelist' in git.last_commit()

    def test_commit_ignore(self, tmp_path, caplog):
        # Files under .plugins/ must be excluded from commits.
        home, repo = self.setup_repo(tmp_path, 'file')
        git = Git(str(repo))

        open(home / 'file', 'w').close()
        os.makedirs(repo / '.plugins')
        open(repo / '.plugins' / 'plugf', 'w').close()
        assert main(args=['update'], cwd=str(repo), home=str(home)) == 0

        assert main(args=['commit'], cwd=str(repo), home=str(home)) == 0
        assert 'not changes detected' not in caplog.text
        assert 'filelist' in git.last_commit()
        assert 'plugf' not in git.last_commit()

    def test_diff(self, tmp_path, capsys):
        # diff reports repo-level changes plus pending plugin updates.
        home, repo = self.setup_repo(tmp_path, 'file\nfile2')
        (home / 'file').touch()
        (home / 'file2').touch()

        ret = main(args=['update', '--hard'], cwd=str(repo), home=str(home))
        assert ret == 0

        (home / 'file').write_text('hello world')

        ret = main(args=['diff', '--hard'], cwd=str(repo), home=str(home))
        assert ret == 0

        captured = capsys.readouterr()
        assert captured.out == ('added dotfiles/plain/common/file\n'
                                'added dotfiles/plain/common/file2\n'
                                'modified filelist\n\n'
                                'plain-plugin updates not yet in repo:\n'
                                f'modified {home / "file"}\n')

    def test_passwd_empty(self, tmp_path, monkeypatch):
        # passwd creates the encrypt plugin's password file.
        home, repo = self.setup_repo(tmp_path, 'file\nfile2')

        password = 'password123'
        monkeypatch.setattr('getpass.getpass', lambda prompt: password)

        assert not (repo / '.plugins' / 'encrypt' / 'passwd').exists()
        assert main(args=['passwd'], cwd=str(repo), home=str(home)) == 0
        assert (repo / '.plugins' / 'encrypt' / 'passwd').exists()

    def test_passwd_nonempty(self, tmp_path, monkeypatch):
        # Changing the password re-encrypts existing encrypted files.
        home, repo = self.setup_repo(tmp_path, 'file|encrypt')

        password = 'password123'
        monkeypatch.setattr('getpass.getpass', lambda prompt: password)

        (home / 'file').touch()
        assert main(args=['update'], cwd=str(repo), home=str(home)) == 0

        repo_file = repo / 'dotfiles' / 'encrypt' / 'common' / 'file'
        txt = repo_file.read_text()

        assert main(args=['passwd'], cwd=str(repo), home=str(home)) == 0
        assert repo_file.read_text() != txt
| gpl-2.0 |
jdugge/QGIS | python/plugins/processing/gui/ConfigDialog.py | 26 | 19241 | # -*- coding: utf-8 -*-
"""
***************************************************************************
ConfigDialog.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
import os
import warnings
from qgis.PyQt import uic
from qgis.PyQt.QtCore import Qt, QEvent
from qgis.PyQt.QtWidgets import (QFileDialog,
QStyle,
QMessageBox,
QStyledItemDelegate,
QLineEdit,
QWidget,
QToolButton,
QHBoxLayout,
QComboBox,
QPushButton,
QApplication)
from qgis.PyQt.QtGui import (QIcon,
QStandardItemModel,
QStandardItem,
QCursor)
from qgis.gui import (QgsDoubleSpinBox,
QgsSpinBox,
QgsOptionsPageWidget,
QgsOptionsDialogHighlightWidget)
from qgis.core import NULL, QgsApplication, QgsSettings
from qgis.utils import OverrideCursor
from processing.core.ProcessingConfig import (ProcessingConfig,
settingsWatcher,
Setting)
from processing.core.Processing import Processing
from processing.gui.DirectorySelectorDialog import DirectorySelectorDialog
from processing.gui.menus import defaultMenuEntries, menusSettingsGroup
pluginPath = os.path.split(os.path.dirname(__file__))[0]
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
WIDGET, BASE = uic.loadUiType(
os.path.join(pluginPath, 'ui', 'DlgConfig.ui'))
class ConfigOptionsPage(QgsOptionsPageWidget):
    """QGIS options-dialog page that embeds the Processing ConfigDialog."""

    def __init__(self, parent):
        super(ConfigOptionsPage, self).__init__(parent)
        # Embed the settings tree without its own search box: the options
        # dialog provides search through the registered highlight widget.
        self.config_widget = ConfigDialog(False)
        layout = QHBoxLayout()
        layout.setContentsMargins(0, 0, 0, 0)
        layout.setMargin(0)
        self.setLayout(layout)
        layout.addWidget(self.config_widget)
        self.setObjectName('processingOptions')
        self.highlightWidget = ProcessingTreeHighlight(self.config_widget)
        self.registerHighlightWidget(self.highlightWidget)

    def apply(self):
        # Persist all settings when the user accepts the options dialog.
        self.config_widget.accept()

    def helpKey(self):
        # Anchor for context help in the QGIS user manual.
        return 'processing/index.html'
class ProcessingTreeHighlight(QgsOptionsDialogHighlightWidget):
    """Bridges the options dialog's search box to ConfigDialog's tree filter."""

    def __init__(self, config_dialog):
        super(ProcessingTreeHighlight, self).__init__(config_dialog.tree)
        self.config_dialog = config_dialog

    def highlightText(self, text):
        # Delegate filtering to the dialog; returns True on any match.
        return self.config_dialog.textChanged(text)

    def searchText(self, text):
        return self.config_dialog.textChanged(text)

    def reset(self):
        # Clear the filter (empty search text shows everything collapsed).
        self.config_dialog.textChanged('')
class ConfigDialog(BASE, WIDGET):
def __init__(self, showSearch=True):
super(ConfigDialog, self).__init__(None)
self.setupUi(self)
self.groupIcon = QgsApplication.getThemeIcon('mIconFolder.svg')
self.model = QStandardItemModel()
self.tree.setModel(self.model)
self.delegate = SettingDelegate()
self.tree.setItemDelegateForColumn(1, self.delegate)
if showSearch:
if hasattr(self.searchBox, 'setPlaceholderText'):
self.searchBox.setPlaceholderText(QApplication.translate('ConfigDialog', 'Search…'))
self.searchBox.textChanged.connect(self.textChanged)
else:
self.searchBox.hide()
self.fillTree()
self.saveMenus = False
self.tree.expanded.connect(self.itemExpanded)
self.auto_adjust_columns = True
def textChanged(self, text=None):
if text is not None:
text = str(text.lower())
else:
text = str(self.searchBox.text().lower())
found = self._filterItem(self.model.invisibleRootItem(), text)
self.auto_adjust_columns = False
if text:
self.tree.expandAll()
else:
self.tree.collapseAll()
self.adjustColumns()
self.auto_adjust_columns = True
if text:
return found
else:
self.tree.collapseAll()
return False
def _filterItem(self, item, text, forceShow=False):
if item.hasChildren():
show = forceShow or isinstance(item, QStandardItem) and bool(text) and (text in item.text().lower())
for i in range(item.rowCount()):
child = item.child(i)
show = self._filterItem(child, text, forceShow) or show
self.tree.setRowHidden(item.row(), item.index().parent(), not show)
return show
elif isinstance(item, QStandardItem):
show = forceShow or bool(text) and (text in item.text().lower())
self.tree.setRowHidden(item.row(), item.index().parent(), not show)
return show
def fillTree(self):
self.fillTreeUsingProviders()
def fillTreeUsingProviders(self):
self.items = {}
self.model.clear()
self.model.setHorizontalHeaderLabels([self.tr('Setting'),
self.tr('Value')])
settings = ProcessingConfig.getSettings()
rootItem = self.model.invisibleRootItem()
"""
Filter 'General', 'Models' and 'Scripts' items
"""
priorityKeys = [self.tr('General'), self.tr('Models'), self.tr('Scripts')]
for group in priorityKeys:
groupItem = QStandardItem(group)
icon = ProcessingConfig.getGroupIcon(group)
groupItem.setIcon(icon)
groupItem.setEditable(False)
emptyItem = QStandardItem()
emptyItem.setEditable(False)
rootItem.insertRow(0, [groupItem, emptyItem])
if group not in settings:
continue
# add menu item only if it has any search matches
for setting in settings[group]:
if setting.hidden or setting.name.startswith("MENU_"):
continue
labelItem = QStandardItem(setting.description)
labelItem.setIcon(icon)
labelItem.setEditable(False)
self.items[setting] = SettingItem(setting)
groupItem.insertRow(0, [labelItem, self.items[setting]])
"""
Filter 'Providers' items
"""
providersItem = QStandardItem(self.tr('Providers'))
icon = QgsApplication.getThemeIcon("/processingAlgorithm.svg")
providersItem.setIcon(icon)
providersItem.setEditable(False)
emptyItem = QStandardItem()
emptyItem.setEditable(False)
rootItem.insertRow(0, [providersItem, emptyItem])
for group in list(settings.keys()):
if group in priorityKeys or group == menusSettingsGroup:
continue
groupItem = QStandardItem(group)
icon = ProcessingConfig.getGroupIcon(group)
groupItem.setIcon(icon)
groupItem.setEditable(False)
for setting in settings[group]:
if setting.hidden:
continue
labelItem = QStandardItem(setting.description)
labelItem.setIcon(icon)
labelItem.setEditable(False)
self.items[setting] = SettingItem(setting)
groupItem.insertRow(0, [labelItem, self.items[setting]])
emptyItem = QStandardItem()
emptyItem.setEditable(False)
providersItem.appendRow([groupItem, emptyItem])
"""
Filter 'Menus' items
"""
self.menusItem = QStandardItem(self.tr('Menus'))
icon = QIcon(os.path.join(pluginPath, 'images', 'menu.png'))
self.menusItem.setIcon(icon)
self.menusItem.setEditable(False)
emptyItem = QStandardItem()
emptyItem.setEditable(False)
rootItem.insertRow(0, [self.menusItem, emptyItem])
button = QPushButton(self.tr('Reset to defaults'))
button.clicked.connect(self.resetMenusToDefaults)
layout = QHBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(button)
layout.addStretch()
widget = QWidget()
widget.setLayout(layout)
self.tree.setIndexWidget(emptyItem.index(), widget)
for provider in QgsApplication.processingRegistry().providers():
providerDescription = provider.name()
groupItem = QStandardItem(providerDescription)
icon = provider.icon()
groupItem.setIcon(icon)
groupItem.setEditable(False)
for alg in provider.algorithms():
algItem = QStandardItem(alg.displayName())
algItem.setIcon(icon)
algItem.setEditable(False)
try:
settingMenu = ProcessingConfig.settings["MENU_" + alg.id()]
settingButton = ProcessingConfig.settings["BUTTON_" + alg.id()]
settingIcon = ProcessingConfig.settings["ICON_" + alg.id()]
except:
continue
self.items[settingMenu] = SettingItem(settingMenu)
self.items[settingButton] = SettingItem(settingButton)
self.items[settingIcon] = SettingItem(settingIcon)
menuLabelItem = QStandardItem("Menu path")
menuLabelItem.setEditable(False)
buttonLabelItem = QStandardItem("Add button in toolbar")
buttonLabelItem.setEditable(False)
iconLabelItem = QStandardItem("Icon")
iconLabelItem.setEditable(False)
emptyItem = QStandardItem()
emptyItem.setEditable(False)
algItem.insertRow(0, [menuLabelItem, self.items[settingMenu]])
algItem.insertRow(0, [buttonLabelItem, self.items[settingButton]])
algItem.insertRow(0, [iconLabelItem, self.items[settingIcon]])
groupItem.insertRow(0, [algItem, emptyItem])
emptyItem = QStandardItem()
emptyItem.setEditable(False)
self.menusItem.appendRow([groupItem, emptyItem])
self.tree.sortByColumn(0, Qt.AscendingOrder)
self.adjustColumns()
def resetMenusToDefaults(self):
    """Restore the menu path of every known algorithm to its default entry."""
    registry = QgsApplication.processingRegistry()
    for provider in registry.providers():
        for algorithm in provider.algorithms():
            default_path = defaultMenuEntries.get(algorithm.id(), "")
            menu_setting = ProcessingConfig.settings["MENU_" + algorithm.id()]
            self.items[menu_setting].setData(default_path, Qt.EditRole)
    # Remember that menu settings were touched so accept() persists them.
    self.saveMenus = True
def accept(self):
    """Persist every edited setting, then refresh all Processing providers.

    Settings belonging to the menus group are only saved when the menus
    branch was expanded (``self.saveMenus``); all other settings are
    always saved. If a value fails validation, a warning is shown and the
    dialog stays open without saving the remaining settings.
    """
    qsettings = QgsSettings()
    for setting in list(self.items.keys()):
        if setting.group != menusSettingsGroup or self.saveMenus:
            if isinstance(setting.value, bool):
                # Boolean settings are rendered as checkable items in the tree.
                setting.setValue(self.items[setting].checkState() == Qt.Checked)
            else:
                try:
                    setting.setValue(str(self.items[setting].text()))
                except ValueError as e:
                    QMessageBox.warning(self, self.tr('Wrong value'),
                                        self.tr('Wrong value for parameter "{0}":\n\n{1}').format(setting.description, str(e)))
                    # Abort: keep the dialog open so the user can fix the value.
                    return
            setting.save(qsettings)
    # Reload algorithms so that changed settings (e.g. menus) take effect.
    with OverrideCursor(Qt.WaitCursor):
        for p in QgsApplication.processingRegistry().providers():
            p.refreshAlgorithms()
    settingsWatcher.settingsChanged.emit()
def itemExpanded(self, idx):
    """Handle expansion of a tree item; flag menu settings for saving."""
    # Expanding the menus branch means the user may edit menu entries.
    self.saveMenus = self.saveMenus or idx == self.menusItem.index()
    if self.auto_adjust_columns:
        self.adjustColumns()
def adjustColumns(self):
    """Resize both tree columns to fit their contents."""
    for column in (0, 1):
        self.tree.resizeColumnToContents(column)
class SettingItem(QStandardItem):
    """Standard-model item wrapping a single Processing ``Setting``.

    Boolean settings are shown as a (non-editable) checkbox; every other
    value type is stored as editable data on the item.
    """

    def __init__(self, setting):
        QStandardItem.__init__(self)
        self.setting = setting
        # Keep the Setting object itself reachable from the model.
        self.setData(setting, Qt.UserRole)
        if isinstance(setting.value, bool):
            self.setCheckable(True)
            self.setEditable(False)
            self.setCheckState(Qt.Checked if setting.value else Qt.Unchecked)
        else:
            self.setData(setting.value, Qt.EditRole)
class SettingDelegate(QStyledItemDelegate):
    """Item delegate creating type-appropriate editors for Processing settings.

    The editor widget depends on the setting's ``valuetype``: folder/file
    selectors, a combo box for enumerations, spin boxes for numeric values
    and a plain line edit for free text.
    """

    def __init__(self, parent=None):
        QStyledItemDelegate.__init__(self, parent)

    def createEditor(self, parent, options, index):
        """Create and return the editor widget for the setting at *index*."""
        setting = index.model().data(index, Qt.UserRole)
        if setting.valuetype == Setting.FOLDER:
            return FileDirectorySelector(parent, placeholder=setting.placeholder)
        elif setting.valuetype == Setting.FILE:
            return FileDirectorySelector(parent, True, setting.placeholder)
        elif setting.valuetype == Setting.SELECTION:
            combo = QComboBox(parent)
            combo.addItems(setting.options)
            return combo
        elif setting.valuetype == Setting.MULTIPLE_FOLDERS:
            return MultipleDirectorySelector(parent, setting.placeholder)
        else:
            # Choose a numeric or text editor based on the current value's type.
            value = self.convertValue(index.model().data(index, Qt.EditRole))
            if isinstance(value, int):
                spnBox = QgsSpinBox(parent)
                spnBox.setRange(-999999999, 999999999)
                return spnBox
            elif isinstance(value, float):
                spnBox = QgsDoubleSpinBox(parent)
                spnBox.setRange(-999999999.999999, 999999999.999999)
                spnBox.setDecimals(6)
                return spnBox
            elif isinstance(value, str):
                lineEdit = QLineEdit(parent)
                lineEdit.setPlaceholderText(setting.placeholder)
                return lineEdit

    def setEditorData(self, editor, index):
        """Push the current model value into the freshly created editor."""
        value = self.convertValue(index.model().data(index, Qt.EditRole))
        setting = index.model().data(index, Qt.UserRole)
        if setting.valuetype == Setting.SELECTION:
            editor.setCurrentIndex(editor.findText(value))
        elif setting.valuetype in (Setting.FLOAT, Setting.INT):
            editor.setValue(value)
        else:
            editor.setText(value)

    def setModelData(self, editor, model, index):
        """Write the editor's current value back into the model."""
        value = self.convertValue(index.model().data(index, Qt.EditRole))
        setting = index.model().data(index, Qt.UserRole)
        if setting.valuetype == Setting.SELECTION:
            model.setData(index, editor.currentText(), Qt.EditRole)
        else:
            if isinstance(value, str):
                model.setData(index, editor.text(), Qt.EditRole)
            else:
                model.setData(index, editor.value(), Qt.EditRole)

    def sizeHint(self, option, index):
        """Use a spin box's size hint as a uniform row size."""
        return QgsSpinBox().sizeHint()

    def eventFilter(self, editor, event):
        # File selector widgets veto focus-out until a path was chosen, so
        # the edit is not committed while their modal dialog is open.
        if event.type() == QEvent.FocusOut and hasattr(editor, 'canFocusOut'):
            if not editor.canFocusOut:
                return False
        return QStyledItemDelegate.eventFilter(self, editor, event)

    def convertValue(self, value):
        """Coerce *value* to int, float or str; null values become ''.

        Fix: catch only conversion errors instead of a bare ``except``,
        which also swallowed KeyboardInterrupt/SystemExit.
        """
        if value is None or value == NULL:
            return ""
        try:
            return int(value)
        except (ValueError, TypeError):
            try:
                return float(value)
            except (ValueError, TypeError):
                return str(value)
class FileDirectorySelector(QWidget):
    """Line edit plus browse button for picking a single file or directory.

    Parameters
    ----------
    selectFile : bool
        When True a file-open dialog is shown; otherwise a directory picker.
    placeholder : str
        Placeholder text for the line edit.
    """

    def __init__(self, parent=None, selectFile=False, placeholder=""):
        QWidget.__init__(self, parent)
        # create gui
        self.btnSelect = QToolButton()
        self.btnSelect.setText('…')
        self.lineEdit = QLineEdit()
        self.lineEdit.setPlaceholderText(placeholder)
        self.hbl = QHBoxLayout()
        # Fix: setContentsMargins replaces the deprecated QLayout.setMargin(),
        # matching how layouts are configured elsewhere in this dialog.
        self.hbl.setContentsMargins(0, 0, 0, 0)
        self.hbl.setSpacing(0)
        self.hbl.addWidget(self.lineEdit)
        self.hbl.addWidget(self.btnSelect)
        self.setLayout(self.hbl)
        self.canFocusOut = False
        self.selectFile = selectFile
        self.setFocusPolicy(Qt.StrongFocus)
        self.btnSelect.clicked.connect(self.select)

    def select(self):
        """Open the file/directory dialog and store the chosen path."""
        lastDir = ''
        if not self.selectFile:
            selectedPath = QFileDialog.getExistingDirectory(None,
                                                            self.tr('Select directory'), lastDir,
                                                            QFileDialog.ShowDirsOnly)
        else:
            selectedPath, selected_filter = QFileDialog.getOpenFileName(None,
                                                                        self.tr('Select file'), lastDir, self.tr('All files (*)')
                                                                        )
        if not selectedPath:
            return
        self.lineEdit.setText(selectedPath)
        # Allow the delegate's focus-out event filter to commit the edit now.
        self.canFocusOut = True

    def text(self):
        """Return the currently entered path."""
        return self.lineEdit.text()

    def setText(self, value):
        """Set the path shown in the line edit."""
        self.lineEdit.setText(value)
class MultipleDirectorySelector(QWidget):
    """Line edit plus browse button editing a ';'-separated directory list."""

    def __init__(self, parent=None, placeholder=""):
        QWidget.__init__(self, parent)
        # create gui
        self.btnSelect = QToolButton()
        self.btnSelect.setText('…')
        self.lineEdit = QLineEdit()
        self.lineEdit.setPlaceholderText(placeholder)
        self.hbl = QHBoxLayout()
        # Fix: setContentsMargins replaces the deprecated QLayout.setMargin(),
        # matching how layouts are configured elsewhere in this dialog.
        self.hbl.setContentsMargins(0, 0, 0, 0)
        self.hbl.setSpacing(0)
        self.hbl.addWidget(self.lineEdit)
        self.hbl.addWidget(self.btnSelect)
        self.setLayout(self.hbl)
        self.canFocusOut = False
        self.setFocusPolicy(Qt.StrongFocus)
        self.btnSelect.clicked.connect(self.select)

    def select(self):
        """Open the multi-directory dialog seeded with the current entries."""
        text = self.lineEdit.text()
        if text != '':
            items = text.split(';')
        else:
            items = []
        dlg = DirectorySelectorDialog(None, items)
        if dlg.exec_():
            text = dlg.value()
            self.lineEdit.setText(text)
        # Allow the delegate's focus-out event filter to commit the edit now.
        self.canFocusOut = True

    def text(self):
        """Return the ';'-separated directory list."""
        return self.lineEdit.text()

    def setText(self, value):
        """Set the ';'-separated directory list."""
        self.lineEdit.setText(value)
| gpl-2.0 |
hsoft/xibless | xibless/gen.py | 1 | 7317 | import sys
import os
import os.path as op
import tempfile
import shutil
from subprocess import Popen
from datetime import datetime
from . import globalvars
from .base import CodeTemplate, GeneratedItem, owner, NSApp, const, defaults
from .types import Action, NLSTR
from .control import ControlSize, TextAlignment
from .view import View, Box, Pack, Size, Rect
from .font import Font, FontFamily, FontSize, FontTrait
from .color import Color
from .formatter import NumberFormatter, NumberStyle
from .menu import Menu, MainMenu
from .window import Window, Panel, PanelStyle
from .button import Button, Checkbox
from .textfield import TextField, Label, SearchField
from .textview import TextView
from .popup import Popup
from .combo import Combobox
from .radio import RadioButtons
from .progress import ProgressIndicator
from .image import ImageView
from .tabview import TabView
from .table import TableView, ListView, OutlineView
from .splitview import SplitView
from .segment import SegmentedControl
from .slider import Slider
from .layout import HLayout, VLayout, VHLayout
from .util import modified_after
# Python 2 has a builtin execfile(); provide a small shim on Python 3.
try:
    execfile
except NameError:
    # We're in Python 3
    # NOTE(review): the globals()/locals() defaults are evaluated at
    # definition time (this module's namespace). The caller in generate()
    # always passes both explicitly, so the defaults are effectively unused.
    def execfile(file, globals=globals(), locals=locals()):
        with open(file, "rt", encoding='utf-8') as fh:
            exec(fh.read()+"\n", globals, locals)
# Banner prepended to every generated unit (filled by str.format below).
AUTOGEN_COMMENT = "/* This unit was automatically generated by xibless v{version} on {timestamp}. */\n\n"

# Template for the generated Objective-C header; $...$ markers are
# substituted by CodeTemplate attributes.
HEADER_TMPL = """
#import "XiblessSupport.h"
$ownerimport$
$funcsig$;
"""

# Template for the generated Objective-C implementation unit. The body
# ($contents$) is the concatenation of the generated item code pieces.
UNIT_TMPL = """
$mainimport$
$ownerimport$
$funcsig$
{
$contents$
return result;
}
"""
# When running a UI (in `runmode`), we take one UI script out of its context, so
# any owner assignment will make code compilation fail. Since we just want to preview the UI, we
# don't need those assignments, so we skip them. Moreover, we revert all instances which had their
# OBJC_CLASS attribute set because this is also going to make compilation fail.
def generate(modulePath, dest, runmode=False, localizationTable=None, args=None):
    """Execute the UI script at *modulePath* and write Objective-C code to *dest*.

    Parameters
    ----------
    modulePath : str
        Path to the xibless UI script; it must bind a ``result`` variable.
    dest : str
        Output path. Without an extension, ``.m`` is appended and a matching
        ``.h`` header is generated; with a ``.h`` extension no separate
        header is produced.
    runmode : bool
        Preview mode: owner assignments and custom OBJC_CLASS overrides
        are dropped so the unit compiles stand-alone.
    localizationTable : str or None
        Name of the localization table passed to NSLocalizedStringFromTable.
    args : dict or None
        Extra values exposed to the script as ``args``.
    """
    if args is None:
        args = {}
    dest_basename, dest_ext = op.splitext(op.basename(dest))
    if dest_ext == '.h':
        dest_header = None
    else:
        if not dest_ext:
            dest += '.m'
        dest_header = op.splitext(dest)[0] + '.h'
    # Generation state is module-global; reset it for this run.
    globalvars.globalLocalizationTable = localizationTable
    globalvars.globalRunMode = runmode
    globalvars.globalGenerationCounter.reset()
    # Names from this package that the UI script may reference directly.
    to_include = {'owner', 'NSApp', 'const', 'defaults', 'View', 'Box', 'Size', 'Rect',
        'ControlSize', 'Menu', 'MainMenu', 'Action', 'Window', 'Panel', 'PanelStyle', 'Button',
        'Checkbox', 'Label', 'TextField', 'TextView', 'SearchField', 'Popup', 'Combobox',
        'RadioButtons', 'ProgressIndicator', 'ImageView', 'TabView', 'TableView', 'ListView',
        'OutlineView', 'SplitView', 'Font', 'FontFamily', 'FontSize', 'FontTrait', 'Color', 'Pack',
        'TextAlignment', 'HLayout', 'VLayout', 'VHLayout', 'SegmentedControl', 'Slider',
        'NumberFormatter', 'NumberStyle', 'NLSTR',
    }
    module_globals = {name: globals()[name] for name in to_include}
    module_globals['args'] = args
    module_locals = {}
    # Let the script import siblings from its own directory.
    sys.path.insert(0, op.dirname(modulePath))
    execfile(modulePath, module_globals, module_locals)
    del sys.path[0]
    assert 'result' in module_locals
    tmpl = CodeTemplate(UNIT_TMPL)
    if runmode:
        # See the comment above: neutralize the owner for preview builds.
        owner._clear()
        owner._name = 'nil'
        ownerclass = 'id'
        ownerimport = None
        # We do this to avoid custom OBJC classes definition from preventing compilation.
        for value in module_locals.values():
            if hasattr(value, 'OBJC_CLASS') and hasattr(value.__class__, 'OBJC_CLASS'):
                value.OBJC_CLASS = value.__class__.OBJC_CLASS
    else:
        ownerclass = module_locals.get('ownerclass', 'id')
        ownerimport = module_locals.get('ownerimport')
    if ownerimport:
        ownerimport = "#import \"%s\"" % ownerimport
    else:
        ownerimport = ''
    if ownerclass == 'id':
        ownerdecl = "id owner"
    else:
        ownerdecl = "%s *owner" % ownerclass
    if dest_header:
        tmpl.mainimport = "#import \"{}.h\"".format(dest_basename)
    else:
        tmpl.mainimport = "#import \"XiblessSupport.h\""
    tmpl.ownerimport = ownerimport
    # Give generated items their script-level names instead of _tmpN.
    for key, value in module_locals.items():
        if isinstance(value, GeneratedItem) and value.varname.startswith('_tmp'):
            value.varname = key
    toGenerate = globalvars.globalGenerationCounter.createdItems
    codePieces = []
    for item in toGenerate:
        if item.generated:
            continue
        code = item.generate()
        if code:
            codePieces.append(code)
    # Finalization runs after every item's main code has been emitted.
    for item in toGenerate:
        code = item.generateFinalize()
        if code:
            codePieces.append(code)
    result = module_locals['result']
    funcsig = "{}* create{}({})".format(result.OBJC_CLASS, dest_basename, ownerdecl)
    tmpl.funcsig = funcsig
    tmpl.contents = '\n'.join(codePieces)
    from xibless import __version__ # We have to import it here to avoid circular references
    autogen_comment = AUTOGEN_COMMENT.format(version=__version__, timestamp=datetime.now().strftime('%c'))
    with open(dest, 'wb') as fp:
        fp.write(autogen_comment.encode('utf-8'))
        fp.write(tidyCode(tmpl.render()).encode('utf-8'))
    if dest_header:
        tmpl = CodeTemplate(HEADER_TMPL)
        tmpl.funcsig = funcsig
        tmpl.ownerimport = ownerimport
        # NOTE(review): the header is written with the platform default
        # encoding while the unit above is explicitly UTF-8 — confirm this
        # is intentional for non-ASCII owner imports.
        with open(dest_header, 'wt') as fp:
            fp.write(autogen_comment)
            fp.write(tidyCode(tmpl.render()))
    copy_support_unit(op.dirname(dest))
def runUI(modulePath):
    """Build and preview the UI script at *modulePath* in a temporary app.

    Copies the bundled ``runtemplate`` project to a temp directory,
    points its wscript at the given script, then builds and opens the
    resulting app with waf. The temp directory is removed afterwards by
    the shell command itself. macOS-only: relies on the `open` command.
    """
    runtemplatePath = op.join(op.dirname(op.abspath(__file__)), 'runtemplate')
    assert op.exists(runtemplatePath)
    tmpPath = tempfile.mkdtemp()
    destPath = op.join(tmpPath, 'runtemplate')
    shutil.copytree(runtemplatePath, destPath)
    # Substitute the placeholder in the template build script.
    wscriptPath = op.join(destPath, 'wscript')
    with open(wscriptPath, 'rt') as fp:
        wscriptContent = fp.read()
    wscriptContent = wscriptContent.replace('{{script_path}}', op.abspath(modulePath))
    with open(wscriptPath, 'wt') as fp:
        fp.write(wscriptContent)
    # NOTE(review): shell=True with interpolated paths — paths containing
    # double quotes would break this command; acceptable for a dev tool.
    cmd = 'cd "%s" && python ./waf configure && python ./waf && open build/RunUI.app -W && cd ../.. && rm -r "%s"' % (destPath, tmpPath)
    p = Popen(cmd, shell=True)
    p.wait()
def tidyCode(code):
    """Re-indent generated Objective-C code.

    Each line is stripped, then indented four spaces per open brace
    level. Runs of blank lines collapse into a single blank line and no
    leading blank line is emitted.
    """
    tidied = []
    depth = 0
    for raw in code.split('\n'):
        stripped = raw.strip()
        if not stripped:
            # Collapse consecutive blanks; never start with a blank line.
            if tidied and tidied[-1]:
                tidied.append('')
            continue
        # Closing braces de-indent the line they appear on.
        depth -= stripped.count('}')
        tidied.append(' ' * (4 * depth) + stripped)
        depth += stripped.count('{')
    return '\n'.join(tidied)
def copy_support_unit(destfolder):
    """Copy XiblessSupport.{h,m} into *destfolder*, skipping up-to-date files."""
    data_path = op.join(op.dirname(__file__), 'data')
    if not op.exists(destfolder):
        os.makedirs(destfolder)
    for unit in ('XiblessSupport.h', 'XiblessSupport.m'):
        source = op.join(data_path, unit)
        # Only copy when the bundled file is newer than the destination.
        if modified_after(source, op.join(destfolder, unit)):
            shutil.copy(source, destfolder)
| bsd-3-clause |
apache/incubator-mxnet | example/restricted-boltzmann-machine/binary_rbm.py | 18 | 12873 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import ast
import numpy as np
import mxnet as mx
class BinaryRBM(mx.operator.CustomOp):
    """Binary restricted Boltzmann machine trained with persistent
    contrastive divergence (PCD-k).

    Visible and hidden units are both binary. During training the Gibbs
    chain is started from the persistent hidden-layer sample stored in
    the auxiliary states rather than from the current data batch.
    """

    def __init__(self, k):
        self.k = k # Persistent contrastive divergence k

    def forward(self, is_train, req, in_data, out_data, aux):
        """Run k Gibbs steps and emit unit activation probabilities.

        Outputs: P(v=1) after k Gibbs steps and P(h=1 | data).
        NOTE(review): assumes k >= 1 during training, otherwise
        visible_layer_sample is undefined when writing aux[0].
        """
        visible_layer_data = in_data[0] # (num_batch, num_visible)
        visible_layer_bias = in_data[1] # (num_visible,)
        hidden_layer_bias = in_data[2] # (num_hidden,)
        interaction_weight = in_data[3] # (num_visible, num_hidden)
        if is_train:
            _, hidden_layer_prob_1 = self.sample_hidden_layer(visible_layer_data, hidden_layer_bias, interaction_weight)
            hidden_layer_sample = aux[1] # The initial state of the Gibbs sampling for persistent CD
        else:
            hidden_layer_sample, hidden_layer_prob_1 = self.sample_hidden_layer(visible_layer_data, hidden_layer_bias, interaction_weight)
        # k-step Gibbs sampling
        for _ in range(self.k):
            visible_layer_sample, visible_layer_prob_1 = self.sample_visible_layer(hidden_layer_sample, visible_layer_bias, interaction_weight)
            hidden_layer_sample, _ = self.sample_hidden_layer(visible_layer_sample, hidden_layer_bias, interaction_weight)
        if is_train:
            # Used in backward and next forward
            aux[0][:] = visible_layer_sample
            aux[1][:] = hidden_layer_sample
        self.assign(out_data[0], req[0], visible_layer_prob_1)
        self.assign(out_data[1], req[1], hidden_layer_prob_1)

    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        """Compute CD gradients: negative-phase model samples minus
        positive-phase data statistics, averaged over the batch."""
        visible_layer_data = in_data[0] # (num_batch, num_visible)
        visible_layer_sample = aux[0] # (num_batch, num_visible)
        hidden_layer_prob_1 = out_data[1] # (num_batch, num_hidden)
        hidden_layer_sample = aux[1] # (num_batch, num_hidden)
        grad_visible_layer_bias = (visible_layer_sample - visible_layer_data).mean(axis=0)
        grad_hidden_layer_bias = (hidden_layer_sample - hidden_layer_prob_1).mean(axis=0)
        # Outer products via batched gemm2 on (v, 1) x (1, h) expansions.
        grad_interaction_weight = (mx.nd.linalg.gemm2(visible_layer_sample.expand_dims(2), hidden_layer_sample.expand_dims(1)) -
                                   mx.nd.linalg.gemm2(visible_layer_data.expand_dims(2), hidden_layer_prob_1.expand_dims(1))
                                   ).mean(axis=0)
        # We don't need the gradient on the visible layer input
        self.assign(in_grad[1], req[1], grad_visible_layer_bias)
        self.assign(in_grad[2], req[2], grad_hidden_layer_bias)
        self.assign(in_grad[3], req[3], grad_interaction_weight)

    def sample_hidden_layer(self, visible_layer_batch, hidden_layer_bias, interaction_weight):
        """Sample hidden units conditional on the visible layer."""
        return self.sample_layer(visible_layer_batch, hidden_layer_bias, interaction_weight, False)

    def sample_visible_layer(self, hidden_layer_batch, visible_layer_bias, interaction_weight):
        """Sample visible units conditional on the hidden layer."""
        return self.sample_layer(hidden_layer_batch, visible_layer_bias, interaction_weight, True)

    def sample_layer(self, other_layer_sample, layer_bias, interaction_weight, interaction_transpose):
        """Return (binary sample, P(unit=1)) for one layer given the other."""
        prob_1 = mx.nd.linalg.gemm(
            other_layer_sample,
            interaction_weight,
            layer_bias.tile(reps=(other_layer_sample.shape[0], 1)),
            transpose_b=interaction_transpose) # (num_batch, num_units_in_layer)
        prob_1.sigmoid(out=prob_1)
        return mx.nd.random.uniform(shape=prob_1.shape) < prob_1, prob_1
@mx.operator.register('BinaryRBM')
class BinaryRBMProp(mx.operator.CustomOpProp):
    """Registration plus argument/shape/type inference for BinaryRBM."""

    # Auxiliary states are requested only if `for_training` is true.
    def __init__(self, num_hidden, k, for_training):
        super(BinaryRBMProp, self).__init__(False)
        # Custom-op keyword arguments arrive as strings; parse them here.
        self.num_hidden = int(num_hidden)
        self.k = int(k)
        self.for_training = ast.literal_eval(for_training)

    def list_arguments(self):
        # 0: (batch size, the number of visible units)
        # 1: (the number of visible units,)
        # 2: (the number of hidden units,)
        # 3: (the number of visible units, the number of hidden units)
        return ['data', 'visible_layer_bias', 'hidden_layer_bias', 'interaction_weight']

    def list_outputs(self):
        # 0: The probabilities that each visible unit is 1 after `k` steps of Gibbs sampling starting from the given `data`.
        #    (batch size, the number of visible units)
        # 1: The probabilities that each hidden unit is 1 conditional on the given `data`.
        #    (batch size, the number of hidden units)
        return ['visible_layer_prob_1', 'hidden_layer_prob_1']

    def list_auxiliary_states(self):
        # Used only if `self.for_training` is true.
        # 0: Store the visible layer samples obtained in the forward pass, used in the backward pass.
        #    (batch size, the number of visible units)
        # 1: Store the hidden layer samples obtained in the forward pass, used in the backward and next forward pass.
        #    (batch size, the number of hidden units)
        return ['aux_visible_layer_sample', 'aux_hidden_layer_sample'] if self.for_training else []

    def infer_shape(self, in_shapes):
        """Derive all parameter/output/aux shapes from the data shape."""
        visible_layer_data_shape = in_shapes[0] # The input data
        visible_layer_bias_shape = (visible_layer_data_shape[1],)
        hidden_layer_bias_shape = (self.num_hidden,)
        interaction_shape = (visible_layer_data_shape[1], self.num_hidden)
        visible_layer_sample_shape = visible_layer_data_shape
        visible_layer_prob_1_shape = visible_layer_sample_shape
        hidden_layer_sample_shape = (visible_layer_data_shape[0], self.num_hidden)
        hidden_layer_prob_1_shape = hidden_layer_sample_shape
        return [visible_layer_data_shape, visible_layer_bias_shape, hidden_layer_bias_shape, interaction_shape], \
               [visible_layer_prob_1_shape, hidden_layer_prob_1_shape], \
               [visible_layer_sample_shape, hidden_layer_sample_shape] if self.for_training else []

    def infer_type(self, in_type):
        # Inputs, outputs and auxiliary states all share the data dtype.
        return [in_type[0], in_type[0], in_type[0], in_type[0]], \
               [in_type[0], in_type[0]], \
               [in_type[0], in_type[0]] if self.for_training else []

    def create_operator(self, ctx, in_shapes, in_dtypes):
        return BinaryRBM(self.k)
# For gluon API
class BinaryRBMBlock(mx.gluon.HybridBlock):
    """Gluon ``HybridBlock`` wrapper around the BinaryRBM custom operator.

    All parameters use deferred initialization (shape 0 placeholders)
    because their real shapes depend on the number of visible units,
    which is only known at the first forward pass.
    """

    def __init__(self, num_hidden, k, for_training, **kwargs):
        super(BinaryRBMBlock, self).__init__(**kwargs)
        with self.name_scope():
            self.num_hidden = num_hidden
            self.k = k
            self.for_training = for_training
            self.visible_layer_bias = self.params.get('visible_layer_bias', shape=(0,), allow_deferred_init=True)
            self.hidden_layer_bias = self.params.get('hidden_layer_bias', shape=(0,), allow_deferred_init=True)
            self.interaction_weight = self.params.get('interaction_weight', shape=(0, 0), allow_deferred_init=True)
            if for_training:
                # Persistent Gibbs-chain state, kept between forward passes.
                self.aux_visible_layer_sample = self.params.get('aux_visible_layer_sample', shape=(0, 0), allow_deferred_init=True)
                self.aux_hidden_layer_sample = self.params.get('aux_hidden_layer_sample', shape=(0, 0), allow_deferred_init=True)

    def hybrid_forward(self, F, data, visible_layer_bias, hidden_layer_bias, interaction_weight, aux_visible_layer_sample=None, aux_hidden_layer_sample=None):
        """Invoke the registered BinaryRBM custom operator."""
        # As long as `for_training` is kept constant, this conditional statement does not prevent hybridization.
        if self.for_training:
            return F.Custom(
                data,
                visible_layer_bias,
                hidden_layer_bias,
                interaction_weight,
                aux_visible_layer_sample,
                aux_hidden_layer_sample,
                num_hidden=self.num_hidden,
                k=self.k,
                for_training=self.for_training,
                op_type='BinaryRBM')
        else:
            return F.Custom(
                data,
                visible_layer_bias,
                hidden_layer_bias,
                interaction_weight,
                num_hidden=self.num_hidden,
                k=self.k,
                for_training=self.for_training,
                op_type='BinaryRBM')
def estimate_log_likelihood(visible_layer_bias, hidden_layer_bias, interaction_weight, ais_batch_size, ais_num_batch, ais_intermediate_steps, ais_burn_in_steps, data, ctx):
    """Estimate the mean log-likelihood of `data` under a binary RBM using
    annealed importance sampling (AIS).

    The partition function Z is estimated by annealing from a base-rate
    RBM (no hidden units, same visible bias) to the target RBM through
    `ais_intermediate_steps` intermediate distributions, running
    `ais_num_batch` batches of `ais_batch_size` independent AIS chains
    with `ais_burn_in_steps` Gibbs steps per intermediate distribution.

    Returns
    -------
    (float, float)
        The estimated mean per-example log-likelihood and log Z.
    """
    # The base-rate RBM with no hidden layer. The visible layer bias is set to the same with the given RBM.
    # This is not the only possible choice but simple and works well.
    base_rate_visible_layer_bias = visible_layer_bias
    base_rate_visible_prob_1 = base_rate_visible_layer_bias.sigmoid()
    # log Z of the base-rate RBM has a closed form: sum log(1 + exp(bias)).
    log_base_rate_z = base_rate_visible_layer_bias.exp().log1p().sum()

    def log_intermediate_unnormalized_prob(visible_layer_sample, beta):
        # Unnormalized log-probability under the intermediate distribution
        # interpolating base-rate (beta=0) and target (beta=1) RBMs.
        p = mx.nd.dot(
            visible_layer_sample,
            (1 - beta) * base_rate_visible_layer_bias + beta * visible_layer_bias)
        if beta != 0:
            # Hidden units are summed out analytically: log(1 + exp(...)).
            p += mx.nd.linalg.gemm(
                visible_layer_sample,
                interaction_weight,
                hidden_layer_bias.tile(reps=(visible_layer_sample.shape[0], 1)),
                transpose_b=False,
                alpha=beta,
                beta=beta).exp().log1p().sum(axis=1)
        return p

    def sample_base_rbm():
        # Independent visible samples from the (factorial) base-rate RBM.
        rands = mx.nd.random.uniform(shape=(ais_batch_size, base_rate_visible_prob_1.shape[0]), ctx=ctx)
        return rands < base_rate_visible_prob_1.tile(reps=(ais_batch_size, 1))

    def sample_intermediate_visible_layer(visible_layer_sample, beta):
        # Gibbs transitions that leave the intermediate distribution invariant.
        for _ in range(ais_burn_in_steps):
            hidden_prob_1 = mx.nd.linalg.gemm(
                visible_layer_sample,
                interaction_weight,
                hidden_layer_bias.tile(reps=(visible_layer_sample.shape[0], 1)),
                transpose_b=False,
                alpha=beta,
                beta=beta)
            hidden_prob_1.sigmoid(out=hidden_prob_1)
            hidden_layer_sample = mx.nd.random.uniform(shape=hidden_prob_1.shape, ctx=ctx) < hidden_prob_1
            visible_prob_1 = mx.nd.linalg.gemm(
                hidden_layer_sample,
                interaction_weight,
                visible_layer_bias.tile(reps=(hidden_layer_sample.shape[0], 1)),
                transpose_b=True,
                alpha=beta,
                beta=beta) + (1 - beta) * base_rate_visible_layer_bias
            visible_prob_1.sigmoid(out=visible_prob_1)
            visible_layer_sample = mx.nd.random.uniform(shape=visible_prob_1.shape, ctx=ctx) < visible_prob_1
        return visible_layer_sample

    def array_from_batch(batch):
        # Flatten a batch coming from either the module or gluon data APIs.
        if isinstance(batch, mx.io.DataBatch):
            return batch.data[0].as_in_context(ctx).flatten()
        else: # batch is an instance of list in the case of gluon DataLoader
            return batch[0].as_in_context(ctx).flatten()

    importance_weight_sum = 0
    num_ais_samples = ais_num_batch * ais_batch_size
    for _ in range(ais_num_batch):
        # Accumulate the log importance weight along the annealing path.
        log_importance_weight = 0
        visible_layer_sample = sample_base_rbm()
        for n in range(1, ais_intermediate_steps + 1):
            beta = 1. * n / ais_intermediate_steps
            log_importance_weight += \
                log_intermediate_unnormalized_prob(visible_layer_sample, beta) - \
                log_intermediate_unnormalized_prob(visible_layer_sample, (n - 1.) / ais_intermediate_steps)
            visible_layer_sample = sample_intermediate_visible_layer(visible_layer_sample, beta)
        importance_weight_sum += log_importance_weight.exp().sum()
    # AIS estimate: Z ≈ Z_base * mean(importance weights).
    log_z = (importance_weight_sum / num_ais_samples).log() + log_base_rate_z
    log_likelihood = 0
    num_data = 0
    for batch in data:
        batch_array = array_from_batch(batch)
        log_likelihood += log_intermediate_unnormalized_prob(batch_array, 1) - log_z
        num_data += batch_array.shape[0]
    log_likelihood = log_likelihood.sum() / num_data
    return log_likelihood.asscalar(), log_z.asscalar()
| apache-2.0 |
crazy-cat/incubator-mxnet | python/mxnet/io.py | 11 | 33251 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Data iterators for common data formats."""
from __future__ import absolute_import
from collections import OrderedDict, namedtuple
import sys
import ctypes
import logging
import threading
try:
import h5py
except ImportError:
h5py = None
import numpy as np
from .base import _LIB
from .base import c_array, c_str, mx_uint, py_str
from .base import DataIterHandle, NDArrayHandle
from .base import mx_real_t
from .base import check_call, build_param_doc as _build_param_doc
from .ndarray import NDArray
from .ndarray.sparse import CSRNDArray
from .ndarray import _ndarray_cls
from .ndarray import array
from .ndarray import concatenate
class DataDesc(namedtuple('DataDesc', ['name', 'shape'])):
    """DataDesc is used to store name, shape, type and layout
    information of the data or the label.

    The `layout` describes how the axes in `shape` should be interpreted,
    for example for image data setting `layout=NCHW` indicates
    that the first axis is number of examples in the batch(N),
    C is number of channels, H is the height and W is the width of the image.

    For sequential data, by default `layout` is set to ``NTC``, where
    N is number of examples in the batch, T the temporal axis representing time
    and C is the number of channels.

    Parameters
    ----------
    cls : DataDesc
        The class.
    name : str
        Data name.
    shape : tuple of int
        Data shape.
    dtype : np.dtype, optional
        Data type.
    layout : str, optional
        Data layout.
    """
    def __new__(cls, name, shape, dtype=mx_real_t, layout='NCHW'): # pylint: disable=super-on-old-class
        # Fix: use super(DataDesc, cls) rather than super(cls, DataDesc).
        # The old form raised TypeError whenever DataDesc was subclassed,
        # because DataDesc is not a subclass of the subclass.
        ret = super(DataDesc, cls).__new__(cls, name, shape)
        # dtype and layout ride along as plain attributes on the tuple.
        ret.dtype = dtype
        ret.layout = layout
        return ret

    def __repr__(self):
        return "DataDesc[%s,%s,%s,%s]" % (self.name, self.shape, self.dtype,
                                          self.layout)

    @staticmethod
    def get_batch_axis(layout):
        """Get the dimension that corresponds to the batch size.

        When data parallelism is used, the data will be automatically split and
        concatenated along the batch-size dimension. Axis can be -1, which means
        the whole array will be copied for each data-parallelism device.

        Parameters
        ----------
        layout : str
            layout string. For example, "NCHW".

        Returns
        -------
        int
            An axis indicating the batch_size dimension.
        """
        if layout is None:
            return 0
        return layout.find('N')

    @staticmethod
    def get_list(shapes, types):
        """Get DataDesc list from attribute lists.

        Parameters
        ----------
        shapes : a tuple of (name, shape)
        types : a tuple of (name, type)
        """
        if types is not None:
            type_dict = dict(types)
            return [DataDesc(x[0], x[1], type_dict[x[0]]) for x in shapes]
        else:
            return [DataDesc(x[0], x[1]) for x in shapes]
class DataBatch(object):
    """A data batch.

    MXNet's data iterator returns a batch of data for each `next` call.
    This data contains `batch_size` number of examples.

    If the input data consists of images, then shape of these images depend on
    the `layout` attribute of `DataDesc` object in `provide_data` parameter.

    If `layout` is set to 'NCHW' then, images should be stored in a 4-D matrix
    of shape ``(batch_size, num_channel, height, width)``.
    If `layout` is set to 'NHWC' then, images should be stored in a 4-D matrix
    of shape ``(batch_size, height, width, num_channel)``.
    The channels are often in RGB order.

    Parameters
    ----------
    data : list of `NDArray`, each array containing `batch_size` examples.
        A list of input data.
    label : list of `NDArray`, each array often containing a 1-dimensional array. optional
        A list of input labels.
    pad : int, optional
        The number of examples padded at the end of a batch. It is used when the
        total number of examples read is not divisible by the `batch_size`.
        These extra padded examples are ignored in prediction.
    index : numpy.array, optional
        The example indices in this batch.
    bucket_key : int, optional
        The bucket key, used for bucketing module.
    provide_data : list of `DataDesc`, optional
        A list of `DataDesc` objects. `DataDesc` is used to store
        name, shape, type and layout information of the data.
        The *i*-th element describes the name and shape of ``data[i]``.
    provide_label : list of `DataDesc`, optional
        A list of `DataDesc` objects. `DataDesc` is used to store
        name, shape, type and layout information of the label.
        The *i*-th element describes the name and shape of ``label[i]``.
    """
    def __init__(self, data, label=None, pad=None, index=None,
                 bucket_key=None, provide_data=None, provide_label=None):
        if data is not None:
            assert isinstance(data, (list, tuple)), "Data must be list of NDArrays"
        if label is not None:
            assert isinstance(label, (list, tuple)), "Label must be list of NDArrays"
        self.data = data
        self.label = label
        self.pad = pad
        self.index = index
        self.bucket_key = bucket_key
        self.provide_data = provide_data
        self.provide_label = provide_label

    def __str__(self):
        # Fix: data/label may legitimately be None (the constructor allows
        # it); the old code raised TypeError iterating None in that case.
        data_shapes = [d.shape for d in self.data] if self.data is not None else None
        label_shapes = [l.shape for l in self.label] if self.label is not None else None
        return "{}: data shapes: {} label shapes: {}".format(
            self.__class__.__name__,
            data_shapes,
            label_shapes)
class DataIter(object):
    """Abstract base class for MXNet data iterators.

    Concrete iterators behave like standard Python iterators: each call
    to `next` produces a `DataBatch`, and `StopIteration` is raised once
    the data is exhausted. Subclasses implement `iter_next`, `getdata`,
    `getlabel`, `getpad` and optionally `getindex`.

    Parameters
    ----------
    batch_size : int, optional
        Number of examples per batch.

    See Also
    --------
    NDArrayIter : Data-iterator for MXNet NDArray or numpy-ndarray objects.
    CSVIter : Data-iterator for csv data.
    LibSVMIter : Data-iterator for libsvm data.
    ImageIter : Data-iterator for images.
    """
    def __init__(self, batch_size=0):
        self.batch_size = batch_size

    def __iter__(self):
        return self

    def reset(self):
        """Restart the iterator from the beginning of the data."""
        pass

    def next(self):
        """Return the next `DataBatch`.

        Raises
        ------
        StopIteration
            When the end of the data is reached.
        """
        if not self.iter_next():
            raise StopIteration
        return DataBatch(data=self.getdata(), label=self.getlabel(),
                         pad=self.getpad(), index=self.getindex())

    def __next__(self):
        return self.next()

    def iter_next(self):
        """Advance to the next batch; return whether one is available."""
        pass

    def getdata(self):
        """Return the data (list of NDArray) of the current batch."""
        pass

    def getlabel(self):
        """Return the label (list of NDArray) of the current batch."""
        pass

    def getindex(self):
        """Return the example indices of the current batch, if any."""
        return None

    def getpad(self):
        """Return the number of padding examples in the current batch."""
        pass
class ResizeIter(DataIter):
    """Present *data_iter* as an iterator with exactly *size* batches per epoch.

    If the wrapped iterator is exhausted before `size` batches have been
    produced, it is silently reset and iteration continues from its start.

    Parameters
    ----------
    data_iter : DataIter
        The data iterator to be resized.
    size : int
        The number of batches per epoch to resize to.
    reset_internal : bool
        Whether `reset` also resets the wrapped iterator.

    Examples
    --------
    >>> nd_iter = mx.io.NDArrayIter(mx.nd.ones((100,10)), batch_size=25)
    >>> resize_iter = mx.io.ResizeIter(nd_iter, 2)
    >>> for batch in resize_iter:
    ...     print(batch.data)
    [<NDArray 25x10 @cpu(0)>]
    [<NDArray 25x10 @cpu(0)>]
    """
    def __init__(self, data_iter, size, reset_internal=True):
        super(ResizeIter, self).__init__()
        self.data_iter = data_iter
        self.size = size
        self.reset_internal = reset_internal
        self.cur = 0
        self.current_batch = None
        # Mirror the wrapped iterator's public description.
        self.provide_data = data_iter.provide_data
        self.provide_label = data_iter.provide_label
        self.batch_size = data_iter.batch_size
        if hasattr(data_iter, 'default_bucket_key'):
            self.default_bucket_key = data_iter.default_bucket_key

    def reset(self):
        self.cur = 0
        if self.reset_internal:
            self.data_iter.reset()

    def iter_next(self):
        if self.cur == self.size:
            return False
        try:
            self.current_batch = self.data_iter.next()
        except StopIteration:
            # Wrapped iterator ran out early: restart it and keep going.
            self.data_iter.reset()
            self.current_batch = self.data_iter.next()
        self.cur += 1
        return True

    def getdata(self):
        return self.current_batch.data

    def getlabel(self):
        return self.current_batch.label

    def getindex(self):
        return self.current_batch.index

    def getpad(self):
        return self.current_batch.pad
class PrefetchingIter(DataIter):
    """Performs pre-fetch for other data iterators.

    This iterator will create another thread to perform ``iter_next`` and then
    store the data in memory. It potentially accelerates the data read, at the
    cost of more memory usage.

    Parameters
    ----------
    iters : DataIter or list of DataIter
        The data iterators to be pre-fetched.
    rename_data : None or list of dict
        The *i*-th element is a renaming map for the *i*-th iter, in the form of
        {'original_name' : 'new_name'}. Should have one entry for each entry
        in iter[i].provide_data.
    rename_label : None or list of dict
        Similar to ``rename_data``.

    Examples
    --------
    >>> iter1 = mx.io.NDArrayIter({'data':mx.nd.ones((100,10))}, batch_size=25)
    >>> iter2 = mx.io.NDArrayIter({'data':mx.nd.ones((100,10))}, batch_size=25)
    >>> piter = mx.io.PrefetchingIter([iter1, iter2],
    ...                               rename_data=[{'data': 'data_1'}, {'data': 'data_2'}])
    >>> print(piter.provide_data)
    [DataDesc[data_1,(25, 10L),<type 'numpy.float32'>,NCHW],
    DataDesc[data_2,(25, 10L),<type 'numpy.float32'>,NCHW]]
    """
    def __init__(self, iters, rename_data=None, rename_label=None):
        super(PrefetchingIter, self).__init__()
        if not isinstance(iters, list):
            iters = [iters]
        self.n_iter = len(iters)
        assert self.n_iter > 0
        self.iters = iters
        self.rename_data = rename_data
        self.rename_label = rename_label
        self.batch_size = self.provide_data[0][1][0]
        # Per-iterator handshake events:
        #   data_ready[i] set  -> thread i has published a batch in next_batch[i]
        #   data_taken[i] set  -> consumer is done with next_batch[i]; thread i
        #                         may fetch the next one
        self.data_ready = [threading.Event() for i in range(self.n_iter)]
        self.data_taken = [threading.Event() for i in range(self.n_iter)]
        for i in self.data_taken:
            i.set()
        self.started = True
        self.current_batch = [None for i in range(self.n_iter)]
        self.next_batch = [None for i in range(self.n_iter)]
        def prefetch_func(self, i):
            """Thread entry"""
            while True:
                self.data_taken[i].wait()
                # `started` is flipped to False by __del__ to let threads exit.
                if not self.started:
                    break
                try:
                    self.next_batch[i] = self.iters[i].next()
                except StopIteration:
                    # None is the sentinel for "underlying iterator exhausted".
                    self.next_batch[i] = None
                self.data_taken[i].clear()
                self.data_ready[i].set()
        self.prefetch_threads = [threading.Thread(target=prefetch_func, args=[self, i]) \
                                 for i in range(self.n_iter)]
        for thread in self.prefetch_threads:
            thread.setDaemon(True)
            thread.start()

    def __del__(self):
        # Wake every worker so it observes started == False and exits, then join.
        self.started = False
        for i in self.data_taken:
            i.set()
        for thread in self.prefetch_threads:
            thread.join()

    @property
    def provide_data(self):
        # Concatenate (and optionally rename) the descriptions of all
        # wrapped iterators.
        if self.rename_data is None:
            return sum([i.provide_data for i in self.iters], [])
        else:
            return sum([[
                DataDesc(r[x.name], x.shape, x.dtype)
                if isinstance(x, DataDesc) else DataDesc(*x)
                for x in i.provide_data
            ] for r, i in zip(self.rename_data, self.iters)], [])

    @property
    def provide_label(self):
        if self.rename_label is None:
            return sum([i.provide_label for i in self.iters], [])
        else:
            return sum([[
                DataDesc(r[x.name], x.shape, x.dtype)
                if isinstance(x, DataDesc) else DataDesc(*x)
                for x in i.provide_label
            ] for r, i in zip(self.rename_label, self.iters)], [])

    def reset(self):
        # Wait for in-flight fetches, reset the wrapped iterators, then let
        # the workers fetch the first batches of the new epoch.
        for i in self.data_ready:
            i.wait()
        for i in self.iters:
            i.reset()
        for i in self.data_ready:
            i.clear()
        for i in self.data_taken:
            i.set()

    def iter_next(self):
        for i in self.data_ready:
            i.wait()
        if self.next_batch[0] is None:
            # Either every iterator is exhausted, or they disagree in length.
            for i in self.next_batch:
                assert i is None, "Number of entry mismatches between iterators"
            return False
        else:
            for batch in self.next_batch:
                assert batch.pad == self.next_batch[0].pad, \
                    "Number of entry mismatches between iterators"
            # Merge the per-iterator batches into one combined DataBatch.
            self.current_batch = DataBatch(sum([batch.data for batch in self.next_batch], []),
                                           sum([batch.label for batch in self.next_batch], []),
                                           self.next_batch[0].pad,
                                           self.next_batch[0].index,
                                           provide_data=self.provide_data,
                                           provide_label=self.provide_label)
            for i in self.data_ready:
                i.clear()
            for i in self.data_taken:
                i.set()
            return True

    def next(self):
        if self.iter_next():
            return self.current_batch
        else:
            raise StopIteration

    def getdata(self):
        return self.current_batch.data

    def getlabel(self):
        return self.current_batch.label

    def getindex(self):
        return self.current_batch.index

    def getpad(self):
        return self.current_batch.pad
def _init_data(data, allow_empty, default_name):
    """Convert data into canonical form.

    Accepts a single array (``numpy.ndarray``, ``NDArray`` or
    ``h5py.Dataset``), a list of such arrays, or a dict mapping names to
    arrays, and returns a list of ``(name, array)`` pairs.

    Parameters
    ----------
    data : array, list of array, dict of str to array, or None
        The input data. ``None`` is only allowed when ``allow_empty`` is True.
    allow_empty : bool
        Whether an empty result is acceptable.
    default_name : str
        Name for a single unnamed input; multiple unnamed inputs are named
        ``_<i>_<default_name>``.

    Returns
    -------
    list of (str, array)
        The canonicalized ``(name, array)`` pairs.

    Raises
    ------
    TypeError
        If the input (or any dict value) is not a supported array type and
        cannot be converted with ``array()``.
    """
    assert (data is not None) or allow_empty
    if data is None:
        data = []

    # h5py is an optional dependency; only include h5py.Dataset in the
    # isinstance checks when the import succeeded (h5py is truthy).
    if isinstance(data, (np.ndarray, NDArray, h5py.Dataset)
                  if h5py else (np.ndarray, NDArray)):
        data = [data]
    if isinstance(data, list):
        if not allow_empty:
            assert(len(data) > 0)
        if len(data) == 1:
            data = OrderedDict([(default_name, data[0])])  # pylint: disable=redefined-variable-type
        else:
            data = OrderedDict(  # pylint: disable=redefined-variable-type
                [('_%d_%s' % (i, default_name), d) for i, d in enumerate(data)])
    if not isinstance(data, dict):
        raise TypeError("Input must be NDArray, numpy.ndarray, h5py.Dataset, " + \
                        "a list of them or dict with them as values")
    for k, v in data.items():
        if not isinstance(v, (NDArray, h5py.Dataset) if h5py else NDArray):
            try:
                # Fall back to converting plain sequences with mx.nd.array.
                data[k] = array(v)
            # Catch only Exception: the previous bare ``except:`` also trapped
            # KeyboardInterrupt/SystemExit and masked them as TypeError.
            except Exception:
                raise TypeError(("Invalid type '%s' for %s, " % (type(v), k)) + \
                                "should be NDArray, numpy.ndarray or h5py.Dataset")

    return list(data.items())
class NDArrayIter(DataIter):
    """Returns an iterator for ``mx.nd.NDArray``, ``numpy.ndarray``, ``h5py.Dataset``
    or ``mx.nd.sparse.CSRNDArray``.

    Example usage:
    ----------
    >>> data = np.arange(40).reshape((10,2,2))
    >>> labels = np.ones([10, 1])
    >>> dataiter = mx.io.NDArrayIter(data, labels, 3, True, last_batch_handle='discard')
    >>> for batch in dataiter:
    ...     print batch.data[0].asnumpy()
    ...     batch.data[0].shape
    ...
    [[[ 36.  37.]
      [ 38.  39.]]
     [[ 16.  17.]
      [ 18.  19.]]
     [[ 12.  13.]
      [ 14.  15.]]]
    (3L, 2L, 2L)
    [[[ 32.  33.]
      [ 34.  35.]]
     [[  4.   5.]
      [  6.   7.]]
     [[ 24.  25.]
      [ 26.  27.]]]
    (3L, 2L, 2L)
    [[[  8.   9.]
      [ 10.  11.]]
     [[ 20.  21.]
      [ 22.  23.]]
     [[ 28.  29.]
      [ 30.  31.]]]
    (3L, 2L, 2L)
    >>> dataiter.provide_data # Returns a list of `DataDesc`
    [DataDesc[data,(3, 2L, 2L),<type 'numpy.float32'>,NCHW]]
    >>> dataiter.provide_label # Returns a list of `DataDesc`
    [DataDesc[softmax_label,(3, 1L),<type 'numpy.float32'>,NCHW]]

    In the above example, data is shuffled as `shuffle` parameter is set to `True`
    and remaining examples are discarded as `last_batch_handle` parameter is set to `discard`.

    Usage of `last_batch_handle` parameter:

    >>> dataiter = mx.io.NDArrayIter(data, labels, 3, True, last_batch_handle='pad')
    >>> batchidx = 0
    >>> for batch in dataiter:
    ...     batchidx += 1
    ...
    >>> batchidx  # Padding added after the examples read are over. So, 10/3+1 batches are created.
    4
    >>> dataiter = mx.io.NDArrayIter(data, labels, 3, True, last_batch_handle='discard')
    >>> batchidx = 0
    >>> for batch in dataiter:
    ...     batchidx += 1
    ...
    >>> batchidx  # Remaining examples are discarded. So, 10/3 batches are created.
    3

    `NDArrayIter` also supports multiple input and labels.

    >>> data = {'data1':np.zeros(shape=(10,2,2)), 'data2':np.zeros(shape=(20,2,2))}
    >>> label = {'label1':np.zeros(shape=(10,1)), 'label2':np.zeros(shape=(20,1))}
    >>> dataiter = mx.io.NDArrayIter(data, label, 3, True, last_batch_handle='discard')

    `NDArrayIter` also supports ``mx.nd.sparse.CSRNDArray`` with `shuffle` set to `False`
    and `last_batch_handle` set to `discard`.

    >>> csr_data = mx.nd.array(np.arange(40).reshape((10,4))).tostype('csr')
    >>> labels = np.ones([10, 1])
    >>> dataiter = mx.io.NDArrayIter(csr_data, labels, 3, last_batch_handle='discard')
    >>> [batch.data[0] for batch in dataiter]
    [
    <CSRNDArray 3x4 @cpu(0)>,
    <CSRNDArray 3x4 @cpu(0)>,
    <CSRNDArray 3x4 @cpu(0)>]

    Parameters
    ----------
    data: array or list of array or dict of string to array
        The input data.
    label: array or list of array or dict of string to array, optional
        The input label.
    batch_size: int
        Batch size of data.
    shuffle: bool, optional
        Whether to shuffle the data.
        Only supported if no h5py.Dataset inputs are used.
    last_batch_handle : str, optional
        How to handle the last batch. This parameter can be 'pad', 'discard' or
        'roll_over'. 'roll_over' is intended for training and can cause problems
        if used for prediction.
    data_name : str, optional
        The data name.
    label_name : str, optional
        The label name.
    """
    def __init__(self, data, label=None, batch_size=1, shuffle=False,
                 last_batch_handle='pad', data_name='data',
                 label_name='softmax_label'):
        super(NDArrayIter, self).__init__(batch_size)

        # Canonicalize inputs to lists of (name, array) pairs.
        self.data = _init_data(data, allow_empty=False, default_name=data_name)
        self.label = _init_data(label, allow_empty=True, default_name=label_name)

        if isinstance(data, CSRNDArray) or isinstance(label, CSRNDArray):
            assert(shuffle is False), \
                "`NDArrayIter` only supports ``CSRNDArray`` with `shuffle` set to `False`"
            assert(last_batch_handle == 'discard'), "`NDArrayIter` only supports ``CSRNDArray``" \
                                                    " with `last_batch_handle` set to `discard`."

        self.idx = np.arange(self.data[0][1].shape[0])
        # shuffle data
        if shuffle:
            np.random.shuffle(self.idx)
            # h5py datasets cannot be shuffled in place; they are left as-is
            # and indexed through self.idx instead (see _getdata).
            self.data = [(k, array(v.asnumpy()[self.idx], v.context))
                         if not (isinstance(v, h5py.Dataset)
                                 if h5py else False) else (k, v)
                         for k, v in self.data]
            self.label = [(k, array(v.asnumpy()[self.idx], v.context))
                          if not (isinstance(v, h5py.Dataset)
                                  if h5py else False) else (k, v)
                          for k, v in self.label]

        # batching
        if last_batch_handle == 'discard':
            # Truncate to a whole number of batches.
            new_n = self.data[0][1].shape[0] - self.data[0][1].shape[0] % batch_size
            self.idx = self.idx[:new_n]

        self.data_list = [x[1] for x in self.data] + [x[1] for x in self.label]
        self.num_source = len(self.data_list)
        self.num_data = self.idx.shape[0]
        assert self.num_data >= batch_size, \
            "batch_size needs to be smaller than data size."
        # Cursor starts one batch before the data so the first iter_next()
        # lands on batch 0.
        self.cursor = -batch_size
        self.batch_size = batch_size
        self.last_batch_handle = last_batch_handle

    @property
    def provide_data(self):
        """The name and shape of data provided by this iterator."""
        return [
            DataDesc(k, tuple([self.batch_size] + list(v.shape[1:])), v.dtype)
            for k, v in self.data
        ]

    @property
    def provide_label(self):
        """The name and shape of label provided by this iterator."""
        return [
            DataDesc(k, tuple([self.batch_size] + list(v.shape[1:])), v.dtype)
            for k, v in self.label
        ]

    def hard_reset(self):
        """Ignore roll over data and set to start."""
        self.cursor = -self.batch_size

    def reset(self):
        if self.last_batch_handle == 'roll_over' and self.cursor > self.num_data:
            # Keep the examples that rolled past the end for the next epoch.
            self.cursor = -self.batch_size + (self.cursor%self.num_data)%self.batch_size
        else:
            self.cursor = -self.batch_size

    def iter_next(self):
        self.cursor += self.batch_size
        return self.cursor < self.num_data

    def next(self):
        if self.iter_next():
            return DataBatch(data=self.getdata(), label=self.getlabel(), \
                             pad=self.getpad(), index=None)
        else:
            raise StopIteration

    def _getdata(self, data_source):
        """Load data from underlying arrays, internal use only."""
        assert(self.cursor < self.num_data), "DataIter needs reset."
        if self.cursor + self.batch_size <= self.num_data:
            return [
                # np.ndarray or NDArray case
                x[1][self.cursor:self.cursor + self.batch_size]
                if isinstance(x[1], (np.ndarray, NDArray)) else
                # h5py (only supports indices in increasing order): fetch the
                # rows in sorted order, then permute back to the shuffled order.
                array(x[1][sorted(self.idx[
                    self.cursor:self.cursor + self.batch_size])][[
                        list(self.idx[self.cursor:
                                      self.cursor + self.batch_size]).index(i)
                        for i in sorted(self.idx[
                            self.cursor:self.cursor + self.batch_size])
                    ]]) for x in data_source
            ]
        else:
            # Last incomplete batch: wrap around and pad with the first
            # examples of the data.
            pad = self.batch_size - self.num_data + self.cursor
            return [
                # np.ndarray or NDArray case
                concatenate([x[1][self.cursor:], x[1][:pad]])
                if isinstance(x[1], (np.ndarray, NDArray)) else
                # h5py (only supports indices in increasing order)
                concatenate([
                    array(x[1][sorted(self.idx[self.cursor:])][[
                        list(self.idx[self.cursor:]).index(i)
                        for i in sorted(self.idx[self.cursor:])
                    ]]),
                    array(x[1][sorted(self.idx[:pad])][[
                        list(self.idx[:pad]).index(i)
                        for i in sorted(self.idx[:pad])
                    ]])
                ]) for x in data_source
            ]

    def getdata(self):
        return self._getdata(self.data)

    def getlabel(self):
        return self._getdata(self.label)

    def getpad(self):
        if self.last_batch_handle == 'pad' and \
           self.cursor + self.batch_size > self.num_data:
            return self.cursor + self.batch_size - self.num_data
        else:
            return 0
class MXDataIter(DataIter):
    """A python wrapper a C++ data iterator.

    This iterator is the Python wrapper to all native C++ data iterators, such
    as `CSVIter`, `ImageRecordIter`, `MNISTIter`, etc. When initializing
    `CSVIter` for example, you will get an `MXDataIter` instance to use in your
    Python code. Calls to `next`, `reset`, etc will be delegated to the
    underlying C++ data iterators.

    Usually you don't need to interact with `MXDataIter` directly unless you are
    implementing your own data iterators in C++. To do that, please refer to
    examples under the `src/io` folder.

    Parameters
    ----------
    handle : DataIterHandle, required
        The handle to the underlying C++ Data Iterator.
    data_name : str, optional
        Data name. Default to "data".
    label_name : str, optional
        Label name. Default to "softmax_label".

    See Also
    --------
    src/io : The underlying C++ data iterator implementation, e.g., `CSVIter`.
    """
    def __init__(self, handle, data_name='data', label_name='softmax_label', **_):
        super(MXDataIter, self).__init__()
        self.handle = handle
        # debug option, used to test the speed with io effect eliminated
        self._debug_skip_load = False

        # load the first batch to get shape information
        # NOTE(review): self.next() is safe to call here even though
        # _debug_at_begin is not yet set, because _debug_skip_load is False
        # and the ``and`` short-circuits before reading _debug_at_begin.
        self.first_batch = None
        self.first_batch = self.next()
        data = self.first_batch.data[0]
        label = self.first_batch.label[0]

        # properties
        self.provide_data = [DataDesc(data_name, data.shape, data.dtype)]
        self.provide_label = [DataDesc(label_name, label.shape, label.dtype)]
        self.batch_size = data.shape[0]

    def __del__(self):
        # Release the underlying C++ iterator.
        check_call(_LIB.MXDataIterFree(self.handle))

    def debug_skip_load(self):
        # Set the iterator to simply return always first batch. This can be used
        # to test the speed of network without taking the loading delay into
        # account.
        self._debug_skip_load = True
        logging.info('Set debug_skip_load to be true, will simply return first batch')

    def reset(self):
        self._debug_at_begin = True
        self.first_batch = None
        check_call(_LIB.MXDataIterBeforeFirst(self.handle))

    def next(self):
        if self._debug_skip_load and not self._debug_at_begin:
            # Debug mode: re-serve whatever the C++ iterator currently holds.
            return DataBatch(data=[self.getdata()], label=[self.getlabel()], pad=self.getpad(),
                             index=self.getindex())
        if self.first_batch is not None:
            # Serve the batch that was pre-loaded in __init__ (or cached by
            # iter_next) before advancing the C++ iterator again.
            batch = self.first_batch
            self.first_batch = None
            return batch
        self._debug_at_begin = False
        next_res = ctypes.c_int(0)
        check_call(_LIB.MXDataIterNext(self.handle, ctypes.byref(next_res)))
        if next_res.value:
            return DataBatch(data=[self.getdata()], label=[self.getlabel()], pad=self.getpad(),
                             index=self.getindex())
        else:
            raise StopIteration

    def iter_next(self):
        if self.first_batch is not None:
            return True
        next_res = ctypes.c_int(0)
        check_call(_LIB.MXDataIterNext(self.handle, ctypes.byref(next_res)))
        return next_res.value

    def getdata(self):
        hdl = NDArrayHandle()
        check_call(_LIB.MXDataIterGetData(self.handle, ctypes.byref(hdl)))
        return _ndarray_cls(hdl, False)

    def getlabel(self):
        hdl = NDArrayHandle()
        check_call(_LIB.MXDataIterGetLabel(self.handle, ctypes.byref(hdl)))
        return _ndarray_cls(hdl, False)

    def getindex(self):
        index_size = ctypes.c_uint64(0)
        index_data = ctypes.POINTER(ctypes.c_uint64)()
        check_call(_LIB.MXDataIterGetIndex(self.handle,
                                           ctypes.byref(index_data),
                                           ctypes.byref(index_size)))
        if index_size.value:
            # Copy the C buffer into an owned numpy array before returning,
            # since the buffer belongs to the C++ iterator.
            address = ctypes.addressof(index_data.contents)
            dbuffer = (ctypes.c_uint64* index_size.value).from_address(address)
            np_index = np.frombuffer(dbuffer, dtype=np.uint64)
            return np_index.copy()
        else:
            return None

    def getpad(self):
        pad = ctypes.c_int(0)
        check_call(_LIB.MXDataIterGetPadNum(self.handle, ctypes.byref(pad)))
        return pad.value
def _make_io_iterator(handle):
    """Create an io iterator by handle."""
    # Query the C library for this iterator's name, description and the
    # documentation of its keyword arguments.
    name = ctypes.c_char_p()
    desc = ctypes.c_char_p()
    num_args = mx_uint()
    arg_names = ctypes.POINTER(ctypes.c_char_p)()
    arg_types = ctypes.POINTER(ctypes.c_char_p)()
    arg_descs = ctypes.POINTER(ctypes.c_char_p)()
    check_call(_LIB.MXDataIterGetIterInfo( \
        handle, ctypes.byref(name), ctypes.byref(desc), \
        ctypes.byref(num_args), \
        ctypes.byref(arg_names), \
        ctypes.byref(arg_types), \
        ctypes.byref(arg_descs)))
    iter_name = py_str(name.value)
    narg = int(num_args.value)
    param_str = _build_param_doc(
        [py_str(arg_names[i]) for i in range(narg)],
        [py_str(arg_types[i]) for i in range(narg)],
        [py_str(arg_descs[i]) for i in range(narg)])
    doc_str = ('%s\n\n' +
               '%s\n' +
               'Returns\n' +
               '-------\n' +
               'MXDataIter\n'+
               '    The result iterator.')
    doc_str = doc_str % (desc.value, param_str)

    def creator(*args, **kwargs):
        """Create an iterator.
        The parameters listed below can be passed in as keyword arguments.

        Parameters
        ----------
        name : string, required.
            Name of the resulting data iterator.

        Returns
        -------
        dataiter: Dataiter
            The resulting data iterator.
        """
        param_keys = []
        param_vals = []

        for k, val in kwargs.items():
            param_keys.append(c_str(k))
            param_vals.append(c_str(str(val)))
        # create atomic symbol
        param_keys = c_array(ctypes.c_char_p, param_keys)
        param_vals = c_array(ctypes.c_char_p, param_vals)
        iter_handle = DataIterHandle()
        check_call(_LIB.MXDataIterCreateIter(
            handle,
            mx_uint(len(param_keys)),
            param_keys, param_vals,
            ctypes.byref(iter_handle)))

        # Positional arguments are rejected only after the C call; every
        # parameter must be passed by keyword.
        if len(args):
            raise TypeError('%s can only accept keyword arguments' % iter_name)

        return MXDataIter(iter_handle, **kwargs)

    # Expose the factory under the C++ iterator's own name with the
    # generated documentation.
    creator.__name__ = iter_name
    creator.__doc__ = doc_str
    return creator
def _init_io_module():
    """List and add all the data iterators to current module."""
    # Ask the C library how many iterator creators exist and where they live.
    num = ctypes.c_uint()
    creators = ctypes.POINTER(ctypes.c_void_p)()
    check_call(_LIB.MXListDataIters(ctypes.byref(num), ctypes.byref(creators)))
    this_module = sys.modules[__name__]
    # Wrap each C handle in a Python factory and publish it on this module.
    for idx in range(num.value):
        creator = _make_io_iterator(ctypes.c_void_p(creators[idx]))
        setattr(this_module, creator.__name__, creator)

_init_io_module()
| apache-2.0 |
jbloom/phyloExpCM | examples/2014Analysis_lactamase/get_preferences.py | 1 | 2983 | """Gets preferences from Firnberg et al amino-acid "fitnesses".
Uses the reported fitnesses except:
* Fitness of wildtype at each site is always set to one.
* If no fitness is provided, uses the average fitness of all amino acids
with reported fitnesses plus wildtype at that site.
Written by Jesse Bloom, 2014."""
import math
import mapmuts.sequtils
import mapmuts.bayesian
def main():
    """Main body of script.

    Reads Firnberg et al fitness values from ``infile``, writes the
    sequential-to-Ambler numbering map to ``numberingfile``, and writes
    normalized amino-acid preferences (with site entropies) to ``outfile``.
    The wildtype fitness at each site is fixed at one, and missing fitnesses
    are assigned the mean of the reported fitnesses at that site.
    """
    # set up variables
    infile = 'Firnberg_missense_mutation_fitnesses.csv'
    outfile = 'amino_acid_preferences.txt'
    numberingfile = 'sequential_to_Ambler.csv'
    aminoacids = mapmuts.sequtils.AminoAcids()
    residuerange = (1, 286) # first and last residues in sequential numbering
    # read input; ``with`` guarantees the handle is closed even on error
    with open(infile) as f_in:
        lines = f_in.readlines()
    plates = [float(c) for c in lines[1].split(',') if c and not c.isspace()]
    fitnesses = {} # keyed by residue r, then by amino acid a
    wts = {} # keyed by residue r, value is wildtype
    with open(numberingfile, 'w') as numbering_f:
        numbering_f.write('#SEQUENTIAL,AMBLER\n')
        for r in range(residuerange[0], residuerange[1] + 1):
            fitnesses[r] = {}
            # each residue occupies len(aminoacids) consecutive rows after
            # the two header lines
            lineindex = 2 + (r - 1) * len(aminoacids)
            wt = None
            for a in aminoacids:
                line = lines[lineindex]
                lineindex += 1
                entries = line.split(',')
                assert entries[2] == a, "Amino acid mismatch, expected %s on line %s" % (a, line)
                if wt is None:
                    # first row for this residue: record numbering and wildtype
                    numbering_f.write('%d,%d\n' % (r, int(entries[0])))
                    wt = entries[1]
                    assert wt in aminoacids, "Invalid amino acid %s on line %s" % (wt, line)
                assert wt == entries[1], "Wildtype mismatch, expected %s on line %s" % (wt, line)
                wts[r] = wt
                reportedfitness = entries[4 + len(plates)]
                # an empty field means no fitness was reported for this mutation
                if reportedfitness:
                    reportedfitness = float(reportedfitness)
                else:
                    reportedfitness = None
                if a == wt:
                    fitnesses[r][a] = 1.0 # wildtype fitness is defined as one
                else:
                    fitnesses[r][a] = reportedfitness
    # Now compute preferences
    with open(outfile, 'w') as f:
        f.write("#SITE\tWT_AA\tSITE_ENTROPY\t%s\n" % '\t'.join(['PI_%s' % a for a in aminoacids]))
        for r in range(residuerange[0], residuerange[1] + 1):
            knownfitnesses = [x for x in fitnesses[r].values() if x is not None]
            meanknownfitness = sum(knownfitnesses) / float(len(knownfitnesses))
            pi = {}
            for a in aminoacids:
                if fitnesses[r][a] is None:
                    fitnesses[r][a] = meanknownfitness # assign unknown fitnesses mean for that residue
                pi[a] = fitnesses[r][a]
            totfitnesses = sum(fitnesses[r].values())
            f.write('%d\t%s\t%.3f\t%s\n' % (r, wts[r], mapmuts.bayesian.SiteEntropy(pi), '\t'.join(['%g' % (fitnesses[r][a] / totfitnesses) for a in aminoacids])))
# Script entry point: run only when executed directly, not on import.
if __name__ == '__main__':
    main() # run the script
| gpl-3.0 |
xray/xray | xarray/tests/test_indexing.py | 3 | 26721 | import itertools
import numpy as np
import pandas as pd
import pytest
from xarray import DataArray, Dataset, Variable
from xarray.core import indexing, nputils
from . import IndexerMaker, ReturnItem, assert_array_equal, raises_regex
B = IndexerMaker(indexing.BasicIndexer)
class TestIndexers:
    # Helper: return a copy of ``x`` with the elements selected by ``i``
    # zeroed, so two indexers can be compared by their write footprint.
    def set_to_zero(self, x, i):
        x = x.copy()
        x[i] = 0
        return x

    def test_expanded_indexer(self):
        x = np.random.randn(10, 11, 12, 13, 14)
        y = np.arange(5)
        # ReturnItem presumably echoes back its __getitem__ key, so each
        # ``arr[...]`` below is a raw indexing key — see the tests helpers.
        arr = ReturnItem()
        for i in [
            arr[:],
            arr[...],
            arr[0, :, 10],
            arr[..., 10],
            arr[:5, ..., 0],
            arr[..., 0, :],
            arr[y],
            arr[y, y],
            arr[..., y, y],
            arr[..., 0, 1, 2, 3, 4],
        ]:
            j = indexing.expanded_indexer(i, x.ndim)
            # The expanded key must select the same elements for both reads
            # and writes.
            assert_array_equal(x[i], x[j])
            assert_array_equal(self.set_to_zero(x, i), self.set_to_zero(x, j))
        with raises_regex(IndexError, "too many indices"):
            indexing.expanded_indexer(arr[1, 2, 3], 2)

    def test_asarray_tuplesafe(self):
        # A tuple becomes a 0-d object scalar rather than being unpacked
        # into an array of its elements.
        res = indexing._asarray_tuplesafe(("a", 1))
        assert isinstance(res, np.ndarray)
        assert res.ndim == 0
        assert res.item() == ("a", 1)

        res = indexing._asarray_tuplesafe([(0,), (1,)])
        assert res.shape == (2,)
        assert res[0] == (0,)
        assert res[1] == (1,)

    def test_stacked_multiindex_min_max(self):
        data = np.random.randn(3, 23, 4)
        da = DataArray(
            data,
            name="value",
            dims=["replicate", "rsample", "exp"],
            coords=dict(
                replicate=[0, 1, 2], exp=["a", "b", "c", "d"], rsample=list(range(23))
            ),
        )
        da2 = da.stack(sample=("replicate", "rsample"))
        s = da2.sample
        # min/max of a stacked (MultiIndex) coordinate must be usable as
        # .loc labels.
        assert_array_equal(da2.loc["a", s.max()], data[2, 22, 0])
        assert_array_equal(da2.loc["b", s.min()], data[0, 0, 1])

    def test_convert_label_indexer(self):
        # TODO: add tests that aren't just for edge cases
        index = pd.Index([1, 2, 3])
        with raises_regex(KeyError, "not all values found"):
            indexing.convert_label_indexer(index, [0])
        with pytest.raises(KeyError):
            indexing.convert_label_indexer(index, 0)
        with raises_regex(ValueError, "does not have a MultiIndex"):
            indexing.convert_label_indexer(index, {"one": 0})

        mindex = pd.MultiIndex.from_product([["a", "b"], [1, 2]], names=("one", "two"))
        with raises_regex(KeyError, "not all values found"):
            indexing.convert_label_indexer(mindex, [0])
        with pytest.raises(KeyError):
            indexing.convert_label_indexer(mindex, 0)
        with pytest.raises(ValueError):
            indexing.convert_label_indexer(index, {"three": 0})
        with pytest.raises(IndexError):
            indexing.convert_label_indexer(mindex, (slice(None), 1, "no_level"))

    def test_convert_unsorted_datetime_index_raises(self):
        index = pd.to_datetime(["2001", "2000", "2002"])
        with pytest.raises(KeyError):
            # pandas will try to convert this into an array indexer. We should
            # raise instead, so we can be sure the result of indexing with a
            # slice is always a view.
            indexing.convert_label_indexer(index, slice("2001", "2002"))

    def test_get_dim_indexers(self):
        mindex = pd.MultiIndex.from_product([["a", "b"], [1, 2]], names=("one", "two"))
        mdata = DataArray(range(4), [("x", mindex)])

        # Level-name indexers are grouped under their dimension name.
        dim_indexers = indexing.get_dim_indexers(mdata, {"one": "a", "two": 1})
        assert dim_indexers == {"x": {"one": "a", "two": 1}}

        with raises_regex(ValueError, "cannot combine"):
            indexing.get_dim_indexers(mdata, {"x": "a", "two": 1})
        with raises_regex(ValueError, "do not exist"):
            indexing.get_dim_indexers(mdata, {"y": "a"})
        with raises_regex(ValueError, "do not exist"):
            indexing.get_dim_indexers(mdata, {"four": 1})

    def test_remap_label_indexers(self):
        def test_indexer(data, x, expected_pos, expected_idx=None):
            # remap returns positional indexers plus (optionally) a new index
            pos, idx = indexing.remap_label_indexers(data, {"x": x})
            assert_array_equal(pos.get("x"), expected_pos)
            assert_array_equal(idx.get("x"), expected_idx)

        data = Dataset({"x": ("x", [1, 2, 3])})
        mindex = pd.MultiIndex.from_product(
            [["a", "b"], [1, 2], [-1, -2]], names=("one", "two", "three")
        )
        mdata = DataArray(range(8), [("x", mindex)])

        test_indexer(data, 1, 0)
        test_indexer(data, np.int32(1), 0)
        test_indexer(data, Variable([], 1), 0)
        test_indexer(mdata, ("a", 1, -1), 0)
        test_indexer(
            mdata,
            ("a", 1),
            [True, True, False, False, False, False, False, False],
            [-1, -2],
        )
        test_indexer(
            mdata,
            "a",
            slice(0, 4, None),
            pd.MultiIndex.from_product([[1, 2], [-1, -2]]),
        )
        test_indexer(
            mdata,
            ("a",),
            [True, True, True, True, False, False, False, False],
            pd.MultiIndex.from_product([[1, 2], [-1, -2]]),
        )
        test_indexer(mdata, [("a", 1, -1), ("b", 2, -2)], [0, 7])
        test_indexer(mdata, slice("a", "b"), slice(0, 8, None))
        test_indexer(mdata, slice(("a", 1), ("b", 1)), slice(0, 6, None))
        test_indexer(mdata, {"one": "a", "two": 1, "three": -1}, 0)
        test_indexer(
            mdata,
            {"one": "a", "two": 1},
            [True, True, False, False, False, False, False, False],
            [-1, -2],
        )
        test_indexer(
            mdata,
            {"one": "a", "three": -1},
            [True, False, True, False, False, False, False, False],
            [1, 2],
        )
        test_indexer(
            mdata,
            {"one": "a"},
            [True, True, True, True, False, False, False, False],
            pd.MultiIndex.from_product([[1, 2], [-1, -2]]),
        )

    def test_read_only_view(self):
        arr = DataArray(
            np.random.rand(3, 3),
            coords={"x": np.arange(3), "y": np.arange(3)},
            dims=("x", "y"),
        )  # Create a 2D DataArray
        arr = arr.expand_dims({"z": 3}, -1)  # New dimension 'z'
        arr["z"] = np.arange(3)  # New coords to dimension 'z'
        # NOTE(review): pytest's ``match`` is a regular expression, so the
        # ``.`` and ``()`` here are metacharacters; the pattern matches more
        # loosely than the literal message text suggests.
        with pytest.raises(ValueError, match="Do you want to .copy()"):
            arr.loc[0, 0, 0] = 999
class TestLazyArray:
    def test_slice_slice(self):
        arr = ReturnItem()
        for size in [100, 99]:
            # We test even/odd size cases
            x = np.arange(size)
            slices = [
                arr[:3],
                arr[:4],
                arr[2:4],
                arr[:1],
                arr[:-1],
                arr[5:-1],
                arr[-5:-1],
                arr[::-1],
                arr[5::-1],
                arr[:3:-1],
                arr[:30:-1],
                arr[10:4:],
                arr[::4],
                arr[4:4:4],
                arr[:4:-4],
                arr[::-2],
            ]
            # slice_slice(i, j) must equal applying i then j in sequence.
            for i in slices:
                for j in slices:
                    expected = x[i][j]
                    new_slice = indexing.slice_slice(i, j, size=size)
                    actual = x[new_slice]
                    assert_array_equal(expected, actual)

    def test_lazily_indexed_array(self):
        original = np.random.rand(10, 20, 30)
        x = indexing.NumpyIndexingAdapter(original)
        v = Variable(["i", "j", "k"], original)
        lazy = indexing.LazilyOuterIndexedArray(x)
        v_lazy = Variable(["i", "j", "k"], lazy)
        arr = ReturnItem()
        # test orthogonally applied indexers
        indexers = [arr[:], 0, -2, arr[:3], [0, 1, 2, 3], [0], np.arange(10) < 5]
        for i in indexers:
            for j in indexers:
                for k in indexers:
                    # Boolean masks must match each axis' length, so resize
                    # the template mask for axes j (20) and k (30).
                    if isinstance(j, np.ndarray) and j.dtype.kind == "b":
                        j = np.arange(20) < 5
                    if isinstance(k, np.ndarray) and k.dtype.kind == "b":
                        k = np.arange(30) < 5
                    expected = np.asarray(v[i, j, k])
                    # All lazy application orders must agree with eager numpy.
                    for actual in [
                        v_lazy[i, j, k],
                        v_lazy[:, j, k][i],
                        v_lazy[:, :, k][:, j][i],
                    ]:
                        assert expected.shape == actual.shape
                        assert_array_equal(expected, actual)
                        assert isinstance(
                            actual._data, indexing.LazilyOuterIndexedArray
                        )

                        # make sure actual.key is appropriate type
                        if all(
                            isinstance(k, (int, slice)) for k in v_lazy._data.key.tuple
                        ):
                            assert isinstance(v_lazy._data.key, indexing.BasicIndexer)
                        else:
                            assert isinstance(v_lazy._data.key, indexing.OuterIndexer)

        # test sequentially applied indexers
        indexers = [
            (3, 2),
            (arr[:], 0),
            (arr[:2], -1),
            (arr[:4], [0]),
            ([4, 5], 0),
            ([0, 1, 2], [0, 1]),
            ([0, 3, 5], arr[:2]),
        ]
        for i, j in indexers:
            expected = v[i][j]
            actual = v_lazy[i][j]
            assert expected.shape == actual.shape
            assert_array_equal(expected, actual)

            # test transpose
            if actual.ndim > 1:
                # NOTE(review): this random order is immediately overwritten
                # by the next line, so the transpose always uses the original
                # dim order — the random line looks like dead code.
                order = np.random.choice(actual.ndim, actual.ndim)
                order = np.array(actual.dims)
                transposed = actual.transpose(*order)
                assert_array_equal(expected.transpose(*order), transposed)
                assert isinstance(
                    actual._data,
                    (
                        indexing.LazilyVectorizedIndexedArray,
                        indexing.LazilyOuterIndexedArray,
                    ),
                )

            assert isinstance(actual._data, indexing.LazilyOuterIndexedArray)
            assert isinstance(actual._data.array, indexing.NumpyIndexingAdapter)

    def test_vectorized_lazily_indexed_array(self):
        original = np.random.rand(10, 20, 30)
        x = indexing.NumpyIndexingAdapter(original)
        v_eager = Variable(["i", "j", "k"], x)
        lazy = indexing.LazilyOuterIndexedArray(x)
        v_lazy = Variable(["i", "j", "k"], lazy)
        arr = ReturnItem()

        def check_indexing(v_eager, v_lazy, indexers):
            # Apply each indexer in turn, carrying the result forward so the
            # indexers compose (eager and lazy must stay in lockstep).
            for indexer in indexers:
                actual = v_lazy[indexer]
                expected = v_eager[indexer]
                assert expected.shape == actual.shape
                assert isinstance(
                    actual._data,
                    (
                        indexing.LazilyVectorizedIndexedArray,
                        indexing.LazilyOuterIndexedArray,
                    ),
                )
                assert_array_equal(expected, actual)
                v_eager = expected
                v_lazy = actual

        # test orthogonal indexing
        indexers = [(arr[:], 0, 1), (Variable("i", [0, 1]),)]
        check_indexing(v_eager, v_lazy, indexers)

        # vectorized indexing
        indexers = [
            (Variable("i", [0, 1]), Variable("i", [0, 1]), slice(None)),
            (slice(1, 3, 2), 0),
        ]
        check_indexing(v_eager, v_lazy, indexers)

        indexers = [
            (slice(None, None, 2), 0, slice(None, 10)),
            (Variable("i", [3, 2, 4, 3]), Variable("i", [3, 2, 1, 0])),
            (Variable(["i", "j"], [[0, 1], [1, 2]]),),
        ]
        check_indexing(v_eager, v_lazy, indexers)

        indexers = [
            (Variable("i", [3, 2, 4, 3]), Variable("i", [3, 2, 1, 0])),
            (Variable(["i", "j"], [[0, 1], [1, 2]]),),
        ]
        check_indexing(v_eager, v_lazy, indexers)
class TestCopyOnWriteArray:
    """CopyOnWriteArray: writes must never leak into the wrapped base array."""

    def test_setitem(self):
        base = np.arange(10)
        cow = indexing.CopyOnWriteArray(base)
        cow[B[:]] = 0
        # The underlying buffer is untouched; only the wrapper sees zeros.
        assert_array_equal(base, np.arange(10))
        assert_array_equal(cow, np.zeros(10))

    def test_sub_array(self):
        base = np.arange(10)
        cow = indexing.CopyOnWriteArray(base)
        sub = cow[B[:5]]
        assert isinstance(sub, indexing.CopyOnWriteArray)
        sub[B[:]] = 0
        # Neither the base nor the parent wrapper observes the child's write.
        assert_array_equal(base, np.arange(10))
        assert_array_equal(cow, np.arange(10))
        assert_array_equal(sub, np.zeros(5))

    def test_index_scalar(self):
        # regression test for GH1374
        wrapped = indexing.CopyOnWriteArray(np.array(["foo", "bar"]))
        assert np.array(wrapped[B[0]][B[()]]) == "foo"
class TestMemoryCachedArray:
    """MemoryCachedArray: caching materializes lazy sources into numpy."""

    def test_wrapper(self):
        source = indexing.LazilyOuterIndexedArray(np.arange(10))
        cached = indexing.MemoryCachedArray(source)
        assert_array_equal(cached, np.arange(10))
        # Reading through the cache replaces the lazy source with a
        # concrete numpy adapter.
        assert isinstance(cached.array, indexing.NumpyIndexingAdapter)

    def test_sub_array(self):
        source = indexing.LazilyOuterIndexedArray(np.arange(10))
        cached = indexing.MemoryCachedArray(source)
        sub = cached[B[:5]]
        assert isinstance(sub, indexing.MemoryCachedArray)
        assert_array_equal(sub, np.arange(5))
        # Only the sub-array was evaluated; the parent stays lazy.
        assert isinstance(sub.array, indexing.NumpyIndexingAdapter)
        assert isinstance(cached.array, indexing.LazilyOuterIndexedArray)

    def test_setitem(self):
        base = np.arange(10)
        cached = indexing.MemoryCachedArray(base)
        cached[B[:]] = 0
        # Unlike CopyOnWriteArray, writes go through to the base array.
        assert_array_equal(base, np.zeros(10))

    def test_index_scalar(self):
        # regression test for GH1374
        wrapped = indexing.MemoryCachedArray(np.array(["foo", "bar"]))
        assert np.array(wrapped[B[0]][B[()]]) == "foo"
def test_base_explicit_indexer():
    """The abstract base rejects direct construction; subclasses work."""
    with pytest.raises(TypeError):
        indexing.ExplicitIndexer(())

    class Subclass(indexing.ExplicitIndexer):
        pass

    instance = Subclass((1, 2, 3))
    assert instance.tuple == (1, 2, 3)
    assert repr(instance) == "Subclass((1, 2, 3))"
@pytest.mark.parametrize(
    "indexer_cls",
    [indexing.BasicIndexer, indexing.OuterIndexer, indexing.VectorizedIndexer],
)
def test_invalid_for_all(indexer_cls):
    """Every indexer type rejects malformed keys with TypeError."""
    bad_keys = [
        None,                     # not a tuple at all
        ([],),                    # plain list entry
        (None,),
        ("foo",),
        (1.0,),                   # floats are not valid indices
        (slice("foo"),),          # slice with a non-integer component
        (np.array(["foo"]),),     # array with a non-integer dtype
    ]
    for key in bad_keys:
        with pytest.raises(TypeError):
            indexer_cls(key)
def check_integer(indexer_cls):
    """Integer keys (including numpy ints) must be normalized to builtin int."""
    tup = indexer_cls((1, np.uint64(2))).tuple
    assert tup == (1, 2)
    for item in tup:
        assert isinstance(item, int)
def check_slice(indexer_cls):
    """Slice components with numpy integer steps must be normalized to int."""
    (entry,) = indexer_cls((slice(1, None, np.int64(2)),)).tuple
    assert entry == slice(1, None, 2)
    assert isinstance(entry.step, int)
def check_array1d(indexer_cls):
    # 1-d integer array keys are cast up to int64.
    (arr,) = indexer_cls((np.arange(3, dtype=np.int32),)).tuple
    assert arr.dtype == np.int64
    np.testing.assert_array_equal(arr, [0, 1, 2])
def check_array2d(indexer_cls):
    # 2-d int64 array keys pass through with dtype and values preserved.
    key = np.array([[1, 2], [3, 4]], dtype=np.int64)
    (arr,) = indexer_cls((key,)).tuple
    assert arr.dtype == np.int64
    np.testing.assert_array_equal(arr, key)
def test_basic_indexer():
    # BasicIndexer accepts ints and slices, but no array keys at all.
    indexer_cls = indexing.BasicIndexer
    check_integer(indexer_cls)
    check_slice(indexer_cls)
    with pytest.raises(TypeError):
        check_array1d(indexer_cls)
    with pytest.raises(TypeError):
        check_array2d(indexer_cls)
def test_outer_indexer():
    # OuterIndexer additionally accepts 1-d arrays, but not 2-d arrays.
    indexer_cls = indexing.OuterIndexer
    check_integer(indexer_cls)
    check_slice(indexer_cls)
    check_array1d(indexer_cls)
    with pytest.raises(TypeError):
        check_array2d(indexer_cls)
def test_vectorized_indexer():
    # VectorizedIndexer accepts slices and arrays of any rank, but not
    # plain integers.
    indexer_cls = indexing.VectorizedIndexer
    with pytest.raises(TypeError):
        check_integer(indexer_cls)
    check_slice(indexer_cls)
    check_array1d(indexer_cls)
    check_array2d(indexer_cls)
    # Component arrays with mismatched dimensionality cannot be combined.
    with raises_regex(ValueError, "numbers of dimensions"):
        indexing.VectorizedIndexer(
            (np.array(1, dtype=np.int64), np.arange(5, dtype=np.int64))
        )
class Test_vectorized_indexer:
    # Exercises indexing._arrayize_vectorized_indexer, which converts the
    # slice components of a VectorizedIndexer into equivalent integer arrays.

    @pytest.fixture(autouse=True)
    def setup(self):
        self.data = indexing.NumpyIndexingAdapter(np.random.randn(10, 12, 13))
        # Mix of point-wise arrays and slices, combined in every order below.
        self.indexers = [
            np.array([[0, 3, 2]]),
            np.array([[0, 3, 3], [4, 6, 7]]),
            slice(2, -2, 2),
            slice(2, -2, 3),
            slice(None),
        ]

    def test_arrayize_vectorized_indexer(self):
        # Arrayizing must never change which elements are selected.
        for i, j, k in itertools.product(self.indexers, repeat=3):
            vindex = indexing.VectorizedIndexer((i, j, k))
            vindex_array = indexing._arrayize_vectorized_indexer(
                vindex, self.data.shape
            )
            np.testing.assert_array_equal(self.data[vindex], self.data[vindex_array])

        # A lone full slice becomes a plain arange over that axis.
        actual = indexing._arrayize_vectorized_indexer(
            indexing.VectorizedIndexer((slice(None),)), shape=(5,)
        )
        np.testing.assert_array_equal(actual.tuple, [np.arange(5)])

        # All-array indexers pass through unchanged (up to stacking).
        actual = indexing._arrayize_vectorized_indexer(
            indexing.VectorizedIndexer((np.arange(5),) * 3), shape=(8, 10, 12)
        )
        expected = np.stack([np.arange(5)] * 3)
        np.testing.assert_array_equal(np.stack(actual.tuple), expected)

        # Array + slice: the arrayized components broadcast orthogonally,
        # so each gets a new axis against the other.
        actual = indexing._arrayize_vectorized_indexer(
            indexing.VectorizedIndexer((np.arange(5), slice(None))), shape=(8, 10)
        )
        a, b = actual.tuple
        np.testing.assert_array_equal(a, np.arange(5)[:, np.newaxis])
        np.testing.assert_array_equal(b, np.arange(10)[np.newaxis, :])

        actual = indexing._arrayize_vectorized_indexer(
            indexing.VectorizedIndexer((slice(None), np.arange(5))), shape=(8, 10)
        )
        a, b = actual.tuple
        np.testing.assert_array_equal(a, np.arange(8)[np.newaxis, :])
        np.testing.assert_array_equal(b, np.arange(5)[:, np.newaxis])
def get_indexers(shape, mode):
    """Build an ExplicitIndexer of the requested flavor for an array of
    ``shape``.  Unknown modes fall through and return None."""
    if mode == "vectorized":
        # Random point-wise indexer with a common (3, 4) result shape.
        return indexing.VectorizedIndexer(
            tuple(np.random.randint(0, dim, size=(3, 4)) for dim in shape)
        )
    if mode == "outer":
        return indexing.OuterIndexer(
            tuple(np.random.randint(0, dim, dim + 2) for dim in shape)
        )
    if mode == "outer_scalar":
        key = (np.random.randint(0, 3, 4), 0, slice(None, None, 2))
        return indexing.OuterIndexer(key[: len(shape)])
    if mode == "outer_scalar2":
        key = (np.random.randint(0, 3, 4), -2, slice(None, None, 2))
        return indexing.OuterIndexer(key[: len(shape)])
    if mode == "outer1vec":
        # Slices everywhere except a single random array on axis 1.
        key = [slice(2, -3)] * len(shape)
        key[1] = np.random.randint(0, shape[1], shape[1] + 2)
        return indexing.OuterIndexer(tuple(key))
    if mode == "basic":  # basic indexer
        key = [slice(2, -3)] * len(shape)
        key[0] = 3
        return indexing.BasicIndexer(tuple(key))
    if mode == "basic1":  # basic indexer
        return indexing.BasicIndexer((3,))
    if mode == "basic2":  # basic indexer
        return indexing.BasicIndexer((0, 2, 4)[: len(shape)])
    if mode == "basic3":  # basic indexer
        key = [slice(None)] * len(shape)
        key[0] = slice(-2, 2, -2)
        key[1] = slice(1, -1, 2)
        return indexing.BasicIndexer(tuple(key[: len(shape)]))
@pytest.mark.parametrize("size", [100, 99])
@pytest.mark.parametrize(
    "sl", [slice(1, -1, 1), slice(None, -1, 2), slice(-1, 1, -1), slice(-1, 1, -2)]
)
def test_decompose_slice(size, sl):
    # Applying the two decomposed slices in sequence must select exactly
    # the same elements as the original slice.
    data = np.arange(size)
    outer, inner = indexing._decompose_slice(sl, size)
    assert_array_equal(data[sl], data[outer][inner])
@pytest.mark.parametrize("shape", [(10, 5, 8), (10, 3)])
@pytest.mark.parametrize(
    "indexer_mode",
    [
        "vectorized",
        "outer",
        "outer_scalar",
        "outer_scalar2",
        "outer1vec",
        "basic",
        "basic1",
        "basic2",
        "basic3",
    ],
)
@pytest.mark.parametrize(
    "indexing_support",
    [
        indexing.IndexingSupport.BASIC,
        indexing.IndexingSupport.OUTER,
        indexing.IndexingSupport.OUTER_1VECTOR,
        indexing.IndexingSupport.VECTORIZED,
    ],
)
def test_decompose_indexers(shape, indexer_mode, indexing_support):
    # decompose_indexer splits an arbitrary indexer into a backend-supported
    # part and a numpy fixup part; applying the two parts in sequence must
    # equal applying the original indexer directly.
    data = np.random.randn(*shape)
    indexer = get_indexers(shape, indexer_mode)

    backend_ind, np_ind = indexing.decompose_indexer(indexer, shape, indexing_support)

    expected = indexing.NumpyIndexingAdapter(data)[indexer]
    array = indexing.NumpyIndexingAdapter(data)[backend_ind]
    if len(np_ind.tuple) > 0:
        array = indexing.NumpyIndexingAdapter(array)[np_ind]
    np.testing.assert_array_equal(expected, array)

    if not all(isinstance(k, indexing.integer_types) for k in np_ind.tuple):
        # The two parts must also recombine into a single equivalent indexer
        # (only checked when the numpy part isn't purely scalar).
        combined_ind = indexing._combine_indexers(backend_ind, shape, np_ind)
        array = indexing.NumpyIndexingAdapter(data)[combined_ind]
        np.testing.assert_array_equal(expected, array)
def test_implicit_indexing_adapter():
    # The adapter should behave like the underlying array both under
    # np.asarray conversion and under basic slicing.
    data = np.arange(10, dtype=np.int64)
    adapter = indexing.ImplicitToExplicitIndexingAdapter(
        indexing.NumpyIndexingAdapter(data), indexing.BasicIndexer
    )
    np.testing.assert_array_equal(data, np.asarray(adapter))
    np.testing.assert_array_equal(data, adapter[:])
def test_implicit_indexing_adapter_copy_on_write():
    # Indexing an adapter over a CopyOnWriteArray yields another implicit
    # adapter rather than materializing the data.
    data = np.arange(10, dtype=np.int64)
    adapter = indexing.ImplicitToExplicitIndexingAdapter(
        indexing.CopyOnWriteArray(data)
    )
    assert isinstance(adapter[:], indexing.ImplicitToExplicitIndexingAdapter)
def test_outer_indexer_consistency_with_broadcast_indexes_vectorized():
    # _outer_to_numpy_indexer must select the same elements as Variable's
    # vectorized broadcasting machinery for every orthogonal indexer combo.
    def nonzero(x):
        # Convert boolean masks to the equivalent integer index arrays.
        if isinstance(x, np.ndarray) and x.dtype.kind == "b":
            x = x.nonzero()[0]
        return x

    original = np.random.rand(10, 20, 30)
    v = Variable(["i", "j", "k"], original)
    arr = ReturnItem()
    # test orthogonally applied indexers
    indexers = [
        arr[:],
        0,
        -2,
        arr[:3],
        np.array([0, 1, 2, 3]),
        np.array([0]),
        np.arange(10) < 5,
    ]
    for i, j, k in itertools.product(indexers, repeat=3):
        if isinstance(j, np.ndarray) and j.dtype.kind == "b":  # match size
            j = np.arange(20) < 4
        if isinstance(k, np.ndarray) and k.dtype.kind == "b":
            k = np.arange(30) < 8

        # Reference result via the vectorized broadcasting path, including
        # any axis reordering it requests.
        _, expected, new_order = v._broadcast_indexes_vectorized((i, j, k))
        expected_data = nputils.NumpyVIndexAdapter(v.data)[expected.tuple]
        if new_order:
            old_order = range(len(new_order))
            expected_data = np.moveaxis(expected_data, old_order, new_order)

        outer_index = indexing.OuterIndexer((nonzero(i), nonzero(j), nonzero(k)))
        actual = indexing._outer_to_numpy_indexer(outer_index, v.shape)
        actual_data = v.data[actual]
        np.testing.assert_array_equal(actual_data, expected_data)
def test_create_mask_outer_indexer():
    # Single array key: only the -1 (masked) entry is True.
    key = indexing.OuterIndexer((np.array([0, -1, 2]),))
    np.testing.assert_array_equal(
        np.array([False, True, False]), indexing.create_mask(key, (5,))
    )

    # Mixed int/slice/array key: the mask broadcasts over the sliced axis.
    key = indexing.OuterIndexer((1, slice(2), np.array([0, -1, 2])))
    np.testing.assert_array_equal(
        np.array(2 * [[False, True, False]]), indexing.create_mask(key, (5, 5, 5))
    )
def test_create_mask_vectorized_indexer():
    # Point-wise indexer: a position is masked if any component is -1.
    key = indexing.VectorizedIndexer((np.array([0, -1, 2]), np.array([0, 1, -1])))
    np.testing.assert_array_equal(
        np.array([False, True, True]), indexing.create_mask(key, (5,))
    )

    # An interleaved slice adds a broadcast dimension to the mask.
    key = indexing.VectorizedIndexer(
        (np.array([0, -1, 2]), slice(None), np.array([0, 1, -1]))
    )
    np.testing.assert_array_equal(
        np.array([[False, True, True]] * 2).T, indexing.create_mask(key, (5, 2))
    )
def test_create_mask_basic_indexer():
    # Scalar -1 masks everything; any other scalar masks nothing.
    np.testing.assert_array_equal(
        True, indexing.create_mask(indexing.BasicIndexer((-1,)), (3,))
    )
    np.testing.assert_array_equal(
        False, indexing.create_mask(indexing.BasicIndexer((0,)), (3,))
    )
def test_create_mask_dask():
    da = pytest.importorskip("dask.array")

    # Outer indexer with a dask chunk template: the mask must come back
    # with exactly the requested chunking.
    indexer = indexing.OuterIndexer((1, slice(2), np.array([0, -1, 2])))
    expected = np.array(2 * [[False, True, False]])
    actual = indexing.create_mask(
        indexer, (5, 5, 5), da.empty((2, 3), chunks=((1, 1), (2, 1)))
    )
    assert actual.chunks == ((1, 1), (2, 1))
    np.testing.assert_array_equal(expected, actual)

    # Vectorized indexer: result stays a lazy dask array with the same
    # values as the numpy implementation.
    indexer = indexing.VectorizedIndexer(
        (np.array([0, -1, 2]), slice(None), np.array([0, 1, -1]))
    )
    expected = np.array([[False, True, True]] * 2).T
    actual = indexing.create_mask(
        indexer, (5, 2), da.empty((3, 2), chunks=((3,), (2,)))
    )
    assert isinstance(actual, da.Array)
    np.testing.assert_array_equal(expected, actual)

    # A chunk template whose shape doesn't match the indexer is rejected.
    with pytest.raises(ValueError):
        indexing.create_mask(indexer, (5, 2), da.empty((5,), chunks=(1,)))
def test_create_mask_error():
    # Keys must be ExplicitIndexer instances, not raw tuples.
    with raises_regex(TypeError, "unexpected key type"):
        indexing.create_mask((1, 2), (3, 4))
@pytest.mark.parametrize(
    "indices, expected",
    [
        (np.arange(5), np.arange(5)),
        (np.array([0, -1, -1]), np.array([0, 0, 0])),
        (np.array([-1, 1, -1]), np.array([1, 1, 1])),
        (np.array([-1, -1, 2]), np.array([2, 2, 2])),
        (np.array([-1]), np.array([0])),
        (np.array([0, -1, 1, -1, -1]), np.array([0, 0, 1, 1, 1])),
        (np.array([0, -1, -1, -1, 1]), np.array([0, 0, 0, 0, 1])),
    ],
)
def test_posify_mask_subindexer(indices, expected):
    # Masked (-1) entries are replaced by a neighboring valid index so the
    # subindexer can be applied to real data without wrapping around.
    np.testing.assert_array_equal(
        expected, indexing._posify_mask_subindexer(indices)
    )
| apache-2.0 |
RO-ny9/python-for-android | python-modules/twisted/twisted/internet/task.py | 56 | 23511 | # -*- test-case-name: twisted.test.test_task,twisted.test.test_cooperator -*-
# Copyright (c) 2001-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Scheduling utility methods and classes.
@author: Jp Calderone
"""
__metaclass__ = type
import time
from zope.interface import implements
from twisted.python import reflect
from twisted.python.failure import Failure
from twisted.internet import base, defer
from twisted.internet.interfaces import IReactorTime
class LoopingCall:
    """Call a function repeatedly.

    If C{f} returns a deferred, rescheduling will not take place until the
    deferred has fired. The result value is ignored.

    @ivar f: The function to call.
    @ivar a: A tuple of arguments to pass the function.
    @ivar kw: A dictionary of keyword arguments to pass to the function.
    @ivar clock: A provider of
        L{twisted.internet.interfaces.IReactorTime}. The default is
        L{twisted.internet.reactor}. Feel free to set this to
        something else, but it probably ought to be set *before*
        calling L{start}.

    @type _expectNextCallAt: C{float}
    @ivar _expectNextCallAt: The time at which this instance most recently
        scheduled itself to run.

    @type _realLastTime: C{float}
    @ivar _realLastTime: When counting skips, the time at which the skip counter
        was last invoked.

    @type _runAtStart: C{bool}
    @ivar _runAtStart: A flag indicating whether the 'now' argument was passed
        to L{LoopingCall.start}.
    """

    # Scheduling state shared by all instances as class-level defaults:
    # the outstanding IDelayedCall (if any), whether the loop is active,
    # the Deferred fired on stop/failure, and the loop interval.
    call = None
    running = False
    deferred = None
    interval = None
    _expectNextCallAt = 0.0
    _runAtStart = False
    starttime = None

    def __init__(self, f, *a, **kw):
        self.f = f
        self.a = a
        self.kw = kw
        # Imported here rather than at module scope to avoid a circular
        # import with the reactor at module load time.
        from twisted.internet import reactor
        self.clock = reactor

    def withCount(cls, countCallable):
        """
        An alternate constructor for L{LoopingCall} that makes available the
        number of calls which should have occurred since it was last invoked.

        Note that this number is an C{int} value; It represents the discrete
        number of calls that should have been made. For example, if you are
        using a looping call to display an animation with discrete frames, this
        number would be the number of frames to advance.

        The count is normally 1, but can be higher. For example, if the reactor
        is blocked and takes too long to invoke the L{LoopingCall}, a Deferred
        returned from a previous call is not fired before an interval has
        elapsed, or if the callable itself blocks for longer than an interval,
        preventing I{itself} from being called.

        @param countCallable: A callable that will be invoked each time the
            resulting LoopingCall is run, with an integer specifying the number
            of calls that should have been invoked.

        @type countCallable: 1-argument callable which takes an C{int}

        @return: An instance of L{LoopingCall} with call counting enabled,
            which provides the count as the first positional argument.

        @rtype: L{LoopingCall}

        @since: 9.0
        """
        def counter():
            now = self.clock.seconds()
            lastTime = self._realLastTime
            if lastTime is None:
                # First invocation: measure from the loop's start time,
                # backing up one interval if the loop ran immediately.
                lastTime = self.starttime
                if self._runAtStart:
                    lastTime -= self.interval
            self._realLastTime = now
            lastInterval = self._intervalOf(lastTime)
            thisInterval = self._intervalOf(now)
            count = thisInterval - lastInterval
            return countCallable(count)

        self = cls(counter)
        self._realLastTime = None
        return self

    # Python 2-era idiom; equivalent to decorating withCount with
    # @classmethod above.
    withCount = classmethod(withCount)

    def _intervalOf(self, t):
        """
        Determine the number of intervals passed as of the given point in
        time.

        @param t: The specified time (from the start of the L{LoopingCall}) to
            be measured in intervals

        @return: The C{int} number of intervals which have passed as of the
            given point in time.
        """
        elapsedTime = t - self.starttime
        intervalNum = int(elapsedTime / self.interval)
        return intervalNum

    def start(self, interval, now=True):
        """
        Start running function every interval seconds.

        @param interval: The number of seconds between calls. May be
        less than one. Precision will depend on the underlying
        platform, the available hardware, and the load on the system.

        @param now: If True, run this call right now. Otherwise, wait
        until the interval has elapsed before beginning.

        @return: A Deferred whose callback will be invoked with
        C{self} when C{self.stop} is called, or whose errback will be
        invoked when the function raises an exception or returned a
        deferred that has its errback invoked.
        """
        assert not self.running, ("Tried to start an already running "
                                  "LoopingCall.")
        # Python 2 raise syntax -- this module predates Python 3 support.
        if interval < 0:
            raise ValueError, "interval must be >= 0"
        self.running = True
        d = self.deferred = defer.Deferred()
        self.starttime = self.clock.seconds()
        self._expectNextCallAt = self.starttime
        self.interval = interval
        self._runAtStart = now
        if now:
            self()
        else:
            self._reschedule()
        return d

    def stop(self):
        """Stop running function.
        """
        assert self.running, ("Tried to stop a LoopingCall that was "
                              "not running.")
        self.running = False
        if self.call is not None:
            # A call is pending: cancel it and fire the completion Deferred
            # now.  Otherwise the currently-running call's callback (cb in
            # __call__) fires the Deferred when it notices running is False.
            self.call.cancel()
            self.call = None
            d, self.deferred = self.deferred, None
            d.callback(self)

    def __call__(self):
        def cb(result):
            # Result of f is ignored; either loop again or, if stop() ran
            # while f was executing, fire the completion Deferred.
            if self.running:
                self._reschedule()
            else:
                d, self.deferred = self.deferred, None
                d.callback(self)

        def eb(failure):
            # Any failure from f terminates the loop and errbacks the
            # completion Deferred.
            self.running = False
            d, self.deferred = self.deferred, None
            d.errback(failure)

        self.call = None
        d = defer.maybeDeferred(self.f, *self.a, **self.kw)
        d.addCallback(cb)
        d.addErrback(eb)

    def _reschedule(self):
        """
        Schedule the next iteration of this looping call.
        """
        if self.interval == 0:
            self.call = self.clock.callLater(0, self)
            return

        currentTime = self.clock.seconds()
        # Find how long is left until the interval comes around again.
        untilNextTime = (self._expectNextCallAt - currentTime) % self.interval
        # Make sure it is in the future, in case more than one interval worth
        # of time passed since the previous call was made.
        nextTime = max(
            self._expectNextCallAt + self.interval, currentTime + untilNextTime)
        # If the interval falls on the current time exactly, skip it and
        # schedule the call for the next interval.
        if nextTime == currentTime:
            nextTime += self.interval
        self._expectNextCallAt = nextTime
        self.call = self.clock.callLater(nextTime - currentTime, self)

    def __repr__(self):
        # func_name / im_class are Python 2 function attributes (__name__ /
        # __qualname__ in Python 3).
        if hasattr(self.f, 'func_name'):
            func = self.f.func_name
            if hasattr(self.f, 'im_class'):
                func = self.f.im_class.__name__ + '.' + func
        else:
            func = reflect.safe_repr(self.f)

        return 'LoopingCall<%r>(%s, *%s, **%s)' % (
            self.interval, func, reflect.safe_repr(self.a),
            reflect.safe_repr(self.kw))
# Exception hierarchy for Cooperator/CooperativeTask state errors:
# SchedulerError is the root; TaskFinished has one subclass per way a task
# can permanently end (done, stopped, failed).
class SchedulerError(Exception):
    """
    The operation could not be completed because the scheduler or one of its
    tasks was in an invalid state.  This exception should not be raised
    directly, but is a superclass of various scheduler-state-related
    exceptions.
    """


class SchedulerStopped(SchedulerError):
    """
    The operation could not complete because the scheduler was stopped in
    progress or was already stopped.
    """


class TaskFinished(SchedulerError):
    """
    The operation could not complete because the task was already completed,
    stopped, encountered an error or otherwise permanently stopped running.
    """


class TaskDone(TaskFinished):
    """
    The operation could not complete because the task was already completed.
    """


class TaskStopped(TaskFinished):
    """
    The operation could not complete because the task was stopped.
    """


class TaskFailed(TaskFinished):
    """
    The operation could not complete because the task died with an unhandled
    error.
    """


class NotPaused(SchedulerError):
    """
    This exception is raised when a task is resumed which was not previously
    paused.
    """
class _Timer(object):
    """A callable that reports whether its cooperative time slice is over."""

    # Length of one cooperative time slice, in seconds.
    MAX_SLICE = 0.01

    def __init__(self):
        # Absolute wall-clock deadline for this slice.
        self.end = self.MAX_SLICE + time.time()

    def __call__(self):
        # True once the slice has been used up.
        return not (time.time() < self.end)
# Minimal positive delay used when scheduling the next cooperator tick.
_EPSILON = 0.00000001


def _defaultScheduler(x):
    # Default Cooperator scheduler: run x on the global reactor almost
    # immediately.  Imported lazily to avoid a circular import.
    from twisted.internet import reactor
    return reactor.callLater(_EPSILON, x)
class CooperativeTask(object):
    """
    A L{CooperativeTask} is a task object inside a L{Cooperator}, which can be
    paused, resumed, and stopped.  It can also have its completion (or
    termination) monitored.

    @see: L{CooperativeTask.cooperate}

    @ivar _iterator: the iterator to iterate when this L{CooperativeTask} is
        asked to do work.

    @ivar _cooperator: the L{Cooperator} that this L{CooperativeTask}
        participates in, which is used to re-insert it upon resume.

    @ivar _deferreds: the list of L{defer.Deferred}s to fire when this task
        completes, fails, or finishes.

    @type _deferreds: L{list}

    @type _cooperator: L{Cooperator}

    @ivar _pauseCount: the number of times that this L{CooperativeTask} has
        been paused; if 0, it is running.

    @type _pauseCount: L{int}

    @ivar _completionState: The completion-state of this L{CooperativeTask}.
        C{None} if the task is not yet completed, an instance of L{TaskStopped}
        if C{stop} was called to stop this task early, of L{TaskFailed} if the
        application code in the iterator raised an exception which caused it to
        terminate, and of L{TaskDone} if it terminated normally via raising
        L{StopIteration}.

    @type _completionState: L{TaskFinished}
    """

    def __init__(self, iterator, cooperator):
        """
        A private constructor: to create a new L{CooperativeTask}, see
        L{Cooperator.cooperate}.
        """
        self._iterator = iterator
        self._cooperator = cooperator
        self._deferreds = []
        self._pauseCount = 0
        self._completionState = None
        self._completionResult = None
        # Registering with the cooperator starts the task immediately
        # (subject to the cooperator's own started/stopped state).
        cooperator._addTask(self)

    def whenDone(self):
        """
        Get a L{defer.Deferred} notification of when this task is complete.

        @return: a L{defer.Deferred} that fires with the C{iterator} that this
            L{CooperativeTask} was created with when the iterator has been
            exhausted (i.e. its C{next} method has raised L{StopIteration}), or
            fails with the exception raised by C{next} if it raises some other
            exception.

        @rtype: L{defer.Deferred}
        """
        d = defer.Deferred()
        if self._completionState is None:
            self._deferreds.append(d)
        else:
            # Already finished: fire with the recorded result right away.
            d.callback(self._completionResult)
        return d

    def pause(self):
        """
        Pause this L{CooperativeTask}.  Stop doing work until
        L{CooperativeTask.resume} is called.  If C{pause} is called more than
        once, C{resume} must be called an equal number of times to resume this
        task.

        @raise TaskFinished: if this task has already finished or completed.
        """
        self._checkFinish()
        self._pauseCount += 1
        if self._pauseCount == 1:
            # First pause: withdraw from the cooperator's work rotation.
            self._cooperator._removeTask(self)

    def resume(self):
        """
        Resume processing of a paused L{CooperativeTask}.

        @raise NotPaused: if this L{CooperativeTask} is not paused.
        """
        if self._pauseCount == 0:
            raise NotPaused()
        self._pauseCount -= 1
        if self._pauseCount == 0 and self._completionState is None:
            self._cooperator._addTask(self)

    def _completeWith(self, completionState, deferredResult):
        """
        @param completionState: a L{TaskFinished} exception or a subclass
            thereof, indicating what exception should be raised when subsequent
            operations are performed.

        @param deferredResult: the result to fire all the deferreds with.
        """
        self._completionState = completionState
        self._completionResult = deferredResult
        if not self._pauseCount:
            self._cooperator._removeTask(self)

        # The Deferreds need to be invoked after all this is completed, because
        # a Deferred may want to manipulate other tasks in a Cooperator.  For
        # example, if you call "stop()" on a cooperator in a callback on a
        # Deferred returned from whenDone(), this CooperativeTask must be gone
        # from the Cooperator by that point so that _completeWith is not
        # invoked reentrantly; that would cause these Deferreds to blow up with
        # an AlreadyCalledError, or the _removeTask to fail with a ValueError.
        for d in self._deferreds:
            d.callback(deferredResult)

    def stop(self):
        """
        Stop further processing of this task.

        @raise TaskFinished: if this L{CooperativeTask} has previously
            completed, via C{stop}, completion, or failure.
        """
        self._checkFinish()
        self._completeWith(TaskStopped(), Failure(TaskStopped()))

    def _checkFinish(self):
        """
        If this task has been stopped, raise the appropriate subclass of
        L{TaskFinished}.
        """
        if self._completionState is not None:
            raise self._completionState

    def _oneWorkUnit(self):
        """
        Perform one unit of work for this task, retrieving one item from its
        iterator, stopping if there are no further items in the iterator, and
        pausing if the result was a L{defer.Deferred}.
        """
        try:
            # Python 2 iterator protocol (__next__ in Python 3).
            result = self._iterator.next()
        except StopIteration:
            self._completeWith(TaskDone(), self._iterator)
        except:
            # Deliberately bare: any exception from application code in the
            # iterator terminates the task as a failure.
            self._completeWith(TaskFailed(), Failure())
        else:
            if isinstance(result, defer.Deferred):
                # Yielded Deferred: pause until it fires, then resume (or
                # fail the task if it errbacks).
                self.pause()
                def failLater(f):
                    self._completeWith(TaskFailed(), f)
                result.addCallbacks(lambda result: self.resume(),
                                    failLater)
class Cooperator(object):
    """
    Cooperative task scheduler.
    """

    def __init__(self,
                 terminationPredicateFactory=_Timer,
                 scheduler=_defaultScheduler,
                 started=True):
        """
        Create a scheduler-like object to which iterators may be added.

        @param terminationPredicateFactory: A no-argument callable which will
        be invoked at the beginning of each step and should return a
        no-argument callable which will return False when the step should be
        terminated.  The default factory is time-based and allows iterators to
        run for 1/100th of a second at a time.

        @param scheduler: A one-argument callable which takes a no-argument
        callable and should invoke it at some future point.  This will be used
        to schedule each step of this Cooperator.

        @param started: A boolean which indicates whether iterators should be
        stepped as soon as they are added, or if they will be queued up until
        L{Cooperator.start} is called.
        """
        self._tasks = []
        # _metarator is a persistent iterator over _tasks, so successive
        # ticks resume the round-robin where the previous tick left off.
        self._metarator = iter(())
        self._terminationPredicateFactory = terminationPredicateFactory
        self._scheduler = scheduler
        self._delayedCall = None
        self._stopped = False
        self._started = started

    def coiterate(self, iterator, doneDeferred=None):
        """
        Add an iterator to the list of iterators this L{Cooperator} is
        currently running.

        @param doneDeferred: If specified, this will be the Deferred used as
            the completion deferred.  It is suggested that you use the default,
            which creates a new Deferred for you.

        @return: a Deferred that will fire when the iterator finishes.
        """
        if doneDeferred is None:
            doneDeferred = defer.Deferred()
        CooperativeTask(iterator, self).whenDone().chainDeferred(doneDeferred)
        return doneDeferred

    def cooperate(self, iterator):
        """
        Start running the given iterator as a long-running cooperative task, by
        calling next() on it as a periodic timed event.

        @param iterator: the iterator to invoke.

        @return: a L{CooperativeTask} object representing this task.
        """
        return CooperativeTask(iterator, self)

    def _addTask(self, task):
        """
        Add a L{CooperativeTask} object to this L{Cooperator}.
        """
        if self._stopped:
            self._tasks.append(task) # XXX silly, I know, but _completeWith
                                     # does the inverse
            task._completeWith(SchedulerStopped(), Failure(SchedulerStopped()))
        else:
            self._tasks.append(task)
            self._reschedule()

    def _removeTask(self, task):
        """
        Remove a L{CooperativeTask} from this L{Cooperator}.
        """
        self._tasks.remove(task)

    def _tasksWhileNotStopped(self):
        """
        Yield all L{CooperativeTask} objects in a loop as long as this
        L{Cooperator}'s termination condition has not been met.
        """
        terminator = self._terminationPredicateFactory()
        while self._tasks:
            for t in self._metarator:
                yield t
                # Stop this tick as soon as the time-slice predicate says so.
                if terminator():
                    return
            # Round-robin wrapped around: restart over the current task list.
            self._metarator = iter(self._tasks)

    def _tick(self):
        """
        Run one scheduler tick.
        """
        self._delayedCall = None
        for taskObj in self._tasksWhileNotStopped():
            taskObj._oneWorkUnit()
        self._reschedule()

    _mustScheduleOnStart = False

    def _reschedule(self):
        # While not started, just remember that a tick is owed; start()
        # will schedule it.
        if not self._started:
            self._mustScheduleOnStart = True
            return
        if self._delayedCall is None and self._tasks:
            self._delayedCall = self._scheduler(self._tick)

    def start(self):
        """
        Begin scheduling steps.
        """
        self._stopped = False
        self._started = True
        if self._mustScheduleOnStart:
            # del restores the class-level False default.
            del self._mustScheduleOnStart
            self._reschedule()

    def stop(self):
        """
        Stop scheduling steps.  Errback the completion Deferreds of all
        iterators which have been added and forget about them.
        """
        self._stopped = True
        for taskObj in self._tasks:
            taskObj._completeWith(SchedulerStopped(),
                                  Failure(SchedulerStopped()))
        self._tasks = []
        if self._delayedCall is not None:
            self._delayedCall.cancel()
            self._delayedCall = None
# Shared module-level cooperator used by the convenience functions below.
_theCooperator = Cooperator()


def coiterate(iterator):
    """
    Cooperatively iterate over the given iterator, dividing runtime between it
    and all other iterators which have been passed to this function and not yet
    exhausted.
    """
    return _theCooperator.coiterate(iterator)
def cooperate(iterator):
    """
    Start running the given iterator as a long-running cooperative task, by
    calling next() on it as a periodic timed event.

    @param iterator: the iterator to invoke.

    @return: a L{CooperativeTask} object representing this task.
    """
    # Delegates to the shared module-level Cooperator instance.
    return _theCooperator.cooperate(iterator)
class Clock:
    """
    Provide a deterministic, easily-controlled implementation of
    L{IReactorTime.callLater}.  This is commonly useful for writing
    deterministic unit tests for code which schedules events using this API.
    """
    implements(IReactorTime)

    # Current simulated time, advanced only by advance()/pump().
    rightNow = 0.0

    def __init__(self):
        self.calls = []

    def seconds(self):
        """
        Pretend to be time.time().  This is used internally when an operation
        such as L{IDelayedCall.reset} needs to determine a a time value
        relative to the current time.

        @rtype: C{float}
        @return: The time which should be considered the current time.
        """
        return self.rightNow

    def callLater(self, when, what, *a, **kw):
        """
        See L{twisted.internet.interfaces.IReactorTime.callLater}.
        """
        dc = base.DelayedCall(self.seconds() + when,
                              what, a, kw,
                              self.calls.remove,
                              lambda c: None,
                              self.seconds)
        self.calls.append(dc)
        # Keep calls ordered by scheduled time.  cmp/comparator sort is
        # Python 2 only (use key=... in Python 3).
        self.calls.sort(lambda a, b: cmp(a.getTime(), b.getTime()))
        return dc

    def getDelayedCalls(self):
        """
        See L{twisted.internet.interfaces.IReactorTime.getDelayedCalls}
        """
        return self.calls

    def advance(self, amount):
        """
        Move time on this clock forward by the given amount and run whatever
        pending calls should be run.

        @type amount: C{float}
        @param amount: The number of seconds which to advance this clock's
        time.
        """
        self.rightNow += amount
        # Fire every call whose scheduled time has now been reached, in order.
        while self.calls and self.calls[0].getTime() <= self.seconds():
            call = self.calls.pop(0)
            call.called = 1
            call.func(*call.args, **call.kw)

    def pump(self, timings):
        """
        Advance incrementally by the given set of times.

        @type timings: iterable of C{float}
        """
        for amount in timings:
            self.advance(amount)
def deferLater(clock, delay, callable, *args, **kw):
    """
    Call the given function after a certain period of time has passed.

    @type clock: L{IReactorTime} provider
    @param clock: The object which will be used to schedule the delayed
        call.

    @type delay: C{float} or C{int}
    @param delay: The number of seconds to wait before calling the function.

    @param callable: The object to call after the delay.

    @param *args: The positional arguments to pass to C{callable}.

    @param **kw: The keyword arguments to pass to C{callable}.

    @rtype: L{defer.Deferred}

    @return: A deferred that fires with the result of the callable when the
        specified time has elapsed.
    """
    def deferLaterCancel(deferred):
        # Cancelling the Deferred cancels the underlying delayed call too.
        delayedCall.cancel()
    d = defer.Deferred(deferLaterCancel)
    # The delayed call fires the Deferred with None; the callback then
    # invokes the user's callable, whose result propagates down the chain.
    d.addCallback(lambda ignored: callable(*args, **kw))
    delayedCall = clock.callLater(delay, d.callback, None)
    return d
# NOTE(review): cooperate() and CooperativeTask are defined above but absent
# from __all__ -- presumably intentional; confirm before extending the list.
__all__ = [
    'LoopingCall',

    'Clock',

    'SchedulerStopped', 'Cooperator', 'coiterate',

    'deferLater',
    ]
| apache-2.0 |
SlimRemix/android_external_chromium_org | mojo/public/tools/bindings/pylib/mojom/generate/template_expander.py | 33 | 2009 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Based on:
# http://src.chromium.org/viewvc/blink/trunk/Source/build/scripts/template_expander.py
import imp
import inspect
import os.path
import sys
# Disable lint check for finding modules:
# pylint: disable=F0401
def _GetDirAbove(dirname):
    """Return the ancestor directory of this file that contains |dirname|.

    |dirname| must be a path component "above" this file; the assert fires
    if the filesystem root is reached without finding it.
    """
    remaining = os.path.abspath(__file__)
    while True:
        remaining, component = os.path.split(remaining)
        # An empty component means we hit the root without finding |dirname|.
        assert component
        if component == dirname:
            return remaining
# Prefer a system-installed jinja2; otherwise add the copy vendored in the
# tree under <dir above mojo>/third_party to sys.path before importing.
try:
    imp.find_module("jinja2")
except ImportError:
    sys.path.append(os.path.join(_GetDirAbove("mojo"), "third_party"))
import jinja2
def ApplyTemplate(mojo_generator, base_dir, path_to_template, params,
                  filters=None, **kwargs):
    """Renders the jinja2 template at |path_to_template| (relative to
    |base_dir|) against |params| and returns the resulting string.

    |kwargs| are merged over the generator's own Jinja parameters and passed
    to the jinja2.Environment constructor; |filters|, if given, extends the
    environment's filter table.
    """
    template_directory, template_name = os.path.split(path_to_template)
    path_to_templates = os.path.join(base_dir, template_directory)
    loader = jinja2.FileSystemLoader([path_to_templates])
    final_kwargs = dict(mojo_generator.GetJinjaParameters())
    final_kwargs.update(kwargs)
    jinja_env = jinja2.Environment(loader=loader, keep_trailing_newline=True,
                                   **final_kwargs)
    jinja_env.globals.update(mojo_generator.GetGlobals())
    if filters:
        jinja_env.filters.update(filters)
    template = jinja_env.get_template(template_name)
    return template.render(params)
def UseJinja(path_to_template, **kwargs):
    """Decorator factory: the wrapped function returns template parameters,
    and the decorated result renders |path_to_template| with them.

    The template path is resolved relative to the *caller's* source file.
    """
    # Get the directory of our caller's file.
    base_dir = os.path.dirname(inspect.getfile(sys._getframe(1)))
    def RealDecorator(generator):
        def GeneratorInternal(*args, **kwargs2):
            parameters = generator(*args, **kwargs2)
            # args[0] is the generator instance the method was called on.
            return ApplyTemplate(args[0], base_dir, path_to_template, parameters,
                                 **kwargs)
        # func_name is the Python 2 spelling of __name__.
        GeneratorInternal.func_name = generator.func_name
        return GeneratorInternal
    return RealDecorator
| bsd-3-clause |
AustereCuriosity/astropy | astropy/modeling/polynomial.py | 1 | 48617 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains models representing polynomials and polynomial series.
"""
from __future__ import (absolute_import, unicode_literals, division,
print_function)
from collections import OrderedDict
import numpy as np
from .core import FittableModel, Model
from .functional_models import Shift
from .parameters import Parameter
from .utils import poly_map_domain, comb
from ..utils import indent, check_broadcast
from ..extern.six.moves import range
from ..units import Quantity
# Public API: the concrete polynomial model classes plus the shared
# OrthoPolynomialBase / PolynomialModel base classes.
__all__ = [
    'Chebyshev1D', 'Chebyshev2D', 'Hermite1D', 'Hermite2D',
    'InverseSIP', 'Legendre1D', 'Legendre2D', 'Polynomial1D',
    'Polynomial2D', 'SIP', 'OrthoPolynomialBase',
    'PolynomialModel'
]
class PolynomialBase(FittableModel):
    """
    Base class for all polynomial-like models with an arbitrary number of
    parameters in the form of coefficients.

    In this case Parameter instances are returned through the class's
    ``__getattr__`` rather than through class descriptors.
    """

    # Default _param_names list; this will be filled in by the implementation's
    # __init__
    _param_names = ()

    linear = True
    col_fit_deriv = False

    @property
    def param_names(self):
        """Coefficient names generated based on the model's polynomial degree
        and number of dimensions.

        Subclasses should implement this to return parameter names in the
        desired format.

        On most `Model` classes this is a class attribute, but for polynomial
        models it is an instance attribute since each polynomial model instance
        can have different parameters depending on the degree of the polynomial
        and the number of dimensions, for example.
        """

        return self._param_names

    def __getattr__(self, attr):
        # Only reached when normal attribute lookup fails; synthesize a
        # Parameter (default 0.0) for any registered coefficient name.
        if self._param_names and attr in self._param_names:
            return Parameter(attr, default=0.0, model=self)

        raise AttributeError(attr)

    def __setattr__(self, attr, value):
        # TODO: Support a means of specifying default values for coefficients
        # Check for self._ndim first--if it hasn't been defined then the
        # instance hasn't been initialized yet and self.param_names probably
        # won't work.
        # This has to vaguely duplicate the functionality of
        # Parameter.__set__.
        # TODO: I wonder if there might be a way around that though...
        if attr[0] != '_' and self._param_names and attr in self._param_names:
            param = Parameter(attr, default=0.0, model=self)
            # This is a little hackish, but we can actually reuse the
            # Parameter.__set__ method here
            param.__set__(self, value)
        else:
            super(PolynomialBase, self).__setattr__(attr, value)
class PolynomialModel(PolynomialBase):
    """
    Base class for polynomial models.

    Its main purpose is to determine how many coefficients are needed
    based on the polynomial order and dimension and to provide their
    default values, names and ordering.
    """

    def __init__(self, degree, n_models=None, model_set_axis=None,
                 name=None, meta=None, **params):
        self._degree = degree
        # The coefficient count must be computed before the names, since the
        # 1D naming scheme is driven by it.
        self._order = self.get_num_coeff(self.n_inputs)
        self._param_names = self._generate_coeff_names(self.n_inputs)
        super(PolynomialModel, self).__init__(
            n_models=n_models, model_set_axis=model_set_axis, name=name,
            meta=meta, **params)

    def __repr__(self):
        return self._format_repr([self.degree])

    def __str__(self):
        return self._format_str([('Degree', self.degree)])

    @property
    def degree(self):
        """Degree of polynomial."""
        return self._degree

    def get_num_coeff(self, ndim):
        """
        Return the number of coefficients in one parameter set
        """
        if self.degree < 0:
            raise ValueError("Degree of polynomial must be positive or null")
        # deg+1 is used to account for the difference between iraf using
        # degree and numpy using exact degree
        nmixed = comb(self.degree, ndim) if ndim != 1 else 0
        return self.degree * ndim + nmixed + 1

    def _invlex(self):
        # All (j, i) exponent pairs of total degree <= self.degree, returned
        # in inverse lexical order.
        deg = self.degree
        pairs = [(j, i)
                 for i in range(deg + 1)
                 for j in range(deg + 1)
                 if i + j <= deg]
        pairs.reverse()
        return pairs

    def _generate_coeff_names(self, ndim):
        # 1D models use plain 'c<n>' names; multi-dimensional models use
        # 'c<i>_<j>': pure-x terms first, then pure-y, then the mixed terms.
        if ndim == 1:
            return tuple('c{0}'.format(n) for n in range(self._order))
        names = ['c{0}_{1}'.format(i, 0) for i in range(self.degree + 1)]
        names.extend('c{0}_{1}'.format(0, i)
                     for i in range(1, self.degree + 1))
        names.extend('c{0}_{1}'.format(i, j)
                     for i in range(1, self.degree)
                     for j in range(1, self.degree)
                     if i + j < self.degree + 1)
        return tuple(names)
class OrthoPolynomialBase(PolynomialBase):
    """
    This is a base class for the 2D Chebyshev and Legendre models.

    The polynomials implemented here require a maximum degree in x and y.

    Parameters
    ----------
    x_degree : int
        degree in x
    y_degree : int
        degree in y
    x_domain : list or None, optional
        domain of the x independent variable
    x_window : list or None, optional
        range of the x independent variable
    y_domain : list or None, optional
        domain of the y independent variable
    y_window : list or None, optional
        range of the y independent variable
    **params : dict
        {keyword: value} pairs, representing {parameter_name: value}
    """

    inputs = ('x', 'y')
    outputs = ('z',)

    def __init__(self, x_degree, y_degree, x_domain=None, x_window=None,
                 y_domain=None, y_window=None, n_models=None,
                 model_set_axis=None, name=None, meta=None, **params):
        # TODO: Perhaps some of these other parameters should be properties?
        # TODO: An awful lot of the functionality in this method is still
        # shared by PolynomialModel; perhaps some of it can be generalized in
        # PolynomialBase
        self.x_degree = x_degree
        self.y_degree = y_degree
        # Coefficient count must be known before the names are generated.
        self._order = self.get_num_coeff()
        self.x_domain = x_domain
        self.y_domain = y_domain
        self.x_window = x_window
        self.y_window = y_window
        self._param_names = self._generate_coeff_names()
        super(OrthoPolynomialBase, self).__init__(
            n_models=n_models, model_set_axis=model_set_axis,
            name=name, meta=meta, **params)

    def __repr__(self):
        return self._format_repr([self.x_degree, self.y_degree])

    def __str__(self):
        return self._format_str(
            [('X-Degree', self.x_degree),
             ('Y-Degree', self.y_degree)])

    def get_num_coeff(self):
        """
        Determine how many coefficients are needed

        Returns
        -------
        numc : int
            number of coefficients
        """
        # One coefficient per (x-degree, y-degree) pair.
        return (self.x_degree + 1) * (self.y_degree + 1)

    def _invlex(self):
        # TODO: This is a very slow way to do this; fix it and related methods
        # like _alpha
        # All (i, j) degree pairs, y-major, returned in inverse lexical order
        # as an (n, 2) integer array.
        c = []
        xvar = np.arange(self.x_degree + 1)
        yvar = np.arange(self.y_degree + 1)
        for j in yvar:
            for i in xvar:
                c.append((i, j))
        return np.array(c[::-1])

    def invlex_coeff(self, coeffs):
        # Reorder the flat coefficient list into the inverse lexical order
        # that imhorner consumes.
        invlex_coeffs = []
        xvar = np.arange(self.x_degree + 1)
        yvar = np.arange(self.y_degree + 1)
        for j in yvar:
            for i in xvar:
                name = 'c{0}_{1}'.format(i, j)
                coeff = coeffs[self.param_names.index(name)]
                invlex_coeffs.append(coeff)
        return np.array(invlex_coeffs[::-1])

    def _alpha(self):
        # Indicator matrix: one row per polynomial term (plus 3 helper rows),
        # one column per 1D basis function in x (first nx columns) and y
        # (last ny columns). imhorner uses row-to-row differences of this
        # matrix to decide which cached basis function to multiply by next.
        invlexdeg = self._invlex()
        # Shift the y-degree column so it indexes into the y block of columns.
        invlexdeg[:, 1] = invlexdeg[:, 1] + self.x_degree + 1
        nx = self.x_degree + 1
        ny = self.y_degree + 1
        alpha = np.zeros((ny * nx + 3, ny + nx))
        for n in range(len(invlexdeg)):
            alpha[n][invlexdeg[n]] = [1, 1]
            # These two assignments are idempotent; repeating them inside the
            # loop is harmless (kept as-is from the original).
            alpha[-2, 0] = 1
            alpha[-3, nx] = 1
        return alpha

    def imhorner(self, x, y, coeff):
        # Horner-like evaluation of the 2D orthogonal-polynomial series.
        # NOTE: partial sums are stored as attributes r1..rN on *self*
        # (setattr below), so concurrent evaluations of the same model
        # instance would interfere with each other.
        _coeff = list(coeff)
        _coeff.extend([0, 0, 0])  # padding matching the 3 helper rows of alpha
        alpha = self._alpha()
        r0 = _coeff[0]
        nalpha = len(alpha)
        karr = np.diff(alpha, axis=0)
        kfunc = self._fcache(x, y)
        x_terms = self.x_degree + 1
        y_terms = self.y_degree + 1
        nterms = x_terms + y_terms
        # Reset all accumulator attributes before evaluation.
        for n in range(1, nterms + 1 + 3):
            setattr(self, 'r' + str(n), 0.)
        for n in range(1, nalpha):
            # Index of the highest basis function that changed between the
            # previous term and this one.
            k = karr[n - 1].nonzero()[0].max() + 1
            rsum = 0
            for i in range(1, k + 1):
                rsum = rsum + getattr(self, 'r' + str(i))
            val = kfunc[k - 1] * (r0 + rsum)
            setattr(self, 'r' + str(k), val)
            r0 = _coeff[n]
            # Lower-order accumulators were folded into r<k>; clear them.
            for i in range(1, k):
                setattr(self, 'r' + str(i), 0.)
        result = r0
        for i in range(1, nterms + 1 + 3):
            result = result + getattr(self, 'r' + str(i))
        return result

    def _generate_coeff_names(self):
        # y-major ordering ('c0_0', 'c1_0', ...), matching invlex_coeff.
        names = []
        for j in range(self.y_degree + 1):
            for i in range(self.x_degree + 1):
                names.append('c{0}_{1}'.format(i, j))
        return tuple(names)

    def _fcache(self, x, y):
        # TODO: Write a docstring explaining the actual purpose of this method
        """To be implemented by subclasses"""
        raise NotImplementedError("Subclasses should implement this")

    def evaluate(self, x, y, *coeffs):
        # Remap inputs to the fitting window before evaluating, if requested.
        if self.x_domain is not None:
            x = poly_map_domain(x, self.x_domain, self.x_window)
        if self.y_domain is not None:
            y = poly_map_domain(y, self.y_domain, self.y_window)
        invcoeff = self.invlex_coeff(coeffs)
        return self.imhorner(x, y, invcoeff)

    def prepare_inputs(self, x, y, **kwargs):
        inputs, format_info = \
            super(OrthoPolynomialBase, self).prepare_inputs(x, y, **kwargs)
        x, y = inputs
        if x.shape != y.shape:
            raise ValueError("Expected input arrays to have the same shape")
        return (x, y), format_info
class Chebyshev1D(PolynomialModel):
    r"""
    Univariate Chebyshev series.

    It is defined as:

    .. math::

        P(x) = \sum_{i=0}^{i=n}C_{i} * T_{i}(x)

    where ``T_i(x)`` is the corresponding Chebyshev polynomial of the 1st kind.

    Parameters
    ----------
    degree : int
        degree of the series
    domain : list or None, optional
    window : list or None, optional
        If None, it is set to [-1,1]
        Fitters will remap the domain to this window
    **params : dict
        keyword : value pairs, representing parameter_name: value

    Notes
    -----
    This model does not support the use of units/quantities, because each term
    in the sum of Chebyshev polynomials is a polynomial in x - since the
    coefficients within each Chebyshev polynomial are fixed, we can't use
    quantities for x since the units would not be compatible. For example, the
    third Chebyshev polynomial (T2) is 2x^2-1, but if x was specified with
    units, 2x^2 and -1 would have incompatible units.
    """

    inputs = ('x',)
    outputs = ('y',)

    def __init__(self, degree, domain=None, window=[-1, 1], n_models=None,
                 model_set_axis=None, name=None, meta=None, **params):
        # NOTE: the shared [-1, 1] default list is only read, never mutated.
        self.domain = domain
        self.window = window
        super(Chebyshev1D, self).__init__(
            degree, n_models=n_models, model_set_axis=model_set_axis,
            name=name, meta=meta, **params)

    def fit_deriv(self, x, *params):
        """
        Computes the Vandermonde matrix.

        Parameters
        ----------
        x : ndarray
            input
        params : throw away parameter
            parameter list returned by non-linear fitters

        Returns
        -------
        result : ndarray
            The Vandermonde matrix
        """
        # BUGFIX: ``np.float`` was deprecated in NumPy 1.20 and removed in
        # 1.24; the builtin ``float`` is the exact equivalent (float64).
        x = np.array(x, dtype=float, copy=False, ndmin=1)
        v = np.empty((self.degree + 1,) + x.shape, dtype=x.dtype)
        v[0] = 1
        if self.degree > 0:
            x2 = 2 * x
            v[1] = x
            # Chebyshev recurrence: T_n = 2x*T_{n-1} - T_{n-2}
            for i in range(2, self.degree + 1):
                v[i] = v[i - 1] * x2 - v[i - 2]
        return np.rollaxis(v, 0, v.ndim)

    def prepare_inputs(self, x, **kwargs):
        # super(PolynomialModel, ...) skips PolynomialModel in the MRO;
        # PolynomialModel defines no prepare_inputs, so this is equivalent to
        # the usual cooperative call.
        inputs, format_info = \
            super(PolynomialModel, self).prepare_inputs(x, **kwargs)
        x = inputs[0]
        return (x,), format_info

    def evaluate(self, x, *coeffs):
        if self.domain is not None:
            x = poly_map_domain(x, self.domain, self.window)
        return self.clenshaw(x, coeffs)

    @staticmethod
    def clenshaw(x, coeffs):
        """Evaluates the polynomial using Clenshaw's algorithm."""
        if len(coeffs) == 1:
            c0 = coeffs[0]
            c1 = 0
        elif len(coeffs) == 2:
            c0 = coeffs[0]
            c1 = coeffs[1]
        else:
            x2 = 2 * x
            c0 = coeffs[-2]
            c1 = coeffs[-1]
            for i in range(3, len(coeffs) + 1):
                tmp = c0
                c0 = coeffs[-i] - c1
                c1 = tmp + c1 * x2
        return c0 + c1 * x
class Hermite1D(PolynomialModel):
    r"""
    Univariate Hermite series.

    It is defined as:

    .. math::

        P(x) = \sum_{i=0}^{i=n}C_{i} * H_{i}(x)

    where ``H_i(x)`` is the corresponding Hermite polynomial ("Physicist's kind").

    Parameters
    ----------
    degree : int
        degree of the series
    domain : list or None, optional
    window : list or None, optional
        If None, it is set to [-1,1]
        Fitters will remap the domain to this window
    **params : dict
        keyword : value pairs, representing parameter_name: value

    Notes
    -----
    This model does not support the use of units/quantities, because each term
    in the sum of Hermite polynomials is a polynomial in x - since the
    coefficients within each Hermite polynomial are fixed, we can't use
    quantities for x since the units would not be compatible. For example, the
    third Hermite polynomial (H2) is 4x^2-2, but if x was specified with units,
    4x^2 and -2 would have incompatible units.
    """

    # BUGFIX: these were bare strings ('x') / ('y') — i.e. just strings, not
    # 1-tuples — unlike every sibling model in this module. Make them proper
    # tuples; for a single-character name iteration is unchanged, so this is
    # backward-compatible.
    inputs = ('x',)
    outputs = ('y',)

    def __init__(self, degree, domain=None, window=[-1, 1], n_models=None,
                 model_set_axis=None, name=None, meta=None, **params):
        self.domain = domain
        self.window = window
        super(Hermite1D, self).__init__(
            degree, n_models=n_models, model_set_axis=model_set_axis,
            name=name, meta=meta, **params)

    def fit_deriv(self, x, *params):
        """
        Computes the Vandermonde matrix.

        Parameters
        ----------
        x : ndarray
            input
        params : throw away parameter
            parameter list returned by non-linear fitters

        Returns
        -------
        result : ndarray
            The Vandermonde matrix
        """
        # BUGFIX: ``np.float`` was removed from NumPy; builtin ``float`` is
        # the exact equivalent (float64).
        x = np.array(x, dtype=float, copy=False, ndmin=1)
        v = np.empty((self.degree + 1,) + x.shape, dtype=x.dtype)
        v[0] = 1
        if self.degree > 0:
            x2 = 2 * x
            v[1] = 2 * x
            # Physicists' Hermite recurrence: H_n = 2x*H_{n-1} - 2(n-1)*H_{n-2}
            for i in range(2, self.degree + 1):
                v[i] = x2 * v[i - 1] - 2 * (i - 1) * v[i - 2]
        return np.rollaxis(v, 0, v.ndim)

    def prepare_inputs(self, x, **kwargs):
        # super(PolynomialModel, ...) skips PolynomialModel in the MRO;
        # PolynomialModel defines no prepare_inputs, so behavior is unchanged.
        inputs, format_info = \
            super(PolynomialModel, self).prepare_inputs(x, **kwargs)
        x = inputs[0]
        return (x,), format_info

    def evaluate(self, x, *coeffs):
        if self.domain is not None:
            x = poly_map_domain(x, self.domain, self.window)
        return self.clenshaw(x, coeffs)

    @staticmethod
    def clenshaw(x, coeffs):
        """Evaluate a Hermite series with Clenshaw's recursion."""
        x2 = x * 2
        if len(coeffs) == 1:
            c0 = coeffs[0]
            c1 = 0
        elif len(coeffs) == 2:
            c0 = coeffs[0]
            c1 = coeffs[1]
        else:
            nd = len(coeffs)
            c0 = coeffs[-2]
            c1 = coeffs[-1]
            for i in range(3, len(coeffs) + 1):
                temp = c0
                nd = nd - 1
                c0 = coeffs[-i] - c1 * (2 * (nd - 1))
                c1 = temp + c1 * x2
        return c0 + c1 * x2
class Hermite2D(OrthoPolynomialBase):
    r"""
    Bivariate Hermite series.

    It is defined as

    .. math:: P_{nm}(x,y) = \sum_{n,m=0}^{n=d,m=d}C_{nm} H_n(x) H_m(y)

    where ``H_n(x)`` and ``H_m(y)`` are Hermite polynomials.

    Parameters
    ----------
    x_degree : int
        degree in x
    y_degree : int
        degree in y
    x_domain : list or None, optional
        domain of the x independent variable
    y_domain : list or None, optional
        domain of the y independent variable
    x_window : list or None, optional
        range of the x independent variable
    y_window : list or None, optional
        range of the y independent variable
    **params : dict
        keyword: value pairs, representing parameter_name: value

    Notes
    -----
    This model does not support the use of units/quantities, because each term
    in the sum of Hermite polynomials is a polynomial in x and/or y - since the
    coefficients within each Hermite polynomial are fixed, we can't use
    quantities for x and/or y since the units would not be compatible. For
    example, the third Hermite polynomial (H2) is 4x^2-2, but if x was
    specified with units, 4x^2 and -2 would have incompatible units.
    """

    def __init__(self, x_degree, y_degree, x_domain=None, x_window=[-1, 1],
                 y_domain=None, y_window=[-1, 1], n_models=None,
                 model_set_axis=None, name=None, meta=None, **params):
        super(Hermite2D, self).__init__(
            x_degree, y_degree, x_domain=x_domain, y_domain=y_domain,
            x_window=x_window, y_window=y_window, n_models=n_models,
            model_set_axis=model_set_axis, name=name, meta=meta, **params)

    def _fcache(self, x, y):
        """
        Calculate the individual Hermite functions once and store them in a
        dictionary to be reused.
        """
        x_terms = self.x_degree + 1
        y_terms = self.y_degree + 1
        kfunc = {}
        # Slots [0, x_terms) hold H_n(x); slots [x_terms, x_terms + y_terms)
        # hold H_m(y).
        kfunc[0] = np.ones(x.shape)
        kfunc[1] = 2 * x.copy()
        kfunc[x_terms] = np.ones(y.shape)
        kfunc[x_terms + 1] = 2 * y.copy()
        # Physicists' Hermite recurrence: H_n = 2x*H_{n-1} - 2(n-1)*H_{n-2}
        for n in range(2, x_terms):
            kfunc[n] = 2 * x * kfunc[n - 1] - 2 * (n - 1) * kfunc[n - 2]
        for n in range(x_terms + 2, x_terms + y_terms):
            # BUGFIX: the recurrence index for the y series must be relative
            # to the start of the y block (m = n - x_terms), not the absolute
            # slot number n. Using n made H_m(y) wrongly depend on x_degree
            # (compare Legendre2D._fcache, which indexes the y block
            # relatively).
            m = n - x_terms
            kfunc[n] = 2 * y * kfunc[n - 1] - 2 * (m - 1) * kfunc[n - 2]
        return kfunc

    def fit_deriv(self, x, y, *params):
        """
        Derivatives with respect to the coefficients.

        This is an array with Hermite polynomials:

        .. math::

            H_{x_0}H_{y_0}, H_{x_1}H_{y_0}...H_{x_n}H_{y_0}...H_{x_n}H_{y_m}

        Parameters
        ----------
        x : ndarray
            input
        y : ndarray
            input
        params : throw away parameter
            parameter list returned by non-linear fitters

        Returns
        -------
        result : ndarray
            The Vandermonde matrix
        """
        if x.shape != y.shape:
            raise ValueError("x and y must have the same shape")
        x = x.flatten()
        y = y.flatten()
        x_deriv = self._hermderiv1d(x, self.x_degree + 1).T
        y_deriv = self._hermderiv1d(y, self.y_degree + 1).T
        ij = []
        # y-major ordering, matching the coefficient name ordering.
        for i in range(self.y_degree + 1):
            for j in range(self.x_degree + 1):
                ij.append(x_deriv[j] * y_deriv[i])
        v = np.array(ij)
        return v.T

    def _hermderiv1d(self, x, deg):
        """
        Derivative of 1D Hermite series
        """
        # BUGFIX: ``np.float`` was removed from NumPy; builtin ``float`` is
        # the exact equivalent (float64).
        x = np.array(x, dtype=float, copy=False, ndmin=1)
        d = np.empty((deg + 1, len(x)), dtype=x.dtype)
        d[0] = x * 0 + 1
        if deg > 0:
            x2 = 2 * x
            d[1] = x2
            for i in range(2, deg + 1):
                d[i] = x2 * d[i - 1] - 2 * (i - 1) * d[i - 2]
        return np.rollaxis(d, 0, d.ndim)
class Legendre1D(PolynomialModel):
    r"""
    Univariate Legendre series.

    It is defined as:

    .. math::

        P(x) = \sum_{i=0}^{i=n}C_{i} * L_{i}(x)

    where ``L_i(x)`` is the corresponding Legendre polynomial.

    Parameters
    ----------
    degree : int
        degree of the series
    domain : list or None, optional
    window : list or None, optional
        If None, it is set to [-1,1]
        Fitters will remap the domain to this window
    **params : dict
        keyword: value pairs, representing parameter_name: value

    Notes
    -----
    This model does not support the use of units/quantities, because each term
    in the sum of Legendre polynomials is a polynomial in x - since the
    coefficients within each Legendre polynomial are fixed, we can't use
    quantities for x since the units would not be compatible. For example, the
    third Legendre polynomial (P2) is 1.5x^2-0.5, but if x was specified with
    units, 1.5x^2 and -0.5 would have incompatible units.
    """

    inputs = ('x',)
    outputs = ('y',)

    def __init__(self, degree, domain=None, window=[-1, 1], n_models=None,
                 model_set_axis=None, name=None, meta=None, **params):
        self.domain = domain
        self.window = window
        super(Legendre1D, self).__init__(
            degree, n_models=n_models, model_set_axis=model_set_axis,
            name=name, meta=meta, **params)

    def prepare_inputs(self, x, **kwargs):
        # super(PolynomialModel, ...) skips PolynomialModel in the MRO;
        # PolynomialModel defines no prepare_inputs, so behavior is unchanged.
        inputs, format_info = \
            super(PolynomialModel, self).prepare_inputs(x, **kwargs)
        x = inputs[0]
        return (x,), format_info

    def evaluate(self, x, *coeffs):
        if self.domain is not None:
            x = poly_map_domain(x, self.domain, self.window)
        return self.clenshaw(x, coeffs)

    def fit_deriv(self, x, *params):
        """
        Computes the Vandermonde matrix.

        Parameters
        ----------
        x : ndarray
            input
        params : throw away parameter
            parameter list returned by non-linear fitters

        Returns
        -------
        result : ndarray
            The Vandermonde matrix
        """
        # BUGFIX: ``np.float`` was removed from NumPy; builtin ``float`` is
        # the exact equivalent (float64).
        x = np.array(x, dtype=float, copy=False, ndmin=1)
        v = np.empty((self.degree + 1,) + x.shape, dtype=x.dtype)
        v[0] = 1
        if self.degree > 0:
            v[1] = x
            # Bonnet's recurrence: n*P_n = (2n-1)*x*P_{n-1} - (n-1)*P_{n-2}
            for i in range(2, self.degree + 1):
                v[i] = (v[i - 1] * x * (2 * i - 1) - v[i - 2] * (i - 1)) / i
        return np.rollaxis(v, 0, v.ndim)

    @staticmethod
    def clenshaw(x, coeffs):
        """Evaluate a Legendre series with Clenshaw's recursion."""
        if len(coeffs) == 1:
            c0 = coeffs[0]
            c1 = 0
        elif len(coeffs) == 2:
            c0 = coeffs[0]
            c1 = coeffs[1]
        else:
            nd = len(coeffs)
            c0 = coeffs[-2]
            c1 = coeffs[-1]
            for i in range(3, len(coeffs) + 1):
                tmp = c0
                nd = nd - 1
                c0 = coeffs[-i] - (c1 * (nd - 1)) / nd
                c1 = tmp + (c1 * x * (2 * nd - 1)) / nd
        return c0 + c1 * x
class Polynomial1D(PolynomialModel):
    r"""
    1D Polynomial model.

    It is defined as:

    .. math::

        P = \sum_{i=0}^{i=n}C_{i} * x^{i}

    Parameters
    ----------
    degree : int
        degree of the series
    domain : list or None, optional
    window : list or None, optional
        If None, it is set to [-1,1]
        Fitters will remap the domain to this window
    **params : dict
        keyword: value pairs, representing parameter_name: value
    """

    inputs = ('x',)
    outputs = ('y',)

    def __init__(self, degree, domain=[-1, 1], window=[-1, 1], n_models=None,
                 model_set_axis=None, name=None, meta=None, **params):
        self.domain = domain
        self.window = window
        super(Polynomial1D, self).__init__(
            degree, n_models=n_models, model_set_axis=model_set_axis,
            name=name, meta=meta, **params)

    def prepare_inputs(self, x, **kwargs):
        inputs, format_info = \
            super(Polynomial1D, self).prepare_inputs(x, **kwargs)
        x = inputs[0]
        return (x,), format_info

    def evaluate(self, x, *coeffs):
        if self.domain is not None:
            x = poly_map_domain(x, self.domain, self.window)
        return self.horner(x, coeffs)

    def fit_deriv(self, x, *params):
        """
        Computes the Vandermonde matrix.

        Parameters
        ----------
        x : ndarray
            input
        params : throw away parameter
            parameter list returned by non-linear fitters

        Returns
        -------
        result : ndarray
            The Vandermonde matrix
        """
        # BUGFIX: ``np.float`` was removed from NumPy; builtin ``float`` is
        # the exact equivalent (float64).
        v = np.empty((self.degree + 1,) + x.shape, dtype=float)
        v[0] = 1
        if self.degree > 0:
            v[1] = x
            for i in range(2, self.degree + 1):
                v[i] = v[i - 1] * x
        return np.rollaxis(v, 0, v.ndim)

    @staticmethod
    def horner(x, coeffs):
        """Evaluate the polynomial with Horner's scheme."""
        if len(coeffs) == 1:
            # Degree 0: broadcast the constant to the shape of the input.
            c0 = coeffs[-1] * np.ones_like(x, subok=False)
        else:
            c0 = coeffs[-1]
            for i in range(2, len(coeffs) + 1):
                c0 = coeffs[-i] + c0 * x
        return c0

    @property
    def input_units(self):
        if self.degree == 0 or self.c1.unit is None:
            return None
        else:
            return {'x': self.c0.unit / self.c1.unit}

    def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
        mapping = []
        for i in range(self.degree + 1):
            par = getattr(self, 'c{0}'.format(i))
            mapping.append((par.name, outputs_unit['y'] / inputs_unit['x'] ** i))
        return OrderedDict(mapping)
class Polynomial2D(PolynomialModel):
    """
    2D Polynomial model.

    Represents a general polynomial of degree n:

    .. math::

        P(x,y) = c_{00} + c_{10}x + ...+ c_{n0}x^n + c_{01}y + ...+ c_{0n}y^n
        + c_{11}xy + c_{12}xy^2 + ... + c_{1(n-1)}xy^{n-1}+ ... + c_{(n-1)1}x^{n-1}y

    Parameters
    ----------
    degree : int
        highest power of the polynomial,
        the number of terms is degree+1
    x_domain : list or None, optional
        domain of the x independent variable
    y_domain : list or None, optional
        domain of the y independent variable
    x_window : list or None, optional
        range of the x independent variable
    y_window : list or None, optional
        range of the y independent variable
    **params : dict
        keyword: value pairs, representing parameter_name: value
    """

    inputs = ('x', 'y')
    outputs = ('z',)

    def __init__(self, degree, x_domain=[-1, 1], y_domain=[-1, 1],
                 x_window=[-1, 1], y_window=[-1, 1], n_models=None,
                 model_set_axis=None, name=None, meta=None, **params):
        super(Polynomial2D, self).__init__(
            degree, n_models=n_models, model_set_axis=model_set_axis,
            name=name, meta=meta, **params)
        self.x_domain = x_domain
        self.y_domain = y_domain
        self.x_window = x_window
        self.y_window = y_window

    def prepare_inputs(self, x, y, **kwargs):
        inputs, format_info = \
            super(Polynomial2D, self).prepare_inputs(x, y, **kwargs)
        x, y = inputs
        if x.shape != y.shape:
            raise ValueError("Expected input arrays to have the same shape")
        return (x, y), format_info

    def evaluate(self, x, y, *coeffs):
        if self.x_domain is not None:
            x = poly_map_domain(x, self.x_domain, self.x_window)
        if self.y_domain is not None:
            y = poly_map_domain(y, self.y_domain, self.y_window)
        invcoeff = self.invlex_coeff(coeffs)
        result = self.multivariate_horner(x, y, invcoeff)
        # Special case for degree==0 to ensure that the shape of the output is
        # still as expected by the broadcasting rules, even though the x and y
        # inputs are not used in the evaluation
        if self.degree == 0:
            output_shape = check_broadcast(np.shape(coeffs[0]), x.shape)
            if output_shape:
                new_result = np.empty(output_shape)
                new_result[:] = result
                result = new_result
        return result

    def fit_deriv(self, x, y, *params):
        """
        Computes the Vandermonde matrix.

        Parameters
        ----------
        x : ndarray
            input
        y : ndarray
            input
        params : throw away parameter
            parameter list returned by non-linear fitters

        Returns
        -------
        result : ndarray
            The Vandermonde matrix
        """
        if x.ndim == 2:
            x = x.flatten()
        if y.ndim == 2:
            y = y.flatten()
        if x.size != y.size:
            raise ValueError('Expected x and y to be of equal size')
        designx = x[:, None] ** np.arange(self.degree + 1)
        designy = y[:, None] ** np.arange(1, self.degree + 1)
        designmixed = []
        for i in range(1, self.degree):
            for j in range(1, self.degree):
                if i + j <= self.degree:
                    designmixed.append((x ** i) * (y ** j))
        designmixed = np.array(designmixed).T
        # BUGFIX: gate the mixed columns on the array's *size*, not on
        # ``.any()``: ``.any()`` is False whenever every mixed product
        # happens to evaluate to zero (e.g. x == 0 everywhere), which used
        # to silently drop the mixed columns and mis-shape the matrix.
        if designmixed.size > 0:
            v = np.hstack([designx, designy, designmixed])
        else:
            v = np.hstack([designx, designy])
        return v

    def invlex_coeff(self, coeffs):
        # Reorder the flat coefficient list into the inverse lexical order
        # consumed by multivariate_horner.
        invlex_coeffs = []
        lencoeff = range(self.degree + 1)
        for i in lencoeff:
            for j in lencoeff:
                if i + j <= self.degree:
                    name = 'c{0}_{1}'.format(j, i)
                    coeff = coeffs[self.param_names.index(name)]
                    invlex_coeffs.append(coeff)
        return invlex_coeffs[::-1]

    def multivariate_horner(self, x, y, coeffs):
        """
        Multivariate Horner's scheme

        Parameters
        ----------
        x, y : array
        coeffs : array of coefficients in inverse lexical order
        """
        alpha = self._invlex()
        r0 = coeffs[0]
        r1 = r0 * 0.0
        r2 = r0 * 0.0
        karr = np.diff(alpha, axis=0)
        for n in range(len(karr)):
            # karr[n, 1] != 0 marks a step where the y exponent changes.
            if karr[n, 1] != 0:
                r2 = y * (r0 + r1 + r2)
                r1 = np.zeros_like(coeffs[0], subok=False)
            else:
                r1 = x * (r0 + r1)
            r0 = coeffs[n + 1]
        return r0 + r1 + r2

    @property
    def input_units(self):
        if self.degree == 0 or (self.c1_0.unit is None and self.c0_1.unit is None):
            return None
        else:
            return {'x': self.c0_0.unit / self.c1_0.unit,
                    'y': self.c0_0.unit / self.c0_1.unit}

    def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
        mapping = []
        for i in range(self.degree + 1):
            for j in range(self.degree + 1):
                if i + j > 2:
                    continue
                par = getattr(self, 'c{0}_{1}'.format(i, j))
                mapping.append((par.name, outputs_unit['z'] / inputs_unit['x'] ** i / inputs_unit['y'] ** j))
        return OrderedDict(mapping)
class Chebyshev2D(OrthoPolynomialBase):
    r"""
    Bivariate Chebyshev series..

    It is defined as

    .. math:: P_{nm}(x,y) = \sum_{n,m=0}^{n=d,m=d}C_{nm} T_n(x ) T_m(y)

    where ``T_n(x)`` and ``T_m(y)`` are Chebyshev polynomials of the first kind.

    Parameters
    ----------
    x_degree : int
        degree in x
    y_degree : int
        degree in y
    x_domain : list or None, optional
        domain of the x independent variable
    y_domain : list or None, optional
        domain of the y independent variable
    x_window : list or None, optional
        range of the x independent variable
    y_window : list or None, optional
        range of the y independent variable
    **params : dict
        keyword: value pairs, representing parameter_name: value

    Notes
    -----
    This model does not support the use of units/quantities, because each term
    in the sum of Chebyshev polynomials is a polynomial in x and/or y - since
    the coefficients within each Chebyshev polynomial are fixed, we can't use
    quantities for x and/or y since the units would not be compatible. For
    example, the third Chebyshev polynomial (T2) is 2x^2-1, but if x was
    specified with units, 2x^2 and -1 would have incompatible units.
    """

    def __init__(self, x_degree, y_degree, x_domain=None, x_window=[-1, 1],
                 y_domain=None, y_window=[-1, 1], n_models=None,
                 model_set_axis=None, name=None, meta=None, **params):
        super(Chebyshev2D, self).__init__(
            x_degree, y_degree, x_domain=x_domain, y_domain=y_domain,
            x_window=x_window, y_window=y_window, n_models=n_models,
            model_set_axis=model_set_axis, name=name, meta=meta, **params)

    def _fcache(self, x, y):
        """
        Calculate the individual Chebyshev functions once and store them in a
        dictionary to be reused.
        """
        x_terms = self.x_degree + 1
        y_terms = self.y_degree + 1
        kfunc = {}
        # Slots [0, x_terms) hold T_n(x); slots [x_terms, x_terms + y_terms)
        # hold T_m(y).
        kfunc[0] = np.ones(x.shape)
        kfunc[1] = x.copy()
        kfunc[x_terms] = np.ones(y.shape)
        kfunc[x_terms + 1] = y.copy()
        # Chebyshev recurrence: T_n = 2x*T_{n-1} - T_{n-2} (index-free
        # coefficients, so the absolute slot number is safe here).
        for n in range(2, x_terms):
            kfunc[n] = 2 * x * kfunc[n - 1] - kfunc[n - 2]
        for n in range(x_terms + 2, x_terms + y_terms):
            kfunc[n] = 2 * y * kfunc[n - 1] - kfunc[n - 2]
        return kfunc

    def fit_deriv(self, x, y, *params):
        """
        Derivatives with respect to the coefficients.

        This is an array with Chebyshev polynomials:

        .. math::

            T_{x_0}T_{y_0}, T_{x_1}T_{y_0}...T_{x_n}T_{y_0}...T_{x_n}T_{y_m}

        Parameters
        ----------
        x : ndarray
            input
        y : ndarray
            input
        params : throw away parameter
            parameter list returned by non-linear fitters

        Returns
        -------
        result : ndarray
            The Vandermonde matrix
        """
        if x.shape != y.shape:
            raise ValueError("x and y must have the same shape")
        x = x.flatten()
        y = y.flatten()
        x_deriv = self._chebderiv1d(x, self.x_degree + 1).T
        y_deriv = self._chebderiv1d(y, self.y_degree + 1).T
        ij = []
        # y-major ordering, matching the coefficient name ordering.
        for i in range(self.y_degree + 1):
            for j in range(self.x_degree + 1):
                ij.append(x_deriv[j] * y_deriv[i])
        v = np.array(ij)
        return v.T

    def _chebderiv1d(self, x, deg):
        """
        Derivative of 1D Chebyshev series
        """
        # BUGFIX: ``np.float`` was removed from NumPy; builtin ``float`` is
        # the exact equivalent (float64).
        x = np.array(x, dtype=float, copy=False, ndmin=1)
        d = np.empty((deg + 1, len(x)), dtype=x.dtype)
        d[0] = x * 0 + 1
        if deg > 0:
            x2 = 2 * x
            d[1] = x
            for i in range(2, deg + 1):
                d[i] = d[i - 1] * x2 - d[i - 2]
        return np.rollaxis(d, 0, d.ndim)
class Legendre2D(OrthoPolynomialBase):
    r"""
    Bivariate Legendre series.

    Defined as:

    .. math:: P_{n_m}(x,y) = \sum_{n,m=0}^{n=d,m=d}C_{nm} L_n(x ) L_m(y)

    where ``L_n(x)`` and ``L_m(y)`` are Legendre polynomials.

    Parameters
    ----------
    x_degree : int
        degree in x
    y_degree : int
        degree in y
    x_domain : list or None, optional
        domain of the x independent variable
    y_domain : list or None, optional
        domain of the y independent variable
    x_window : list or None, optional
        range of the x independent variable
    y_window : list or None, optional
        range of the y independent variable
    **params : dict
        keyword: value pairs, representing parameter_name: value

    Notes
    -----
    Model formula:

    .. math::

        P(x) = \sum_{i=0}^{i=n}C_{i} * L_{i}(x)

    where ``L_{i}`` is the corresponding Legendre polynomial.

    This model does not support the use of units/quantities, because each term
    in the sum of Legendre polynomials is a polynomial in x - since the
    coefficients within each Legendre polynomial are fixed, we can't use
    quantities for x since the units would not be compatible. For example, the
    third Legendre polynomial (P2) is 1.5x^2-0.5, but if x was specified with
    units, 1.5x^2 and -0.5 would have incompatible units.
    """

    def __init__(self, x_degree, y_degree, x_domain=None, x_window=[-1, 1],
                 y_domain=None, y_window=[-1, 1], n_models=None,
                 model_set_axis=None, name=None, meta=None, **params):
        super(Legendre2D, self).__init__(
            x_degree, y_degree, x_domain=x_domain, y_domain=y_domain,
            x_window=x_window, y_window=y_window, n_models=n_models,
            model_set_axis=model_set_axis, name=name, meta=meta, **params)

    def _fcache(self, x, y):
        """
        Calculate the individual Legendre functions once and store them in a
        dictionary to be reused.
        """
        x_terms = self.x_degree + 1
        y_terms = self.y_degree + 1
        kfunc = {}
        # Slots [0, x_terms) hold P_n(x); slots [x_terms, x_terms + y_terms)
        # hold P_m(y).
        kfunc[0] = np.ones(x.shape)
        kfunc[1] = x.copy()
        kfunc[x_terms] = np.ones(y.shape)
        kfunc[x_terms + 1] = y.copy()
        # Bonnet's recurrence: n*P_n = (2n-1)*x*P_{n-1} - (n-1)*P_{n-2};
        # note the y loop indexes relative to the start of the y block.
        for n in range(2, x_terms):
            kfunc[n] = (((2 * (n - 1) + 1) * x * kfunc[n - 1] -
                         (n - 1) * kfunc[n - 2]) / n)
        for n in range(2, y_terms):
            kfunc[n + x_terms] = ((2 * (n - 1) + 1) * y * kfunc[n + x_terms - 1] -
                                  (n - 1) * kfunc[n + x_terms - 2]) / (n)
        return kfunc

    def fit_deriv(self, x, y, *params):
        """
        Derivatives with respect to the coefficients.

        This is an array with Legendre polynomials:

        Lx0Ly0  Lx1Ly0...LxnLy0...LxnLym

        Parameters
        ----------
        x : ndarray
            input
        y : ndarray
            input
        params : throw away parameter
            parameter list returned by non-linear fitters

        Returns
        -------
        result : ndarray
            The Vandermonde matrix
        """
        if x.shape != y.shape:
            raise ValueError("x and y must have the same shape")
        x = x.flatten()
        y = y.flatten()
        x_deriv = self._legendderiv1d(x, self.x_degree + 1).T
        y_deriv = self._legendderiv1d(y, self.y_degree + 1).T
        ij = []
        # y-major ordering, matching the coefficient name ordering.
        for i in range(self.y_degree + 1):
            for j in range(self.x_degree + 1):
                ij.append(x_deriv[j] * y_deriv[i])
        v = np.array(ij)
        return v.T

    def _legendderiv1d(self, x, deg):
        """Derivative of 1D Legendre polynomial"""
        # BUGFIX: ``np.float`` was removed from NumPy; builtin ``float`` is
        # the exact equivalent (float64).
        x = np.array(x, dtype=float, copy=False, ndmin=1)
        d = np.empty((deg + 1,) + x.shape, dtype=x.dtype)
        d[0] = x * 0 + 1
        if deg > 0:
            d[1] = x
            for i in range(2, deg + 1):
                d[i] = (d[i - 1] * x * (2 * i - 1) - d[i - 2] * (i - 1)) / i
        return np.rollaxis(d, 0, d.ndim)
class _SIP1D(PolynomialBase):
    """
    This implements the Simple Imaging Polynomial Model (SIP) in 1D.

    It's unlikely it will be used in 1D so this class is private
    and SIP should be used instead.
    """

    inputs = ('u', 'v')
    outputs = ('w',)

    def __init__(self, order, coeff_prefix, n_models=None,
                 model_set_axis=None, name=None, meta=None, **params):
        self.order = order
        self.coeff_prefix = coeff_prefix
        self._param_names = self._generate_coeff_names(coeff_prefix)
        super(_SIP1D, self).__init__(n_models=n_models,
                                     model_set_axis=model_set_axis,
                                     name=name, meta=meta, **params)

    def __repr__(self):
        return self._format_repr(args=[self.order, self.coeff_prefix])

    def __str__(self):
        return self._format_str(
            [('Order', self.order),
             ('Coeff. Prefix', self.coeff_prefix)])

    def evaluate(self, x, y, *coeffs):
        # TODO: Rewrite this so that it uses a simpler method of determining
        # the matrix based on the number of given coefficients.
        mcoef = self._coeff_matrix(self.coeff_prefix, coeffs)
        return self._eval_sip(x, y, mcoef)

    def get_num_coeff(self, ndim):
        """
        Return the number of coefficients in one param set
        """
        if self.order < 2 or self.order > 9:
            # BUGFIX: the old message read "2< deg < 9", contradicting the
            # check above — orders 2 and 9 are both accepted.
            raise ValueError("Degree of polynomial must be 2 <= deg <= 9")
        nmixed = comb(self.order, ndim)
        # remove 3 terms because SIP deg >= 2
        numc = self.order * ndim + nmixed - 2
        return numc

    def _generate_coeff_names(self, coeff_prefix):
        # Names follow the FITS SIP convention '<prefix>_<i>_<j>': pure terms
        # of degree >= 2 first (x, then y), then the mixed terms.
        names = []
        for i in range(2, self.order + 1):
            names.append('{0}_{1}_{2}'.format(coeff_prefix, i, 0))
        for i in range(2, self.order + 1):
            names.append('{0}_{1}_{2}'.format(coeff_prefix, 0, i))
        for i in range(1, self.order):
            for j in range(1, self.order):
                if i + j < self.order + 1:
                    names.append('{0}_{1}_{2}'.format(coeff_prefix, i, j))
        return names

    def _coeff_matrix(self, coeff_prefix, coeffs):
        # Scatter the flat coefficient list into a dense (order+1, order+1)
        # exponent matrix; entries of total degree < 2 stay zero by the SIP
        # convention.
        mat = np.zeros((self.order + 1, self.order + 1))
        for i in range(2, self.order + 1):
            attr = '{0}_{1}_{2}'.format(coeff_prefix, i, 0)
            mat[i, 0] = coeffs[self.param_names.index(attr)]
        for i in range(2, self.order + 1):
            attr = '{0}_{1}_{2}'.format(coeff_prefix, 0, i)
            mat[0, i] = coeffs[self.param_names.index(attr)]
        for i in range(1, self.order):
            for j in range(1, self.order):
                if i + j < self.order + 1:
                    attr = '{0}_{1}_{2}'.format(coeff_prefix, i, j)
                    mat[i, j] = coeffs[self.param_names.index(attr)]
        return mat

    def _eval_sip(self, x, y, coef):
        x = np.asarray(x, dtype=np.float64)
        y = np.asarray(y, dtype=np.float64)
        if self.coeff_prefix == 'A':
            result = np.zeros(x.shape)
        else:
            result = np.zeros(y.shape)
        # Sum only the terms with 2 <= i + j <= order.
        for i in range(coef.shape[0]):
            for j in range(coef.shape[1]):
                if i + j > 1 and i + j < self.order + 1:
                    result = result + coef[i, j] * x ** i * y ** j
        return result
class SIP(Model):
    """
    Simple Imaging Polynomial (SIP) model.

    The SIP convention is used to represent distortions in FITS image headers.
    See [1]_ for a description of the SIP convention.

    Parameters
    ----------
    crpix : list or ndarray of length(2)
        CRPIX values
    a_order : int
        SIP polynomial order for first axis
    b_order : int
        SIP order for second axis
    a_coeff : dict
        SIP coefficients for first axis
    b_coeff : dict
        SIP coefficients for the second axis
    ap_order : int
        order for the inverse transformation (AP coefficients)
    bp_order : int
        order for the inverse transformation (BP coefficients)
    ap_coeff : dict
        coefficients for the inverse transform
    bp_coeff : dict
        coefficients for the inverse transform

    References
    ----------
    .. [1] `David Shupe, et al, ADASS, ASP Conference Series, Vol. 347, 2005 <http://adsabs.harvard.edu/abs/2005ASPC..347..491S>`_
    """

    inputs = ('u', 'v')
    outputs = ('x', 'y')

    def __init__(self, crpix, a_order, b_order, a_coeff=None, b_coeff=None,
                 ap_order=None, bp_order=None, ap_coeff=None, bp_coeff=None,
                 n_models=None, model_set_axis=None, name=None, meta=None):
        # BUGFIX: the coefficient dicts were mutable default arguments
        # ({}), which Python evaluates once and shares between all calls.
        # Use None sentinels instead; passing a dict behaves as before.
        a_coeff = {} if a_coeff is None else a_coeff
        b_coeff = {} if b_coeff is None else b_coeff
        ap_coeff = {} if ap_coeff is None else ap_coeff
        bp_coeff = {} if bp_coeff is None else bp_coeff
        self._crpix = crpix
        self._a_order = a_order
        self._b_order = b_order
        self._a_coeff = a_coeff
        self._b_coeff = b_coeff
        self._ap_order = ap_order
        self._bp_order = bp_order
        self._ap_coeff = ap_coeff
        self._bp_coeff = bp_coeff
        # Shift to relative pixel coordinates around CRPIX.
        self.shift_a = Shift(-crpix[0])
        self.shift_b = Shift(-crpix[1])
        self.sip1d_a = _SIP1D(a_order, coeff_prefix='A', n_models=n_models,
                              model_set_axis=model_set_axis, **a_coeff)
        self.sip1d_b = _SIP1D(b_order, coeff_prefix='B', n_models=n_models,
                              model_set_axis=model_set_axis, **b_coeff)
        super(SIP, self).__init__(n_models=n_models,
                                  model_set_axis=model_set_axis, name=name,
                                  meta=meta)

    def __repr__(self):
        return '<{0}({1!r})>'.format(self.__class__.__name__,
            [self.shift_a, self.shift_b, self.sip1d_a, self.sip1d_b])

    def __str__(self):
        parts = ['Model: {0}'.format(self.__class__.__name__)]
        for model in [self.shift_a, self.shift_b, self.sip1d_a, self.sip1d_b]:
            parts.append(indent(str(model), width=4))
            parts.append('')
        return '\n'.join(parts)

    @property
    def inverse(self):
        """Return an `InverseSIP` model, if AP/BP orders were provided."""
        if (self._ap_order is not None and self._bp_order is not None):
            return InverseSIP(self._ap_order, self._bp_order,
                              self._ap_coeff, self._bp_coeff)
        else:
            raise NotImplementedError("SIP inverse coefficients are not available.")

    def evaluate(self, x, y):
        """Apply the CRPIX shift, then the A and B SIP polynomials."""
        u = self.shift_a.evaluate(x, *self.shift_a.param_sets)
        v = self.shift_b.evaluate(y, *self.shift_b.param_sets)
        f = self.sip1d_a.evaluate(u, v, *self.sip1d_a.param_sets)
        g = self.sip1d_b.evaluate(u, v, *self.sip1d_b.param_sets)
        return f, g
class InverseSIP(Model):
    """
    Inverse Simple Imaging Polynomial

    Parameters
    ----------
    ap_order : int
        order for the inverse transformation (AP coefficients)
    bp_order : int
        order for the inverse transformation (BP coefficients)
    ap_coeff : dict
        coefficients for the inverse transform
    bp_coeff : dict
        coefficients for the inverse transform
    """

    inputs = ('x', 'y')
    outputs = ('u', 'v')

    def __init__(self, ap_order, bp_order, ap_coeff=None, bp_coeff=None,
                 n_models=None, model_set_axis=None, name=None, meta=None):
        self._ap_order = ap_order
        self._bp_order = bp_order
        # BUGFIX: the coefficient dicts were mutable default arguments and,
        # worse, the setdefault() calls below mutated the caller's dict
        # (and the shared {} default). Copy before mutating.
        ap_coeff = {} if ap_coeff is None else dict(ap_coeff)
        bp_coeff = {} if bp_coeff is None else dict(bp_coeff)
        self._ap_coeff = ap_coeff
        self._bp_coeff = bp_coeff
        # define the 0th term in order to use Polynomial2D
        ap_coeff.setdefault('AP_0_0', 0)
        bp_coeff.setdefault('BP_0_0', 0)
        # Rename AP_i_j / BP_i_j keys to the c{i}_{j} names Polynomial2D
        # expects.
        ap_coeff_params = dict((k.replace('AP_', 'c'), v)
                               for k, v in ap_coeff.items())
        bp_coeff_params = dict((k.replace('BP_', 'c'), v)
                               for k, v in bp_coeff.items())
        self.sip1d_ap = Polynomial2D(degree=ap_order,
                                     model_set_axis=model_set_axis,
                                     **ap_coeff_params)
        self.sip1d_bp = Polynomial2D(degree=bp_order,
                                     model_set_axis=model_set_axis,
                                     **bp_coeff_params)
        super(InverseSIP, self).__init__(n_models=n_models,
                                         model_set_axis=model_set_axis,
                                         name=name, meta=meta)

    def __repr__(self):
        return '<{0}({1!r})>'.format(self.__class__.__name__,
                                     [self.sip1d_ap, self.sip1d_bp])

    def __str__(self):
        parts = ['Model: {0}'.format(self.__class__.__name__)]
        for model in [self.sip1d_ap, self.sip1d_bp]:
            parts.append(indent(str(model), width=4))
            parts.append('')
        return '\n'.join(parts)

    def evaluate(self, x, y):
        """Evaluate the AP and BP inverse polynomials at ``x``, ``y``."""
        x1 = self.sip1d_ap.evaluate(x, y, *self.sip1d_ap.param_sets)
        y1 = self.sip1d_bp.evaluate(x, y, *self.sip1d_bp.param_sets)
        return x1, y1
| bsd-3-clause |
adaxi/couchpotato | libs/suds/xsd/sxbasic.py | 197 | 22829 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The I{sxbasic} module provides classes that represent
I{basic} schema objects.
"""
from logging import getLogger
from suds import *
from suds.xsd import *
from suds.xsd.sxbase import *
from suds.xsd.query import *
from suds.sax import splitPrefix, Namespace
from suds.transport import TransportError
from suds.reader import DocumentReader
from urlparse import urljoin
log = getLogger(__name__)
class RestrictionMatcher:
    """
    For use with L{NodeFinder} to match restriction.
    """

    def match(self, n):
        # True when the node is an <xs:restriction/> schema object.
        return isinstance(n, Restriction)
class TypedContent(Content):
    """
    Represents any I{typed} content.
    """

    def resolve(self, nobuiltin=False):
        """
        Resolve the I{type} reference to the referenced schema type.
        @param nobuiltin: When True, do not resolve to an xsd builtin
            type; return self instead.
        @return: The resolved type, or self when there is nothing to
            resolve.
        """
        qref = self.qref()
        if qref is None:
            # No type reference; this node is already fully resolved.
            return self
        # Results are memoized per nobuiltin flag.
        key = 'resolved:nb=%s' % nobuiltin
        cached = self.cache.get(key)
        if cached is not None:
            return cached
        result = self
        query = TypeQuery(qref)
        query.history = [self]
        log.debug('%s, resolving: %s\n using:%s', self.id, qref, query)
        resolved = query.execute(self.schema)
        if resolved is None:
            log.debug(self.schema)
            raise TypeNotFound(qref)
        self.cache[key] = resolved
        if resolved.builtin():
            if nobuiltin:
                result = self
            else:
                result = resolved
        else:
            # Recurse: the referenced type may itself be a reference.
            result = resolved.resolve(nobuiltin)
        return result

    def qref(self):
        """
        Get the I{type} qualified reference to the referenced xsd type.
        This method takes into account simple types defined through
        restriction which are detected by determining that self is simple
        (len=0) and by finding a restriction child.
        @return: The I{type} qualified reference.
        @rtype: qref
        """
        qref = self.type
        if qref is None and len(self) == 0:
            # Simple node: look for an <xs:restriction/> descendant and
            # use its base type as the reference.
            ls = []
            m = RestrictionMatcher()
            finder = NodeFinder(m, 1)
            finder.find(self, ls)
            if len(ls):
                return ls[0].ref
        return qref
class Complex(SchemaObject):
    """
    Represents an (xsd) schema <xs:complexType/> node.
    @cvar childtags: A list of valid child node names
    @type childtags: (I{str},...)
    """

    def childtags(self):
        # Tags that may legally appear inside a <xs:complexType/>.
        return (
            'attribute',
            'attributeGroup',
            'sequence',
            'all',
            'choice',
            'complexContent',
            'simpleContent',
            'any',
            'group')

    def description(self):
        return ('name',)

    def extension(self):
        # A complex type is an extension when any raw child is one.
        return any(c.extension() for c in self.rawchildren)

    def mixed(self):
        # Mixed content is declared on a <xs:simpleContent/> child.
        return any(isinstance(c, SimpleContent) and c.mixed()
                   for c in self.rawchildren)
class Group(SchemaObject):
    """
    Represents an (xsd) schema <xs:group/> node.
    @cvar childtags: A list of valid child node names
    @type childtags: (I{str},...)
    """

    def childtags(self):
        return ('sequence', 'all', 'choice')

    def dependencies(self):
        # A group carrying ref="..." depends on the referenced group.
        if self.ref is None:
            return (None, [])
        referenced = GroupQuery(self.ref).execute(self.schema)
        if referenced is None:
            log.debug(self.schema)
            raise TypeNotFound(self.ref)
        return (0, [referenced])

    def merge(self, other):
        # Adopt the children of the referenced group after merging.
        SchemaObject.merge(self, other)
        self.rawchildren = other.rawchildren

    def description(self):
        return ('name', 'ref',)
class AttributeGroup(SchemaObject):
    """
    Represents an (xsd) schema <xs:attributeGroup/> node.
    @cvar childtags: A list of valid child node names
    @type childtags: (I{str},...)
    """

    def childtags(self):
        return ('attribute', 'attributeGroup')

    def dependencies(self):
        # An attributeGroup carrying ref="..." depends on the referenced
        # attribute group.
        if self.ref is None:
            return (None, [])
        referenced = AttrGroupQuery(self.ref).execute(self.schema)
        if referenced is None:
            log.debug(self.schema)
            raise TypeNotFound(self.ref)
        return (0, [referenced])

    def merge(self, other):
        # Adopt the children of the referenced attribute group.
        SchemaObject.merge(self, other)
        self.rawchildren = other.rawchildren

    def description(self):
        return ('name', 'ref',)
class Simple(SchemaObject):
    """
    Represents an (xsd) schema <xs:simpleType/> node
    """

    def childtags(self):
        return ('restriction', 'any', 'list',)

    def enum(self):
        # An enumerated simple type has at least one <xs:enumeration/>
        # descendant.
        return any(isinstance(child, Enumeration)
                   for child, ancestry in self.children())

    def mixed(self):
        return len(self)

    def description(self):
        return ('name',)

    def extension(self):
        return any(c.extension() for c in self.rawchildren)

    def restriction(self):
        return any(c.restriction() for c in self.rawchildren)
class List(SchemaObject):
    """
    Represents an (xsd) schema <xs:list/> node
    """

    def childtags(self):
        # A list node has no interesting children.
        return ()

    def description(self):
        return ('name',)

    def xslist(self):
        # Marker: identifies this node as an xsd list.
        return True
class Restriction(SchemaObject):
    """
    Represents an (xsd) schema <xs:restriction/> node
    """

    def __init__(self, schema, root):
        SchemaObject.__init__(self, schema, root)
        # The restriction's base type is carried in the base="..." attribute.
        self.ref = root.get('base')

    def childtags(self):
        return ('enumeration', 'attribute', 'attributeGroup')

    def dependencies(self):
        """Return (merge-index, [base type]) for a non-builtin base."""
        deps = []
        midx = None
        if self.ref is not None:
            query = TypeQuery(self.ref)
            # Renamed from `super` — do not shadow the builtin.
            base = query.execute(self.schema)
            if base is None:
                log.debug(self.schema)
                raise TypeNotFound(self.ref)
            if not base.builtin():
                deps.append(base)
                midx = 0
        return (midx, deps)

    def restriction(self):
        return True

    def merge(self, other):
        SchemaObject.merge(self, other)
        # Renamed from `filter` — do not shadow the builtin.
        child_filter = Filter(False, self.rawchildren)
        self.prepend(self.rawchildren, other.rawchildren, child_filter)

    def description(self):
        return ('ref',)
class Collection(SchemaObject):
    """
    Represents an (xsd) schema collection node:
    - sequence
    - choice
    - all
    """

    def childtags(self):
        # Tags that may legally appear inside a model-group node.
        return ('element', 'sequence', 'all', 'choice', 'any', 'group')
class Sequence(Collection):
    """
    Represents an (xsd) schema <xs:sequence/> node.
    """

    def sequence(self):
        # Marker: identifies this collection as a sequence.
        return True
class All(Collection):
    """
    Represents an (xsd) schema <xs:all/> node.
    """

    def all(self):
        # Marker: identifies this collection as an <xs:all/> group.
        return True
class Choice(Collection):
    """
    Represents an (xsd) schema <xs:choice/> node.
    """

    def choice(self):
        # Marker: identifies this collection as a choice.
        return True
class ComplexContent(SchemaObject):
    """
    Represents an (xsd) schema <xs:complexContent/> node.
    """

    def childtags(self):
        return ('attribute', 'attributeGroup', 'extension', 'restriction')

    def extension(self):
        # Delegates to the raw children (extension/restriction nodes).
        return any(c.extension() for c in self.rawchildren)

    def restriction(self):
        return any(c.restriction() for c in self.rawchildren)
class SimpleContent(SchemaObject):
    """
    Represents an (xsd) schema <xs:simpleContent/> node.
    """

    def childtags(self):
        return ('extension', 'restriction')

    def extension(self):
        # Delegates to the raw children (extension/restriction nodes).
        return any(c.extension() for c in self.rawchildren)

    def restriction(self):
        return any(c.restriction() for c in self.rawchildren)

    def mixed(self):
        return len(self)
class Enumeration(Content):
    """
    Represents an (xsd) schema <xs:enumeration/> node
    """

    def __init__(self, schema, root):
        Content.__init__(self, schema, root)
        # The enumeration's value doubles as the node name.
        self.name = root.get('value')

    def enum(self):
        # Marker: identifies this node as an enumeration.
        return True
class Element(TypedContent):
    """
    Represents an (xsd) schema <xs:element/> node.
    """

    def __init__(self, schema, root):
        TypedContent.__init__(self, schema, root)
        a = root.get('form')
        if a is not None:
            # The element's own form="..." overrides the schema default.
            self.form_qualified = ( a == 'qualified' )
        a = self.root.get('nillable')
        if a is not None:
            self.nillable = ( a in ('1', 'true') )
        self.implany()

    def implany(self):
        """
        Set the type as any when implicit.
        An implicit <xs:any/> is when an element has no
        body and no type defined.
        @return: self
        @rtype: L{Element}
        """
        if self.type is None and \
            self.ref is None and \
            self.root.isempty():
            self.type = self.anytype()
        return self

    def childtags(self):
        return ('attribute', 'simpleType', 'complexType', 'any',)

    def extension(self):
        # Delegates to the raw children.
        for c in self.rawchildren:
            if c.extension():
                return True
        return False

    def restriction(self):
        for c in self.rawchildren:
            if c.restriction():
                return True
        return False

    def dependencies(self):
        # An element carrying ref="..." depends on the referenced element.
        deps = []
        midx = None
        if self.ref is not None:
            query = ElementQuery(self.ref)
            e = query.execute(self.schema)
            if e is None:
                log.debug(self.schema)
                raise TypeNotFound(self.ref)
            deps.append(e)
            midx = 0
        return (midx, deps)

    def merge(self, other):
        # Adopt the children of the referenced element.
        SchemaObject.merge(self, other)
        self.rawchildren = other.rawchildren

    def description(self):
        return ('name', 'ref', 'type')

    def anytype(self):
        """ create an xsd:anyType reference """
        p,u = Namespace.xsdns
        # Reuse the document's existing xsd prefix when there is one;
        # otherwise register the default prefix on the root.
        mp = self.root.findPrefix(u)
        if mp is None:
            mp = p
            self.root.addPrefix(p, u)
        return ':'.join((mp, 'anyType'))
class Extension(SchemaObject):
    """
    Represents an (xsd) schema <xs:extension/> node.
    """

    def __init__(self, schema, root):
        SchemaObject.__init__(self, schema, root)
        # The extension's base type is carried in the base="..." attribute.
        self.ref = root.get('base')

    def childtags(self):
        return ('attribute',
                'attributeGroup',
                'sequence',
                'all',
                'choice',
                'group')

    def dependencies(self):
        """Return (merge-index, [base type]) for a non-builtin base."""
        deps = []
        midx = None
        if self.ref is not None:
            query = TypeQuery(self.ref)
            # Renamed from `super` — do not shadow the builtin.
            base = query.execute(self.schema)
            if base is None:
                log.debug(self.schema)
                raise TypeNotFound(self.ref)
            if not base.builtin():
                deps.append(base)
                midx = 0
        return (midx, deps)

    def merge(self, other):
        SchemaObject.merge(self, other)
        # Renamed from `filter` — do not shadow the builtin.
        child_filter = Filter(False, self.rawchildren)
        self.prepend(self.rawchildren, other.rawchildren, child_filter)

    def extension(self):
        return ( self.ref is not None )

    def description(self):
        return ('ref',)
class Import(SchemaObject):
    """
    Represents an (xsd) schema <xs:import/> node
    @cvar locations: A dictionary of namespace locations.
    @type locations: dict
    @ivar ns: The imported namespace.
    @type ns: str
    @ivar location: The (optional) location.
    @type location: namespace-uri
    @ivar opened: Opened and I{imported} flag.
    @type opened: boolean
    """

    # Class-level registry: namespace-uri -> schema location fallback.
    locations = {}

    @classmethod
    def bind(cls, ns, location=None):
        """
        Bind a namespace to a schema location (URI).
        This is used for imports that don't specify a schemaLocation.
        @param ns: A namespace-uri.
        @type ns: str
        @param location: The (optional) schema location for the
            namespace. (default=ns).
        @type location: str
        """
        if location is None:
            location = ns
        cls.locations[ns] = location

    def __init__(self, schema, root):
        SchemaObject.__init__(self, schema, root)
        self.ns = (None, root.get('namespace'))
        self.location = root.get('schemaLocation')
        if self.location is None:
            # Fall back to any statically bound location for the namespace.
            self.location = self.locations.get(self.ns[1])
        self.opened = False

    def open(self, options):
        """
        Open and import the referenced schema.
        @param options: An options dictionary.
        @type options: L{options.Options}
        @return: The referenced schema.
        @rtype: L{Schema}
        """
        if self.opened:
            # Idempotent: only import once.
            return
        self.opened = True
        log.debug('%s, importing ns="%s", location="%s"', self.id, self.ns[1], self.location)
        result = self.locate()
        if result is None:
            if self.location is None:
                log.debug('imported schema (%s) not-found', self.ns[1])
            else:
                result = self.download(options)
        log.debug('imported:\n%s', result)
        return result

    def locate(self):
        """ find the schema locally """
        if self.ns[1] == self.schema.tns[1]:
            # Importing our own namespace; nothing to locate.
            return None
        else:
            return self.schema.locate(self.ns)

    def download(self, options):
        """ download the schema """
        url = self.location
        try:
            # Resolve relative locations against the schema's base URL.
            if '://' not in url:
                url = urljoin(self.schema.baseurl, url)
            reader = DocumentReader(options)
            d = reader.open(url)
            root = d.root()
            root.set('url', url)
            return self.schema.instance(root, url, options)
        except TransportError:
            msg = 'imported schema (%s) at (%s), failed' % (self.ns[1], url)
            log.error('%s, %s', self.id, msg, exc_info=True)
            raise Exception(msg)

    def description(self):
        return ('ns', 'location')
class Include(SchemaObject):
    """
    Represents an (xsd) schema <xs:include/> node
    @ivar location: The (optional) location.
    @type location: namespace-uri
    @ivar opened: Opened and I{imported} flag.
    @type opened: boolean
    """

    locations = {}

    def __init__(self, schema, root):
        SchemaObject.__init__(self, schema, root)
        self.location = root.get('schemaLocation')
        if self.location is None:
            # NOTE(review): relies on self.ns being provided by the base
            # class — confirm SchemaObject sets it.
            self.location = self.locations.get(self.ns[1])
        self.opened = False

    def open(self, options):
        """
        Open and include the referenced schema.
        @param options: An options dictionary.
        @type options: L{options.Options}
        @return: The referenced schema.
        @rtype: L{Schema}
        """
        if self.opened:
            # Idempotent: only include once.
            return
        self.opened = True
        log.debug('%s, including location="%s"', self.id, self.location)
        result = self.download(options)
        log.debug('included:\n%s', result)
        return result

    def download(self, options):
        """ download the schema """
        url = self.location
        try:
            # Resolve relative locations against the schema's base URL.
            if '://' not in url:
                url = urljoin(self.schema.baseurl, url)
            reader = DocumentReader(options)
            d = reader.open(url)
            root = d.root()
            root.set('url', url)
            self.__applytns(root)
            return self.schema.instance(root, url, options)
        except TransportError:
            msg = 'include schema at (%s), failed' % url
            log.error('%s, %s', self.id, msg, exc_info=True)
            raise Exception(msg)

    def __applytns(self, root):
        """ make sure included schema has same tns. """
        TNS = 'targetNamespace'
        tns = root.get(TNS)
        if tns is None:
            # The included schema inherits the including schema's tns.
            tns = self.schema.tns[1]
            root.set(TNS, tns)
        else:
            if self.schema.tns[1] != tns:
                # BUGFIX: was the Python 2-only statement form
                # (raise Exception, '...'); use call syntax like the
                # other raises in this module.
                raise Exception('%s mismatch' % TNS)

    def description(self):
        # BUGFIX: was ('location') — a plain string, not a 1-tuple like
        # every other description() in this module.
        return ('location',)
class Attribute(TypedContent):
    """
    Represents an (xsd) <attribute/> node
    """

    def __init__(self, schema, root):
        TypedContent.__init__(self, schema, root)
        self.use = root.get('use', default='')

    def childtags(self):
        return ('restriction',)

    def isattr(self):
        # Marker: identifies this node as an attribute.
        return True

    def get_default(self):
        """
        Gets the <xs:attribute default=""/> attribute value.
        @return: The default value for the attribute
        @rtype: str
        """
        return self.root.get('default', default='')

    def optional(self):
        # Any use other than 'required' (including empty) is optional.
        return self.use != 'required'

    def dependencies(self):
        # An attribute carrying ref="..." depends on the referenced
        # attribute.
        if self.ref is None:
            return (None, [])
        referenced = AttrQuery(self.ref).execute(self.schema)
        if referenced is None:
            log.debug(self.schema)
            raise TypeNotFound(self.ref)
        return (0, [referenced])

    def description(self):
        return ('name', 'ref', 'type')
class Any(Content):
    """
    Represents an (xsd) <any/> node
    """

    def get_child(self, name):
        # <xs:any/> matches any element, so synthesize a child on demand.
        node = self.root.clone()
        node.set('note', 'synthesized (any) child')
        return (Any(self.schema, node), [])

    def get_attribute(self, name):
        # Likewise, synthesize an attribute on demand.
        node = self.root.clone()
        node.set('note', 'synthesized (any) attribute')
        return (Any(self.schema, node), [])

    def any(self):
        return True
class Factory:
    """
    @cvar tags: A factory to create object objects based on tag.
    @type tags: {tag:fn,}
    """

    # Dispatch table: xsd tag name -> SchemaObject subclass (or callable).
    tags =\
    {
        'import' : Import,
        'include' : Include,
        'complexType' : Complex,
        'group' : Group,
        'attributeGroup' : AttributeGroup,
        'simpleType' : Simple,
        'list' : List,
        'element' : Element,
        'attribute' : Attribute,
        'sequence' : Sequence,
        'all' : All,
        'choice' : Choice,
        'complexContent' : ComplexContent,
        'simpleContent' : SimpleContent,
        'restriction' : Restriction,
        'enumeration' : Enumeration,
        'extension' : Extension,
        'any' : Any,
    }

    @classmethod
    def maptag(cls, tag, fn):
        """
        Map (override) tag => I{class} mapping.
        @param tag: An xsd tag name.
        @type tag: str
        @param fn: A function or class.
        @type fn: fn|class.
        """
        cls.tags[tag] = fn

    @classmethod
    def create(cls, root, schema):
        """
        Create an object based on the root tag name.
        @param root: An XML root element.
        @type root: L{Element}
        @param schema: A schema object.
        @type schema: L{schema.Schema}
        @return: The created object, or None for unmapped tags.
        @rtype: L{SchemaObject}
        """
        fn = cls.tags.get(root.name)
        if fn is not None:
            return fn(schema, root)
        else:
            return None

    @classmethod
    def build(cls, root, schema, filter=('*',)):
        """
        Build an xsobject representation.
        @param root: An schema XML root.
        @type root: L{sax.element.Element}
        @param filter: A tag filter; '*' admits every tag.
            NOTE: parameter name shadows the builtin filter().
        @type filter: [str,...]
        @return: A schema object graph.
        @rtype: L{sxbase.SchemaObject}
        """
        children = []
        for node in root.getChildren(ns=Namespace.xsdns):
            if '*' in filter or node.name in filter:
                child = cls.create(node, schema)
                if child is None:
                    # Unmapped tag; skip it.
                    continue
                children.append(child)
                # Recurse, restricting to the tags the child accepts.
                c = cls.build(node, schema, child.childtags())
                child.rawchildren = c
        return children

    @classmethod
    def collate(cls, children):
        # Partition top-level schema objects into per-kind indexes keyed
        # by qualified name; imports/includes are removed from children.
        imports = []
        elements = {}
        attributes = {}
        types = {}
        groups = {}
        agrps = {}
        for c in children:
            if isinstance(c, (Import, Include)):
                imports.append(c)
                continue
            if isinstance(c, Attribute):
                attributes[c.qname] = c
                continue
            if isinstance(c, Element):
                elements[c.qname] = c
                continue
            if isinstance(c, Group):
                groups[c.qname] = c
                continue
            if isinstance(c, AttributeGroup):
                agrps[c.qname] = c
                continue
            # Anything else is a type definition.
            types[c.qname] = c
        for i in imports:
            children.remove(i)
        return (children, imports, attributes, elements, types, groups, agrps)
#######################################################
# Static Import Bindings :-(
#######################################################
# Pre-bind schema locations for namespaces that are commonly imported
# without a schemaLocation attribute (consumed by Import.__init__ via
# Import.locations).
Import.bind(
    'http://schemas.xmlsoap.org/soap/encoding/',
    'suds://schemas.xmlsoap.org/soap/encoding/')
Import.bind(
    'http://www.w3.org/XML/1998/namespace',
    'http://www.w3.org/2001/xml.xsd')
Import.bind(
    'http://www.w3.org/2001/XMLSchema',
    'http://www.w3.org/2001/XMLSchema.xsd')
| gpl-3.0 |
xodus7/tensorflow | tensorflow/contrib/factorization/python/ops/wals_test.py | 3 | 19947 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for WALSMatrixFactorization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import json
import numpy as np
from tensorflow.contrib.factorization.python.ops import factorization_ops_test_utils
from tensorflow.contrib.factorization.python.ops import wals as wals_lib
from tensorflow.contrib.learn.python.learn import run_config
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config as run_config_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import monitored_session
class WALSMatrixFactorizationTest(test.TestCase):
INPUT_MATRIX = factorization_ops_test_utils.INPUT_MATRIX
  def np_array_to_sparse(self, np_array):
    """Transforms an np.array to a tf.SparseTensor."""
    # Thin wrapper over the shared test utility.
    return factorization_ops_test_utils.np_matrix_to_tf_sparse(np_array)
  def calculate_loss(self):
    """Calculates the loss of the current (trained) model."""
    # Gather the trained row/column factors and their weights; the
    # 'div' partition strategy must match how the model shards factors.
    current_rows = embedding_ops.embedding_lookup(
        self._model.get_row_factors(), math_ops.range(self._num_rows),
        partition_strategy='div')
    current_cols = embedding_ops.embedding_lookup(
        self._model.get_col_factors(), math_ops.range(self._num_cols),
        partition_strategy='div')
    row_wts = embedding_ops.embedding_lookup(
        self._row_weights, math_ops.range(self._num_rows),
        partition_strategy='div')
    col_wts = embedding_ops.embedding_lookup(
        self._col_weights, math_ops.range(self._num_cols),
        partition_strategy='div')
    sp_inputs = self.np_array_to_sparse(self.INPUT_MATRIX)
    # Delegate the actual WALS loss computation to the shared utility.
    return factorization_ops_test_utils.calculate_loss(
        sp_inputs, current_rows, current_cols, self._regularization_coeff,
        self._unobserved_weight, row_wts, col_wts)
  # TODO(walidk): Replace with input_reader_utils functions once open sourced.
  def remap_sparse_tensor_rows(self, sp_x, row_ids, shape):
    """Remaps the row ids of a tf.SparseTensor."""
    # Split the (row, col) index pairs, translate the row ids through the
    # `row_ids` lookup, and reassemble with the requested dense shape.
    old_row_ids, old_col_ids = array_ops.split(
        value=sp_x.indices, num_or_size_splits=2, axis=1)
    new_row_ids = array_ops.gather(row_ids, old_row_ids)
    new_indices = array_ops.concat([new_row_ids, old_col_ids], 1)
    return sparse_tensor.SparseTensor(
        indices=new_indices, values=sp_x.values, dense_shape=shape)
  # TODO(walidk): Add an option to shuffle inputs.
  def input_fn(self, np_matrix, batch_size, mode,
               project_row=None, projection_weights=None,
               remove_empty_rows_columns=False):
    """Returns an input_fn that selects row and col batches from np_matrix.

    This simple utility creates an input function from a numpy_array. The
    following transformations are performed:
    * The empty rows and columns in np_matrix are removed (if
      remove_empty_rows_columns is true)
    * np_matrix is converted to a SparseTensor.
    * The rows of the sparse matrix (and the rows of its transpose) are batched.
    * A features dictionary is created, which contains the row / column batches.

    In TRAIN mode, one only needs to specify the np_matrix and the batch_size.
    In INFER and EVAL modes, one must also provide project_row, a boolean which
    specifies whether we are projecting rows or columns.

    Args:
      np_matrix: A numpy array. The input matrix to use.
      batch_size: Integer.
      mode: Can be one of model_fn.ModeKeys.{TRAIN, INFER, EVAL}.
      project_row: A boolean. Used in INFER and EVAL modes. Specifies whether
        to project rows or columns.
      projection_weights: A float numpy array. Used in INFER mode. Specifies
        the weights to use in the projection (the weights are optional, and
        default to 1.).
      remove_empty_rows_columns: A boolean. When true, this will remove empty
        rows and columns in the np_matrix. Note that this will result in
        modifying the indices of the input matrix. The mapping from new indices
        to old indices is returned in the form of two numpy arrays.

    Returns:
      A tuple consisting of:
      _fn: A callable. Calling _fn returns a features dict.
      nz_row_ids: A numpy array of the ids of non-empty rows, such that
        nz_row_ids[i] is the old row index corresponding to new index i.
      nz_col_ids: A numpy array of the ids of non-empty columns, such that
        nz_col_ids[j] is the old column index corresponding to new index j.
    """
    if remove_empty_rows_columns:
      np_matrix, nz_row_ids, nz_col_ids = (
          factorization_ops_test_utils.remove_empty_rows_columns(np_matrix))
    else:
      # No compaction: identity mappings.
      nz_row_ids = np.arange(np.shape(np_matrix)[0])
      nz_col_ids = np.arange(np.shape(np_matrix)[1])

    def extract_features(row_batch, col_batch, num_rows, num_cols):
      # Each batch is an (ids, sparse rows) pair; remap the batched sparse
      # rows back to their original indices so shapes match the full matrix.
      row_ids = row_batch[0]
      col_ids = col_batch[0]
      rows = self.remap_sparse_tensor_rows(
          row_batch[1], row_ids, shape=[num_rows, num_cols])
      cols = self.remap_sparse_tensor_rows(
          col_batch[1], col_ids, shape=[num_cols, num_rows])
      features = {
          wals_lib.WALSMatrixFactorization.INPUT_ROWS: rows,
          wals_lib.WALSMatrixFactorization.INPUT_COLS: cols,
      }
      return features

    def _fn():
      num_rows = np.shape(np_matrix)[0]
      num_cols = np.shape(np_matrix)[1]
      row_ids = math_ops.range(num_rows, dtype=dtypes.int64)
      col_ids = math_ops.range(num_cols, dtype=dtypes.int64)
      sp_mat = self.np_array_to_sparse(np_matrix)
      # Column batches come from the transposed matrix.
      sp_mat_t = sparse_ops.sparse_transpose(sp_mat)
      row_batch = input_lib.batch(
          [row_ids, sp_mat],
          batch_size=min(batch_size, num_rows),
          capacity=10,
          enqueue_many=True)
      col_batch = input_lib.batch(
          [col_ids, sp_mat_t],
          batch_size=min(batch_size, num_cols),
          capacity=10,
          enqueue_many=True)

      features = extract_features(row_batch, col_batch, num_rows, num_cols)

      if mode == model_fn.ModeKeys.INFER or mode == model_fn.ModeKeys.EVAL:
        self.assertTrue(
            project_row is not None,
            msg='project_row must be specified in INFER or EVAL mode.')
        features[wals_lib.WALSMatrixFactorization.PROJECT_ROW] = (
            constant_op.constant(project_row))

      if mode == model_fn.ModeKeys.INFER and projection_weights is not None:
        weights_batch = input_lib.batch(
            projection_weights,
            batch_size=batch_size,
            capacity=10,
            enqueue_many=True)
        features[wals_lib.WALSMatrixFactorization.PROJECTION_WEIGHTS] = (
            weights_batch)

      labels = None
      return features, labels

    return _fn, nz_row_ids, nz_col_ids
  @property
  def input_matrix(self):
    # The shared test fixture matrix; subclasses may override.
    return self.INPUT_MATRIX
  @property
  def row_steps(self):
    # Steps needed to visit every row batch once (true division is in
    # effect via the module's `from __future__ import division`).
    return np.ceil(self._num_rows / self.batch_size)
  @property
  def col_steps(self):
    # Steps needed to visit every column batch once.
    return np.ceil(self._num_cols / self.batch_size)
  @property
  def batch_size(self):
    # Fixed batch size for this test; subclasses may override.
    return 5
  @property
  def use_cache(self):
    # Whether the model should cache factors/weights/gramian during
    # training; subclasses may override to exercise the cached path.
    return False
  @property
  def max_sweeps(self):
    # No sweep limit by default; subclasses may override.
    return None
  def setUp(self):
    # Problem dimensions and WALS hyper-parameters for the fixture.
    self._num_rows = 5
    self._num_cols = 7
    self._embedding_dimension = 3
    self._unobserved_weight = 0.1
    self._num_row_shards = 2
    self._num_col_shards = 3
    self._regularization_coeff = 0.01
    # Fixed initial column factors, pre-split into the 3 column shards.
    self._col_init = [
        # Shard 0.
        [[-0.36444709, -0.39077035, -0.32528427],
         [1.19056475, 0.07231052, 2.11834812],
         [0.93468881, -0.71099287, 1.91826844]],
        # Shard 1.
        [[1.18160152, 1.52490723, -0.50015002],
         [1.82574749, -0.57515913, -1.32810032]],
        # Shard 2.
        [[-0.15515432, -0.84675711, 0.13097958],
         [-0.9246484, 0.69117504, 1.2036494]],
    ]
    # Per-row / per-column weights, sharded to match the factor shards.
    self._row_weights = [[0.1, 0.2, 0.3], [0.4, 0.5]]
    self._col_weights = [[0.1, 0.2, 0.3], [0.4, 0.5], [0.6, 0.7]]

    # Values of row and column factors after running one iteration or factor
    # updates. (Golden values the tests below assert against.)
    self._row_factors_0 = [[0.097689, -0.219293, -0.020780],
                           [0.50842, 0.64626, 0.22364],
                           [0.401159, -0.046558, -0.192854]]
    self._row_factors_1 = [[1.20597, -0.48025, 0.35582],
                           [1.5564, 1.2528, 1.0528]]
    self._col_factors_0 = [[2.4725, -1.2950, -1.9980],
                           [0.44625, 1.50771, 1.27118],
                           [1.39801, -2.10134, 0.73572]]
    self._col_factors_1 = [[3.36509, -0.66595, -3.51208],
                           [0.57191, 1.59407, 1.33020]]
    self._col_factors_2 = [[3.3459, -1.3341, -3.3008],
                           [0.57366, 1.83729, 1.26798]]
    # The estimator under test.
    self._model = wals_lib.WALSMatrixFactorization(
        self._num_rows,
        self._num_cols,
        self._embedding_dimension,
        self._unobserved_weight,
        col_init=self._col_init,
        regularization_coeff=self._regularization_coeff,
        num_row_shards=self._num_row_shards,
        num_col_shards=self._num_col_shards,
        row_weights=self._row_weights,
        col_weights=self._col_weights,
        max_sweeps=self.max_sweeps,
        use_factors_weights_cache_for_training=self.use_cache,
        use_gramian_cache_for_training=self.use_cache)
  def test_fit(self):
    # Row sweep: one pass over all row batches updates the row factors.
    input_fn = self.input_fn(np_matrix=self.input_matrix,
                             batch_size=self.batch_size,
                             mode=model_fn.ModeKeys.TRAIN,
                             remove_empty_rows_columns=True)[0]
    self._model.fit(input_fn=input_fn, steps=self.row_steps)
    row_factors = self._model.get_row_factors()
    self.assertAllClose(row_factors[0], self._row_factors_0, atol=1e-3)
    self.assertAllClose(row_factors[1], self._row_factors_1, atol=1e-3)

    # Col sweep.
    # Running fit a second time will resume training from the checkpoint.
    input_fn = self.input_fn(np_matrix=self.input_matrix,
                             batch_size=self.batch_size,
                             mode=model_fn.ModeKeys.TRAIN,
                             remove_empty_rows_columns=True)[0]
    self._model.fit(input_fn=input_fn, steps=self.col_steps)
    col_factors = self._model.get_col_factors()
    self.assertAllClose(col_factors[0], self._col_factors_0, atol=1e-3)
    self.assertAllClose(col_factors[1], self._col_factors_1, atol=1e-3)
    self.assertAllClose(col_factors[2], self._col_factors_2, atol=1e-3)
  def test_predict(self):
    """Checks that projecting rows/columns reproduces the trained factors."""
    input_fn = self.input_fn(np_matrix=self.input_matrix,
                             batch_size=self.batch_size,
                             mode=model_fn.ModeKeys.TRAIN,
                             remove_empty_rows_columns=True,
                             )[0]
    # Project rows 1 and 4 from the input matrix.
    proj_input_fn = self.input_fn(
        np_matrix=self.INPUT_MATRIX[[1, 4], :],
        batch_size=2,
        mode=model_fn.ModeKeys.INFER,
        project_row=True,
        projection_weights=[[0.2, 0.5]])[0]
    self._model.fit(input_fn=input_fn, steps=self.row_steps)
    projections = self._model.get_projections(proj_input_fn)
    projected_rows = list(itertools.islice(projections, 2))
    # Rows 1 and 4 live in shards 0 and 1 respectively, hence the two
    # different expected factor blocks below.
    self.assertAllClose(
        projected_rows,
        [self._row_factors_0[1], self._row_factors_1[1]],
        atol=1e-3)
    # Project columns 5, 3, 1 from the input matrix.
    proj_input_fn = self.input_fn(
        np_matrix=self.INPUT_MATRIX[:, [5, 3, 1]],
        batch_size=3,
        mode=model_fn.ModeKeys.INFER,
        project_row=False,
        projection_weights=[[0.6, 0.4, 0.2]])[0]
    self._model.fit(input_fn=input_fn, steps=self.col_steps)
    projections = self._model.get_projections(proj_input_fn)
    projected_cols = list(itertools.islice(projections, 3))
    self.assertAllClose(
        projected_cols,
        [self._col_factors_2[0], self._col_factors_1[0],
         self._col_factors_0[1]],
        atol=1e-3)
  def test_eval(self):
    """After each sweep, eval loss on projected inputs must equal the true loss."""
    # Do a row sweep then evaluate the model on row inputs.
    # The evaluate function returns the loss of the projected rows, but since
    # projection is idempotent, the eval loss must match the model loss.
    input_fn = self.input_fn(np_matrix=self.input_matrix,
                             batch_size=self.batch_size,
                             mode=model_fn.ModeKeys.TRAIN,
                             remove_empty_rows_columns=True,
                             )[0]
    self._model.fit(input_fn=input_fn, steps=self.row_steps)
    eval_input_fn_row = self.input_fn(np_matrix=self.input_matrix,
                                      batch_size=1,
                                      mode=model_fn.ModeKeys.EVAL,
                                      project_row=True,
                                      remove_empty_rows_columns=True)[0]
    loss = self._model.evaluate(
        input_fn=eval_input_fn_row, steps=self._num_rows)['loss']
    with self.cached_session():
      true_loss = self.calculate_loss()
    self.assertNear(
        loss, true_loss, err=.001,
        msg="""After row update, eval loss = {}, does not match the true
        loss = {}.""".format(loss, true_loss))
    # Do a col sweep then evaluate the model on col inputs.
    self._model.fit(input_fn=input_fn, steps=self.col_steps)
    eval_input_fn_col = self.input_fn(np_matrix=self.input_matrix,
                                      batch_size=1,
                                      mode=model_fn.ModeKeys.EVAL,
                                      project_row=False,
                                      remove_empty_rows_columns=True)[0]
    loss = self._model.evaluate(
        input_fn=eval_input_fn_col, steps=self._num_cols)['loss']
    with self.cached_session():
      true_loss = self.calculate_loss()
    self.assertNear(
        loss, true_loss, err=.001,
        msg="""After col update, eval loss = {}, does not match the true
        loss = {}.""".format(loss, true_loss))
class WALSMatrixFactorizationTestSweeps(WALSMatrixFactorizationTest):
  """Re-runs the base tests relying on max_sweeps (not step counts) to stop."""
  @property
  def max_sweeps(self):
    # Stop training after two full sweeps.
    return 2
  # We set the column steps to None so that we rely only on max_sweeps to stop
  # training.
  @property
  def col_steps(self):
    return None
class WALSMatrixFactorizationTestCached(WALSMatrixFactorizationTest):
  """Re-runs the base tests with the factors/weights and Gramian caches on."""
  @property
  def use_cache(self):
    return True
class WALSMatrixFactorizaiontTestPaddedInput(WALSMatrixFactorizationTest):
  """Re-runs the base tests with the input matrix padded by a zero row/column."""
  # NOTE(review): the class name misspells "Factorization"; renaming it would
  # change the public test name, so the typo is only flagged here.
  PADDED_INPUT_MATRIX = np.pad(
      WALSMatrixFactorizationTest.INPUT_MATRIX,
      [(1, 0), (1, 0)], mode='constant')
  @property
  def input_matrix(self):
    return self.PADDED_INPUT_MATRIX
class WALSMatrixFactorizationUnsupportedTest(test.TestCase):
  """Checks that unsupported configurations of WALSMatrixFactorization fail."""
  # The original class carried an empty setUp() that only shadowed the no-op
  # base implementation; it has been removed.

  def testDistributedWALSUnsupported(self):
    """Constructing the estimator under a multi-worker RunConfig must raise."""
    # Fake a distributed TF_CONFIG with two workers; WALS training is
    # single-worker only, so the constructor is expected to reject it.
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
            run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4']
        },
        'task': {
            'type': run_config_lib.TaskType.WORKER,
            'index': 1
        }
    }
    with test.mock.patch.dict('os.environ',
                              {'TF_CONFIG': json.dumps(tf_config)}):
      config = run_config.RunConfig()
      self.assertEqual(config.num_worker_replicas, 2)
    with self.assertRaises(ValueError):
      self._model = wals_lib.WALSMatrixFactorization(1, 1, 1, config=config)
class SweepHookTest(test.TestCase):
  """Unit tests for wals_lib._SweepHook."""
  def test_sweeps(self):
    """Drives the hook through row -> col -> row sweeps, checking the ops run."""
    # Boolean variables recording which ops the hook has triggered.
    is_row_sweep_var = variables.Variable(True)
    is_sweep_done_var = variables.Variable(False)
    init_done = variables.Variable(False)
    row_prep_done = variables.Variable(False)
    col_prep_done = variables.Variable(False)
    row_train_done = variables.Variable(False)
    col_train_done = variables.Variable(False)
    init_op = state_ops.assign(init_done, True)
    row_prep_op = state_ops.assign(row_prep_done, True)
    col_prep_op = state_ops.assign(col_prep_done, True)
    row_train_op = state_ops.assign(row_train_done, True)
    col_train_op = state_ops.assign(col_train_done, True)
    train_op = control_flow_ops.no_op()
    # switch_op flips is_row_sweep_var and clears the sweep-done flag, as the
    # hook is expected to run it at the end of each sweep.
    switch_op = control_flow_ops.group(
        state_ops.assign(is_sweep_done_var, False),
        state_ops.assign(is_row_sweep_var,
                         math_ops.logical_not(is_row_sweep_var)))
    mark_sweep_done = state_ops.assign(is_sweep_done_var, True)
    with self.cached_session() as sess:
      sweep_hook = wals_lib._SweepHook(
          is_row_sweep_var,
          is_sweep_done_var,
          init_op,
          [row_prep_op],
          [col_prep_op],
          row_train_op,
          col_train_op,
          switch_op)
      mon_sess = monitored_session._HookedSession(sess, [sweep_hook])
      sess.run([variables.global_variables_initializer()])
      # Row sweep.
      mon_sess.run(train_op)
      self.assertTrue(sess.run(init_done),
                      msg='init op not run by the Sweephook')
      self.assertTrue(sess.run(row_prep_done),
                      msg='row_prep_op not run by the SweepHook')
      self.assertTrue(sess.run(row_train_done),
                      msg='row_train_op not run by the SweepHook')
      self.assertTrue(
          sess.run(is_row_sweep_var),
          msg='Row sweep is not complete but is_row_sweep_var is False.')
      # Col sweep.
      mon_sess.run(mark_sweep_done)
      mon_sess.run(train_op)
      self.assertTrue(sess.run(col_prep_done),
                      msg='col_prep_op not run by the SweepHook')
      self.assertTrue(sess.run(col_train_done),
                      msg='col_train_op not run by the SweepHook')
      self.assertFalse(
          sess.run(is_row_sweep_var),
          msg='Col sweep is not complete but is_row_sweep_var is True.')
      # Row sweep.
      mon_sess.run(mark_sweep_done)
      mon_sess.run(train_op)
      self.assertTrue(
          sess.run(is_row_sweep_var),
          msg='Col sweep is complete but is_row_sweep_var is False.')
class StopAtSweepHookTest(test.TestCase):
  """Unit tests for wals_lib._StopAtSweepHook."""
  def test_stop(self):
    """The hook must request a stop once COMPLETED_SWEEPS reaches last_sweep."""
    hook = wals_lib._StopAtSweepHook(last_sweep=10)
    completed_sweeps = variables.Variable(
        8, name=wals_lib.WALSMatrixFactorization.COMPLETED_SWEEPS)
    train_op = state_ops.assign_add(completed_sweeps, 1)
    hook.begin()
    with self.cached_session() as sess:
      sess.run([variables.global_variables_initializer()])
      mon_sess = monitored_session._HookedSession(sess, [hook])
      mon_sess.run(train_op)
      # completed_sweeps is 9 after running train_op.
      self.assertFalse(mon_sess.should_stop())
      mon_sess.run(train_op)
      # completed_sweeps is 10 after running train_op.
      self.assertTrue(mon_sess.should_stop())
if __name__ == '__main__':
  # Run all test cases in this module when executed directly.
  test.main()
| apache-2.0 |
Klaudit/wagtail | wagtail/wagtailsites/wagtail_hooks.py | 15 | 1155 | from django.conf.urls import include, url
from django.core import urlresolvers
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import Permission
from wagtail.wagtailcore import hooks
from wagtail.wagtailadmin.menu import MenuItem
from wagtail.wagtailsites import urls
@hooks.register('register_admin_urls')
def register_admin_urls():
    """Mount the wagtailsites URLconf under the admin ``sites/`` prefix."""
    sites_urls = url(r'^sites/', include(urls, namespace='wagtailsites'))
    return [sites_urls]
class SitesMenuItem(MenuItem):
    """Settings menu entry for site management.

    Shown only to users holding at least one of the site permissions.
    """

    def is_shown(self, request):
        # Visible if the user may add, change or delete sites (checked in
        # that order, short-circuiting on the first granted permission).
        site_perms = (
            'wagtailcore.add_site',
            'wagtailcore.change_site',
            'wagtailcore.delete_site',
        )
        return any(request.user.has_perm(perm) for perm in site_perms)
@hooks.register('register_settings_menu_item')
def register_sites_menu_item():
    """Add the 'Sites' entry to the Wagtail admin settings menu."""
    return SitesMenuItem(_('Sites'), urlresolvers.reverse('wagtailsites:index'), classnames='icon icon-site', order=602)
@hooks.register('register_permissions')
def register_permissions():
    """Expose the site add/change/delete permissions to Wagtail's permission UI."""
    return Permission.objects.filter(content_type__app_label='wagtailcore',
                                     codename__in=['add_site', 'change_site', 'delete_site'])
| bsd-3-clause |
GRArmstrong/invenio-inspire-ops | modules/bibsched/lib/tasklets/bst_twitter_fetcher.py | 30 | 6546 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Twitter fetcher
In order to schedule fetching tweets you can type at the command line:
$ sudo -u www-data /opt/invenio/bin/bibtasklet -T bst_twitter_fetcher -uadmin -s5m -a "query=YOURQUERY"
"""
## Here we import the Twitter APIs
import twitter
import re
import os
import sys
import tempfile
import time
import sys
## Here are some good Invenio APIs
from invenio.config import CFG_TMPDIR
## BibRecord -> to create MARCXML records
from invenio.bibrecord import record_add_field, record_xml_output
## BibTask -> to manipulate Bibliographic Tasks
from invenio.bibtask import task_low_level_submission, write_message, task_update_progress
## BibDocFile to manipulate documents
from invenio.bibdocfile import check_valid_url
## WebSearch to search for previous tweets
from invenio.search_engine import perform_request_search, get_fieldvalues
_TWITTER_API = twitter.Api()
def get_tweets(query):
    """
    Fetch from Twitter all tweets matching query that are newer than the
    ones already stored in the system, paging 100 results at a time.

    @param query: the Twitter search query
    @return: list of tweet objects
    """
    ## We shall skip tweets that already in the system.
    previous_tweets = perform_request_search(p='980__a:"TWEET" 980__b:"%s"' % query, sf='970__a', so='a')
    if previous_tweets:
        ## A bit of an algorithm to retrieve the last Tweet ID that was stored
        ## in our records
        since_id = int(get_fieldvalues(previous_tweets[0], '970__a')[0])
    else:
        since_id = 0
    final_results = []
    results = list(_TWITTER_API.Search(query, rpp=100, since_id=since_id).results)
    final_results.extend(results)
    page = 1
    while len(results) == 100: ## We stop if there are less than 100 results per page
        page += 1
        results = list(_TWITTER_API.Search(query, rpp=100, since_id=since_id, page=page).results)
        final_results.extend(results)
    return final_results
_RE_GET_HTTP = re.compile("(https?://.+?)(\s|$)")
_RE_TAGS = re.compile("([#@]\w+)")
def tweet_to_record(tweet, query):
"""
Transform a tweet into a record.
@note: you may want to highly customize this.
"""
rec = {}
## Let's normalize the body of the tweet.
text = tweet.text.encode('UTF-8')
text = text.replace('>', '>')
text = text.replace('<', '<')
text = text.replace('"', "'")
text = text.replace('&', '&')
## Let's add the creation date
try:
creation_date = time.strptime(tweet.created_at, '%a, %d %b %Y %H:%M:%S +0000')
except ValueError:
creation_date = time.strptime(tweet.created_at, '%a %b %d %H:%M:%S +0000 %Y')
record_add_field(rec, '260__c', time.strftime('%Y-%m-%dZ%H:%M:%ST', creation_date))
## Let's add the Tweet ID
record_add_field(rec, '970', subfields=[('a', str(tweet.id))])
## Let's add the body of the tweet as an abstract
record_add_field(rec, '520', subfields=[('a', text)])
## Let's re-add the body of the tweet as a title.
record_add_field(rec, '245', subfields=[('a', text)])
## Let's fetch information about the user
try:
user = _TWITTER_API.GetUser(tweet.from_user)
## Let's add the user name as author of the tweet
record_add_field(rec, '100', subfields=[('a', str(user.name.encode('UTF-8')))])
## Let's fetch the icon of the user profile, and let's upload it as
## an image (and an icon of itself)
record_add_field(rec, 'FFT', subfields=[('a', user.profile.image_url.encode('UTF-8')), ('x', user.profile.image_url.encode('UTF-8'))])
except Exception, err:
write_message("WARNING: issue when fetching the user: %s" % err, stream=sys.stderr)
if hasattr(tweet, 'iso_language_code'):
## Let's add the language of the Tweet if available (also this depends)
## on the kind of Twitter API call we used
record_add_field(rec, '045', subfields=[('a', tweet.iso_language_code.encode('UTF-8'))])
## Let's tag this record as a TWEET so that later we can build a collection
## out of these records.
record_add_field(rec, '980', subfields=[('a', 'TWEET'), ('b', query)])
## Some smart manipulations: let's parse out URLs and tags from the body
## of the Tweet.
for url in _RE_GET_HTTP.findall(text):
url = url[0]
record_add_field(rec, '856', '4', subfields=[('u', url)])
for tag in _RE_TAGS.findall(text):
## And here we add the keywords.
record_add_field(rec, '653', '1', subfields=[('a', tag), ('9', 'TWITTER')])
## Finally we shall serialize everything to MARCXML
return record_xml_output(rec)
def bst_twitter_fetcher(query):
    """
    Fetch the tweets related to the query and upload them into Invenio.

    @param query: the Twitter search query whose results should be archived
    """
    ## We prepare a temporary MARCXML file to upload.
    fd, name = tempfile.mkstemp(suffix='.xml', prefix='tweets', dir=CFG_TMPDIR)
    try:
        tweets = get_tweets(query)
        if tweets:
            os.write(fd, """<collection>\n""")
            for i, tweet in enumerate(tweets):
                ## For every tweet we transform it to MARCXML and we dump it in the file.
                task_update_progress('DONE: tweet %s out %s' % (i, len(tweets)))
                os.write(fd, tweet_to_record(tweet, query))
            ## BUGFIX: the closing tag used to be written as "</collection\n>",
            ## which is not well-formed XML; write "</collection>\n" instead.
            os.write(fd, """</collection>\n""")
    finally:
        ## Always release the descriptor: the original code leaked it when
        ## there were no tweets or when an exception was raised above.
        os.close(fd)
    if tweets:
        ## Invenio magic: we schedule an upload of the created MARCXML to be inserted
        ## ASAP in the system.
        task_low_level_submission('bibupload', 'admin', '-i', '-r', name, '-P5')
        write_message("Uploaded file %s with %s new tweets about %s" % (name, len(tweets), query))
    else:
        write_message("No new tweets about %s" % query)
if __name__ == '__main__':
    # Expect exactly one command-line argument: the Twitter search query.
    if len(sys.argv) == 2:
        bst_twitter_fetcher(sys.argv[1])
    else:
        print "USAGE: %s TWITTER_QUERY" % sys.argv[0]
        sys.exit(1)
| gpl-2.0 |
spacewalkproject/spacewalk | proxy/proxy/pm/rhn_package_manager.py | 1 | 14468 | #!/usr/bin/python2
#
# Copyright (c) 2008--2020 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
# Authors: Mihai Ibanescu <misa@redhat.com>
# Todd Warner <taw@redhat.com>
#
"""\
Management tool for the Spacewalk Proxy.
This script performs various management operations on the Spacewalk Proxy:
- Creates the local directory structure needed to store local packages
- Uploads packages from a given directory to the RHN servers
- Optionally, once the packages are uploaded, they can be linked to (one or
more) channels, and copied in the local directories for these channels.
- Lists the RHN server's vision on a certain channel
- Checks if the local image of the channel (the local directory) is in sync
with the server's image, and prints the missing packages (or the extra
ones)
- Cache any RPM content locally to avoid needing to download them. This can be
  particularly useful if bandwidth is precious or the connection to the server
  is slow.
"""
# system imports
import gzip
import os
from xml.dom import minidom
import sys
import shutil
import xmlrpclib
from optparse import Option, OptionParser
# RHN imports
from spacewalk.common.rhnConfig import CFG, initCFG
from spacewalk.common.rhnLib import parseUrl
initCFG('proxy.package_manager')
# pylint: disable=E0012, C0413
from rhnpush.uploadLib import UploadError
from rhnpush import uploadLib
from proxy.broker.rhnRepository import computePackagePaths
# globals
PREFIX = 'rhn'
def main():
    """Parse the command line and dispatch the requested package-manager action."""
    # Initialize a command-line processing object with a table of options
    optionsTable = [
        Option('-v', '--verbose', action='count', help='Increase verbosity'),
        Option('-d', '--dir', action='store', help='Process packages from this directory'),
        Option('-L', '--cache-locally', action='store_true',
               help='Locally cache packages so that Proxy will not ever need to '
               + 'download them. Changes nothing on the upstream server.'),
        Option('-e', '--from-export', action='store', dest='export_location',
               help='Process packages from this channel export. Can only be used '
               + 'with --cache-locally or --copyonly.'),
        Option('-c', '--channel', action='append',
               help='Channel to operate on. When used with --from-export '
               + 'specifies channels to cache rpms for, else specifies channels '
               + 'that we will be pushing into.'),
        Option('-n', '--count', action='store', help='Process this number of headers per call', type='int'),
        Option('-l', '--list', action='store_true', help='Only list the specified channels'),
        Option('-s', '--sync', action='store_true', help='Check if in sync with the server'),
        Option('-p', '--printconf', action='store_true', help='Print the configuration and exit'),
        Option('-X', '--exclude', action="append", help="Exclude packages that match this glob expression"),
        Option('--newest', action='store_true', help='Only push the files that are newer than the server ones'),
        Option('--stdin', action='store_true', help='Read the package names from stdin'),
        Option('--nosig', action='store_true', help="Push unsigned packages"),
        Option('--username', action='store', help='Use this username to connect to RHN'),
        Option('--password', action='store', help='Use this password to connect to RHN'),
        Option('--source', action='store_true', help='Upload source package headers'),
        Option('--dontcopy', action='store_true', help='Do not copy packages to the local directory'),
        Option('--copyonly', action='store_true',
               help="Only copy packages; don't reimport. Same as --cache-locally"),
        Option('--test', action='store_true', help='Only print the packages to be pushed'),
        Option('-N', '--new-cache', action='store_true', help='Create a new username/password cache'),
        Option('--no-ssl', action='store_true', help='Turn off SSL (not recommended).'),
        Option('--no-session-caching', action='store_true',
               help='Disables session-token authentication.'),
        Option('-?', '--usage', action='store_true', help="Briefly describe the options"),
    ]
    # Process the command line arguments
    optionParser = OptionParser(option_list=optionsTable, usage="USAGE: %prog [OPTION] [<package>]")
    options, files = optionParser.parse_args()
    upload = UploadClass(options, files=files)
    if options.usage:
        optionParser.print_usage()
        sys.exit(0)
    if options.printconf:
        CFG.show()
        return
    if options.list:
        upload.list()
        return
    if options.sync:
        upload.checkSync()
        return
    # It's just an alias to copyonly
    if options.cache_locally:
        options.copyonly = True
    # remember to process dir option before export, export can overwrite dir
    if options.dir:
        upload.directory()
    if options.export_location:
        if not options.copyonly:
            upload.die(0, "--from-export can only be used with --cache-locally"
                       + " or --copyonly")
        if options.source:
            upload.die(0, "--from-export cannot be used with --source")
        upload.from_export()
    if options.stdin:
        upload.readStdin()
    # if we're going to allow the user to specify packages by dir *and* export
    # *and* stdin *and* package list (why not?) then we have to uniquify
    # the list afterwards. Sort just for user-friendly display.
    upload.files = sorted(list(set(upload.files)))
    if options.copyonly:
        if not upload.files:
            upload.die(0, "Nothing to do; exiting. Try --help")
        if options.test:
            upload.test()
            return
        upload.copyonly()
        return
    if options.exclude:
        upload.filter_excludes()
    if options.newest:
        upload.newest()
    if not upload.files:
        upload.die(0, "Nothing to do; exiting. Try --help")
    if options.test:
        upload.test()
        return
    try:
        upload.uploadHeaders()
    except UploadError, e:
        sys.stderr.write("Upload error: %s\n" % e)
class UploadClass(uploadLib.UploadClass):
    """Proxy-specific specialization of rhnpush's UploadClass.

    Overrides configuration lookups (URL, HTTP proxy, CA chain) to come from
    the proxy configuration file and adds local package-cache support.
    """
    # pylint: disable=R0904,W0221
    def setURL(self, path='/APP'):
        # overloaded for uploadlib.py
        if not CFG.RHN_PARENT:
            self.die(-1, "rhn_parent not set in the configuration file")
        self.url = CFG.RHN_PARENT
        scheme = 'http://'
        if not self.options.no_ssl and CFG.USE_SSL:
            # i.e., --no-ssl overrides the USE_SSL config variable.
            scheme = 'https://'
        self.url = CFG.RHN_PARENT or ''
        # Keep only the hostname part of rhn_parent, then rebuild the URL.
        self.url = parseUrl(self.url)[1].split(':')[0]
        self.url = scheme + self.url + path
    # The rpm names in channel exports have been changed to be something like
    # rhn-package-XXXXXX.rpm, but that's okay because the rpm headers are
    # still intact and that's what we use to determine the destination
    # filename. Read the channel xml to determine what rpms to cache if the
    # --channel option was used.
    def from_export(self):
        """Queue rpms from a channel export directory for processing."""
        export_dir = self.options.export_location
        self.warn(1, "Getting files from channel export: ", export_dir)
        if not self.options.channel:
            self.warn(2, "No channels specified, getting all files")
            # If no channels specified just upload all rpms from
            # all the rpm directories
            for hash_dir in uploadLib.listdir(os.path.join(
                    export_dir, "rpms")):
                self.options.dir = hash_dir
                self.directory()
            return
        # else...
        self.warn(2, "Getting only files in these channels",
                  self.options.channel)
        # Read the channel xml and add only packages that are in these channels
        package_set = set([])
        for channel in self.options.channel:
            xml_path = os.path.join(export_dir, "channels", channel,
                                    "channel.xml.gz")
            if not os.access(xml_path, os.R_OK):
                self.warn(0, "Could not find metadata for channel %s, skipping..." % channel)
                print "Could not find metadata for channel %s, skipping..." % channel
                continue
            dom = minidom.parse(gzip.open(xml_path))
            # will only ever be the one
            dom_channel = dom.getElementsByTagName('rhn-channel')[0]
            package_set.update(dom_channel.attributes['packages']
                               .value.encode('ascii', 'ignore').split())
        # Try to find relevant packages in the export
        for hash_dir in uploadLib.listdir(os.path.join(export_dir, "rpms")):
            for rpm in uploadLib.listdir(hash_dir):
                # rpm name minus '.rpm'
                if os.path.basename(rpm)[:-4] in package_set:
                    self.files.append(rpm)
    def setServer(self):
        """Connect to the server, falling back to the old /XP handler on 404."""
        try:
            uploadLib.UploadClass.setServer(self)
            uploadLib.call(self.server.packages.no_op, raise_protocol_error=True)
        except xmlrpclib.ProtocolError, e:
            if e.errcode == 404:
                self.use_session = False
                self.setURL('/XP')
                uploadLib.UploadClass.setServer(self)
            else:
                raise
    def authenticate(self):
        # Session-token auth when supported, username/password otherwise.
        if self.use_session:
            uploadLib.UploadClass.authenticate(self)
        else:
            self.setUsernamePassword()
    def setProxyUsernamePassword(self):
        # overloaded for uploadlib.py
        self.proxyUsername = CFG.HTTP_PROXY_USERNAME
        self.proxyPassword = CFG.HTTP_PROXY_PASSWORD
    def setProxy(self):
        # overloaded for uploadlib.py
        self.proxy = CFG.HTTP_PROXY
    def setCAchain(self):
        # overloaded for uploadlib.py
        self.ca_chain = CFG.CA_CHAIN
    def setNoChannels(self):
        self.channels = self.options.channel
    def checkSync(self):
        """Report packages the server lists for the channels but which are
        missing from the local package cache."""
        # set the org
        self.setOrg()
        # set the URL
        self.setURL()
        # set the channels
        self.setChannels()
        # set the server
        self.setServer()
        self.authenticate()
        # List the channel's contents
        channel_list = self._listChannel()
        # Convert it to a hash of hashes
        remotePackages = {}
        for channel in self.channels:
            remotePackages[channel] = {}
        for p in channel_list:
            channelName = p[-1]
            key = tuple(p[:5])
            remotePackages[channelName][key] = None
        missing = []
        for package in channel_list:
            found = False
            # if the package includes checksum info
            if self.use_checksum_paths:
                checksum = package[6]
            else:
                checksum = None
            packagePaths = computePackagePaths(package, 0, PREFIX, checksum)
            for packagePath in packagePaths:
                packagePath = "%s/%s" % (CFG.PKG_DIR, packagePath)
                if os.path.isfile(packagePath):
                    found = True
                    break
            if not found:
                missing.append([package, packagePaths[0]])
        if not missing:
            self.warn(0, "Channels in sync with the server")
            return
        for package, packagePath in missing:
            channelName = package[-1]
            self.warn(0, "Missing: %s in channel %s (path %s)" % (
                rpmPackageName(package), channelName, packagePath))
    def processPackage(self, package, filename, checksum=None):
        """Copy *filename* into the local package cache at its canonical path."""
        if self.options.dontcopy:
            return
        if not CFG.PKG_DIR:
            self.warn(1, "No package directory specified; will not copy the package")
            return
        if not self.use_checksum_paths:
            checksum = None
        # Copy file to the preferred path
        packagePath = computePackagePaths(package, self.options.source,
                                          PREFIX, checksum)[0]
        packagePath = "%s/%s" % (CFG.PKG_DIR, packagePath)
        destdir = os.path.dirname(packagePath)
        if not os.path.isdir(destdir):
            # Try to create it
            try:
                os.makedirs(destdir, 0755)
            except OSError:
                self.warn(0, "Could not create directory %s" % destdir)
                return
        self.warn(1, "Copying %s to %s" % (filename, packagePath))
        shutil.copy2(filename, packagePath)
        # Make sure the file permissions are set correctly, so that Apache can
        # see the files
        os.chmod(packagePath, 0644)
    def _listChannelSource(self):
        self.die(1, "Listing source rpms not supported")
    def copyonly(self):
        """Copy the selected packages into the local cache without importing."""
        # Set the forcing factor
        self.setForce()
        # Relative directory
        self.setRelativeDir()
        # Set the count
        self.setCount()
        if not CFG.PKG_DIR:
            self.warn(1, "No package directory specified; will not copy the package")
            return
        # Safe because proxy X can't be activated against Spacewalk / Satellite
        # < X.
        self.use_checksum_paths = True
        for filename in self.files:
            fileinfo = self._processFile(filename,
                                         relativeDir=self.relativeDir,
                                         source=self.options.source,
                                         nosig=self.options.nosig)
            self.processPackage(fileinfo['nvrea'], filename,
                                fileinfo['checksum'])
def rpmPackageName(p):
    """Return the canonical rpm file name (name-version-release.arch.rpm) for tuple *p*."""
    name, version, release, arch = p[0], p[1], p[2], p[4]
    return "{0}-{1}-{2}.{3}.rpm".format(name, version, release, arch)
if __name__ == '__main__':
    try:
        main()
    except SystemExit, se:
        # Propagate the exit code requested via sys.exit() inside main().
        sys.exit(se.code)
| gpl-2.0 |
mhsiddiqui/django-error-report | error_report/models.py | 1 | 1751 | from __future__ import absolute_import, unicode_literals
from django.db import models
from django.utils.html import format_html
from django.utils.translation import ugettext_lazy as _
from django.utils import version
from error_report.settings import ERROR_DETAIL_SETTINGS
if version.get_complete_version() < (1, 10):
from django.core.urlresolvers import reverse
else:
from django.urls import reverse
class Error(models.Model):
    """
    Model for storing the individual errors.
    """
    # Exception class name (e.g. "ValueError"); indexed for admin filtering.
    kind = models.CharField(_('type'),
                            null=True, blank=True, max_length=128, db_index=True
                            )
    # Exception message / summary text.
    info = models.TextField(
        null=False,
    )
    # Serialized extra data captured with the error.
    data = models.TextField(
        blank=True, null=True
    )
    # URL of the request that triggered the error.
    path = models.URLField(
        null=True, blank=True,
    )
    # Timestamp of the first capture; set automatically on insert.
    when = models.DateTimeField(
        null=False, auto_now_add=True, db_index=True,
    )
    # Full HTML debug page for the error, rendered later in an iframe.
    html = models.TextField(
        null=True, blank=True,
    )
    modified = models.DateTimeField(auto_now=True)
    class Meta:
        """
        Meta information for the model.
        """
        verbose_name = _('Error')
        verbose_name_plural = _('Errors')
    def __unicode__(self):
        """
        String representation of the object.
        """
        # NOTE(review): only __unicode__ is defined; under Python 3 / Django 2+
        # instances fall back to the default repr — confirm whether __str__ is
        # intended to be provided elsewhere.
        return "%s: %s" % (self.kind, self.info)
    def html_iframe(self):
        """
        Return an Iframe for Viewing Error detail in django admin
        :return:
        """
        return \
            format_html('<iframe style="width: 100%; height: {}px;" src="{}"></iframe>',
                        ERROR_DETAIL_SETTINGS.get('ERROR_DETAIL_HEIGHT', 1000),
                        reverse('error-html-link', kwargs={'error': self.id}))
| mit |
KiChjang/servo | tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/executors/executorservo.py | 4 | 13000 | from __future__ import print_function
import base64
import json
import os
import subprocess
import tempfile
import threading
import traceback
import uuid
from six import ensure_str
from mozprocess import ProcessHandler
from tools.serve.serve import make_hosts_file
from .base import (ConnectionlessProtocol,
RefTestImplementation,
crashtest_result_converter,
testharness_result_converter,
reftest_result_converter,
TimedRunner,
WdspecExecutor,
WdspecProtocol)
from .process import ProcessTestExecutor
from ..browsers.base import browser_command
from ..process import cast_env
from ..webdriver_server import ServoDriverServer
pytestrunner = None
webdriver = None
def write_hosts_file(config):
    """Write a hosts file mapping the test hostnames to 127.0.0.1; return its path."""
    fd, hosts_path = tempfile.mkstemp()
    contents = make_hosts_file(config, "127.0.0.1")
    with os.fdopen(fd, "w") as hosts_file:
        hosts_file.write(contents)
    return hosts_path
def build_servo_command(test, test_url_func, browser, binary, pause_after_test, debug_info,
                        extra_args=None, debug_opts="replace-surrogates"):
    """Construct the servo invocation (including any debugger prefix) for *test*."""
    args = [
        "--hard-fail", "-u", "Servo/wptrunner",
        "-z", test_url_func(test),
    ]
    if debug_opts:
        args += ["-Z", debug_opts]
    for stylesheet in browser.user_stylesheets:
        args += ["--user-stylesheet", stylesheet]
    for pref, value in test.environment.get('prefs', {}).items():
        args += ["--pref", "%s=%s" % (pref, value)]
    if browser.ca_certificate_path:
        args += ["--certificate-path", browser.ca_certificate_path]
    if extra_args:
        args += extra_args
    args += browser.binary_args
    debug_args, command = browser_command(binary, args, debug_info)
    if pause_after_test:
        # Drop only the "-z" flag so servo stays open after the test; the test
        # URL remains in the command as a positional argument — presumably
        # intentional, TODO confirm.
        command.remove("-z")
    return debug_args + command
class ServoTestharnessExecutor(ProcessTestExecutor):
    """Runs testharness.js tests by launching a fresh servo process per test
    and parsing the result from an "ALERT: RESULT:" line on stdout."""
    convert_result = testharness_result_converter
    def __init__(self, logger, browser, server_config, timeout_multiplier=1, debug_info=None,
                 pause_after_test=False, **kwargs):
        ProcessTestExecutor.__init__(self, logger, browser, server_config,
                                     timeout_multiplier=timeout_multiplier,
                                     debug_info=debug_info)
        self.pause_after_test = pause_after_test
        self.result_data = None
        self.result_flag = None
        self.protocol = ConnectionlessProtocol(self, browser)
        self.hosts_path = write_hosts_file(server_config)
    def teardown(self):
        # Best-effort removal of the temporary hosts file.
        try:
            os.unlink(self.hosts_path)
        except OSError:
            pass
        ProcessTestExecutor.teardown(self)
    def do_test(self, test):
        """Run one test; return (harness result, subtest results)."""
        self.result_data = None
        self.result_flag = threading.Event()
        self.command = build_servo_command(test,
                                           self.test_url,
                                           self.browser,
                                           self.binary,
                                           self.pause_after_test,
                                           self.debug_info)
        env = os.environ.copy()
        env["HOST_FILE"] = self.hosts_path
        env["RUST_BACKTRACE"] = "1"
        if not self.interactive:
            self.proc = ProcessHandler(self.command,
                                       processOutputLine=[self.on_output],
                                       onFinish=self.on_finish,
                                       env=cast_env(env),
                                       storeOutput=False)
            self.proc.run()
        else:
            self.proc = subprocess.Popen(self.command, env=cast_env(env))
        try:
            timeout = test.timeout * self.timeout_multiplier
            # Now wait to get the output we expect, or until we reach the timeout
            if not self.interactive and not self.pause_after_test:
                wait_timeout = timeout + 5
                self.result_flag.wait(wait_timeout)
            else:
                wait_timeout = None
                self.proc.wait()
            proc_is_running = True
            if self.result_flag.is_set():
                if self.result_data is not None:
                    result = self.convert_result(test, self.result_data)
                else:
                    # Flag set without data means the process exited early.
                    self.proc.wait()
                    result = (test.result_cls("CRASH", None), [])
                    proc_is_running = False
            else:
                result = (test.result_cls("TIMEOUT", None), [])
            if proc_is_running:
                if self.pause_after_test:
                    self.logger.info("Pausing until the browser exits")
                    self.proc.wait()
                else:
                    self.proc.kill()
        except: # noqa
            self.proc.kill()
            raise
        return result
    def on_output(self, line):
        # Results are reported through an alert; matching lines look like
        # "ALERT: RESULT: <json>". Everything else is forwarded as log output.
        prefix = "ALERT: RESULT: "
        line = line.decode("utf8", "replace")
        if line.startswith(prefix):
            self.result_data = json.loads(line[len(prefix):])
            self.result_flag.set()
        else:
            if self.interactive:
                print(line)
            else:
                self.logger.process_output(self.proc.pid,
                                           line,
                                           " ".join(self.command))
    def on_finish(self):
        self.result_flag.set()
class TempFilename(object):
    """Context manager yielding a unique path inside *directory*.

    The file (if the caller created one at the path) is removed on exit.
    """

    def __init__(self, directory):
        self.directory = directory
        self.path = None

    def __enter__(self):
        unique_name = str(uuid.uuid4())
        self.path = os.path.join(self.directory, unique_name)
        return self.path

    def __exit__(self, *args, **kwargs):
        # Best-effort cleanup: the caller may never have created the file.
        try:
            os.unlink(self.path)
        except OSError:
            pass
class ServoRefTestExecutor(ProcessTestExecutor):
    """Runs reftests by launching servo once per screenshot and letting
    RefTestImplementation compare the captured images."""
    convert_result = reftest_result_converter
    def __init__(self, logger, browser, server_config, binary=None, timeout_multiplier=1,
                 screenshot_cache=None, debug_info=None, pause_after_test=False,
                 **kwargs):
        ProcessTestExecutor.__init__(self,
                                     logger,
                                     browser,
                                     server_config,
                                     timeout_multiplier=timeout_multiplier,
                                     debug_info=debug_info)
        self.protocol = ConnectionlessProtocol(self, browser)
        self.screenshot_cache = screenshot_cache
        self.implementation = RefTestImplementation(self)
        self.tempdir = tempfile.mkdtemp()
        self.hosts_path = write_hosts_file(server_config)
    def reset(self):
        self.implementation.reset()
    def teardown(self):
        # Best-effort removal of the hosts file, then the screenshot tempdir.
        try:
            os.unlink(self.hosts_path)
        except OSError:
            pass
        os.rmdir(self.tempdir)
        ProcessTestExecutor.teardown(self)
    def screenshot(self, test, viewport_size, dpi, page_ranges):
        """Render *test* to a PNG; return (True, [base64 data]) on success or
        (False, (status, None)) on timeout/crash."""
        with TempFilename(self.tempdir) as output_path:
            extra_args = ["--exit",
                          "--output=%s" % output_path,
                          "--resolution", viewport_size or "800x600"]
            debug_opts = "disable-text-aa,load-webfonts-synchronously,replace-surrogates"
            if dpi:
                extra_args += ["--device-pixel-ratio", dpi]
            self.command = build_servo_command(test,
                                               self.test_url,
                                               self.browser,
                                               self.binary,
                                               False,
                                               self.debug_info,
                                               extra_args,
                                               debug_opts)
            env = os.environ.copy()
            env["HOST_FILE"] = self.hosts_path
            env["RUST_BACKTRACE"] = "1"
            if not self.interactive:
                self.proc = ProcessHandler(self.command,
                                           processOutputLine=[self.on_output],
                                           env=cast_env(env))
                try:
                    self.proc.run()
                    timeout = test.timeout * self.timeout_multiplier + 5
                    rv = self.proc.wait(timeout=timeout)
                except KeyboardInterrupt:
                    self.proc.kill()
                    raise
            else:
                self.proc = subprocess.Popen(self.command,
                                             env=cast_env(env))
                try:
                    rv = self.proc.wait()
                except KeyboardInterrupt:
                    self.proc.kill()
                    raise
            if rv is None:
                # wait() returning None means the timeout elapsed.
                self.proc.kill()
                return False, ("EXTERNAL-TIMEOUT", None)
            if rv != 0 or not os.path.exists(output_path):
                return False, ("CRASH", None)
            with open(output_path, "rb") as f:
                # Might need to strip variable headers or something here
                data = f.read()
            return True, [ensure_str(base64.b64encode(data))]
    def do_test(self, test):
        result = self.implementation.run_test(test)
        return self.convert_result(test, result)
    def on_output(self, line):
        line = line.decode("utf8", "replace")
        if self.interactive:
            print(line)
        else:
            self.logger.process_output(self.proc.pid,
                                       line,
                                       " ".join(self.command))
class ServoDriverProtocol(WdspecProtocol):
    """WebDriver protocol whose server is a servodriver instance."""
    server_cls = ServoDriverServer
class ServoWdspecExecutor(WdspecExecutor):
    """Wdspec executor that talks to servo via ServoDriverProtocol."""
    protocol_cls = ServoDriverProtocol
class ServoTimedRunner(TimedRunner):
    """TimedRunner that delegates timeout enforcement to servo itself.

    The wrapped func runs on the current thread; set_timeout is a no-op
    because the browser process is expected to exit on its own.
    """

    def run_func(self):
        try:
            self.result = True, self.func(self.protocol, self.url, self.timeout)
        except Exception as e:
            message = getattr(e, "message", "")
            if message:
                message += "\n"
            # BUG FIX: traceback.format_exc() takes an optional integer
            # *limit*, not an exception.  Passing ``e`` raises TypeError on
            # Python 3 and masks the original failure with a secondary one.
            message += traceback.format_exc()
            self.result = False, ("INTERNAL-ERROR", message)
        finally:
            # Always unblock the waiter, even on internal errors.
            self.result_flag.set()

    def set_timeout(self):
        # Timeout handling is delegated to the browser process.
        pass
class ServoCrashtestExecutor(ProcessTestExecutor):
    """Run crashtests by launching one fresh servo process per test.

    A test passes when the process exits normally; anything else is
    reported as CRASH.
    """

    convert_result = crashtest_result_converter

    def __init__(self, logger, browser, server_config, binary=None, timeout_multiplier=1,
                 screenshot_cache=None, debug_info=None, pause_after_test=False,
                 **kwargs):
        ProcessTestExecutor.__init__(self,
                                     logger,
                                     browser,
                                     server_config,
                                     timeout_multiplier=timeout_multiplier,
                                     debug_info=debug_info)
        self.pause_after_test = pause_after_test
        # Connectionless: we never talk to the browser, only watch the process.
        self.protocol = ConnectionlessProtocol(self, browser)
        self.tempdir = tempfile.mkdtemp()
        # Hosts file redirecting test domains to the local wpt server.
        self.hosts_path = write_hosts_file(server_config)

    def do_test(self, test):
        """Run a single crashtest and return its converted result."""
        # No timeout while under a debugger so the user can inspect freely.
        timeout = (test.timeout * self.timeout_multiplier if self.debug_info is None
                   else None)
        test_url = self.test_url(test)
        # We want to pass the full test object into build_servo_command,
        # so stash it in the class
        self.test = test
        success, data = ServoTimedRunner(self.logger, self.do_crashtest, self.protocol,
                                         test_url, timeout, self.extra_timeout).run()
        # Ensure that no processes hang around if they timeout.
        self.proc.kill()
        if success:
            return self.convert_result(test, data)
        return (test.result_cls(*data), [])

    def do_crashtest(self, protocol, url, timeout):
        """Launch servo with ``-x`` (exit after load) and map the exit
        status to PASS/CRASH."""
        env = os.environ.copy()
        env["HOST_FILE"] = self.hosts_path
        env["RUST_BACKTRACE"] = "1"
        command = build_servo_command(self.test,
                                      self.test_url,
                                      self.browser,
                                      self.binary,
                                      False,
                                      self.debug_info,
                                      extra_args=["-x"])
        if not self.interactive:
            self.proc = ProcessHandler(command,
                                       env=cast_env(env),
                                       storeOutput=False)
            self.proc.run()
        else:
            self.proc = subprocess.Popen(command, env=cast_env(env))
        self.proc.wait()
        # A non-negative returncode means the process exited rather than
        # being killed by a signal, which counts as a pass here.
        if self.proc.poll() >= 0:
            return {"status": "PASS", "message": None}
        return {"status": "CRASH", "message": None}
| mpl-2.0 |
vipulroxx/sympy | sympy/polys/tests/test_densebasic.py | 97 | 21462 | """Tests for dense recursive polynomials' basic tools. """
from sympy.polys.densebasic import (
dup_LC, dmp_LC,
dup_TC, dmp_TC,
dmp_ground_LC, dmp_ground_TC,
dmp_true_LT,
dup_degree, dmp_degree,
dmp_degree_in, dmp_degree_list,
dup_strip, dmp_strip,
dmp_validate,
dup_reverse,
dup_copy, dmp_copy,
dup_normal, dmp_normal,
dup_convert, dmp_convert,
dup_from_sympy, dmp_from_sympy,
dup_nth, dmp_nth, dmp_ground_nth,
dmp_zero_p, dmp_zero,
dmp_one_p, dmp_one,
dmp_ground_p, dmp_ground,
dmp_negative_p, dmp_positive_p,
dmp_zeros, dmp_grounds,
dup_from_dict, dup_from_raw_dict,
dup_to_dict, dup_to_raw_dict,
dmp_from_dict, dmp_to_dict,
dmp_swap, dmp_permute,
dmp_nest, dmp_raise,
dup_deflate, dmp_deflate,
dup_multi_deflate, dmp_multi_deflate,
dup_inflate, dmp_inflate,
dmp_exclude, dmp_include,
dmp_inject, dmp_eject,
dup_terms_gcd, dmp_terms_gcd,
dmp_list_terms, dmp_apply_pairs,
dup_slice,
dup_random,
)
from sympy.polys.specialpolys import f_polys
from sympy.polys.domains import ZZ, QQ
from sympy.polys.rings import ring
from sympy.core.singleton import S
from sympy.utilities.pytest import raises
from sympy import oo
f_0, f_1, f_2, f_3, f_4, f_5, f_6 = [ f.to_dense() for f in f_polys() ]
def test_dup_LC():
    """Leading coefficient: zero for the empty poly, first entry otherwise."""
    for poly, expected in [([], 0), ([2, 3, 4, 5], 2)]:
        assert dup_LC(poly, ZZ) == expected
def test_dup_TC():
assert dup_TC([], ZZ) == 0
assert dup_TC([2, 3, 4, 5], ZZ) == 5
def test_dmp_LC():
assert dmp_LC([[]], ZZ) == []
assert dmp_LC([[2, 3, 4], [5]], ZZ) == [2, 3, 4]
assert dmp_LC([[[]]], ZZ) == [[]]
assert dmp_LC([[[2], [3, 4]], [[5]]], ZZ) == [[2], [3, 4]]
def test_dmp_TC():
assert dmp_TC([[]], ZZ) == []
assert dmp_TC([[2, 3, 4], [5]], ZZ) == [5]
assert dmp_TC([[[]]], ZZ) == [[]]
assert dmp_TC([[[2], [3, 4]], [[5]]], ZZ) == [[5]]
def test_dmp_ground_LC():
assert dmp_ground_LC([[]], 1, ZZ) == 0
assert dmp_ground_LC([[2, 3, 4], [5]], 1, ZZ) == 2
assert dmp_ground_LC([[[]]], 2, ZZ) == 0
assert dmp_ground_LC([[[2], [3, 4]], [[5]]], 2, ZZ) == 2
def test_dmp_ground_TC():
assert dmp_ground_TC([[]], 1, ZZ) == 0
assert dmp_ground_TC([[2, 3, 4], [5]], 1, ZZ) == 5
assert dmp_ground_TC([[[]]], 2, ZZ) == 0
assert dmp_ground_TC([[[2], [3, 4]], [[5]]], 2, ZZ) == 5
def test_dmp_true_LT():
assert dmp_true_LT([[]], 1, ZZ) == ((0, 0), 0)
assert dmp_true_LT([[7]], 1, ZZ) == ((0, 0), 7)
assert dmp_true_LT([[1, 0]], 1, ZZ) == ((0, 1), 1)
assert dmp_true_LT([[1], []], 1, ZZ) == ((1, 0), 1)
assert dmp_true_LT([[1, 0], []], 1, ZZ) == ((1, 1), 1)
def test_dup_degree():
    """Degree of a dense poly is len - 1; the zero poly has degree -oo."""
    assert dup_degree([]) == -oo
    for poly, degree in [([1], 0), ([1, 0], 1), ([1, 0, 0, 0, 1], 4)]:
        assert dup_degree(poly) == degree
def test_dmp_degree():
assert dmp_degree([[]], 1) == -oo
assert dmp_degree([[[]]], 2) == -oo
assert dmp_degree([[1]], 1) == 0
assert dmp_degree([[2], [1]], 1) == 1
def test_dmp_degree_in():
assert dmp_degree_in([[[]]], 0, 2) == -oo
assert dmp_degree_in([[[]]], 1, 2) == -oo
assert dmp_degree_in([[[]]], 2, 2) == -oo
assert dmp_degree_in([[[1]]], 0, 2) == 0
assert dmp_degree_in([[[1]]], 1, 2) == 0
assert dmp_degree_in([[[1]]], 2, 2) == 0
assert dmp_degree_in(f_4, 0, 2) == 9
assert dmp_degree_in(f_4, 1, 2) == 12
assert dmp_degree_in(f_4, 2, 2) == 8
assert dmp_degree_in(f_6, 0, 2) == 4
assert dmp_degree_in(f_6, 1, 2) == 4
assert dmp_degree_in(f_6, 2, 2) == 6
assert dmp_degree_in(f_6, 3, 3) == 3
raises(IndexError, lambda: dmp_degree_in([[1]], -5, 1))
def test_dmp_degree_list():
assert dmp_degree_list([[[[ ]]]], 3) == (-oo, -oo, -oo, -oo)
assert dmp_degree_list([[[[1]]]], 3) == ( 0, 0, 0, 0)
assert dmp_degree_list(f_0, 2) == (2, 2, 2)
assert dmp_degree_list(f_1, 2) == (3, 3, 3)
assert dmp_degree_list(f_2, 2) == (5, 3, 3)
assert dmp_degree_list(f_3, 2) == (5, 4, 7)
assert dmp_degree_list(f_4, 2) == (9, 12, 8)
assert dmp_degree_list(f_5, 2) == (3, 3, 3)
assert dmp_degree_list(f_6, 3) == (4, 4, 6, 3)
def test_dup_strip():
    """dup_strip drops leading (high-order) zeros but keeps trailing ones."""
    cases = [
        ([], []),
        ([0], []),
        ([0, 0, 0], []),
        ([1], [1]),
        ([0, 1], [1]),
        ([0, 0, 0, 1], [1]),
        ([1, 2, 0], [1, 2, 0]),
        ([0, 1, 2, 0], [1, 2, 0]),
        ([0, 0, 0, 1, 2, 0], [1, 2, 0]),
    ]
    for poly, expected in cases:
        assert dup_strip(poly) == expected
def test_dmp_strip():
assert dmp_strip([0, 1, 0], 0) == [1, 0]
assert dmp_strip([[]], 1) == [[]]
assert dmp_strip([[], []], 1) == [[]]
assert dmp_strip([[], [], []], 1) == [[]]
assert dmp_strip([[[]]], 2) == [[[]]]
assert dmp_strip([[[]], [[]]], 2) == [[[]]]
assert dmp_strip([[[]], [[]], [[]]], 2) == [[[]]]
assert dmp_strip([[[1]]], 2) == [[[1]]]
assert dmp_strip([[[]], [[1]]], 2) == [[[1]]]
assert dmp_strip([[[]], [[1]], [[]]], 2) == [[[1]], [[]]]
def test_dmp_validate():
assert dmp_validate([]) == ([], 0)
assert dmp_validate([0, 0, 0, 1, 0]) == ([1, 0], 0)
assert dmp_validate([[[]]]) == ([[[]]], 2)
assert dmp_validate([[0], [], [0], [1], [0]]) == ([[1], []], 1)
raises(ValueError, lambda: dmp_validate([[0], 0, [0], [1], [0]]))
def test_dup_reverse():
assert dup_reverse([1, 2, 0, 3]) == [3, 0, 2, 1]
assert dup_reverse([1, 2, 3, 0]) == [3, 2, 1]
def test_dup_copy():
f = [ZZ(1), ZZ(0), ZZ(2)]
g = dup_copy(f)
g[0], g[2] = ZZ(7), ZZ(0)
assert f != g
def test_dmp_copy():
f = [[ZZ(1)], [ZZ(2), ZZ(0)]]
g = dmp_copy(f, 1)
g[0][0], g[1][1] = ZZ(7), ZZ(1)
assert f != g
def test_dup_normal():
assert dup_normal([0, 0, 2, 1, 0, 11, 0], ZZ) == \
[ZZ(2), ZZ(1), ZZ(0), ZZ(11), ZZ(0)]
def test_dmp_normal():
assert dmp_normal([[0], [], [0, 2, 1], [0], [11], []], 1, ZZ) == \
[[ZZ(2), ZZ(1)], [], [ZZ(11)], []]
def test_dup_convert():
K0, K1 = ZZ['x'], ZZ
f = [K0(1), K0(2), K0(0), K0(3)]
assert dup_convert(f, K0, K1) == \
[ZZ(1), ZZ(2), ZZ(0), ZZ(3)]
def test_dmp_convert():
K0, K1 = ZZ['x'], ZZ
f = [[K0(1)], [K0(2)], [], [K0(3)]]
assert dmp_convert(f, 1, K0, K1) == \
[[ZZ(1)], [ZZ(2)], [], [ZZ(3)]]
def test_dup_from_sympy():
assert dup_from_sympy([S(1), S(2)], ZZ) == \
[ZZ(1), ZZ(2)]
assert dup_from_sympy([S(1)/2, S(3)], QQ) == \
[QQ(1, 2), QQ(3, 1)]
def test_dmp_from_sympy():
assert dmp_from_sympy([[S(1), S(2)], [S(0)]], 1, ZZ) == \
[[ZZ(1), ZZ(2)], []]
assert dmp_from_sympy([[S(1)/2, S(2)]], 1, QQ) == \
[[QQ(1, 2), QQ(2, 1)]]
def test_dup_nth():
assert dup_nth([1, 2, 3], 0, ZZ) == 3
assert dup_nth([1, 2, 3], 1, ZZ) == 2
assert dup_nth([1, 2, 3], 2, ZZ) == 1
assert dup_nth([1, 2, 3], 9, ZZ) == 0
raises(IndexError, lambda: dup_nth([3, 4, 5], -1, ZZ))
def test_dmp_nth():
assert dmp_nth([[1], [2], [3]], 0, 1, ZZ) == [3]
assert dmp_nth([[1], [2], [3]], 1, 1, ZZ) == [2]
assert dmp_nth([[1], [2], [3]], 2, 1, ZZ) == [1]
assert dmp_nth([[1], [2], [3]], 9, 1, ZZ) == []
raises(IndexError, lambda: dmp_nth([[3], [4], [5]], -1, 1, ZZ))
def test_dmp_ground_nth():
assert dmp_ground_nth([[]], (0, 0), 1, ZZ) == 0
assert dmp_ground_nth([[1], [2], [3]], (0, 0), 1, ZZ) == 3
assert dmp_ground_nth([[1], [2], [3]], (1, 0), 1, ZZ) == 2
assert dmp_ground_nth([[1], [2], [3]], (2, 0), 1, ZZ) == 1
assert dmp_ground_nth([[1], [2], [3]], (2, 1), 1, ZZ) == 0
assert dmp_ground_nth([[1], [2], [3]], (3, 0), 1, ZZ) == 0
raises(IndexError, lambda: dmp_ground_nth([[3], [4], [5]], (2, -1), 1, ZZ))
def test_dmp_zero_p():
assert dmp_zero_p([], 0) is True
assert dmp_zero_p([[]], 1) is True
assert dmp_zero_p([[[]]], 2) is True
assert dmp_zero_p([[[1]]], 2) is False
def test_dmp_zero():
assert dmp_zero(0) == []
assert dmp_zero(2) == [[[]]]
def test_dmp_one_p():
assert dmp_one_p([1], 0, ZZ) is True
assert dmp_one_p([[1]], 1, ZZ) is True
assert dmp_one_p([[[1]]], 2, ZZ) is True
assert dmp_one_p([[[12]]], 2, ZZ) is False
def test_dmp_one():
assert dmp_one(0, ZZ) == [ZZ(1)]
assert dmp_one(2, ZZ) == [[[ZZ(1)]]]
def test_dmp_ground_p():
assert dmp_ground_p([], 0, 0) is True
assert dmp_ground_p([[]], 0, 1) is True
assert dmp_ground_p([[]], 1, 1) is False
assert dmp_ground_p([[ZZ(1)]], 1, 1) is True
assert dmp_ground_p([[[ZZ(2)]]], 2, 2) is True
assert dmp_ground_p([[[ZZ(2)]]], 3, 2) is False
assert dmp_ground_p([[[ZZ(3)], []]], 3, 2) is False
assert dmp_ground_p([], None, 0) is True
assert dmp_ground_p([[]], None, 1) is True
assert dmp_ground_p([ZZ(1)], None, 0) is True
assert dmp_ground_p([[[ZZ(1)]]], None, 2) is True
assert dmp_ground_p([[[ZZ(3)], []]], None, 2) is False
def test_dmp_ground():
assert dmp_ground(ZZ(0), 2) == [[[]]]
assert dmp_ground(ZZ(7), -1) == ZZ(7)
assert dmp_ground(ZZ(7), 0) == [ZZ(7)]
assert dmp_ground(ZZ(7), 2) == [[[ZZ(7)]]]
def test_dmp_zeros():
assert dmp_zeros(4, 0, ZZ) == [[], [], [], []]
assert dmp_zeros(0, 2, ZZ) == []
assert dmp_zeros(1, 2, ZZ) == [[[[]]]]
assert dmp_zeros(2, 2, ZZ) == [[[[]]], [[[]]]]
assert dmp_zeros(3, 2, ZZ) == [[[[]]], [[[]]], [[[]]]]
assert dmp_zeros(3, -1, ZZ) == [0, 0, 0]
def test_dmp_grounds():
assert dmp_grounds(ZZ(7), 0, 2) == []
assert dmp_grounds(ZZ(7), 1, 2) == [[[[7]]]]
assert dmp_grounds(ZZ(7), 2, 2) == [[[[7]]], [[[7]]]]
assert dmp_grounds(ZZ(7), 3, 2) == [[[[7]]], [[[7]]], [[[7]]]]
assert dmp_grounds(ZZ(7), 3, -1) == [7, 7, 7]
def test_dmp_negative_p():
assert dmp_negative_p([[[]]], 2, ZZ) is False
assert dmp_negative_p([[[1], [2]]], 2, ZZ) is False
assert dmp_negative_p([[[-1], [2]]], 2, ZZ) is True
def test_dmp_positive_p():
assert dmp_positive_p([[[]]], 2, ZZ) is False
assert dmp_positive_p([[[1], [2]]], 2, ZZ) is True
assert dmp_positive_p([[[-1], [2]]], 2, ZZ) is False
def test_dup_from_to_dict():
assert dup_from_raw_dict({}, ZZ) == []
assert dup_from_dict({}, ZZ) == []
assert dup_to_raw_dict([]) == {}
assert dup_to_dict([]) == {}
assert dup_to_raw_dict([], ZZ, zero=True) == {0: ZZ(0)}
assert dup_to_dict([], ZZ, zero=True) == {(0,): ZZ(0)}
f = [3, 0, 0, 2, 0, 0, 0, 0, 8]
g = {8: 3, 5: 2, 0: 8}
h = {(8,): 3, (5,): 2, (0,): 8}
assert dup_from_raw_dict(g, ZZ) == f
assert dup_from_dict(h, ZZ) == f
assert dup_to_raw_dict(f) == g
assert dup_to_dict(f) == h
R, x,y = ring("x,y", ZZ)
K = R.to_domain()
f = [R(3), R(0), R(2), R(0), R(0), R(8)]
g = {5: R(3), 3: R(2), 0: R(8)}
h = {(5,): R(3), (3,): R(2), (0,): R(8)}
assert dup_from_raw_dict(g, K) == f
assert dup_from_dict(h, K) == f
assert dup_to_raw_dict(f) == g
assert dup_to_dict(f) == h
def test_dmp_from_to_dict():
assert dmp_from_dict({}, 1, ZZ) == [[]]
assert dmp_to_dict([[]], 1) == {}
assert dmp_to_dict([], 0, ZZ, zero=True) == {(0,): ZZ(0)}
assert dmp_to_dict([[]], 1, ZZ, zero=True) == {(0, 0): ZZ(0)}
f = [[3], [], [], [2], [], [], [], [], [8]]
g = {(8, 0): 3, (5, 0): 2, (0, 0): 8}
assert dmp_from_dict(g, 1, ZZ) == f
assert dmp_to_dict(f, 1) == g
def test_dmp_swap():
f = dmp_normal([[1, 0, 0], [], [1, 0], [], [1]], 1, ZZ)
g = dmp_normal([[1, 0, 0, 0, 0], [1, 0, 0], [1]], 1, ZZ)
assert dmp_swap(f, 1, 1, 1, ZZ) == f
assert dmp_swap(f, 0, 1, 1, ZZ) == g
assert dmp_swap(g, 0, 1, 1, ZZ) == f
raises(IndexError, lambda: dmp_swap(f, -1, -7, 1, ZZ))
def test_dmp_permute():
f = dmp_normal([[1, 0, 0], [], [1, 0], [], [1]], 1, ZZ)
g = dmp_normal([[1, 0, 0, 0, 0], [1, 0, 0], [1]], 1, ZZ)
assert dmp_permute(f, [0, 1], 1, ZZ) == f
assert dmp_permute(g, [0, 1], 1, ZZ) == g
assert dmp_permute(f, [1, 0], 1, ZZ) == g
assert dmp_permute(g, [1, 0], 1, ZZ) == f
def test_dmp_nest():
assert dmp_nest(ZZ(1), 2, ZZ) == [[[1]]]
assert dmp_nest([[1]], 0, ZZ) == [[1]]
assert dmp_nest([[1]], 1, ZZ) == [[[1]]]
assert dmp_nest([[1]], 2, ZZ) == [[[[1]]]]
def test_dmp_raise():
assert dmp_raise([], 2, 0, ZZ) == [[[]]]
assert dmp_raise([[1]], 0, 1, ZZ) == [[1]]
assert dmp_raise([[1, 2, 3], [], [2, 3]], 2, 1, ZZ) == \
[[[[1]], [[2]], [[3]]], [[[]]], [[[2]], [[3]]]]
def test_dup_deflate():
assert dup_deflate([], ZZ) == (1, [])
assert dup_deflate([2], ZZ) == (1, [2])
assert dup_deflate([1, 2, 3], ZZ) == (1, [1, 2, 3])
assert dup_deflate([1, 0, 2, 0, 3], ZZ) == (2, [1, 2, 3])
assert dup_deflate(dup_from_raw_dict({7: 1, 1: 1}, ZZ), ZZ) == \
(1, [1, 0, 0, 0, 0, 0, 1, 0])
assert dup_deflate(dup_from_raw_dict({7: 1, 0: 1}, ZZ), ZZ) == \
(7, [1, 1])
assert dup_deflate(dup_from_raw_dict({7: 1, 3: 1}, ZZ), ZZ) == \
(1, [1, 0, 0, 0, 1, 0, 0, 0])
assert dup_deflate(dup_from_raw_dict({7: 1, 4: 1}, ZZ), ZZ) == \
(1, [1, 0, 0, 1, 0, 0, 0, 0])
assert dup_deflate(dup_from_raw_dict({8: 1, 4: 1}, ZZ), ZZ) == \
(4, [1, 1, 0])
assert dup_deflate(dup_from_raw_dict({8: 1}, ZZ), ZZ) == \
(8, [1, 0])
assert dup_deflate(dup_from_raw_dict({7: 1}, ZZ), ZZ) == \
(7, [1, 0])
assert dup_deflate(dup_from_raw_dict({1: 1}, ZZ), ZZ) == \
(1, [1, 0])
def test_dmp_deflate():
assert dmp_deflate([[]], 1, ZZ) == ((1, 1), [[]])
assert dmp_deflate([[2]], 1, ZZ) == ((1, 1), [[2]])
f = [[1, 0, 0], [], [1, 0], [], [1]]
assert dmp_deflate(f, 1, ZZ) == ((2, 1), [[1, 0, 0], [1, 0], [1]])
def test_dup_multi_deflate():
assert dup_multi_deflate(([2],), ZZ) == (1, ([2],))
assert dup_multi_deflate(([], []), ZZ) == (1, ([], []))
assert dup_multi_deflate(([1, 2, 3],), ZZ) == (1, ([1, 2, 3],))
assert dup_multi_deflate(([1, 0, 2, 0, 3],), ZZ) == (2, ([1, 2, 3],))
assert dup_multi_deflate(([1, 0, 2, 0, 3], [2, 0, 0]), ZZ) == \
(2, ([1, 2, 3], [2, 0]))
assert dup_multi_deflate(([1, 0, 2, 0, 3], [2, 1, 0]), ZZ) == \
(1, ([1, 0, 2, 0, 3], [2, 1, 0]))
def test_dmp_multi_deflate():
assert dmp_multi_deflate(([[]],), 1, ZZ) == \
((1, 1), ([[]],))
assert dmp_multi_deflate(([[]], [[]]), 1, ZZ) == \
((1, 1), ([[]], [[]]))
assert dmp_multi_deflate(([[1]], [[]]), 1, ZZ) == \
((1, 1), ([[1]], [[]]))
assert dmp_multi_deflate(([[1]], [[2]]), 1, ZZ) == \
((1, 1), ([[1]], [[2]]))
assert dmp_multi_deflate(([[1]], [[2, 0]]), 1, ZZ) == \
((1, 1), ([[1]], [[2, 0]]))
assert dmp_multi_deflate(([[2, 0]], [[2, 0]]), 1, ZZ) == \
((1, 1), ([[2, 0]], [[2, 0]]))
assert dmp_multi_deflate(
([[2]], [[2, 0, 0]]), 1, ZZ) == ((1, 2), ([[2]], [[2, 0]]))
assert dmp_multi_deflate(
([[2, 0, 0]], [[2, 0, 0]]), 1, ZZ) == ((1, 2), ([[2, 0]], [[2, 0]]))
assert dmp_multi_deflate(([2, 0, 0], [1, 0, 4, 0, 1]), 0, ZZ) == \
((2,), ([2, 0], [1, 4, 1]))
f = [[1, 0, 0], [], [1, 0], [], [1]]
g = [[1, 0, 1, 0], [], [1]]
assert dmp_multi_deflate((f,), 1, ZZ) == \
((2, 1), ([[1, 0, 0], [1, 0], [1]],))
assert dmp_multi_deflate((f, g), 1, ZZ) == \
((2, 1), ([[1, 0, 0], [1, 0], [1]],
[[1, 0, 1, 0], [1]]))
def test_dup_inflate():
assert dup_inflate([], 17, ZZ) == []
assert dup_inflate([1, 2, 3], 1, ZZ) == [1, 2, 3]
assert dup_inflate([1, 2, 3], 2, ZZ) == [1, 0, 2, 0, 3]
assert dup_inflate([1, 2, 3], 3, ZZ) == [1, 0, 0, 2, 0, 0, 3]
assert dup_inflate([1, 2, 3], 4, ZZ) == [1, 0, 0, 0, 2, 0, 0, 0, 3]
raises(IndexError, lambda: dup_inflate([1, 2, 3], 0, ZZ))
def test_dmp_inflate():
assert dmp_inflate([1], (3,), 0, ZZ) == [1]
assert dmp_inflate([[]], (3, 7), 1, ZZ) == [[]]
assert dmp_inflate([[2]], (1, 2), 1, ZZ) == [[2]]
assert dmp_inflate([[2, 0]], (1, 1), 1, ZZ) == [[2, 0]]
assert dmp_inflate([[2, 0]], (1, 2), 1, ZZ) == [[2, 0, 0]]
assert dmp_inflate([[2, 0]], (1, 3), 1, ZZ) == [[2, 0, 0, 0]]
assert dmp_inflate([[1, 0, 0], [1], [1, 0]], (2, 1), 1, ZZ) == \
[[1, 0, 0], [], [1], [], [1, 0]]
raises(IndexError, lambda: dmp_inflate([[]], (-3, 7), 1, ZZ))
def test_dmp_exclude():
assert dmp_exclude([[[]]], 2, ZZ) == ([], [[[]]], 2)
assert dmp_exclude([[[7]]], 2, ZZ) == ([], [[[7]]], 2)
assert dmp_exclude([1, 2, 3], 0, ZZ) == ([], [1, 2, 3], 0)
assert dmp_exclude([[1], [2, 3]], 1, ZZ) == ([], [[1], [2, 3]], 1)
assert dmp_exclude([[1, 2, 3]], 1, ZZ) == ([0], [1, 2, 3], 0)
assert dmp_exclude([[1], [2], [3]], 1, ZZ) == ([1], [1, 2, 3], 0)
assert dmp_exclude([[[1, 2, 3]]], 2, ZZ) == ([0, 1], [1, 2, 3], 0)
assert dmp_exclude([[[1]], [[2]], [[3]]], 2, ZZ) == ([1, 2], [1, 2, 3], 0)
def test_dmp_include():
assert dmp_include([1, 2, 3], [], 0, ZZ) == [1, 2, 3]
assert dmp_include([1, 2, 3], [0], 0, ZZ) == [[1, 2, 3]]
assert dmp_include([1, 2, 3], [1], 0, ZZ) == [[1], [2], [3]]
assert dmp_include([1, 2, 3], [0, 1], 0, ZZ) == [[[1, 2, 3]]]
assert dmp_include([1, 2, 3], [1, 2], 0, ZZ) == [[[1]], [[2]], [[3]]]
def test_dmp_inject():
R, x,y = ring("x,y", ZZ)
K = R.to_domain()
assert dmp_inject([], 0, K) == ([[[]]], 2)
assert dmp_inject([[]], 1, K) == ([[[[]]]], 3)
assert dmp_inject([R(1)], 0, K) == ([[[1]]], 2)
assert dmp_inject([[R(1)]], 1, K) == ([[[[1]]]], 3)
assert dmp_inject([R(1), 2*x + 3*y + 4], 0, K) == ([[[1]], [[2], [3, 4]]], 2)
f = [3*x**2 + 7*x*y + 5*y**2, 2*x, R(0), x*y**2 + 11]
g = [[[3], [7, 0], [5, 0, 0]], [[2], []], [[]], [[1, 0, 0], [11]]]
assert dmp_inject(f, 0, K) == (g, 2)
def test_dmp_eject():
R, x,y = ring("x,y", ZZ)
K = R.to_domain()
assert dmp_eject([[[]]], 2, K) == []
assert dmp_eject([[[[]]]], 3, K) == [[]]
assert dmp_eject([[[1]]], 2, K) == [R(1)]
assert dmp_eject([[[[1]]]], 3, K) == [[R(1)]]
assert dmp_eject([[[1]], [[2], [3, 4]]], 2, K) == [R(1), 2*x + 3*y + 4]
f = [3*x**2 + 7*x*y + 5*y**2, 2*x, R(0), x*y**2 + 11]
g = [[[3], [7, 0], [5, 0, 0]], [[2], []], [[]], [[1, 0, 0], [11]]]
assert dmp_eject(g, 2, K) == f
def test_dup_terms_gcd():
assert dup_terms_gcd([], ZZ) == (0, [])
assert dup_terms_gcd([1, 0, 1], ZZ) == (0, [1, 0, 1])
assert dup_terms_gcd([1, 0, 1, 0], ZZ) == (1, [1, 0, 1])
def test_dmp_terms_gcd():
assert dmp_terms_gcd([[]], 1, ZZ) == ((0, 0), [[]])
assert dmp_terms_gcd([1, 0, 1, 0], 0, ZZ) == ((1,), [1, 0, 1])
assert dmp_terms_gcd([[1], [], [1], []], 1, ZZ) == ((1, 0), [[1], [], [1]])
assert dmp_terms_gcd(
[[1, 0], [], [1]], 1, ZZ) == ((0, 0), [[1, 0], [], [1]])
assert dmp_terms_gcd(
[[1, 0], [1, 0, 0], [], []], 1, ZZ) == ((2, 1), [[1], [1, 0]])
def test_dmp_list_terms():
assert dmp_list_terms([[[]]], 2, ZZ) == [((0, 0, 0), 0)]
assert dmp_list_terms([[[1]]], 2, ZZ) == [((0, 0, 0), 1)]
assert dmp_list_terms([1, 2, 4, 3, 5], 0, ZZ) == \
[((4,), 1), ((3,), 2), ((2,), 4), ((1,), 3), ((0,), 5)]
assert dmp_list_terms([[1], [2, 4], [3, 5, 0]], 1, ZZ) == \
[((2, 0), 1), ((1, 1), 2), ((1, 0), 4), ((0, 2), 3), ((0, 1), 5)]
f = [[2, 0, 0, 0], [1, 0, 0], []]
assert dmp_list_terms(f, 1, ZZ, order='lex') == [((2, 3), 2), ((1, 2), 1)]
assert dmp_list_terms(
f, 1, ZZ, order='grlex') == [((2, 3), 2), ((1, 2), 1)]
f = [[2, 0, 0, 0], [1, 0, 0, 0, 0, 0], []]
assert dmp_list_terms(f, 1, ZZ, order='lex') == [((2, 3), 2), ((1, 5), 1)]
assert dmp_list_terms(
f, 1, ZZ, order='grlex') == [((1, 5), 1), ((2, 3), 2)]
def test_dmp_apply_pairs():
h = lambda a, b: a*b
assert dmp_apply_pairs([1, 2, 3], [4, 5, 6], h, [], 0, ZZ) == [4, 10, 18]
assert dmp_apply_pairs([2, 3], [4, 5, 6], h, [], 0, ZZ) == [10, 18]
assert dmp_apply_pairs([1, 2, 3], [5, 6], h, [], 0, ZZ) == [10, 18]
assert dmp_apply_pairs(
[[1, 2], [3]], [[4, 5], [6]], h, [], 1, ZZ) == [[4, 10], [18]]
assert dmp_apply_pairs(
[[1, 2], [3]], [[4], [5, 6]], h, [], 1, ZZ) == [[8], [18]]
assert dmp_apply_pairs(
[[1], [2, 3]], [[4, 5], [6]], h, [], 1, ZZ) == [[5], [18]]
def test_dup_slice():
f = [1, 2, 3, 4]
assert dup_slice(f, 0, 0, ZZ) == []
assert dup_slice(f, 0, 1, ZZ) == [4]
assert dup_slice(f, 0, 2, ZZ) == [3, 4]
assert dup_slice(f, 0, 3, ZZ) == [2, 3, 4]
assert dup_slice(f, 0, 4, ZZ) == [1, 2, 3, 4]
assert dup_slice(f, 0, 4, ZZ) == f
assert dup_slice(f, 0, 9, ZZ) == f
assert dup_slice(f, 1, 0, ZZ) == []
assert dup_slice(f, 1, 1, ZZ) == []
assert dup_slice(f, 1, 2, ZZ) == [3, 0]
assert dup_slice(f, 1, 3, ZZ) == [2, 3, 0]
assert dup_slice(f, 1, 4, ZZ) == [1, 2, 3, 0]
assert dup_slice([1, 2], 0, 3, ZZ) == [1, 2]
def test_dup_random():
f = dup_random(0, -10, 10, ZZ)
assert dup_degree(f) == 0
assert all(-10 <= c <= 10 for c in f)
f = dup_random(1, -20, 20, ZZ)
assert dup_degree(f) == 1
assert all(-20 <= c <= 20 for c in f)
f = dup_random(2, -30, 30, ZZ)
assert dup_degree(f) == 2
assert all(-30 <= c <= 30 for c in f)
f = dup_random(3, -40, 40, ZZ)
assert dup_degree(f) == 3
assert all(-40 <= c <= 40 for c in f)
| bsd-3-clause |
assefay/inasafe | extras/xml_tools.py | 11 | 7777 | """Basic XML utilities based on minidom - the built in Document Object Model
"""
import sys
from xml.dom import minidom, Node
from safe.common.utilities import verify
def print_tree(n, indent=0):
    """Recursively dump a DOM (sub)tree to stdout for debugging.

    Prints each node's name, type and value, indenting children by four
    spaces per level, then walks the sibling chain of *n*.
    """
    while n:
        #print 'nodeType', n.nodeType, Node.ELEMENT_NODE
        #if n.nodeType != Node.ELEMENT_NODE:
        #    break
        print ' '*indent,\
              'Node name: "%s",' %n.nodeName,\
              'Node type: "%s",' %n.nodeType,\
              'Node value: "%s"' %str(n.nodeValue).strip()
        print_tree(n.firstChild, indent+4)
        n = n.nextSibling
def pretty_print_tree(n, indent=0):
    """Print a DOM node.

    NOTE(review): currently a stub - it prints the node object itself and
    ignores *indent*; presumably meant to mirror print_tree without the
    tag noise.  Confirm intent before relying on it.
    """
    print n
def parse(fid):
    """Parse XML file descriptor and return DOM object.

    The descriptor is rewound first, so a file object that has already
    been read can be parsed again from the start.
    """
    # FIXME (OLE): XML code should be validated against the DTD
    #validate(fid, handler)
    #doc = minidom.parse(fid, make_parser())
    fid.seek(0)
    doc = minidom.parse(fid)
    return doc
def get_elements(nodelist):
    """Return a new list containing the ELEMENT_NODE members of *nodelist*."""
    return [node for node in nodelist
            if node.nodeType == Node.ELEMENT_NODE]
def get_text(nodelist):
    """Return the values of all TEXT_NODE members joined by ', '."""
    pieces = [node.nodeValue for node in nodelist
              if node.nodeType == Node.TEXT_NODE]
    return ', '.join(pieces)
def remove_whitespace(s):
    """Collapse every run of whitespace in *s* (spaces, tabs, newlines)
    to a single space and strip leading/trailing whitespace.
    """
    # str.split() with no argument splits on arbitrary whitespace runs,
    # so joining with ' ' normalises the string in one pass.  This also
    # drops the deprecated ``string.join`` (removed in Python 3) while
    # behaving identically on Python 2.
    return ' '.join(s.split())
#----------------------------
# XML object model
#----------------------------
class XML_element(dict):
    """Lightweight XML node: a tag plus either text or child elements.

    NOTE(review): subclasses dict but never uses the dict storage -
    children live in ``self.value``.  Presumably historical; confirm
    before changing the base class.
    """

    def __init__(self,
                 tag=None,
                 value=None,
                 version='1.0',
                 encoding='iso-8859-1'):
        """
        value can be either
          * An XML_element
          * a list of XML_value
          * a text string
        """
        if isinstance(value, XML_element):
            value = [value]
        self.value = value
        if tag is None:
            # No tag means this node is the document root; synthesise the
            # XML declaration from version and encoding.
            tag = '?xml version="%s" encoding="%s"?' %(version, encoding)
            self.root_element = True
        else:
            self.root_element = False
        self.tag = tag
        # FIXME: It might be better to represent these objects
        # in a proper dictionary format with
        # {tag: value, ...}
        # No, tried that - it removes any notion of ordering.

    def __add__(self, other):
        # Concatenation works on string representations.
        return str(self) + str(other)

    def __radd__(self, other):
        return str(other) + str(self) #Python swaps self and other

    def __repr__(self):
        return str(self)

    def __str__(self, indent=0):
        """String representation of XML element

        Children are indented four extra spaces per level; the root
        element itself adds no indentation.
        """
        if self.root_element is True:
            increment = 0
        else:
            increment = 4
        s = tab = ' '*indent
        s += '<%s>' %self.tag
        if isinstance(self.value, basestring):
            # Terminal node: emit normalised text inline.
            s += remove_whitespace(self.value)
        else:
            s += '\n'
            for e in self.value:
                s += e.__str__(indent+increment)
            s += tab
        if self.root_element is False:
            s += '</%s>\n' %self.tag
        return s

    def __getitem__(self, key):
        """Return sub-tree starting at element with tag equal to specified key

        If node is terminal, its text value will be returned instead of
        itself.  This will allow expressions such as
          xmlobject['datafile']['accountable'] == 'Jane Sexton'
        If more than one element matches the given key a list of all
        matches will be returned.  Returns None when nothing matches.
        """
        result = []
        for node in self.value:
            if node.tag == key:
                #print 'node tag = %s, node value = %s' %(node.tag, node.value)
                if isinstance(node.value, basestring):
                    result.append(str(node.value))
                    #return node.value
                else:
                    result.append(node)
                    #return node
        #print 'result', result
        if len(result) == 0:
            return None
        if len(result) == 1:
            return result[0]
        if len(result) > 1:
            return result

    def has_key(self, key):
        # Linear scan over children; True if any child tag matches.
        found = False
        for node in self.value:
            if node.tag == key:
                found = True
        return found

    def keys(self):
        # Child tags in document order (duplicates preserved).
        return [str(node.tag) for node in self.value]

    def pretty_print(self, indent=0):
        """Print the document without tags using indentation
        """
        s = tab = ' '*indent
        s += '%s: ' %self.tag
        if isinstance(self.value, basestring):
            s += self.value
        else:
            s += '\n'
            for e in self.value:
                s += e.pretty_print(indent+4)
        s += '\n'
        return s
def xml2object(xml, verbose=False):
    """Generate XML object model from XML file or XML text

    This is the inverse operation to the __str__ representation
    (up to whitespace).

    Input xml can be either an
      * xml file
      * open xml file object

    Return XML_document instance.
    """
    # FIXME - can we allow xml to be string?
    # This would depend on minidom's parse function

    # Input tests
    if isinstance(xml, basestring):
        # Treat strings as filenames (not raw XML text).
        fid = open(xml)
    else:
        fid = xml
    try:
        dom = parse(fid)
    except Exception, e:
        # Throw filename into dom exception
        msg = 'XML file "%s" could not be parsed.\n' %fid.name
        msg += 'Error message from parser: "%s"' %str(e)
        raise Exception, msg
    try:
        xml_object = dom2object(dom)
    except Exception, e:
        msg = 'Could not convert %s into XML object.\n' %fid.name
        msg += str(e)
        raise Exception, msg
    return xml_object
def dom2object(node):
    """Convert DOM representation to XML_object hierarchy.

    Recursively walks childNodes; text children become the node's string
    value, element children become nested XML_element instances.  Mixed
    content (text followed by an element) is rejected.
    """
    value = []
    textnode_encountered = None
    for n in node.childNodes:
        # nodeType 3 is TEXT_NODE in the DOM spec.
        if n.nodeType == 3:
            # Child is a text element - omit the dom tag #text and
            # go straight to the text value.
            # Note - only the last text value will be recorded
            msg = 'Text element has child nodes - this shouldn\'t happen'
            verify(len(n.childNodes) == 0, msg)
            x = n.nodeValue.strip()
            if len(x) == 0:
                # Skip empty text children
                continue
            textnode_encountered = value = x
        else:
            # XML element
            if textnode_encountered is not None:
                msg = 'A text node was followed by a non-text tag. This is not allowed.\n'
                msg += 'Offending text node: "%s" ' %str(textnode_encountered)
                msg += 'was followed by node named: "<%s>"' %str(n.nodeName)
                raise Exception, msg
            value.append(dom2object(n))
    # Deal with empty elements
    if len(value) == 0: value = ''
    # nodeType 9 is DOCUMENT_NODE: tag=None marks the root XML_element.
    if node.nodeType == 9:
        # Root node (document)
        tag = None
    else:
        # Normal XML node
        tag = node.nodeName
    X = XML_element(tag=tag,
                    value=value)
    return X
#=================== Useful print statement
#if n.nodeType == 3 and str(n.nodeValue).strip() == '':
# pass
#else:
# print 'Node name: "%s",' %n.nodeName,\
# 'Node type: "%s",' %n.nodeType,\
# 'Node value: "%s",' %str(n.nodeValue).strip(),\
# 'Node children: %d' %len(n.childNodes)
| gpl-3.0 |
CollabQ/CollabQ | vendor/django/contrib/auth/management/commands/createsuperuser.py | 14 | 5719 | """
Management utility to create superusers.
"""
import getpass
import os
import re
import sys
from optparse import make_option
from django.contrib.auth.models import User
from django.core import exceptions
from django.core.management.base import BaseCommand, CommandError
from django.utils.translation import ugettext as _
RE_VALID_USERNAME = re.compile('\w+$')
EMAIL_RE = re.compile(
r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*" # dot-atom
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-\011\013\014\016-\177])*"' # quoted-string
r')@(?:[A-Z0-9-]+\.)+[A-Z]{2,6}$', re.IGNORECASE) # domain
def is_valid_email(value):
    """Raise ValidationError unless *value* matches the e-mail regex."""
    if EMAIL_RE.search(value) is None:
        raise exceptions.ValidationError(_('Enter a valid e-mail address.'))
class Command(BaseCommand):
    """Management command creating a superuser, interactively or via flags.

    With --noinput, --username and --email are required and the account is
    created without a usable password; otherwise the user is prompted for
    username, e-mail and password on the terminal.
    """

    option_list = BaseCommand.option_list + (
        make_option('--username', dest='username', default=None,
            help='Specifies the username for the superuser.'),
        make_option('--email', dest='email', default=None,
            help='Specifies the email address for the superuser.'),
        make_option('--noinput', action='store_false', dest='interactive', default=True,
            help='Tells Django to NOT prompt the user for input of any kind. '    \
                 'You must use --username and --email with --noinput, and '      \
                 'superusers created with --noinput will not be able to log in '  \
                 'until they\'re given a valid password.'),
    )
    help = 'Used to create a superuser.'

    def handle(self, *args, **options):
        username = options.get('username', None)
        email = options.get('email', None)
        interactive = options.get('interactive')

        # Do quick and dirty validation if --noinput
        if not interactive:
            if not username or not email:
                raise CommandError("You must use --username and --email with --noinput.")
            if not RE_VALID_USERNAME.match(username):
                raise CommandError("Invalid username. Use only letters, digits, and underscores")
            try:
                is_valid_email(email)
            except exceptions.ValidationError:
                raise CommandError("Invalid email address.")

        # Non-interactive accounts get an empty (unusable) password.
        password = ''

        # Try to determine the current system user's username to use as a default.
        try:
            import pwd
            default_username = pwd.getpwuid(os.getuid())[0].replace(' ', '').lower()
        except (ImportError, KeyError):
            # KeyError will be raised by getpwuid() if there is no
            # corresponding entry in the /etc/passwd file (a very restricted
            # chroot environment, for example).
            default_username = ''

        # Determine whether the default username is taken, so we don't display
        # it as an option.
        if default_username:
            try:
                User.objects.get(username=default_username)
            except User.DoesNotExist:
                pass
            else:
                default_username = ''

        # Prompt for username/email/password. Enclose this whole thing in a
        # try/except to trap for a keyboard interrupt and exit gracefully.
        if interactive:
            try:
                # Get a username
                while 1:
                    if not username:
                        input_msg = 'Username'
                        if default_username:
                            input_msg += ' (Leave blank to use %r)' % default_username
                        username = raw_input(input_msg + ': ')
                        if default_username and username == '':
                            username = default_username
                    if not RE_VALID_USERNAME.match(username):
                        sys.stderr.write("Error: That username is invalid. Use only letters, digits and underscores.\n")
                        username = None
                        continue
                    try:
                        User.objects.get(username=username)
                    except User.DoesNotExist:
                        break
                    else:
                        sys.stderr.write("Error: That username is already taken.\n")
                        username = None

                # Get an email
                while 1:
                    if not email:
                        email = raw_input('E-mail address: ')
                    try:
                        is_valid_email(email)
                    except exceptions.ValidationError:
                        sys.stderr.write("Error: That e-mail address is invalid.\n")
                        email = None
                    else:
                        break

                # Get a password
                while 1:
                    if not password:
                        password = getpass.getpass()
                        password2 = getpass.getpass('Password (again): ')
                        if password != password2:
                            sys.stderr.write("Error: Your passwords didn't match.\n")
                            password = None
                            continue
                    if password.strip() == '':
                        sys.stderr.write("Error: Blank passwords aren't allowed.\n")
                        password = None
                        continue
                    break
            except KeyboardInterrupt:
                sys.stderr.write("\nOperation cancelled.\n")
                sys.exit(1)

        User.objects.create_superuser(username, email, password)
        print "Superuser created successfully."
| apache-2.0 |
jvrsantacruz/XlsxWriter | xlsxwriter/test/comparison/test_comment06.py | 8 | 1308 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Compare an XlsxWriter-generated workbook against a reference file
    produced by Excel itself.
    """

    def setUp(self):
        self.maxDiff = None

        base_dir = 'xlsxwriter/test/comparison/'
        name = 'comment06.xlsx'

        self.got_filename = base_dir + '_test_' + name
        self.exp_filename = base_dir + 'xlsx_files/' + name

        self.ignore_files = []
        self.ignore_elements = {}

    def test_create_file(self):
        """Write five cell comments (A3 visible) and compare with Excel's output."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        # A3 is the only comment shown by default; the rest stay hidden.
        options = {'A3': {'visible': True}}
        for cell in ('A1', 'A2', 'A3', 'A4', 'A5'):
            if cell in options:
                worksheet.write_comment(cell, 'Some text', options[cell])
            else:
                worksheet.write_comment(cell, 'Some text')

        worksheet.set_comments_author('John')

        workbook.close()

        self.assertExcelEqual()
| bsd-2-clause |
carmark/vbox | src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/Workspace/WorkspaceDatabase.py | 11 | 114290 | ## @file
# This file is used to create a database used by build tool
#
# Copyright (c) 2008 - 2011, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import sqlite3
import os
import os.path
import pickle
import uuid
import Common.EdkLogger as EdkLogger
import Common.GlobalData as GlobalData
from Common.String import *
from Common.DataType import *
from Common.Misc import *
from types import *
from CommonDataClass.CommonClass import SkuInfoClass
from MetaDataTable import *
from MetaFileTable import *
from MetaFileParser import *
from BuildClassObject import *
## Platform build information from DSC file
#
# This class is used to retrieve information stored in database and convert them
# into PlatformBuildClassObject form for easier use for AutoGen.
#
class DscBuildData(PlatformBuildClassObject):
    # dict used to convert PCD type in database to string used by build tool
    _PCD_TYPE_STRING_ = {
        MODEL_PCD_FIXED_AT_BUILD        :   "FixedAtBuild",
        MODEL_PCD_PATCHABLE_IN_MODULE   :   "PatchableInModule",
        MODEL_PCD_FEATURE_FLAG          :   "FeatureFlag",
        MODEL_PCD_DYNAMIC               :   "Dynamic",
        MODEL_PCD_DYNAMIC_DEFAULT       :   "Dynamic",
        MODEL_PCD_DYNAMIC_HII           :   "DynamicHii",
        MODEL_PCD_DYNAMIC_VPD           :   "DynamicVpd",
        MODEL_PCD_DYNAMIC_EX            :   "DynamicEx",
        MODEL_PCD_DYNAMIC_EX_DEFAULT    :   "DynamicEx",
        MODEL_PCD_DYNAMIC_EX_HII        :   "DynamicExHii",
        MODEL_PCD_DYNAMIC_EX_VPD        :   "DynamicExVpd",
    }

    # dict used to convert part of [Defines] to members of DscBuildData directly
    # (keys handled here are stored via __setitem__; others get special-cased
    # in _GetHeaderInfo)
    _PROPERTY_ = {
        #
        # Required Fields
        #
        TAB_DSC_DEFINES_PLATFORM_NAME           :   "_PlatformName",
        TAB_DSC_DEFINES_PLATFORM_GUID           :   "_Guid",
        TAB_DSC_DEFINES_PLATFORM_VERSION        :   "_Version",
        TAB_DSC_DEFINES_DSC_SPECIFICATION       :   "_DscSpecification",
        #TAB_DSC_DEFINES_OUTPUT_DIRECTORY       :   "_OutputDirectory",
        #TAB_DSC_DEFINES_SUPPORTED_ARCHITECTURES:   "_SupArchList",
        #TAB_DSC_DEFINES_BUILD_TARGETS          :   "_BuildTargets",
        #TAB_DSC_DEFINES_SKUID_IDENTIFIER       :   "_SkuName",
        #TAB_DSC_DEFINES_FLASH_DEFINITION       :   "_FlashDefinition",
        TAB_DSC_DEFINES_BUILD_NUMBER            :   "_BuildNumber",
        TAB_DSC_DEFINES_MAKEFILE_NAME           :   "_MakefileName",
        TAB_DSC_DEFINES_BS_BASE_ADDRESS         :   "_BsBaseAddress",
        TAB_DSC_DEFINES_RT_BASE_ADDRESS         :   "_RtBaseAddress",
        #TAB_DSC_DEFINES_RFC_LANGUAGES          :   "_RFCLanguages",
        #TAB_DSC_DEFINES_ISO_LANGUAGES          :   "_ISOLanguages",
    }

    # used to compose dummy library class name for those forced library instances
    # (class-level counter, shared by all DscBuildData instances)
    _NullLibraryNumber = 0
## Constructor of DscBuildData
#
#  Initialize object of DscBuildData
#
#   @param      FilePath        The path of platform description file
#   @param      RawData         The raw data of DSC file
#   @param      BuildDataBase   Database used to retrieve module/package information
#   @param      Arch            The target architecture
#   @param      Platform        (not used for DscBuildData)
#   @param      Macros          Macros used for replacement in DSC file
#
def __init__(self, FilePath, RawData, BuildDataBase, Arch='COMMON', Target=None, Toolchain=None):
    self.MetaFile = FilePath
    self._RawData = RawData
    self._Bdb = BuildDataBase
    self._Arch = Arch
    self._Target = Target
    self._Toolchain = Toolchain
    # All derived data is computed lazily; start from a clean slate.
    self._Clear()
## XXX[key] = value
#  Map a [Defines] key listed in _PROPERTY_ onto the corresponding attribute.
def __setitem__(self, key, value):
    self.__dict__[self._PROPERTY_[key]] = value

## value = XXX[key]
def __getitem__(self, key):
    return self.__dict__[self._PROPERTY_[key]]

## "in" test support
#  True when the key is one of the directly-mapped [Defines] names.
def __contains__(self, key):
    return key in self._PROPERTY_
## Set all internal used members of DscBuildData to None
#  None acts as the "not yet retrieved" sentinel for every lazy getter below.
def _Clear(self):
    self._Header            = None
    self._PlatformName      = None
    self._Guid              = None
    self._Version           = None
    self._DscSpecification  = None
    self._OutputDirectory   = None
    self._SupArchList       = None
    self._BuildTargets      = None
    self._SkuName           = None
    self._FlashDefinition   = None
    self._BuildNumber       = None
    self._MakefileName      = None
    self._BsBaseAddress     = None
    self._RtBaseAddress     = None
    self._SkuIds            = None
    self._Modules           = None
    self._LibraryInstances  = None
    self._LibraryClasses    = None
    self._Pcds              = None
    self._BuildOptions      = None
    self._LoadFixAddress    = None
    self._RFCLanguages      = None
    self._ISOLanguages      = None
    self._VpdToolGuid       = None
    self.__Macros           = None
## Get current effective macros
#  Lazily merge platform, global and command-line defines (later ones win).
def _GetMacros(self):
    if self.__Macros is None:
        macros = {}
        macros.update(GlobalData.gPlatformDefines)
        macros.update(GlobalData.gGlobalDefines)
        macros.update(GlobalData.gCommandLineDefines)
        self.__Macros = macros
    return self.__Macros
## Get architecture
def _GetArch(self):
    return self._Arch

## Set architecture
#
#   Changing the default ARCH to another may affect all other information
#  because all information in a platform may be ARCH-related. That's
#  why we need to clear all internal used members, in order to cause all
#  information to be re-retrieved.
#
#   @param  Value   The value of ARCH
#
def _SetArch(self, Value):
    if self._Arch == Value:
        return
    self._Arch = Value
    # Invalidate every cached member so it is re-read for the new arch.
    self._Clear()
## Retrieve all information in [Defines] section
#
#   (Retriving all [Defines] information in one-shot is just to save time.)
#
def _GetHeaderInfo(self):
    RecordList = self._RawData[MODEL_META_DATA_HEADER, self._Arch]
    for Record in RecordList:
        Name = Record[1]
        # items defined _PROPERTY_ don't need additional processing
        if Name in self:
            self[Name] = Record[2]
        # some special items in [Defines] section need special treatment
        elif Name == TAB_DSC_DEFINES_OUTPUT_DIRECTORY:
            self._OutputDirectory = NormPath(Record[2], self._Macros)
            if ' ' in self._OutputDirectory:
                EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in OUTPUT_DIRECTORY",
                                File=self.MetaFile, Line=Record[-1],
                                ExtraData=self._OutputDirectory)
        elif Name == TAB_DSC_DEFINES_FLASH_DEFINITION:
            self._FlashDefinition = PathClass(NormPath(Record[2], self._Macros), GlobalData.gWorkspace)
            # FDF file must exist and carry the .fdf extension
            ErrorCode, ErrorInfo = self._FlashDefinition.Validate('.fdf')
            if ErrorCode != 0:
                EdkLogger.error('build', ErrorCode, File=self.MetaFile, Line=Record[-1],
                                ExtraData=ErrorInfo)
        elif Name == TAB_DSC_DEFINES_SUPPORTED_ARCHITECTURES:
            self._SupArchList = GetSplitValueList(Record[2], TAB_VALUE_SPLIT)
        elif Name == TAB_DSC_DEFINES_BUILD_TARGETS:
            self._BuildTargets = GetSplitValueList(Record[2])
        elif Name == TAB_DSC_DEFINES_SKUID_IDENTIFIER:
            # keep a value already set (e.g. from command line) over the DSC one
            if self._SkuName == None:
                self._SkuName = Record[2]
        elif Name == TAB_FIX_LOAD_TOP_MEMORY_ADDRESS:
            try:
                self._LoadFixAddress = int (Record[2], 0)
            except:
                EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS %s is not valid dec or hex string" % (Record[2]))
        elif Name == TAB_DSC_DEFINES_RFC_LANGUAGES:
            # value must be a quoted, semicolon-separated list of RFC4646 codes
            if not Record[2] or Record[2][0] != '"' or Record[2][-1] != '"' or len(Record[2]) == 1:
                EdkLogger.error('build', FORMAT_NOT_SUPPORTED, 'language code for RFC_LANGUAGES must have double quotes around it, for example: RFC_LANGUAGES = "en-us;zh-hans"',
                                File=self.MetaFile, Line=Record[-1])
            LanguageCodes = Record[2][1:-1]
            if not LanguageCodes:
                EdkLogger.error('build', FORMAT_NOT_SUPPORTED, 'one or more RFC4646 format language code must be provided for RFC_LANGUAGES statement',
                                File=self.MetaFile, Line=Record[-1])
            LanguageList = GetSplitValueList(LanguageCodes, TAB_SEMI_COLON_SPLIT)
            # check whether there is empty entries in the list
            if None in LanguageList:
                EdkLogger.error('build', FORMAT_NOT_SUPPORTED, 'one or more empty language code is in RFC_LANGUAGES statement',
                                File=self.MetaFile, Line=Record[-1])
            self._RFCLanguages = LanguageList
        elif Name == TAB_DSC_DEFINES_ISO_LANGUAGES:
            # value must be a quoted string of concatenated 3-letter ISO639-2 codes
            if not Record[2] or Record[2][0] != '"' or Record[2][-1] != '"' or len(Record[2]) == 1:
                EdkLogger.error('build', FORMAT_NOT_SUPPORTED, 'language code for ISO_LANGUAGES must have double quotes around it, for example: ISO_LANGUAGES = "engchn"',
                                File=self.MetaFile, Line=Record[-1])
            LanguageCodes = Record[2][1:-1]
            if not LanguageCodes:
                EdkLogger.error('build', FORMAT_NOT_SUPPORTED, 'one or more ISO639-2 format language code must be provided for ISO_LANGUAGES statement',
                                File=self.MetaFile, Line=Record[-1])
            if len(LanguageCodes)%3:
                EdkLogger.error('build', FORMAT_NOT_SUPPORTED, 'bad ISO639-2 format for ISO_LANGUAGES',
                                File=self.MetaFile, Line=Record[-1])
            LanguageList = []
            for i in range(0, len(LanguageCodes), 3):
                LanguageList.append(LanguageCodes[i:i+3])
            self._ISOLanguages = LanguageList
        elif Name == TAB_DSC_DEFINES_VPD_TOOL_GUID:
            #
            # try to convert GUID to a real UUID value to see whether the GUID is format
            # for VPD_TOOL_GUID is correct.
            #
            try:
                uuid.UUID(Record[2])
            except:
                EdkLogger.error("build", FORMAT_INVALID, "Invalid GUID format for VPD_TOOL_GUID", File=self.MetaFile)
            self._VpdToolGuid = Record[2]
    # set _Header to non-None in order to avoid database re-querying
    self._Header = 'DUMMY'
## Retrieve platform name (PLATFORM_NAME) -- required, error if missing
def _GetPlatformName(self):
    if self._PlatformName == None:
        if self._Header == None:
            self._GetHeaderInfo()
        if self._PlatformName == None:
            EdkLogger.error('build', ATTRIBUTE_NOT_AVAILABLE, "No PLATFORM_NAME", File=self.MetaFile)
    return self._PlatformName

## Retrieve file guid (PLATFORM_GUID) -- required, error if missing
def _GetFileGuid(self):
    if self._Guid == None:
        if self._Header == None:
            self._GetHeaderInfo()
        if self._Guid == None:
            EdkLogger.error('build', ATTRIBUTE_NOT_AVAILABLE, "No PLATFORM_GUID", File=self.MetaFile)
    return self._Guid

## Retrieve platform version (PLATFORM_VERSION) -- required, error if missing
def _GetVersion(self):
    if self._Version == None:
        if self._Header == None:
            self._GetHeaderInfo()
        if self._Version == None:
            EdkLogger.error('build', ATTRIBUTE_NOT_AVAILABLE, "No PLATFORM_VERSION", File=self.MetaFile)
    return self._Version

## Retrieve platform description file version -- required, error if missing
def _GetDscSpec(self):
    if self._DscSpecification == None:
        if self._Header == None:
            self._GetHeaderInfo()
        if self._DscSpecification == None:
            EdkLogger.error('build', ATTRIBUTE_NOT_AVAILABLE, "No DSC_SPECIFICATION", File=self.MetaFile)
    return self._DscSpecification

## Retrieve OUTPUT_DIRECTORY; defaults to Build/<PlatformName>
#  (method name keeps its historical typo "Outpu" -- referenced by the
#  OutputDirectory property, so it cannot be renamed safely)
def _GetOutpuDir(self):
    if self._OutputDirectory == None:
        if self._Header == None:
            self._GetHeaderInfo()
        if self._OutputDirectory == None:
            self._OutputDirectory = os.path.join("Build", self._PlatformName)
    return self._OutputDirectory

## Retrieve SUPPORTED_ARCHITECTURES -- required, error if missing
def _GetSupArch(self):
    if self._SupArchList == None:
        if self._Header == None:
            self._GetHeaderInfo()
        if self._SupArchList == None:
            EdkLogger.error('build', ATTRIBUTE_NOT_AVAILABLE, "No SUPPORTED_ARCHITECTURES", File=self.MetaFile)
    return self._SupArchList

## Retrieve BUILD_TARGETS -- required, error if missing
def _GetBuildTarget(self):
    if self._BuildTargets == None:
        if self._Header == None:
            self._GetHeaderInfo()
        if self._BuildTargets == None:
            EdkLogger.error('build', ATTRIBUTE_NOT_AVAILABLE, "No BUILD_TARGETS", File=self.MetaFile)
    return self._BuildTargets

## Retrieve SKUID_IDENTIFIER; falls back to 'DEFAULT' if absent or unknown
def _GetSkuName(self):
    if self._SkuName == None:
        if self._Header == None:
            self._GetHeaderInfo()
        if self._SkuName == None or self._SkuName not in self.SkuIds:
            self._SkuName = 'DEFAULT'
    return self._SkuName

## Override SKUID_IDENTIFIER
def _SetSkuName(self, Value):
    if Value in self.SkuIds:
        self._SkuName = Value
        # Needs to re-retrieve the PCD information
        self._Pcds = None

## Retrieve FLASH_DEFINITION (FDF file path); '' when not defined
def _GetFdfFile(self):
    if self._FlashDefinition == None:
        if self._Header == None:
            self._GetHeaderInfo()
        if self._FlashDefinition == None:
            self._FlashDefinition = ''
    return self._FlashDefinition

## Retrieve BUILD_NUMBER; '' when not defined
def _GetBuildNumber(self):
    if self._BuildNumber == None:
        if self._Header == None:
            self._GetHeaderInfo()
        if self._BuildNumber == None:
            self._BuildNumber = ''
    return self._BuildNumber

## Retrieve MAKEFILE_NAME; '' when not defined
def _GetMakefileName(self):
    if self._MakefileName == None:
        if self._Header == None:
            self._GetHeaderInfo()
        if self._MakefileName == None:
            self._MakefileName = ''
    return self._MakefileName

## Retrieve BsBaseAddress; '' when not defined
def _GetBsBaseAddress(self):
    if self._BsBaseAddress == None:
        if self._Header == None:
            self._GetHeaderInfo()
        if self._BsBaseAddress == None:
            self._BsBaseAddress = ''
    return self._BsBaseAddress

## Retrieve RtBaseAddress; '' when not defined
def _GetRtBaseAddress(self):
    if self._RtBaseAddress == None:
        if self._Header == None:
            self._GetHeaderInfo()
        if self._RtBaseAddress == None:
            self._RtBaseAddress = ''
    return self._RtBaseAddress

## Retrieve the top address for the load fix address
#  Resolution order: DSC [Defines] -> macro default '0' -> command line
#  override.  Must be non-negative and either the all-ones sentinel or
#  4K-aligned.
def _GetLoadFixAddress(self):
    if self._LoadFixAddress == None:
        if self._Header == None:
            self._GetHeaderInfo()
        if self._LoadFixAddress == None:
            self._LoadFixAddress = self._Macros.get(TAB_FIX_LOAD_TOP_MEMORY_ADDRESS, '0')
        try:
            self._LoadFixAddress = int (self._LoadFixAddress, 0)
        except:
            EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS %s is not valid dec or hex string" % (self._LoadFixAddress))
    #
    # If command line defined, should override the value in DSC file.
    #
    if 'FIX_LOAD_TOP_MEMORY_ADDRESS' in GlobalData.gCommandLineDefines.keys():
        try:
            self._LoadFixAddress = int(GlobalData.gCommandLineDefines['FIX_LOAD_TOP_MEMORY_ADDRESS'], 0)
        except:
            EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS %s is not valid dec or hex string" % (GlobalData.gCommandLineDefines['FIX_LOAD_TOP_MEMORY_ADDRESS']))
    if self._LoadFixAddress < 0:
        EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS is set to the invalid negative value 0x%x" % (self._LoadFixAddress))
    if self._LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self._LoadFixAddress % 0x1000 != 0:
        EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS is set to the invalid unaligned 4K value 0x%x" % (self._LoadFixAddress))
    return self._LoadFixAddress

## Retrieve RFCLanguage filter; [] when not defined
def _GetRFCLanguages(self):
    if self._RFCLanguages == None:
        if self._Header == None:
            self._GetHeaderInfo()
        if self._RFCLanguages == None:
            self._RFCLanguages = []
    return self._RFCLanguages

## Retrieve ISOLanguage filter; [] when not defined
def _GetISOLanguages(self):
    if self._ISOLanguages == None:
        if self._Header == None:
            self._GetHeaderInfo()
        if self._ISOLanguages == None:
            self._ISOLanguages = []
    return self._ISOLanguages

## Retrieve the GUID string for VPD tool; '' when not defined
def _GetVpdToolGuid(self):
    if self._VpdToolGuid == None:
        if self._Header == None:
            self._GetHeaderInfo()
        if self._VpdToolGuid == None:
            self._VpdToolGuid = ''
    return self._VpdToolGuid
## Retrieve [SkuIds] section information
#  Returns an ordered dict mapping SKU name -> SKU id number; a 'DEFAULT'
#  entry with id '0' is guaranteed to exist.
def _GetSkuIds(self):
    if self._SkuIds == None:
        self._SkuIds = sdict()
        RecordList = self._RawData[MODEL_EFI_SKU_ID, self._Arch]
        for Record in RecordList:
            if Record[0] in [None, '']:
                EdkLogger.error('build', FORMAT_INVALID, 'No Sku ID number',
                                File=self.MetaFile, Line=Record[-1])
            if Record[1] in [None, '']:
                EdkLogger.error('build', FORMAT_INVALID, 'No Sku ID name',
                                File=self.MetaFile, Line=Record[-1])
            self._SkuIds[Record[1]] = Record[0]
        if 'DEFAULT' not in self._SkuIds:
            self._SkuIds['DEFAULT'] = '0'
    return self._SkuIds
## Retrieve [Components] section information
#  Builds an ordered dict of module path -> ModuleBuildClassObject, including
#  each module's private library instances, PCD overrides and build options.
def _GetModules(self):
    if self._Modules != None:
        return self._Modules

    self._Modules = sdict()
    RecordList = self._RawData[MODEL_META_DATA_COMPONENT, self._Arch]
    Macros = self._Macros
    Macros["EDK_SOURCE"] = GlobalData.gEcpSource
    for Record in RecordList:
        ModuleFile = PathClass(NormPath(Record[0], Macros), GlobalData.gWorkspace, Arch=self._Arch)
        ModuleId = Record[5]
        LineNo = Record[6]

        # check the file validation
        ErrorCode, ErrorInfo = ModuleFile.Validate('.inf')
        if ErrorCode != 0:
            EdkLogger.error('build', ErrorCode, File=self.MetaFile, Line=LineNo,
                            ExtraData=ErrorInfo)
        # Check duplication
        # If arch is COMMON, no duplicate module is checked since all modules in all component sections are selected
        if self._Arch != 'COMMON' and ModuleFile in self._Modules:
            EdkLogger.error('build', FILE_DUPLICATED, File=self.MetaFile, ExtraData=str(ModuleFile), Line=LineNo)

        Module = ModuleBuildClassObject()
        Module.MetaFile = ModuleFile

        # get module override path
        RecordList = self._RawData[MODEL_META_DATA_COMPONENT_SOURCE_OVERRIDE_PATH, self._Arch, None, ModuleId]
        if RecordList != []:
            Module.SourceOverridePath = os.path.join(GlobalData.gWorkspace, NormPath(RecordList[0][0], Macros))

            # Check if the source override path exists
            if not os.path.isdir(Module.SourceOverridePath):
                EdkLogger.error('build', FILE_NOT_FOUND, Message = 'Source override path does not exist:', File=self.MetaFile, ExtraData=Module.SourceOverridePath, Line=LineNo)

            #Add to GlobalData Variables
            GlobalData.gOverrideDir[ModuleFile.Key] = Module.SourceOverridePath

        # get module private library instance
        RecordList = self._RawData[MODEL_EFI_LIBRARY_CLASS, self._Arch, None, ModuleId]
        for Record in RecordList:
            LibraryClass = Record[0]
            LibraryPath = PathClass(NormPath(Record[1], Macros), GlobalData.gWorkspace, Arch=self._Arch)
            LineNo = Record[-1]

            # check the file validation
            ErrorCode, ErrorInfo = LibraryPath.Validate('.inf')
            if ErrorCode != 0:
                EdkLogger.error('build', ErrorCode, File=self.MetaFile, Line=LineNo,
                                ExtraData=ErrorInfo)

            # a NULL/empty class is a "forced" library: give it a unique name
            if LibraryClass == '' or LibraryClass == 'NULL':
                self._NullLibraryNumber += 1
                LibraryClass = 'NULL%d' % self._NullLibraryNumber
                EdkLogger.verbose("Found forced library for %s\n\t%s [%s]" % (ModuleFile, LibraryPath, LibraryClass))
            Module.LibraryClasses[LibraryClass] = LibraryPath
            if LibraryPath not in self.LibraryInstances:
                self.LibraryInstances.append(LibraryPath)

        # get module private PCD setting
        for Type in [MODEL_PCD_FIXED_AT_BUILD, MODEL_PCD_PATCHABLE_IN_MODULE, \
                     MODEL_PCD_FEATURE_FLAG, MODEL_PCD_DYNAMIC, MODEL_PCD_DYNAMIC_EX]:
            RecordList = self._RawData[Type, self._Arch, None, ModuleId]
            for TokenSpaceGuid, PcdCName, Setting, Dummy1, Dummy2, Dummy3, Dummy4 in RecordList:
                TokenList = GetSplitValueList(Setting)
                DefaultValue = TokenList[0]
                # optional second token is the maximum datum size
                if len(TokenList) > 1:
                    MaxDatumSize = TokenList[1]
                else:
                    MaxDatumSize = ''
                TypeString = self._PCD_TYPE_STRING_[Type]
                Pcd = PcdClassObject(
                        PcdCName,
                        TokenSpaceGuid,
                        TypeString,
                        '',
                        DefaultValue,
                        '',
                        MaxDatumSize,
                        {},
                        False,
                        None
                        )
                Module.Pcds[PcdCName, TokenSpaceGuid] = Pcd

        # get module private build options
        RecordList = self._RawData[MODEL_META_DATA_BUILD_OPTION, self._Arch, None, ModuleId]
        for ToolChainFamily, ToolChain, Option, Dummy1, Dummy2, Dummy3, Dummy4 in RecordList:
            # repeated (family, toolchain) keys are concatenated with a space
            if (ToolChainFamily, ToolChain) not in Module.BuildOptions:
                Module.BuildOptions[ToolChainFamily, ToolChain] = Option
            else:
                OptionString = Module.BuildOptions[ToolChainFamily, ToolChain]
                Module.BuildOptions[ToolChainFamily, ToolChain] = OptionString + " " + Option

        self._Modules[ModuleFile] = Module
    return self._Modules
## Retrieve all possible library instances used in this platform
#  The list is populated as a side effect of _GetLibraryClasses().
def _GetLibraryInstances(self):
    if self._LibraryInstances is None:
        self._GetLibraryClasses()
    return self._LibraryInstances
## Retrieve [LibraryClasses] information
#  Resolves, per (library class, module type), which library instance to use,
#  and collects every instance seen into self._LibraryInstances.
def _GetLibraryClasses(self):
    if self._LibraryClasses == None:
        self._LibraryInstances = []
        #
        # tdict is a special dict kind of type, used for selecting correct
        # library instance for given library class and module type
        #
        LibraryClassDict = tdict(True, 3)
        # track all library class names
        LibraryClassSet = set()
        RecordList = self._RawData[MODEL_EFI_LIBRARY_CLASS, self._Arch, None, -1]
        Macros = self._Macros
        for Record in RecordList:
            LibraryClass, LibraryInstance, Dummy, Arch, ModuleType, Dummy, LineNo = Record
            # empty/NULL class means a forced library instance: synthesize a name
            if LibraryClass == '' or LibraryClass == 'NULL':
                self._NullLibraryNumber += 1
                LibraryClass = 'NULL%d' % self._NullLibraryNumber
                EdkLogger.verbose("Found forced library for arch=%s\n\t%s [%s]" % (Arch, LibraryInstance, LibraryClass))
            LibraryClassSet.add(LibraryClass)
            LibraryInstance = PathClass(NormPath(LibraryInstance, Macros), GlobalData.gWorkspace, Arch=self._Arch)
            # check the file validation
            ErrorCode, ErrorInfo = LibraryInstance.Validate('.inf')
            if ErrorCode != 0:
                EdkLogger.error('build', ErrorCode, File=self.MetaFile, Line=LineNo,
                                ExtraData=ErrorInfo)

            if ModuleType != 'COMMON' and ModuleType not in SUP_MODULE_LIST:
                EdkLogger.error('build', OPTION_UNKNOWN, "Unknown module type [%s]" % ModuleType,
                                File=self.MetaFile, ExtraData=LibraryInstance, Line=LineNo)
            LibraryClassDict[Arch, ModuleType, LibraryClass] = LibraryInstance
            if LibraryInstance not in self._LibraryInstances:
                self._LibraryInstances.append(LibraryInstance)

        # resolve the specific library instance for each class and each module type
        self._LibraryClasses = tdict(True)
        for LibraryClass in LibraryClassSet:
            # try all possible module types
            for ModuleType in SUP_MODULE_LIST:
                LibraryInstance = LibraryClassDict[self._Arch, ModuleType, LibraryClass]
                if LibraryInstance == None:
                    continue
                self._LibraryClasses[LibraryClass, ModuleType] = LibraryInstance

        # for Edk style library instances, which are listed in different section
        Macros["EDK_SOURCE"] = GlobalData.gEcpSource
        RecordList = self._RawData[MODEL_EFI_LIBRARY_INSTANCE, self._Arch]
        for Record in RecordList:
            File = PathClass(NormPath(Record[0], Macros), GlobalData.gWorkspace, Arch=self._Arch)
            LineNo = Record[-1]
            # check the file validation
            ErrorCode, ErrorInfo = File.Validate('.inf')
            if ErrorCode != 0:
                EdkLogger.error('build', ErrorCode, File=self.MetaFile, Line=LineNo,
                                ExtraData=ErrorInfo)
            if File not in self._LibraryInstances:
                self._LibraryInstances.append(File)
            #
            # we need the module name as the library class name, so we have
            # to parse it here. (self._Bdb[] will trigger a file parse if it
            # hasn't been parsed)
            #
            Library = self._Bdb[File, self._Arch, self._Target, self._Toolchain]
            self._LibraryClasses[Library.BaseName, ':dummy:'] = Library
    return self._LibraryClasses
## Retrieve all PCD settings in platform
#  Aggregates every PCD category (static, dynamic, HII, VPD, and the Ex
#  variants) into one ordered dict keyed by (PcdCName, TokenSpaceGuid).
def _GetPcds(self):
    if self._Pcds == None:
        self._Pcds = sdict()
        self._Pcds.update(self._GetPcd(MODEL_PCD_FIXED_AT_BUILD))
        self._Pcds.update(self._GetPcd(MODEL_PCD_PATCHABLE_IN_MODULE))
        self._Pcds.update(self._GetPcd(MODEL_PCD_FEATURE_FLAG))
        self._Pcds.update(self._GetDynamicPcd(MODEL_PCD_DYNAMIC_DEFAULT))
        self._Pcds.update(self._GetDynamicHiiPcd(MODEL_PCD_DYNAMIC_HII))
        self._Pcds.update(self._GetDynamicVpdPcd(MODEL_PCD_DYNAMIC_VPD))
        self._Pcds.update(self._GetDynamicPcd(MODEL_PCD_DYNAMIC_EX_DEFAULT))
        self._Pcds.update(self._GetDynamicHiiPcd(MODEL_PCD_DYNAMIC_EX_HII))
        self._Pcds.update(self._GetDynamicVpdPcd(MODEL_PCD_DYNAMIC_EX_VPD))
    return self._Pcds
## Retrieve [BuildOptions]
#  Keyed by (ToolChainFamily, ToolChain, EDKII_NAME/EDK_NAME).
def _GetBuildOptions(self):
    if self._BuildOptions == None:
        self._BuildOptions = sdict()
        #
        # Retrieve build option for EDKII style module
        #
        RecordList = self._RawData[MODEL_META_DATA_BUILD_OPTION, self._Arch, EDKII_NAME]
        for ToolChainFamily, ToolChain, Option, Dummy1, Dummy2, Dummy3, Dummy4 in RecordList:
            self._BuildOptions[ToolChainFamily, ToolChain, EDKII_NAME] = Option
        #
        # Retrieve build option for EDK style module
        #
        RecordList = self._RawData[MODEL_META_DATA_BUILD_OPTION, self._Arch, EDK_NAME]
        for ToolChainFamily, ToolChain, Option, Dummy1, Dummy2, Dummy3, Dummy4 in RecordList:
            self._BuildOptions[ToolChainFamily, ToolChain, EDK_NAME] = Option
    return self._BuildOptions
## Retrieve non-dynamic PCD settings
#
#   @param  Type    PCD type
#
#   @retval a dict object contains settings of given PCD type
#
def _GetPcd(self, Type):
    Pcds = sdict()
    #
    # tdict is a special dict kind of type, used for selecting correct
    # PCD settings for certain ARCH
    #
    PcdDict = tdict(True, 3)
    PcdSet = set()
    # Find out all possible PCD candidates for self._Arch
    RecordList = self._RawData[Type, self._Arch]
    for TokenSpaceGuid, PcdCName, Setting, Arch, SkuName, Dummy3, Dummy4 in RecordList:
        PcdSet.add((PcdCName, TokenSpaceGuid))
        PcdDict[Arch, PcdCName, TokenSpaceGuid] = Setting
    # Remove redundant PCD candidates
    for PcdCName, TokenSpaceGuid in PcdSet:
        # tdict lookup picks the most specific arch match; None means no setting
        Setting = PcdDict[self._Arch, PcdCName, TokenSpaceGuid]
        if Setting == None:
            continue
        PcdValue, DatumType, MaxDatumSize = AnalyzePcdData(Setting)
        Pcds[PcdCName, TokenSpaceGuid] = PcdClassObject(
                                            PcdCName,
                                            TokenSpaceGuid,
                                            self._PCD_TYPE_STRING_[Type],
                                            DatumType,
                                            PcdValue,
                                            '',
                                            MaxDatumSize,
                                            {},
                                            False,
                                            None
                                            )
    return Pcds
## Retrieve dynamic PCD settings
#
#   @param  Type    PCD type
#
#   @retval a dict object contains settings of given PCD type
#
def _GetDynamicPcd(self, Type):
    Pcds = sdict()
    #
    # tdict is a special dict kind of type, used for selecting correct
    # PCD settings for certain ARCH and SKU
    #
    PcdDict = tdict(True, 4)
    PcdList = []
    # Find out all possible PCD candidates for self._Arch
    RecordList = self._RawData[Type, self._Arch]
    for TokenSpaceGuid, PcdCName, Setting, Arch, SkuName, Dummy3, Dummy4 in RecordList:
        PcdList.append((PcdCName, TokenSpaceGuid))
        PcdDict[Arch, SkuName, PcdCName, TokenSpaceGuid] = Setting
    # Remove redundant PCD candidates, per the ARCH and SKU
    for PcdCName, TokenSpaceGuid in PcdList:
        Setting = PcdDict[self._Arch, self.SkuName, PcdCName, TokenSpaceGuid]
        if Setting == None:
            continue
        PcdValue, DatumType, MaxDatumSize = AnalyzePcdData(Setting)
        # the PCD carries one SkuInfo entry for the currently selected SKU
        SkuInfo = SkuInfoClass(self.SkuName, self.SkuIds[self.SkuName], '', '', '', '', '', PcdValue)
        Pcds[PcdCName, TokenSpaceGuid] = PcdClassObject(
                                            PcdCName,
                                            TokenSpaceGuid,
                                            self._PCD_TYPE_STRING_[Type],
                                            DatumType,
                                            PcdValue,
                                            '',
                                            MaxDatumSize,
                                            {self.SkuName : SkuInfo},
                                            False,
                                            None
                                            )
    return Pcds
## Retrieve dynamic HII PCD settings
#
#   @param  Type    PCD type
#
#   @retval a dict object contains settings of given PCD type
#
def _GetDynamicHiiPcd(self, Type):
    Pcds = sdict()
    #
    # tdict is a special dict kind of type, used for selecting correct
    # PCD settings for certain ARCH and SKU
    #
    PcdDict = tdict(True, 4)
    PcdSet = set()
    RecordList = self._RawData[Type, self._Arch]
    # Find out all possible PCD candidates for self._Arch
    for TokenSpaceGuid, PcdCName, Setting, Arch, SkuName, Dummy3, Dummy4 in RecordList:
        PcdSet.add((PcdCName, TokenSpaceGuid))
        PcdDict[Arch, SkuName, PcdCName, TokenSpaceGuid] = Setting
    # Remove redundant PCD candidates, per the ARCH and SKU
    for PcdCName, TokenSpaceGuid in PcdSet:
        Setting = PcdDict[self._Arch, self.SkuName, PcdCName, TokenSpaceGuid]
        if Setting == None:
            continue
        # HII PCDs are backed by an EFI variable: name/guid/offset + default
        VariableName, VariableGuid, VariableOffset, DefaultValue = AnalyzeHiiPcdData(Setting)
        SkuInfo = SkuInfoClass(self.SkuName, self.SkuIds[self.SkuName], VariableName, VariableGuid, VariableOffset, DefaultValue)
        Pcds[PcdCName, TokenSpaceGuid] = PcdClassObject(
                                            PcdCName,
                                            TokenSpaceGuid,
                                            self._PCD_TYPE_STRING_[Type],
                                            '',
                                            DefaultValue,
                                            '',
                                            '',
                                            {self.SkuName : SkuInfo},
                                            False,
                                            None
                                            )
    return Pcds
## Retrieve dynamic VPD PCD settings
#
#   @param  Type    PCD type
#
#   @retval a dict object contains settings of given PCD type
#
def _GetDynamicVpdPcd(self, Type):
    Pcds = sdict()
    #
    # tdict is a special dict kind of type, used for selecting correct
    # PCD settings for certain ARCH and SKU
    #
    PcdDict = tdict(True, 4)
    PcdList = []
    # Find out all possible PCD candidates for self._Arch
    RecordList = self._RawData[Type, self._Arch]
    for TokenSpaceGuid, PcdCName, Setting, Arch, SkuName, Dummy3, Dummy4 in RecordList:
        PcdList.append((PcdCName, TokenSpaceGuid))
        PcdDict[Arch, SkuName, PcdCName, TokenSpaceGuid] = Setting
    # Remove redundant PCD candidates, per the ARCH and SKU
    for PcdCName, TokenSpaceGuid in PcdList:
        Setting = PcdDict[self._Arch, self.SkuName, PcdCName, TokenSpaceGuid]
        if Setting == None:
            continue
        #
        # For the VOID* type, it can have optional data of MaxDatumSize and InitialValue
        # For the Integer & Boolean type, the optional data can only be InitialValue.
        # At this point, we put all the data into the PcdClssObject for we don't know the PCD's datumtype
        # until the DEC parser has been called.
        #
        VpdOffset, MaxDatumSize, InitialValue = AnalyzeVpdPcdData(Setting)
        SkuInfo = SkuInfoClass(self.SkuName, self.SkuIds[self.SkuName], '', '', '', '', VpdOffset, InitialValue)
        Pcds[PcdCName, TokenSpaceGuid] = PcdClassObject(
                                            PcdCName,
                                            TokenSpaceGuid,
                                            self._PCD_TYPE_STRING_[Type],
                                            '',
                                            '',
                                            '',
                                            MaxDatumSize,
                                            {self.SkuName : SkuInfo},
                                            False,
                                            None
                                            )
    return Pcds
## Add external modules
#
#   The external modules are mostly those listed in FDF file, which don't
#  need "build".
#
#   @param  FilePath    The path of module description file
#
def AddModule(self, FilePath):
    FilePath = NormPath(FilePath)
    if FilePath not in self.Modules:
        Module = ModuleBuildClassObject()
        Module.MetaFile = FilePath
        # NOTE(review): self.Modules is built as an sdict in _GetModules and
        # is written there via key assignment; .append() here looks
        # inconsistent -- confirm sdict actually supports append.
        self.Modules.append(Module)
## Add external PCDs
#
#   The external PCDs are mostly those listed in FDF file to specify address
#  or offset information.
#
#   @param  Name    Name of the PCD
#   @param  Guid    Token space guid of the PCD
#   @param  Value   Value of the PCD
#
def AddPcd(self, Name, Guid, Value):
    PcdKey = (Name, Guid)
    # create a placeholder entry on first sight, then (re)set its default
    if PcdKey not in self.Pcds:
        self.Pcds[PcdKey] = PcdClassObject(Name, Guid, '', '', '', '', '', {}, False, None)
    self.Pcds[PcdKey].DefaultValue = Value
    ## Verify that every PCD used in this DSC is declared in some DEC file
    #
    #   Walks all PCD sections of the platform and, for each PCD record,
    #   raises a build error if it is missing from DecPcds or if its value
    #   does not match the datum type declared in the DEC.
    #
    #   @param      DecPcds     Dict keyed by (PcdCName, TokenSpaceGuid) with
    #                           objects exposing a DatumType attribute
    #
    def IsPlatformPcdDeclared(self, DecPcds):
        for PcdType in (MODEL_PCD_FIXED_AT_BUILD, MODEL_PCD_PATCHABLE_IN_MODULE, MODEL_PCD_FEATURE_FLAG,
                        MODEL_PCD_DYNAMIC_DEFAULT, MODEL_PCD_DYNAMIC_HII, MODEL_PCD_DYNAMIC_VPD,
                        MODEL_PCD_DYNAMIC_EX_DEFAULT, MODEL_PCD_DYNAMIC_EX_HII, MODEL_PCD_DYNAMIC_EX_VPD):
            RecordList = self._RawData[PcdType, self._Arch]
            for TokenSpaceGuid, PcdCName, Setting, Arch, SkuName, Dummy3, Dummy4 in RecordList:
                # Dummy4 carries the line number of the record in the DSC.
                if (PcdCName, TokenSpaceGuid) not in DecPcds:
                    EdkLogger.error('build', PARSER_ERROR,
                                    "Pcd (%s.%s) defined in DSC is not declared in DEC files." % (TokenSpaceGuid, PcdCName),
                                    File=self.MetaFile, Line=Dummy4)
                PcdValue = ''
                # The position of the value inside Setting depends on the PCD
                # storage type (VPD / HII / plain), hence the analyzer switch.
                if PcdType in (MODEL_PCD_DYNAMIC_VPD, MODEL_PCD_DYNAMIC_EX_VPD):
                    if DecPcds[PcdCName, TokenSpaceGuid].DatumType == "VOID*":
                        PcdValue = AnalyzeVpdPcdData(Setting)[2]
                    else:
                        PcdValue = AnalyzeVpdPcdData(Setting)[1]
                elif PcdType in (MODEL_PCD_DYNAMIC_HII, MODEL_PCD_DYNAMIC_EX_HII):
                    PcdValue = AnalyzeHiiPcdData(Setting)[3]
                else:
                    PcdValue = AnalyzePcdData(Setting)[0]
                if PcdValue:
                    # Validate the value against the DEC-declared datum type.
                    Valid, ErrStr = CheckPcdDatum(DecPcds[PcdCName, TokenSpaceGuid].DatumType, PcdValue)
                    if not Valid:
                        EdkLogger.error('build', FORMAT_INVALID, ErrStr, File=self.MetaFile, Line=Dummy4,
                                        ExtraData="%s.%s" % (TokenSpaceGuid, PcdCName))
_Macros = property(_GetMacros)
Arch = property(_GetArch, _SetArch)
Platform = property(_GetPlatformName)
PlatformName = property(_GetPlatformName)
Guid = property(_GetFileGuid)
Version = property(_GetVersion)
DscSpecification = property(_GetDscSpec)
OutputDirectory = property(_GetOutpuDir)
SupArchList = property(_GetSupArch)
BuildTargets = property(_GetBuildTarget)
SkuName = property(_GetSkuName, _SetSkuName)
FlashDefinition = property(_GetFdfFile)
BuildNumber = property(_GetBuildNumber)
MakefileName = property(_GetMakefileName)
BsBaseAddress = property(_GetBsBaseAddress)
RtBaseAddress = property(_GetRtBaseAddress)
LoadFixAddress = property(_GetLoadFixAddress)
RFCLanguages = property(_GetRFCLanguages)
ISOLanguages = property(_GetISOLanguages)
VpdToolGuid = property(_GetVpdToolGuid)
SkuIds = property(_GetSkuIds)
Modules = property(_GetModules)
LibraryInstances = property(_GetLibraryInstances)
LibraryClasses = property(_GetLibraryClasses)
Pcds = property(_GetPcds)
BuildOptions = property(_GetBuildOptions)
## Platform build information from DEC file
#
# This class is used to retrieve information stored in database and convert them
# into PackageBuildClassObject form for easier use for AutoGen.
#
class DecBuildData(PackageBuildClassObject):
    """Package build information retrieved from a DEC file.

    All data members are resolved lazily from the metadata database
    (self._RawData) on first property access and cached afterwards.
    """
    # dict used to convert PCD type in database to string used by build tool
    _PCD_TYPE_STRING_ = {
        MODEL_PCD_FIXED_AT_BUILD        :   "FixedAtBuild",
        MODEL_PCD_PATCHABLE_IN_MODULE   :   "PatchableInModule",
        MODEL_PCD_FEATURE_FLAG          :   "FeatureFlag",
        MODEL_PCD_DYNAMIC               :   "Dynamic",
        MODEL_PCD_DYNAMIC_DEFAULT       :   "Dynamic",
        MODEL_PCD_DYNAMIC_HII           :   "DynamicHii",
        MODEL_PCD_DYNAMIC_VPD           :   "DynamicVpd",
        MODEL_PCD_DYNAMIC_EX            :   "DynamicEx",
        MODEL_PCD_DYNAMIC_EX_DEFAULT    :   "DynamicEx",
        MODEL_PCD_DYNAMIC_EX_HII        :   "DynamicExHii",
        MODEL_PCD_DYNAMIC_EX_VPD        :   "DynamicExVpd",
    }

    # dict used to convert part of [Defines] to members of DecBuildData directly
    _PROPERTY_ = {
        #
        # Required Fields
        #
        TAB_DEC_DEFINES_PACKAGE_NAME                : "_PackageName",
        TAB_DEC_DEFINES_PACKAGE_GUID                : "_Guid",
        TAB_DEC_DEFINES_PACKAGE_VERSION             : "_Version",
        TAB_DEC_DEFINES_PKG_UNI_FILE                : "_PkgUniFile",
    }


    ## Constructor of DecBuildData
    #
    #  Initialize object of DecBuildData
    #
    #   @param      FilePath        The path of package description file
    #   @param      RawData         The raw data of DEC file
    #   @param      BuildDataBase   Database used to retrieve module information
    #   @param      Arch            The target architecture
    #   @param      Target          (not used for DecBuildData)
    #   @param      Toolchain       (not used for DecBuildData)
    #
    def __init__(self, File, RawData, BuildDataBase, Arch='COMMON', Target=None, Toolchain=None):
        self.MetaFile = File
        self._PackageDir = File.Dir
        self._RawData = RawData
        self._Bdb = BuildDataBase
        self._Arch = Arch
        self._Target = Target
        self._Toolchain = Toolchain
        self._Clear()

    ## XXX[key] = value
    def __setitem__(self, key, value):
        self.__dict__[self._PROPERTY_[key]] = value

    ## value = XXX[key]
    def __getitem__(self, key):
        return self.__dict__[self._PROPERTY_[key]]

    ## "in" test support
    def __contains__(self, key):
        return key in self._PROPERTY_

    ## Set all internal used members of DecBuildData to None
    def _Clear(self):
        self._Header            = None
        self._PackageName       = None
        self._Guid              = None
        self._Version           = None
        self._PkgUniFile        = None
        self._Protocols         = None
        self._Ppis              = None
        self._Guids             = None
        self._Includes          = None
        self._LibraryClasses    = None
        self._Pcds              = None
        self.__Macros           = None

    ## Get current effective macros
    def _GetMacros(self):
        # Only global defines apply to a DEC; cached after first build.
        if self.__Macros == None:
            self.__Macros = {}
            self.__Macros.update(GlobalData.gGlobalDefines)
        return self.__Macros

    ## Get architecture
    def _GetArch(self):
        return self._Arch

    ## Set architecture
    #
    #   Changing the default ARCH to another may affect all other information
    # because all information in a platform may be ARCH-related. That's
    # why we need to clear all internal used members, in order to cause all
    # information to be re-retrieved.
    #
    #   @param  Value   The value of ARCH
    #
    def _SetArch(self, Value):
        if self._Arch == Value:
            return
        self._Arch = Value
        self._Clear()

    ## Retrieve all information in [Defines] section
    #
    #   (Retrieving all [Defines] information in one-shot is just to save time.)
    #
    def _GetHeaderInfo(self):
        RecordList = self._RawData[MODEL_META_DATA_HEADER, self._Arch]
        for Record in RecordList:
            Name = Record[1]
            if Name in self:
                self[Name] = Record[2]
        # Sentinel marking that the header has been parsed.
        self._Header = 'DUMMY'

    ## Retrieve package name
    def _GetPackageName(self):
        if self._PackageName == None:
            if self._Header == None:
                self._GetHeaderInfo()
            if self._PackageName == None:
                EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "No PACKAGE_NAME", File=self.MetaFile)
        return self._PackageName

    ## Retrieve file guid
    def _GetFileGuid(self):
        if self._Guid == None:
            if self._Header == None:
                self._GetHeaderInfo()
            if self._Guid == None:
                EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "No PACKAGE_GUID", File=self.MetaFile)
        return self._Guid

    ## Retrieve package version
    def _GetVersion(self):
        if self._Version == None:
            if self._Header == None:
                self._GetHeaderInfo()
            if self._Version == None:
                self._Version = ''
        return self._Version

    ## Retrieve protocol definitions (name/value pairs)
    def _GetProtocol(self):
        if self._Protocols == None:
            #
            # tdict is a special kind of dict, used for selecting correct
            # protocol definition for given ARCH
            #
            ProtocolDict = tdict(True)
            NameList = []
            # find out all protocol definitions for specific and 'common' arch
            RecordList = self._RawData[MODEL_EFI_PROTOCOL, self._Arch]
            for Name, Guid, Dummy, Arch, ID, LineNo in RecordList:
                if Name not in NameList:
                    NameList.append(Name)
                ProtocolDict[Arch, Name] = Guid
            # use sdict to keep the order
            self._Protocols = sdict()
            for Name in NameList:
                #
                # limit the ARCH to self._Arch, if no self._Arch found, tdict
                # will automatically turn to 'common' ARCH for trying
                #
                self._Protocols[Name] = ProtocolDict[self._Arch, Name]
        return self._Protocols

    ## Retrieve PPI definitions (name/value pairs)
    def _GetPpi(self):
        if self._Ppis == None:
            #
            # tdict is a special kind of dict, used for selecting correct
            # PPI definition for given ARCH
            #
            PpiDict = tdict(True)
            NameList = []
            # find out all PPI definitions for specific arch and 'common' arch
            RecordList = self._RawData[MODEL_EFI_PPI, self._Arch]
            for Name, Guid, Dummy, Arch, ID, LineNo in RecordList:
                if Name not in NameList:
                    NameList.append(Name)
                PpiDict[Arch, Name] = Guid
            # use sdict to keep the order
            self._Ppis = sdict()
            for Name in NameList:
                #
                # limit the ARCH to self._Arch, if no self._Arch found, tdict
                # will automatically turn to 'common' ARCH for trying
                #
                self._Ppis[Name] = PpiDict[self._Arch, Name]
        return self._Ppis

    ## Retrieve GUID definitions (name/value pairs)
    def _GetGuid(self):
        if self._Guids == None:
            #
            # tdict is a special kind of dict, used for selecting correct
            # GUID definition for given ARCH
            #
            GuidDict = tdict(True)
            NameList = []
            # find out all protocol definitions for specific and 'common' arch
            RecordList = self._RawData[MODEL_EFI_GUID, self._Arch]
            for Name, Guid, Dummy, Arch, ID, LineNo in RecordList:
                if Name not in NameList:
                    NameList.append(Name)
                GuidDict[Arch, Name] = Guid
            # use sdict to keep the order
            self._Guids = sdict()
            for Name in NameList:
                #
                # limit the ARCH to self._Arch, if no self._Arch found, tdict
                # will automatically turn to 'common' ARCH for trying
                #
                self._Guids[Name] = GuidDict[self._Arch, Name]
        return self._Guids

    ## Retrieve public include paths declared in this package
    def _GetInclude(self):
        if self._Includes == None:
            self._Includes = []
            RecordList = self._RawData[MODEL_EFI_INCLUDE, self._Arch]
            Macros = self._Macros
            Macros["EDK_SOURCE"] = GlobalData.gEcpSource
            for Record in RecordList:
                File = PathClass(NormPath(Record[0], Macros), self._PackageDir, Arch=self._Arch)
                LineNo = Record[-1]
                # validate the path
                ErrorCode, ErrorInfo = File.Validate()
                if ErrorCode != 0:
                    EdkLogger.error('build', ErrorCode, ExtraData=ErrorInfo, File=self.MetaFile, Line=LineNo)
                # avoid duplicate include path
                if File not in self._Includes:
                    self._Includes.append(File)
        return self._Includes

    ## Retrieve library class declarations (not used in build at present)
    def _GetLibraryClass(self):
        if self._LibraryClasses == None:
            #
            # tdict is a special kind of dict, used for selecting correct
            # library class declaration for given ARCH
            #
            LibraryClassDict = tdict(True)
            LibraryClassSet = set()
            RecordList = self._RawData[MODEL_EFI_LIBRARY_CLASS, self._Arch]
            Macros = self._Macros
            for LibraryClass, File, Dummy, Arch, ID, LineNo in RecordList:
                File = PathClass(NormPath(File, Macros), self._PackageDir, Arch=self._Arch)
                # check the file validation
                ErrorCode, ErrorInfo = File.Validate()
                if ErrorCode != 0:
                    EdkLogger.error('build', ErrorCode, ExtraData=ErrorInfo, File=self.MetaFile, Line=LineNo)
                LibraryClassSet.add(LibraryClass)
                LibraryClassDict[Arch, LibraryClass] = File
            self._LibraryClasses = sdict()
            for LibraryClass in LibraryClassSet:
                self._LibraryClasses[LibraryClass] = LibraryClassDict[self._Arch, LibraryClass]
        return self._LibraryClasses

    ## Retrieve PCD declarations
    def _GetPcds(self):
        if self._Pcds == None:
            self._Pcds = sdict()
            self._Pcds.update(self._GetPcd(MODEL_PCD_FIXED_AT_BUILD))
            self._Pcds.update(self._GetPcd(MODEL_PCD_PATCHABLE_IN_MODULE))
            self._Pcds.update(self._GetPcd(MODEL_PCD_FEATURE_FLAG))
            self._Pcds.update(self._GetPcd(MODEL_PCD_DYNAMIC))
            self._Pcds.update(self._GetPcd(MODEL_PCD_DYNAMIC_EX))
        return self._Pcds

    ## Retrieve PCD declarations for given type
    def _GetPcd(self, Type):
        Pcds = sdict()
        #
        # tdict is a special kind of dict, used for selecting correct
        # PCD declaration for given ARCH
        #
        PcdDict = tdict(True, 3)
        # for summarizing PCD
        PcdSet = set()
        # find out all PCDs of the 'type'
        RecordList = self._RawData[Type, self._Arch]
        for TokenSpaceGuid, PcdCName, Setting, Arch, Dummy1, Dummy2 in RecordList:
            PcdDict[Arch, PcdCName, TokenSpaceGuid] = Setting
            PcdSet.add((PcdCName, TokenSpaceGuid))

        for PcdCName, TokenSpaceGuid in PcdSet:
            #
            # limit the ARCH to self._Arch, if no self._Arch found, tdict
            # will automatically turn to 'common' ARCH and try again
            #
            Setting = PcdDict[self._Arch, PcdCName, TokenSpaceGuid]
            if Setting == None:
                continue

            DefaultValue, DatumType, TokenNumber = AnalyzePcdData(Setting)

            # NOTE: the key includes the type string, so the same PCD declared
            # with several types yields distinct entries.
            Pcds[PcdCName, TokenSpaceGuid, self._PCD_TYPE_STRING_[Type]] = PcdClassObject(
                                                                            PcdCName,
                                                                            TokenSpaceGuid,
                                                                            self._PCD_TYPE_STRING_[Type],
                                                                            DatumType,
                                                                            DefaultValue,
                                                                            TokenNumber,
                                                                            '',
                                                                            {},
                                                                            False,
                                                                            None
                                                                            )
        return Pcds

    _Macros = property(_GetMacros)
    Arch = property(_GetArch, _SetArch)
    PackageName = property(_GetPackageName)
    Guid = property(_GetFileGuid)
    Version = property(_GetVersion)

    Protocols = property(_GetProtocol)
    Ppis = property(_GetPpi)
    Guids = property(_GetGuid)
    Includes = property(_GetInclude)
    LibraryClasses = property(_GetLibraryClass)
    Pcds = property(_GetPcds)
## Module build information from INF file
#
# This class is used to retrieve information stored in database and convert them
# into ModuleBuildClassObject form for easier use for AutoGen.
#
class InfBuildData(ModuleBuildClassObject):
    # dict used to convert PCD type in database to string used by build tool
    _PCD_TYPE_STRING_ = {
        MODEL_PCD_FIXED_AT_BUILD        :   "FixedAtBuild",
        MODEL_PCD_PATCHABLE_IN_MODULE   :   "PatchableInModule",
        MODEL_PCD_FEATURE_FLAG          :   "FeatureFlag",
        MODEL_PCD_DYNAMIC               :   "Dynamic",
        MODEL_PCD_DYNAMIC_DEFAULT       :   "Dynamic",
        MODEL_PCD_DYNAMIC_HII           :   "DynamicHii",
        MODEL_PCD_DYNAMIC_VPD           :   "DynamicVpd",
        MODEL_PCD_DYNAMIC_EX            :   "DynamicEx",
        MODEL_PCD_DYNAMIC_EX_DEFAULT    :   "DynamicEx",
        MODEL_PCD_DYNAMIC_EX_HII        :   "DynamicExHii",
        MODEL_PCD_DYNAMIC_EX_VPD        :   "DynamicExVpd",
    }

    # dict used to convert part of [Defines] to members of InfBuildData directly
    _PROPERTY_ = {
        #
        # Required Fields
        #
        TAB_INF_DEFINES_BASE_NAME                   : "_BaseName",
        TAB_INF_DEFINES_FILE_GUID                   : "_Guid",
        TAB_INF_DEFINES_MODULE_TYPE                 : "_ModuleType",
        #
        # Optional Fields
        #
        #TAB_INF_DEFINES_INF_VERSION                : "_AutoGenVersion",
        TAB_INF_DEFINES_COMPONENT_TYPE              : "_ComponentType",
        TAB_INF_DEFINES_MAKEFILE_NAME               : "_MakefileName",
        #TAB_INF_DEFINES_CUSTOM_MAKEFILE            : "_CustomMakefile",
        TAB_INF_DEFINES_DPX_SOURCE                  :"_DxsFile",
        TAB_INF_DEFINES_VERSION_NUMBER              : "_Version",
        TAB_INF_DEFINES_VERSION_STRING              : "_Version",
        TAB_INF_DEFINES_VERSION                     : "_Version",
        TAB_INF_DEFINES_PCD_IS_DRIVER               : "_PcdIsDriver",
        TAB_INF_DEFINES_SHADOW                      : "_Shadow",

        TAB_COMPONENTS_SOURCE_OVERRIDE_PATH         : "_SourceOverridePath",
    }

    # dict used to convert Component type to Module type
    # (legacy Edk.x COMPONENT_TYPE values -> EDK II MODULE_TYPE values)
    _MODULE_TYPE_ = {
        "LIBRARY"               :   "BASE",
        "SECURITY_CORE"         :   "SEC",
        "PEI_CORE"              :   "PEI_CORE",
        "COMBINED_PEIM_DRIVER"  :   "PEIM",
        "PIC_PEIM"              :   "PEIM",
        "RELOCATABLE_PEIM"      :   "PEIM",
        "PE32_PEIM"             :   "PEIM",
        "BS_DRIVER"             :   "DXE_DRIVER",
        "RT_DRIVER"             :   "DXE_RUNTIME_DRIVER",
        "SAL_RT_DRIVER"         :   "DXE_SAL_DRIVER",
        "DXE_SMM_DRIVER"        :   "DXE_SMM_DRIVER",
    #    "SMM_DRIVER"            :   "DXE_SMM_DRIVER",
    #    "BS_DRIVER"             :   "DXE_SMM_DRIVER",
    #    "BS_DRIVER"             :   "UEFI_DRIVER",
        "APPLICATION"           :   "UEFI_APPLICATION",
        "LOGO"                  :   "BASE",
    }

    # regular expression for converting XXX_FLAGS in [nmake] section to new type
    _NMAKE_FLAG_PATTERN_ = re.compile("(?:EBC_)?([A-Z]+)_(?:STD_|PROJ_|ARCH_)?FLAGS(?:_DLL|_ASL|_EXE)?", re.UNICODE)
    # dict used to convert old tool name used in [nmake] section to new ones
    _TOOL_CODE_ = {
        "C"         :   "CC",
        "LIB"       :   "SLINK",
        "LINK"      :   "DLINK",
    }
    ## Constructor of InfBuildData
    #
    #  Initialize object of InfBuildData
    #
    #   @param      FilePath        The path of module description (INF) file
    #   @param      RawData         The raw data of INF file
    #   @param      BuildDatabase   Database used to retrieve module/package information
    #   @param      Arch            The target architecture
    #   @param      Target          The build target (stored, not interpreted here)
    #   @param      Toolchain       The tool chain name (stored, not interpreted here)
    #
    def __init__(self, FilePath, RawData, BuildDatabase, Arch='COMMON', Target=None, Toolchain=None):
        self.MetaFile = FilePath
        self._ModuleDir = FilePath.Dir
        self._RawData = RawData
        self._Bdb = BuildDatabase
        self._Arch = Arch
        self._Target = Target
        self._Toolchain = Toolchain
        # Platform context starts as 'COMMON'; _SetPlatform narrows it later.
        self._Platform = 'COMMON'
        self._SourceOverridePath = None
        if FilePath.Key in GlobalData.gOverrideDir:
            self._SourceOverridePath = GlobalData.gOverrideDir[FilePath.Key]
        self._Clear()
## XXX[key] = value
def __setitem__(self, key, value):
self.__dict__[self._PROPERTY_[key]] = value
## value = XXX[key]
def __getitem__(self, key):
return self.__dict__[self._PROPERTY_[key]]
## "in" test support
def __contains__(self, key):
return key in self._PROPERTY_
    ## Set all internal used members of InfBuildData to None
    #
    #  Invalidates every cached value so the next property access re-queries
    #  the database.  Called from the constructor and whenever Arch/Platform
    #  context changes.
    def _Clear(self):
        # [Defines] header data
        self._Header_               = None
        self._AutoGenVersion        = None
        self._BaseName              = None
        self._DxsFile               = None
        self._ModuleType            = None
        self._ComponentType         = None
        self._BuildType             = None
        self._Guid                  = None
        self._Version               = None
        self._PcdIsDriver           = None
        self._BinaryModule          = None
        self._Shadow                = None
        self._MakefileName          = None
        self._CustomMakefile        = None
        self._Specification         = None
        self._LibraryClass          = None
        self._ModuleEntryPointList  = None
        self._ModuleUnloadImageList = None
        self._ConstructorList       = None
        self._DestructorList        = None
        self._Defs                  = None
        # section data (sources, binaries, consumed interfaces, ...)
        self._Binaries              = None
        self._Sources               = None
        self._LibraryClasses        = None
        self._Libraries             = None
        self._Protocols             = None
        self._Ppis                  = None
        self._Guids                 = None
        self._Includes              = None
        self._Packages              = None
        self._Pcds                  = None
        self._BuildOptions          = None
        self._Depex                 = None
        self._DepexExpression       = None
        self.__Macros               = None
    ## Get current effective macros
    #
    #  Builds the macro dictionary once and caches it.  Later updates win on
    #  key collisions, so the precedence is (lowest to highest):
    #  EDK_GLOBAL (Edk.x modules only) < global defines < command-line defines.
    def _GetMacros(self):
        if self.__Macros == None:
            self.__Macros = {}
            # EDK_GLOBAL defined macros can be applied to EDK module
            if self.AutoGenVersion < 0x00010005:
                self.__Macros.update(GlobalData.gEdkGlobal)
            self.__Macros.update(GlobalData.gGlobalDefines)
            # VBox hack begin - Required for referencing files outside the workspace, like the reset vectors and logo.
            self.__Macros.update(GlobalData.gCommandLineDefines);
            # VBox hack end.
        return self.__Macros
    ## Get architecture
    #
    #  Plain accessor for the private _Arch member.
    def _GetArch(self):
        return self._Arch
## Set architecture
#
# Changing the default ARCH to another may affect all other information
# because all information in a platform may be ARCH-related. That's
# why we need to clear all internal used members, in order to cause all
# information to be re-retrieved.
#
# @param Value The value of ARCH
#
def _SetArch(self, Value):
if self._Arch == Value:
return
self._Arch = Value
self._Clear()
    ## Return the name of platform employing this module
    #
    #  Plain accessor for the private _Platform member.
    def _GetPlatform(self):
        return self._Platform
## Change the name of platform employing this module
#
# Changing the default name of platform to another may affect some information
# because they may be PLATFORM-related. That's why we need to clear all internal
# used members, in order to cause all information to be re-retrieved.
#
def _SetPlatform(self, Value):
if self._Platform == Value:
return
self._Platform = Value
self._Clear()
    ## Retrieve all information in [Defines] section
    #
    #   (Retrieving all [Defines] information in one-shot is just to save time.)
    #
    #  Populates the _PROPERTY_-mapped members plus the special-cased ones
    #  (specification versions, library class, entry points, constructors,
    #  custom makefiles, ...).  Afterwards applies the EDK II vs. Edk.x
    #  post-processing (module-type validation resp. [nmake] conversion).
    #
    def _GetHeaderInfo(self):
        RecordList = self._RawData[MODEL_META_DATA_HEADER, self._Arch, self._Platform]
        for Record in RecordList:
            Name, Value = Record[1], ReplaceMacro(Record[2], self._Macros, False)
            # items defined _PROPERTY_ don't need additional processing
            if Name in self:
                self[Name] = Value
            # some special items in [Defines] section need special treatment
            elif Name in ('EFI_SPECIFICATION_VERSION', 'UEFI_SPECIFICATION_VERSION', 'EDK_RELEASE_VERSION', 'PI_SPECIFICATION_VERSION'):
                # EFI_* is the legacy spelling of UEFI_SPECIFICATION_VERSION.
                if Name in ('EFI_SPECIFICATION_VERSION', 'UEFI_SPECIFICATION_VERSION'):
                    Name = 'UEFI_SPECIFICATION_VERSION'
                if self._Specification == None:
                    self._Specification = sdict()
                self._Specification[Name] = GetHexVerValue(Value)
                if self._Specification[Name] == None:
                    EdkLogger.error("build", FORMAT_NOT_SUPPORTED,
                                    "'%s' format is not supported for %s" % (Value, Name),
                                    File=self.MetaFile, Line=Record[-1])
            elif Name == 'LIBRARY_CLASS':
                if self._LibraryClass == None:
                    self._LibraryClass = []
                ValueList = GetSplitValueList(Value)
                LibraryClass = ValueList[0]
                # optional second field restricts the supported module types
                if len(ValueList) > 1:
                    SupModuleList = GetSplitValueList(ValueList[1], ' ')
                else:
                    SupModuleList = SUP_MODULE_LIST
                self._LibraryClass.append(LibraryClassObject(LibraryClass, SupModuleList))
            elif Name == 'ENTRY_POINT':
                if self._ModuleEntryPointList == None:
                    self._ModuleEntryPointList = []
                self._ModuleEntryPointList.append(Value)
            elif Name == 'UNLOAD_IMAGE':
                if self._ModuleUnloadImageList == None:
                    self._ModuleUnloadImageList = []
                if not Value:
                    continue
                self._ModuleUnloadImageList.append(Value)
            elif Name == 'CONSTRUCTOR':
                if self._ConstructorList == None:
                    self._ConstructorList = []
                if not Value:
                    continue
                self._ConstructorList.append(Value)
            elif Name == 'DESTRUCTOR':
                if self._DestructorList == None:
                    self._DestructorList = []
                if not Value:
                    continue
                self._DestructorList.append(Value)
            elif Name == TAB_INF_DEFINES_CUSTOM_MAKEFILE:
                TokenList = GetSplitValueList(Value)
                if self._CustomMakefile == None:
                    self._CustomMakefile = {}
                # without an explicit family the makefile applies to both
                if len(TokenList) < 2:
                    self._CustomMakefile['MSFT'] = TokenList[0]
                    self._CustomMakefile['GCC'] = TokenList[0]
                else:
                    if TokenList[0] not in ['MSFT', 'GCC']:
                        EdkLogger.error("build", FORMAT_NOT_SUPPORTED,
                                        "No supported family [%s]" % TokenList[0],
                                        File=self.MetaFile, Line=Record[-1])
                    self._CustomMakefile[TokenList[0]] = TokenList[1]
            else:
                # anything else is kept verbatim as a user-defined define
                if self._Defs == None:
                    self._Defs = sdict()
                self._Defs[Name] = Value

        #
        # Retrieve information in sections specific to Edk.x modules
        #
        if self.AutoGenVersion >= 0x00010005:
            # EDK II module: validate MODULE_TYPE / spec version combinations
            if not self._ModuleType:
                EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE,
                                "MODULE_TYPE is not given", File=self.MetaFile)
            if self._ModuleType not in SUP_MODULE_LIST:
                RecordList = self._RawData[MODEL_META_DATA_HEADER, self._Arch, self._Platform]
                for Record in RecordList:
                    Name = Record[1]
                    if Name == "MODULE_TYPE":
                        LineNo = Record[6]
                        break
                # NOTE(review): if no MODULE_TYPE record exists, LineNo is
                # unbound here — verify whether that path can be reached.
                EdkLogger.error("build", FORMAT_NOT_SUPPORTED,
                                "MODULE_TYPE %s is not supported for EDK II, valid values are:\n %s" % (self._ModuleType,' '.join(l for l in SUP_MODULE_LIST)),
                                File=self.MetaFile, Line=LineNo)
            if (self._Specification == None) or (not 'PI_SPECIFICATION_VERSION' in self._Specification) or (int(self._Specification['PI_SPECIFICATION_VERSION'], 16) < 0x0001000A):
                if self._ModuleType == SUP_MODULE_SMM_CORE:
                    EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "SMM_CORE module type can't be used in the module with PI_SPECIFICATION_VERSION less than 0x0001000A", File=self.MetaFile)
            # derive the build type from PCI / HII defines, if present
            if self._Defs and 'PCI_DEVICE_ID' in self._Defs and 'PCI_VENDOR_ID' in self._Defs \
               and 'PCI_CLASS_CODE' in self._Defs:
                self._BuildType = 'UEFI_OPTIONROM'
            elif self._Defs and 'UEFI_HII_RESOURCE_SECTION' in self._Defs \
               and self._Defs['UEFI_HII_RESOURCE_SECTION'] == 'TRUE':
                self._BuildType = 'UEFI_HII'
            else:
                self._BuildType = self._ModuleType.upper()

            if self._DxsFile:
                File = PathClass(NormPath(self._DxsFile), self._ModuleDir, Arch=self._Arch)
                # check the file validation
                ErrorCode, ErrorInfo = File.Validate(".dxs", CaseSensitive=False)
                if ErrorCode != 0:
                    # NOTE(review): LineNo here is whatever an earlier loop
                    # left behind — confirm it points at the intended record.
                    EdkLogger.error('build', ErrorCode, ExtraData=ErrorInfo,
                                    File=self.MetaFile, Line=LineNo)
                # NOTE(review): self.Sources triggers _GetSourceFiles, which
                # initializes _Sources, so this == None test looks always
                # False — verify the intended semantics before changing it.
                if self.Sources == None:
                    self._Sources = []
                self._Sources.append(File)
        else:
            # Edk.x module: derive module type from COMPONENT_TYPE and
            # convert [nmake] settings to build options
            if not self._ComponentType:
                EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE,
                                "COMPONENT_TYPE is not given", File=self.MetaFile)
            self._BuildType = self._ComponentType.upper()
            if self._ComponentType in self._MODULE_TYPE_:
                self._ModuleType = self._MODULE_TYPE_[self._ComponentType]
            if self._ComponentType == 'LIBRARY':
                self._LibraryClass = [LibraryClassObject(self._BaseName, SUP_MODULE_LIST)]
            # make use some [nmake] section macros
            Macros = self._Macros
            Macros["EDK_SOURCE"] = GlobalData.gEcpSource
            Macros['PROCESSOR'] = self._Arch
            RecordList = self._RawData[MODEL_META_DATA_NMAKE, self._Arch, self._Platform]
            for Name,Value,Dummy,Arch,Platform,ID,LineNo in RecordList:
                Value = ReplaceMacro(Value, Macros, True)
                if Name == "IMAGE_ENTRY_POINT":
                    if self._ModuleEntryPointList == None:
                        self._ModuleEntryPointList = []
                    self._ModuleEntryPointList.append(Value)
                elif Name == "DPX_SOURCE":
                    File = PathClass(NormPath(Value), self._ModuleDir, Arch=self._Arch)
                    # check the file validation
                    ErrorCode, ErrorInfo = File.Validate(".dxs", CaseSensitive=False)
                    if ErrorCode != 0:
                        EdkLogger.error('build', ErrorCode, ExtraData=ErrorInfo,
                                        File=self.MetaFile, Line=LineNo)
                    # NOTE(review): same self.Sources == None question as in
                    # the EDK II branch above.
                    if self.Sources == None:
                        self._Sources = []
                    self._Sources.append(File)
                else:
                    # map legacy XXX_FLAGS names to tool-chain build options
                    ToolList = self._NMAKE_FLAG_PATTERN_.findall(Name)
                    if len(ToolList) == 0 or len(ToolList) != 1:
                        pass
#                        EdkLogger.warn("build", "Don't know how to do with macro [%s]" % Name,
#                                       File=self.MetaFile, Line=LineNo)
                    else:
                        if self._BuildOptions == None:
                            self._BuildOptions = sdict()

                        if ToolList[0] in self._TOOL_CODE_:
                            Tool = self._TOOL_CODE_[ToolList[0]]
                        else:
                            Tool = ToolList[0]
                        ToolChain = "*_*_*_%s_FLAGS" % Tool
                        ToolChainFamily = 'MSFT'    # Edk.x only support MSFT tool chain
                        #ignore not replaced macros in value
                        ValueList = GetSplitList(' ' + Value, '/D')
                        Dummy = ValueList[0]
                        for Index in range(1, len(ValueList)):
                            if ValueList[Index][-1] == '=' or ValueList[Index] == '':
                                continue
                            Dummy = Dummy + ' /D ' + ValueList[Index]
                        Value = Dummy.strip()
                        if (ToolChainFamily, ToolChain) not in self._BuildOptions:
                            self._BuildOptions[ToolChainFamily, ToolChain] = Value
                        else:
                            OptionString = self._BuildOptions[ToolChainFamily, ToolChain]
                            self._BuildOptions[ToolChainFamily, ToolChain] = OptionString + " " + Value
        # set _Header to non-None in order to avoid database re-querying
        self._Header_ = 'DUMMY'
## Retrieve file version
def _GetInfVersion(self):
if self._AutoGenVersion == None:
RecordList = self._RawData[MODEL_META_DATA_HEADER, self._Arch, self._Platform]
for Record in RecordList:
if Record[1] == TAB_INF_DEFINES_INF_VERSION:
self._AutoGenVersion = int(Record[2], 0)
break
if self._AutoGenVersion == None:
self._AutoGenVersion = 0x00010000
return self._AutoGenVersion
## Retrieve BASE_NAME
def _GetBaseName(self):
if self._BaseName == None:
if self._Header_ == None:
self._GetHeaderInfo()
if self._BaseName == None:
EdkLogger.error('build', ATTRIBUTE_NOT_AVAILABLE, "No BASE_NAME name", File=self.MetaFile)
return self._BaseName
## Retrieve DxsFile
def _GetDxsFile(self):
if self._DxsFile == None:
if self._Header_ == None:
self._GetHeaderInfo()
if self._DxsFile == None:
self._DxsFile = ''
return self._DxsFile
## Retrieve MODULE_TYPE
def _GetModuleType(self):
if self._ModuleType == None:
if self._Header_ == None:
self._GetHeaderInfo()
if self._ModuleType == None:
self._ModuleType = 'BASE'
if self._ModuleType not in SUP_MODULE_LIST:
self._ModuleType = "USER_DEFINED"
return self._ModuleType
## Retrieve COMPONENT_TYPE
def _GetComponentType(self):
if self._ComponentType == None:
if self._Header_ == None:
self._GetHeaderInfo()
if self._ComponentType == None:
self._ComponentType = 'USER_DEFINED'
return self._ComponentType
## Retrieve "BUILD_TYPE"
def _GetBuildType(self):
if self._BuildType == None:
if self._Header_ == None:
self._GetHeaderInfo()
if not self._BuildType:
self._BuildType = "BASE"
return self._BuildType
## Retrieve file guid
def _GetFileGuid(self):
if self._Guid == None:
if self._Header_ == None:
self._GetHeaderInfo()
if self._Guid == None:
self._Guid = '00000000-0000-0000-000000000000'
return self._Guid
## Retrieve module version
def _GetVersion(self):
if self._Version == None:
if self._Header_ == None:
self._GetHeaderInfo()
if self._Version == None:
self._Version = '0.0'
return self._Version
## Retrieve PCD_IS_DRIVER
def _GetPcdIsDriver(self):
if self._PcdIsDriver == None:
if self._Header_ == None:
self._GetHeaderInfo()
if self._PcdIsDriver == None:
self._PcdIsDriver = ''
return self._PcdIsDriver
## Retrieve SHADOW
def _GetShadow(self):
if self._Shadow == None:
if self._Header_ == None:
self._GetHeaderInfo()
if self._Shadow != None and self._Shadow.upper() == 'TRUE':
self._Shadow = True
else:
self._Shadow = False
return self._Shadow
## Retrieve CUSTOM_MAKEFILE
def _GetMakefile(self):
if self._CustomMakefile == None:
if self._Header_ == None:
self._GetHeaderInfo()
if self._CustomMakefile == None:
self._CustomMakefile = {}
return self._CustomMakefile
## Retrieve EFI_SPECIFICATION_VERSION
def _GetSpec(self):
if self._Specification == None:
if self._Header_ == None:
self._GetHeaderInfo()
if self._Specification == None:
self._Specification = {}
return self._Specification
## Retrieve LIBRARY_CLASS
def _GetLibraryClass(self):
if self._LibraryClass == None:
if self._Header_ == None:
self._GetHeaderInfo()
if self._LibraryClass == None:
self._LibraryClass = []
return self._LibraryClass
## Retrieve ENTRY_POINT
def _GetEntryPoint(self):
if self._ModuleEntryPointList == None:
if self._Header_ == None:
self._GetHeaderInfo()
if self._ModuleEntryPointList == None:
self._ModuleEntryPointList = []
return self._ModuleEntryPointList
## Retrieve UNLOAD_IMAGE
def _GetUnloadImage(self):
if self._ModuleUnloadImageList == None:
if self._Header_ == None:
self._GetHeaderInfo()
if self._ModuleUnloadImageList == None:
self._ModuleUnloadImageList = []
return self._ModuleUnloadImageList
## Retrieve CONSTRUCTOR
def _GetConstructor(self):
if self._ConstructorList == None:
if self._Header_ == None:
self._GetHeaderInfo()
if self._ConstructorList == None:
self._ConstructorList = []
return self._ConstructorList
## Retrieve DESTRUCTOR
def _GetDestructor(self):
if self._DestructorList == None:
if self._Header_ == None:
self._GetHeaderInfo()
if self._DestructorList == None:
self._DestructorList = []
return self._DestructorList
## Retrieve definies other than above ones
def _GetDefines(self):
if self._Defs == None:
if self._Header_ == None:
self._GetHeaderInfo()
if self._Defs == None:
self._Defs = sdict()
return self._Defs
    ## Retrieve binary files
    #
    #  Builds the list of PathClass objects for all [Binaries] records,
    #  validating each path; cached after the first call.
    def _GetBinaryFiles(self):
        if self._Binaries == None:
            self._Binaries = []
            RecordList = self._RawData[MODEL_EFI_BINARY_FILE, self._Arch, self._Platform]
            Macros = self._Macros
            Macros["EDK_SOURCE"] = GlobalData.gEcpSource
            Macros['PROCESSOR'] = self._Arch
            for Record in RecordList:
                FileType = Record[0]
                LineNo = Record[-1]
                Target = 'COMMON'
                FeatureFlag = []
                # Record[2] optionally carries "<target>|<feature flag>"
                if Record[2]:
                    TokenList = GetSplitValueList(Record[2], TAB_VALUE_SPLIT)
                    if TokenList:
                        Target = TokenList[0]
                    if len(TokenList) > 1:
                        # NOTE(review): this assigns a slice of the raw record,
                        # not TokenList[1]; FeatureFlag is also never used
                        # afterwards — confirm the intended behavior.
                        FeatureFlag = Record[1:]
                File = PathClass(NormPath(Record[1], Macros), self._ModuleDir, '', FileType, True, self._Arch, '', Target)
                # check the file validation
                ErrorCode, ErrorInfo = File.Validate()
                if ErrorCode != 0:
                    EdkLogger.error('build', ErrorCode, ExtraData=ErrorInfo, File=self.MetaFile, Line=LineNo)
                self._Binaries.append(File)
        return self._Binaries
    ## Retrieve source files
    #
    #  Builds the list of PathClass objects for all [Sources] records.
    #  Edk.x modules (AutoGenVersion < 0x00010005) use the source-override
    #  path and case-insensitive validation; EDK II modules validate strictly.
    def _GetSourceFiles(self):
        if self._Sources == None:
            self._Sources = []
            RecordList = self._RawData[MODEL_EFI_SOURCE_FILE, self._Arch, self._Platform]
            Macros = self._Macros
            for Record in RecordList:
                LineNo = Record[-1]
                ToolChainFamily = Record[1]
                TagName = Record[2]
                ToolCode = Record[3]
                FeatureFlag = Record[4]
                if self.AutoGenVersion < 0x00010005:
                    Macros["EDK_SOURCE"] = GlobalData.gEcpSource
                    Macros['PROCESSOR'] = self._Arch
                    # old module source files (Edk)
                    File = PathClass(NormPath(Record[0], Macros), self._ModuleDir, self._SourceOverridePath,
                                     '', False, self._Arch, ToolChainFamily, '', TagName, ToolCode)
                    # check the file validation
                    ErrorCode, ErrorInfo = File.Validate(CaseSensitive=False)
                    if ErrorCode != 0:
                        # Missing headers are tolerated for Edk.x (warning
                        # only); anything else is a hard error.
                        if File.Ext.lower() == '.h':
                            EdkLogger.warn('build', 'Include file not found', ExtraData=ErrorInfo,
                                           File=self.MetaFile, Line=LineNo)
                            continue
                        else:
                            EdkLogger.error('build', ErrorCode, ExtraData=File, File=self.MetaFile, Line=LineNo)
                else:
                    File = PathClass(NormPath(Record[0], Macros), self._ModuleDir, '',
                                     '', False, self._Arch, ToolChainFamily, '', TagName, ToolCode)
                    # check the file validation
                    ErrorCode, ErrorInfo = File.Validate()
                    if ErrorCode != 0:
                        EdkLogger.error('build', ErrorCode, ExtraData=ErrorInfo, File=self.MetaFile, Line=LineNo)

                self._Sources.append(File)
        return self._Sources
## Retrieve library classes employed by this module
def _GetLibraryClassUses(self):
if self._LibraryClasses == None:
self._LibraryClasses = sdict()
RecordList = self._RawData[MODEL_EFI_LIBRARY_CLASS, self._Arch, self._Platform]
for Record in RecordList:
Lib = Record[0]
Instance = Record[1]
if Instance:
Instance = NormPath(Instance, self._Macros)
self._LibraryClasses[Lib] = Instance
return self._LibraryClasses
## Retrieve library names (for Edk.x style of modules)
def _GetLibraryNames(self):
if self._Libraries == None:
self._Libraries = []
RecordList = self._RawData[MODEL_EFI_LIBRARY_INSTANCE, self._Arch, self._Platform]
for Record in RecordList:
LibraryName = ReplaceMacro(Record[0], self._Macros, False)
# in case of name with '.lib' extension, which is unusual in Edk.x inf
LibraryName = os.path.splitext(LibraryName)[0]
if LibraryName not in self._Libraries:
self._Libraries.append(LibraryName)
return self._Libraries
## Retrieve protocols consumed/produced by this module
def _GetProtocols(self):
if self._Protocols == None:
self._Protocols = sdict()
RecordList = self._RawData[MODEL_EFI_PROTOCOL, self._Arch, self._Platform]
for Record in RecordList:
CName = Record[0]
Value = ProtocolValue(CName, self.Packages)
if Value == None:
PackageList = "\n\t".join([str(P) for P in self.Packages])
EdkLogger.error('build', RESOURCE_NOT_AVAILABLE,
"Value of Protocol [%s] is not found under [Protocols] section in" % CName,
ExtraData=PackageList, File=self.MetaFile, Line=Record[-1])
self._Protocols[CName] = Value
return self._Protocols
    ## Retrieve PPIs consumed/produced by this module
    #
    #  Maps each PPI CName in [Ppis] to its GUID value, resolved from the
    #  packages this module depends on; errors out on unknown PPIs.
    def _GetPpis(self):
        if self._Ppis == None:
            self._Ppis = sdict()
            RecordList = self._RawData[MODEL_EFI_PPI, self._Arch, self._Platform]
            for Record in RecordList:
                CName = Record[0]
                Value = PpiValue(CName, self.Packages)
                if Value == None:
                    # List the searched packages in the error for debugging.
                    PackageList = "\n\t".join([str(P) for P in self.Packages])
                    EdkLogger.error('build', RESOURCE_NOT_AVAILABLE,
                                    "Value of PPI [%s] is not found under [Ppis] section in " % CName,
                                    ExtraData=PackageList, File=self.MetaFile, Line=Record[-1])
                self._Ppis[CName] = Value
        return self._Ppis
## Retrieve GUIDs consumed/produced by this module
def _GetGuids(self):
    """Return an ordered dict mapping GUID C name -> GUID value.

    Aborts via EdkLogger.error when a GUID is not declared by any
    dependent package.
    """
    if self._Guids is None:
        self._Guids = sdict()
        for GuidRecord in self._RawData[MODEL_EFI_GUID, self._Arch, self._Platform]:
            Name = GuidRecord[0]
            GuidVal = GuidValue(Name, self.Packages)
            if GuidVal is None:
                DeclaringPackages = "\n\t".join([str(P) for P in self.Packages])
                EdkLogger.error('build', RESOURCE_NOT_AVAILABLE,
                                "Value of Guid [%s] is not found under [Guids] section in" % Name,
                                ExtraData=DeclaringPackages, File=self.MetaFile, Line=GuidRecord[-1])
            self._Guids[Name] = GuidVal
    return self._Guids
## Retrieve include paths necessary for this module (for Edk.x style of modules)
def _GetIncludes(self):
    """Return the list of include directories for this Edk.x module.

    The result is cached in self._Includes. Each raw [Includes] record is
    macro-expanded, anchored (relative paths against the module dir,
    absolute-style ones against the workspace) and kept only if RealPath
    resolves it to something truthy. Records mentioning EDK_SOURCE are
    resolved twice — once against the ECP source tree and once against the
    EDK source tree — so the compiler can pick the correct header.
    """
    if self._Includes == None:
        self._Includes = []
        if self._SourceOverridePath:
            self._Includes.append(self._SourceOverridePath)

        # PROCESSOR macro defaults to the current arch when not set globally.
        # NOTE: Macros aliases self._Macros, so these updates persist.
        Macros = self._Macros
        if 'PROCESSOR' in GlobalData.gEdkGlobal:
            Macros['PROCESSOR'] = GlobalData.gEdkGlobal['PROCESSOR']
        else:
            Macros['PROCESSOR'] = self._Arch

        def _AppendIncludeDir(RawPath):
            # Expand macros, anchor the path, and keep it only if it exists.
            # Assumes NormPath never returns an empty string (preserved from
            # the original code, which indexed Dir[0] unconditionally).
            Dir = NormPath(RawPath, Macros)
            if Dir[0] == '.':
                Dir = os.path.join(self._ModuleDir, Dir)
            else:
                Dir = os.path.join(GlobalData.gWorkspace, Dir)
            Dir = RealPath(os.path.normpath(Dir))
            if Dir:
                self._Includes.append(Dir)

        for Record in self._RawData[MODEL_EFI_INCLUDE, self._Arch, self._Platform]:
            if 'EDK_SOURCE' in Record[0]:
                # resolve against the EdkCompatibilityPkg source tree first
                Macros['EDK_SOURCE'] = GlobalData.gEcpSource
                _AppendIncludeDir(Record[0])
                # TRICK: also resolve against the Edk source tree so the
                # compiler can choose the correct header file
                Macros['EDK_SOURCE'] = GlobalData.gEdkSource
                _AppendIncludeDir(Record[0])
            else:
                _AppendIncludeDir(Record[0])
    return self._Includes
## Retrieve packages this module depends on
def _GetPackages(self):
    """Return the list of package (DEC) build objects this module depends on."""
    if self._Packages is None:
        self._Packages = []
        Macros = self._Macros
        Macros['EDK_SOURCE'] = GlobalData.gEcpSource
        for PkgRecord in self._RawData[MODEL_META_DATA_PACKAGE, self._Arch, self._Platform]:
            LineNo = PkgRecord[-1]
            PkgFile = PathClass(NormPath(PkgRecord[0], Macros), GlobalData.gWorkspace, Arch=self._Arch)
            # make sure the package file exists and is a valid .dec
            ErrorCode, ErrorInfo = PkgFile.Validate('.dec')
            if ErrorCode != 0:
                EdkLogger.error('build', ErrorCode, ExtraData=ErrorInfo, File=self.MetaFile, Line=LineNo)
            # parse this package now; its protocol/ppi/guid values are needed later
            self._Packages.append(self._Bdb[PkgFile, self._Arch, self._Target, self._Toolchain])
    return self._Packages
## Retrieve PCDs used in this module
def _GetPcds(self):
    """Return the merged dict of PCDs of every type used by this module."""
    if self._Pcds is None:
        self._Pcds = sdict()
        # merge in a fixed order so later types can override earlier entries
        for PcdModel in (MODEL_PCD_FIXED_AT_BUILD,
                         MODEL_PCD_PATCHABLE_IN_MODULE,
                         MODEL_PCD_FEATURE_FLAG,
                         MODEL_PCD_DYNAMIC,
                         MODEL_PCD_DYNAMIC_EX):
            self._Pcds.update(self._GetPcd(PcdModel))
    return self._Pcds
## Retrieve build options specific to this module
def _GetBuildOptions(self):
    """Return {(tool chain family, tool chain): option string} for this module.

    Options recorded for the same (family, tool) key are concatenated with a
    single space, preserving record order.
    """
    if self._BuildOptions is None:
        self._BuildOptions = sdict()
        for OptionRecord in self._RawData[MODEL_META_DATA_BUILD_OPTION, self._Arch, self._Platform]:
            Family = OptionRecord[0]
            Tool = OptionRecord[1]
            Flags = OptionRecord[2]
            if (Family, Tool) in self._BuildOptions:
                # concatenate the option string if they're for the same tool
                self._BuildOptions[Family, Tool] = self._BuildOptions[Family, Tool] + " " + Flags
            else:
                self._BuildOptions[Family, Tool] = Flags
    return self._BuildOptions
## Retrieve dependency expression
def _GetDepex(self):
    """Return the parsed [Depex] tokens as a tdict keyed by (arch, module type).

    Tokens are either supported depex opcodes (kept verbatim), module .inf
    paths (replaced by the module's GUID) or protocol/PPI/GUID C names
    (replaced by their GUID value). The result is cached in self._Depex.
    """
    if self._Depex == None:
        self._Depex = tdict(False, 2)
        RecordList = self._RawData[MODEL_EFI_DEPEX, self._Arch]

        # If the module has only Binaries and no Sources, then ignore [Depex]
        if self.Sources == None or self.Sources == []:
            if self.Binaries != None and self.Binaries != []:
                return self._Depex

        # PEIM and DXE drivers must have a valid [Depex] section
        if len(self.LibraryClass) == 0 and len(RecordList) == 0:
            if self.ModuleType == 'DXE_DRIVER' or self.ModuleType == 'PEIM' or self.ModuleType == 'DXE_SMM_DRIVER' or \
               self.ModuleType == 'DXE_SAL_DRIVER' or self.ModuleType == 'DXE_RUNTIME_DRIVER':
                EdkLogger.error('build', RESOURCE_NOT_AVAILABLE, "No [Depex] section or no valid expression in [Depex] section for [%s] module" \
                                % self.ModuleType, File=self.MetaFile)

        # accumulate token lists per (arch, module type) before publishing
        Depex = sdict()
        for Record in RecordList:
            DepexStr = ReplaceMacro(Record[0], self._Macros, False)
            Arch = Record[3]
            ModuleType = Record[4]
            TokenList = DepexStr.split()
            if (Arch, ModuleType) not in Depex:
                Depex[Arch, ModuleType] = []
            DepexList = Depex[Arch, ModuleType]
            for Token in TokenList:
                if Token in DEPEX_SUPPORTED_OPCODE:
                    # depex opcode such as AND/OR/NOT: keep as-is
                    DepexList.append(Token)
                elif Token.endswith(".inf"):    # module file name
                    ModuleFile = os.path.normpath(Token)
                    Module = self.BuildDatabase[ModuleFile]
                    if Module == None:
                        # NOTE(review): EdkLogger.error presumably aborts;
                        # otherwise Module.Guid below would raise — confirm
                        EdkLogger.error('build', RESOURCE_NOT_AVAILABLE, "Module is not found in active platform",
                                        ExtraData=Token, File=self.MetaFile, Line=Record[-1])
                    DepexList.append(Module.Guid)
                else:
                    # get the GUID value now: try protocol, then PPI, then GUID
                    Value = ProtocolValue(Token, self.Packages)
                    if Value == None:
                        Value = PpiValue(Token, self.Packages)
                    if Value == None:
                        Value = GuidValue(Token, self.Packages)
                    if Value == None:
                        PackageList = "\n\t".join([str(P) for P in self.Packages])
                        EdkLogger.error('build', RESOURCE_NOT_AVAILABLE,
                                        "Value of [%s] is not found in" % Token,
                                        ExtraData=PackageList, File=self.MetaFile, Line=Record[-1])
                    DepexList.append(Value)
        # publish the accumulated lists into the cached tdict
        for Arch, ModuleType in Depex:
            self._Depex[Arch, ModuleType] = Depex[Arch, ModuleType]
    return self._Depex
## Retrieve dependency expression text
def _GetDepexExpression(self):
    """Return the raw [Depex] expression text as a tdict keyed by
    (arch, module type); tokens are joined with single spaces and the
    string ends with a trailing space."""
    if self._DepexExpression is None:
        self._DepexExpression = tdict(False, 2)
        Collected = sdict()
        for DepexRecord in self._RawData[MODEL_EFI_DEPEX, self._Arch]:
            ExpressionText = ReplaceMacro(DepexRecord[0], self._Macros, False)
            Key = (DepexRecord[3], DepexRecord[4])   # (arch, module type)
            if Key not in Collected:
                Collected[Key] = ''
            for Token in ExpressionText.split():
                Collected[Key] = Collected[Key] + Token.strip() + ' '
        for Key in Collected:
            self._DepexExpression[Key] = Collected[Key]
    return self._DepexExpression
## Retrieve PCD for given type
def _GetPcd(self, Type):
    """Return {(PcdCName, TokenSpaceGuid): PcdClassObject} for one PCD model Type.

    First pass: collect all (name, token-space) pairs and resolve each token
    space GUID (caching it in self.Guids, aborting if undeclared). Second
    pass: build a PcdClassObject per PCD and fill in type/token/datum info
    from the first dependent package that declares it; abort if none does.
    """
    Pcds = sdict()
    PcdDict = tdict(True, 4)
    PcdList = []
    RecordList = self._RawData[Type, self._Arch, self._Platform]
    for TokenSpaceGuid, PcdCName, Setting, Arch, Platform, Dummy1, LineNo in RecordList:
        PcdDict[Arch, Platform, PcdCName, TokenSpaceGuid] = (Setting, LineNo)
        PcdList.append((PcdCName, TokenSpaceGuid))
        # get the guid value
        if TokenSpaceGuid not in self.Guids:
            Value = GuidValue(TokenSpaceGuid, self.Packages)
            if Value == None:
                PackageList = "\n\t".join([str(P) for P in self.Packages])
                EdkLogger.error('build', RESOURCE_NOT_AVAILABLE,
                                "Value of Guid [%s] is not found under [Guids] section in" % TokenSpaceGuid,
                                ExtraData=PackageList, File=self.MetaFile, Line=LineNo)
            self.Guids[TokenSpaceGuid] = Value

    # resolve PCD type, value, datum info, etc. by getting its definition from package
    for PcdCName, TokenSpaceGuid in PcdList:
        Setting, LineNo = PcdDict[self._Arch, self.Platform, PcdCName, TokenSpaceGuid]
        if Setting == None:
            continue
        ValueList = AnalyzePcdData(Setting)
        DefaultValue = ValueList[0]
        # start with a mostly-empty PCD object; package info fills it below
        Pcd = PcdClassObject(
                PcdCName,
                TokenSpaceGuid,
                '',
                '',
                DefaultValue,
                '',
                '',
                {},
                False,
                self.Guids[TokenSpaceGuid]
                )

        # get necessary info from package declaring this PCD
        for Package in self.Packages:
            #
            # 'dynamic' in INF means its type is determined by platform;
            # if platform doesn't give its type, use 'lowest' one in the
            # following order, if any
            #
            #   "FixedAtBuild", "PatchableInModule", "FeatureFlag", "Dynamic", "DynamicEx"
            #
            PcdType = self._PCD_TYPE_STRING_[Type]
            if Type == MODEL_PCD_DYNAMIC:
                Pcd.Pending = True
                for T in ["FixedAtBuild", "PatchableInModule", "FeatureFlag", "Dynamic", "DynamicEx"]:
                    if (PcdCName, TokenSpaceGuid, T) in Package.Pcds:
                        PcdType = T
                        break
            else:
                Pcd.Pending = False

            if (PcdCName, TokenSpaceGuid, PcdType) in Package.Pcds:
                PcdInPackage = Package.Pcds[PcdCName, TokenSpaceGuid, PcdType]
                Pcd.Type = PcdType
                Pcd.TokenValue = PcdInPackage.TokenValue

                #
                # Check whether the token value exist or not.
                #
                if Pcd.TokenValue == None or Pcd.TokenValue == "":
                    EdkLogger.error(
                            'build',
                            FORMAT_INVALID,
                            "No TokenValue for PCD [%s.%s] in [%s]!" % (TokenSpaceGuid, PcdCName, str(Package)),
                            File =self.MetaFile, Line=LineNo,
                            ExtraData=None
                            )
                #
                # Check hexadecimal token value length and format.
                #
                ReIsValidPcdTokenValue = re.compile(r"^[0][x|X][0]*[0-9a-fA-F]{1,8}$", re.DOTALL)
                if Pcd.TokenValue.startswith("0x") or Pcd.TokenValue.startswith("0X"):
                    if ReIsValidPcdTokenValue.match(Pcd.TokenValue) == None:
                        EdkLogger.error(
                                'build',
                                FORMAT_INVALID,
                                "The format of TokenValue [%s] of PCD [%s.%s] in [%s] is invalid:" % (Pcd.TokenValue, TokenSpaceGuid, PcdCName, str(Package)),
                                File =self.MetaFile, Line=LineNo,
                                ExtraData=None
                                )
                #
                # Check decimal token value length and format.
                #
                else:
                    try:
                        TokenValueInt = int (Pcd.TokenValue, 10)
                        if (TokenValueInt < 0 or TokenValueInt > 4294967295):
                            EdkLogger.error(
                                    'build',
                                    FORMAT_INVALID,
                                    "The format of TokenValue [%s] of PCD [%s.%s] in [%s] is invalid, as a decimal it should between: 0 - 4294967295!"% (Pcd.TokenValue, TokenSpaceGuid, PcdCName, str(Package)),
                                    File =self.MetaFile, Line=LineNo,
                                    ExtraData=None
                                    )
                    except:
                        EdkLogger.error(
                                'build',
                                FORMAT_INVALID,
                                "The format of TokenValue [%s] of PCD [%s.%s] in [%s] is invalid, it should be hexadecimal or decimal!"% (Pcd.TokenValue, TokenSpaceGuid, PcdCName, str(Package)),
                                File =self.MetaFile, Line=LineNo,
                                ExtraData=None
                                )

                Pcd.DatumType = PcdInPackage.DatumType
                Pcd.MaxDatumSize = PcdInPackage.MaxDatumSize
                Pcd.InfDefaultValue = Pcd.DefaultValue
                if Pcd.DefaultValue in [None, '']:
                    Pcd.DefaultValue = PcdInPackage.DefaultValue
                break
        else:
            # for/else: no dependent package declares this PCD
            EdkLogger.error(
                    'build',
                    FORMAT_INVALID,
                    "PCD [%s.%s] in [%s] is not found in dependent packages:" % (TokenSpaceGuid, PcdCName, self.MetaFile),
                    File =self.MetaFile, Line=LineNo,
                    ExtraData="\t%s" % '\n\t'.join([str(P) for P in self.Packages])
                    )
        Pcds[PcdCName, TokenSpaceGuid] = Pcd
    return Pcds
# Property declarations: expose the lazily-computed _GetXxx (and, where
# present, _SetXxx) accessors above as plain attributes of the build-data
# object, so callers read e.g. self.Protocols instead of calling getters.
_Macros = property(_GetMacros)
Arch = property(_GetArch, _SetArch)
Platform = property(_GetPlatform, _SetPlatform)
AutoGenVersion = property(_GetInfVersion)
BaseName = property(_GetBaseName)
ModuleType = property(_GetModuleType)
ComponentType = property(_GetComponentType)
BuildType = property(_GetBuildType)
Guid = property(_GetFileGuid)
Version = property(_GetVersion)
PcdIsDriver = property(_GetPcdIsDriver)
Shadow = property(_GetShadow)
CustomMakefile = property(_GetMakefile)
Specification = property(_GetSpec)
LibraryClass = property(_GetLibraryClass)
ModuleEntryPointList = property(_GetEntryPoint)
ModuleUnloadImageList = property(_GetUnloadImage)
ConstructorList = property(_GetConstructor)
DestructorList = property(_GetDestructor)
Defines = property(_GetDefines)
DxsFile = property(_GetDxsFile)
Binaries = property(_GetBinaryFiles)
Sources = property(_GetSourceFiles)
LibraryClasses = property(_GetLibraryClassUses)
Libraries = property(_GetLibraryNames)
Protocols = property(_GetProtocols)
Ppis = property(_GetPpis)
Guids = property(_GetGuids)
Includes = property(_GetIncludes)
Packages = property(_GetPackages)
Pcds = property(_GetPcds)
BuildOptions = property(_GetBuildOptions)
Depex = property(_GetDepex)
DepexExpression = property(_GetDepexExpression)
## Database
#
# This class defined the build database for all modules, packages and platform.
# It will call corresponding parser for the given file if it cannot find it in
# the database.
#
# @param DbPath Path of database file
# @param GlobalMacros Global macros used for replacement during file parsing
# @prarm RenewDb=False Create new database file if it's already there
#
class WorkspaceDatabase(object):
    """SQLite-backed build database for all modules, packages and platforms.

    Wraps an sqlite3 connection plus two factory objects: BuildObject, which
    parses meta files on demand and caches the resulting build-data objects,
    and TransformObject, a placeholder for file format conversion.
    """

    # default database file path
    _DB_PATH_ = "Conf/.cache/build.db"

    #
    # internal class used for call corresponding file parser and caching the result
    # to avoid unnecessary re-parsing
    #
    class BuildObjectFactory(object):
        # file extension -> meta-file model type
        _FILE_TYPE_ = {
            ".inf"  : MODEL_FILE_INF,
            ".dec"  : MODEL_FILE_DEC,
            ".dsc"  : MODEL_FILE_DSC,
        }

        # file parser
        _FILE_PARSER_ = {
            MODEL_FILE_INF  :   InfParser,
            MODEL_FILE_DEC  :   DecParser,
            MODEL_FILE_DSC  :   DscParser,
        }

        # convert to xxxBuildData object
        _GENERATOR_ = {
            MODEL_FILE_INF  :   InfBuildData,
            MODEL_FILE_DEC  :   DecBuildData,
            MODEL_FILE_DSC  :   DscBuildData,
        }

        # NOTE: class-level cache, shared by every factory instance
        _CACHE_ = {}    # (FilePath, Arch) : <object>

        # constructor
        def __init__(self, WorkspaceDb):
            self.WorkspaceDb = WorkspaceDb

        # key = (FilePath, Arch=None)
        def __contains__(self, Key):
            FilePath = Key[0]
            if len(Key) > 1:
                Arch = Key[1]
            else:
                Arch = None
            return (FilePath, Arch) in self._CACHE_

        # key = (FilePath, Arch=None, Target=None, Toochain=None)
        def __getitem__(self, Key):
            """Parse (or fetch from cache) the build object for a meta file.

            Returns None for unknown file types; otherwise an
            Inf/Dec/DscBuildData instance, cached per
            (FilePath, Arch, Target, Toolchain).
            """
            FilePath = Key[0]
            KeyLength = len(Key)
            if KeyLength > 1:
                Arch = Key[1]
            else:
                Arch = None
            if KeyLength > 2:
                Target = Key[2]
            else:
                Target = None
            if KeyLength > 3:
                Toolchain = Key[3]
            else:
                Toolchain = None

            # if it's generated before, just return the cached one
            Key = (FilePath, Arch, Target, Toolchain)
            if Key in self._CACHE_:
                return self._CACHE_[Key]

            # check file type
            Ext = FilePath.Type
            if Ext not in self._FILE_TYPE_:
                return None
            FileType = self._FILE_TYPE_[Ext]
            if FileType not in self._GENERATOR_:
                return None

            # get the parser ready for this file
            MetaFile = self._FILE_PARSER_[FileType](
                            FilePath,
                            FileType,
                            MetaFileStorage(self.WorkspaceDb.Cur, FilePath, FileType)
                            )
            # alwasy do post-process, in case of macros change
            MetaFile.DoPostProcess()
            # object the build is based on
            BuildObject = self._GENERATOR_[FileType](
                                FilePath,
                                MetaFile,
                                self,
                                Arch,
                                Target,
                                Toolchain
                                )
            self._CACHE_[Key] = BuildObject
            return BuildObject

    # placeholder for file format conversion
    class TransformObjectFactory:
        def __init__(self, WorkspaceDb):
            self.WorkspaceDb = WorkspaceDb

        # key = FilePath, Arch
        def __getitem__(self, Key):
            pass

    ## Constructor of WorkspaceDatabase
    #
    # @param DbPath             Path of database file
    # @param RenewDb=False      Create new database file if it's already there
    #
    def __init__(self, DbPath, RenewDb=False):
        self._DbClosedFlag = False
        if not DbPath:
            DbPath = os.path.normpath(os.path.join(GlobalData.gWorkspace, self._DB_PATH_))

        # don't create necessary path for db in memory
        if DbPath != ':memory:':
            DbDir = os.path.split(DbPath)[0]
            if not os.path.exists(DbDir):
                os.makedirs(DbDir)

            # remove db file in case inconsistency between db and file in file system
            if self._CheckWhetherDbNeedRenew(RenewDb, DbPath):
                os.remove(DbPath)

        # create db with optimized parameters
        self.Conn = sqlite3.connect(DbPath, isolation_level='DEFERRED')
        self.Conn.execute("PRAGMA synchronous=OFF")
        self.Conn.execute("PRAGMA temp_store=MEMORY")
        self.Conn.execute("PRAGMA count_changes=OFF")
        self.Conn.execute("PRAGMA cache_size=8192")
        #self.Conn.execute("PRAGMA page_size=8192")

        # to avoid non-ascii character conversion issue
        self.Conn.text_factory = str

        self.Cur = self.Conn.cursor()

        # create table for internal uses
        self.TblDataModel = TableDataModel(self.Cur)
        self.TblFile = TableFile(self.Cur)
        self.Platform = None

        # conversion object for build or file format conversion purpose
        self.BuildObject = WorkspaceDatabase.BuildObjectFactory(self)
        self.TransformObject = WorkspaceDatabase.TransformObjectFactory(self)

    ## Check whether workspace database need to be renew.
    #  The renew reason maybe:
    #  1) If user force to renew;
    #  2) If user do not force renew, and
    #     a) If the time of last modified python source is newer than database file;
    #     b) If the time of last modified frozen executable file is newer than database file;
    #
    #  @param force     User force renew database
    #  @param DbPath    The absolute path of workspace database file
    #
    #  @return Bool value for whether need renew workspace databse
    #
    def _CheckWhetherDbNeedRenew (self, force, DbPath):
        # if database does not exist, we need do nothing
        if not os.path.exists(DbPath): return False

        # if user force to renew database, then not check whether database is out of date
        if force: return True

        #
        # Check the time of last modified source file or build.exe
        # if is newer than time of database, then database need to be re-created.
        #
        timeOfToolModified = 0
        if hasattr(sys, "frozen"):
            exePath = os.path.abspath(sys.executable)
            timeOfToolModified = os.stat(exePath).st_mtime
        else:
            curPath = os.path.dirname(__file__) # curPath is the path of WorkspaceDatabase.py
            rootPath = os.path.split(curPath)[0] # rootPath is root path of python source, such as /BaseTools/Source/Python
            if rootPath == "" or rootPath == None:
                EdkLogger.verbose("\nFail to find the root path of build.exe or python sources, so can not \
determine whether database file is out of date!\n")

            # walk the root path of source or build's binary to get the time last modified.
            for root, dirs, files in os.walk (rootPath):
                for dir in dirs:
                    # bypass source control folder
                    if dir.lower() in [".svn", "_svn", "cvs"]:
                        dirs.remove(dir)

                for file in files:
                    ext = os.path.splitext(file)[1]
                    if ext.lower() == ".py": # only check .py files
                        fd = os.stat(os.path.join(root, file))
                        if timeOfToolModified < fd.st_mtime:
                            timeOfToolModified = fd.st_mtime
        if timeOfToolModified > os.stat(DbPath).st_mtime:
            EdkLogger.verbose("\nWorkspace database is out of data!")
            return True

        return False

    ## Initialize build database
    def InitDatabase(self):
        EdkLogger.verbose("\nInitialize build database started ...")

        #
        # Create new tables
        #
        self.TblDataModel.Create(False)
        self.TblFile.Create(False)

        #
        # Initialize table DataModel
        #
        self.TblDataModel.InitTable()
        EdkLogger.verbose("Initialize build database ... DONE!")

    ## Query a table
    #
    # @param Table:  The instance of the table to be queried
    #
    def QueryTable(self, Table):
        Table.Query()

    def __del__(self):
        self.Close()

    ## Close entire database
    #
    # Commit all first
    # Close the connection and cursor
    #
    def Close(self):
        if not self._DbClosedFlag:
            self.Conn.commit()
            self.Cur.close()
            self.Conn.close()
            self._DbClosedFlag = True

    ## Summarize all packages in the database
    def GetPackageList(self, Platform, Arch, TargetName, ToolChainTag):
        self.Platform = Platform
        PackageList =[]
        Pa = self.BuildObject[self.Platform, 'COMMON']
        #
        # Get Package related to Modules
        #
        for Module in Pa.Modules:
            ModuleObj = self.BuildObject[Module, Arch, TargetName, ToolChainTag]
            for Package in ModuleObj.Packages:
                if Package not in PackageList:
                    PackageList.append(Package)
        #
        # Get Packages related to Libraries
        #
        for Lib in Pa.LibraryInstances:
            LibObj = self.BuildObject[Lib, Arch, TargetName, ToolChainTag]
            for Package in LibObj.Packages:
                if Package not in PackageList:
                    PackageList.append(Package)

        return PackageList

    ## Summarize all platforms in the database
    def _GetPlatformList(self):
        PlatformList = []
        for PlatformFile in self.TblFile.GetFileList(MODEL_FILE_DSC):
            try:
                Platform = self.BuildObject[PathClass(PlatformFile), 'COMMON']
            except:
                Platform = None
            if Platform != None:
                PlatformList.append(Platform)
        return PlatformList

    PlatformList = property(_GetPlatformList)
##
#
# This acts like the main() function for the script, unless it is 'import'ed into another
# script.
#
if __name__ == '__main__':
    pass  # no standalone behavior: this module is used only as a library
| gpl-2.0 |
eight-pack-abdominals/androguard | androguard/decompiler/dad/node.py | 31 | 4898 | # This file is part of Androguard.
#
# Copyright (c) 2012 Geoffroy Gueguen <geoffroy.gueguen@gmail.com>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class MakeProperties(type):
    """Metaclass turning paired '_get_is_*'/'_set_is_*' class attributes
    into mutually-exclusive boolean properties.

    For every class attribute named '_get_X' or '_set_X', the attribute is
    deleted and a property named X[1:] (i.e. 'is_*' for '_is_*') is created.
    The generated setter assigns the value to the matching instance slot and
    forces every sibling slot to False, so at most one flag is truthy at a
    time. __call__ initialises every slot to False on instantiation.
    Written for Python 2 (classes opt in via '__metaclass__').
    """
    def __init__(cls, name, bases, dct):
        def _wrap_set(names, name):
            # setter: writing any value sets this field and clears siblings
            def fun(self, value):
                for field in names:
                    self.__dict__[field] = (name == field) and value
            return fun

        def _wrap_get(name):
            # getter: read the backing slot from the instance dict
            def fun(self):
                return self.__dict__[name]
            return fun

        super(MakeProperties, cls).__init__(name, bases, dct)
        attrs = []
        prefixes = ('_get_', '_set_')
        # NOTE: in Python 2, dct.keys() is a list copy, so deleting class
        # attributes while scanning is safe. A name declared with both
        # prefixes is appended twice; the second setattr below just
        # recreates the same property, which is harmless.
        for key in dct.keys():
            for prefix in prefixes:
                if key.startswith(prefix):
                    attrs.append(key[4:])  # keep leading underscore: '_is_x'
                    delattr(cls, key)
        for attr in attrs:
            setattr(cls, attr[1:],
                    property(_wrap_get(attr), _wrap_set(attrs, attr)))
        # remember the slot names so __call__ can initialise instances
        cls._attrs = attrs

    def __call__(cls, *args, **kwds):
        obj = super(MakeProperties, cls).__call__(*args, **kwds)
        # every flag starts out False
        for attr in cls._attrs:
            obj.__dict__[attr] = False
        return obj
class LoopType(object):
    """Mutually-exclusive flags describing the kind of a loop node
    (pretest, posttest or endless)."""
    __metaclass__ = MakeProperties

    # Declarations consumed by the MakeProperties metaclass: each
    # '_get_'/'_set_' pair is replaced by an 'is_*' property.
    _set_is_pretest = None
    _set_is_posttest = None
    _set_is_endless = None
    _get_is_pretest = None
    _get_is_posttest = None
    _get_is_endless = None

    def copy(self):
        """Return a new LoopType carrying the same flag values."""
        duplicate = LoopType()
        for attr_name, attr_value in self.__dict__.iteritems():
            setattr(duplicate, attr_name, attr_value)
        return duplicate
class NodeType(object):
    """Mutually-exclusive flags describing the kind of a graph node
    (condition, switch, statement, return or throw)."""
    __metaclass__ = MakeProperties

    # Declarations consumed by the MakeProperties metaclass: each
    # '_get_'/'_set_' pair is replaced by an 'is_*' property.
    _set_is_cond = None
    _set_is_switch = None
    _set_is_stmt = None
    _set_is_return = None
    _set_is_throw = None
    _get_is_cond = None
    _get_is_switch = None
    _get_is_stmt = None
    _get_is_return = None
    _get_is_throw = None

    def copy(self):
        """Return a new NodeType carrying the same flag values."""
        duplicate = NodeType()
        for attr_name, attr_value in self.__dict__.iteritems():
            setattr(duplicate, attr_name, attr_value)
        return duplicate
class Node(object):
    """A node of the decompiler's control-flow graph.

    Carries structural analysis state: follow nodes per construct kind,
    loop/node type flags, interval membership and loop bookkeeping.
    """
    def __init__(self, name):
        self.name = name
        self.num = 0
        # follow node per high-level construct
        self.follow = {'if': None, 'loop': None, 'switch': None}
        self.looptype = LoopType()
        self.type = NodeType()
        self.in_catch = False
        self.interval = None
        self.startloop = False
        self.latch = None
        self.loop_nodes = []

    def copy_from(self, node):
        """Copy analysis state from *node* (flag holders and the follow
        dict are duplicated; latch/interval/loop_nodes are shared)."""
        self.num = node.num
        self.looptype = node.looptype.copy()
        self.interval = node.interval
        self.startloop = node.startloop
        self.type = node.type.copy()
        self.follow = node.follow.copy()
        self.latch = node.latch
        self.loop_nodes = node.loop_nodes
        self.in_catch = node.in_catch

    def update_attribute_with(self, n_map):
        """Remap latch, follow targets and loop_nodes through *n_map*
        (old node -> new node); unmapped nodes are kept as-is."""
        self.latch = n_map.get(self.latch, self.latch)
        for branch_kind, target in self.follow.iteritems():
            self.follow[branch_kind] = n_map.get(target, target)
        remapped = set(n_map.get(n, n) for n in self.loop_nodes)
        self.loop_nodes = list(remapped)

    def get_head(self):
        # a plain node is its own head
        return self

    def get_end(self):
        # a plain node is its own end
        return self

    def __repr__(self):
        return '%s' % self
class Interval(object):
    """A set of graph nodes grouped under a single header node.

    Members may themselves be Intervals, so membership tests recurse.
    Constructing or adding to an interval rewrites each member's
    'interval' attribute to point back here.
    """
    def __init__(self, head):
        self.name = 'Interval-%s' % head.name
        self.content = set([head])
        self.end = None
        self.head = head
        self.in_catch = head.in_catch
        head.interval = self

    def __contains__(self, item):
        # direct member?
        if item in self.content:
            return True
        # otherwise search recursively inside nested intervals
        for member in self.content:
            if isinstance(member, Interval) and item in member:
                return True
        return False

    def add_node(self, node):
        """Add *node* to this interval; return False if already present."""
        if node in self.content:
            return False
        self.content.add(node)
        node.interval = self
        return True

    def compute_end(self, graph):
        """Record as 'end' a member having a successor outside the interval."""
        for member in self.content:
            for successor in graph.sucs(member):
                if successor not in self.content:
                    self.end = member

    def get_end(self):
        return self.end.get_end()

    def get_head(self):
        return self.head.get_head()

    def __len__(self):
        return len(self.content)

    def __repr__(self):
        return '%s(%s)' % (self.name, self.content)
| apache-2.0 |
retomerz/intellij-community | python/helpers/py2only/docutils/utils/smartquotes.py | 86 | 33385 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# :Id: $Id: smartquotes.py 7716 2013-08-21 21:54:57Z milde $
# :Copyright: © 2010 Günter Milde,
# original `SmartyPants`_: © 2003 John Gruber
# smartypants.py: © 2004, 2007 Chad Miller
# :Maintainer: docutils-develop@lists.sourceforge.net
# :License: Released under the terms of the `2-Clause BSD license`_, in short:
#
# Copying and distribution of this file, with or without modification,
# are permitted in any medium without royalty provided the copyright
# notices and this notice are preserved.
# This file is offered as-is, without any warranty.
#
# .. _2-Clause BSD license: http://www.spdx.org/licenses/BSD-2-Clause
r"""
========================
SmartyPants for Docutils
========================
Synopsis
========
Smart-quotes for Docutils.
The original "SmartyPants" is a free web publishing plug-in for Movable Type,
Blosxom, and BBEdit that easily translates plain ASCII punctuation characters
into "smart" typographic punctuation characters.
`smartypants.py`, endeavours to be a functional port of
SmartyPants to Python, for use with Pyblosxom_.
`smartquotes.py` is an adaption of Smartypants to Docutils_. By using Unicode
characters instead of HTML entities for typographic quotes, it works for any
output format that supports Unicode.
Authors
=======
`John Gruber`_ did all of the hard work of writing this software in Perl for
`Movable Type`_ and almost all of this useful documentation. `Chad Miller`_
ported it to Python to use with Pyblosxom_.
Adapted to Docutils_ by Günter Milde
Additional Credits
==================
Portions of the SmartyPants original work are based on Brad Choate's nifty
MTRegex plug-in. `Brad Choate`_ also contributed a few bits of source code to
this plug-in. Brad Choate is a fine hacker indeed.
`Jeremy Hedley`_ and `Charles Wiltgen`_ deserve mention for exemplary beta
testing of the original SmartyPants.
`Rael Dornfest`_ ported SmartyPants to Blosxom.
.. _Brad Choate: http://bradchoate.com/
.. _Jeremy Hedley: http://antipixel.com/
.. _Charles Wiltgen: http://playbacktime.com/
.. _Rael Dornfest: http://raelity.org/
Copyright and License
=====================
SmartyPants_ license (3-Clause BSD license):
Copyright (c) 2003 John Gruber (http://daringfireball.net/)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name "SmartyPants" nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
This software is provided by the copyright holders and contributors
"as is" and any express or implied warranties, including, but not
limited to, the implied warranties of merchantability and fitness for
a particular purpose are disclaimed. In no event shall the copyright
owner or contributors be liable for any direct, indirect, incidental,
special, exemplary, or consequential damages (including, but not
limited to, procurement of substitute goods or services; loss of use,
data, or profits; or business interruption) however caused and on any
theory of liability, whether in contract, strict liability, or tort
(including negligence or otherwise) arising in any way out of the use
of this software, even if advised of the possibility of such damage.
smartypants.py license (2-Clause BSD license):
smartypants.py is a derivative work of SmartyPants.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
This software is provided by the copyright holders and contributors
"as is" and any express or implied warranties, including, but not
limited to, the implied warranties of merchantability and fitness for
a particular purpose are disclaimed. In no event shall the copyright
owner or contributors be liable for any direct, indirect, incidental,
special, exemplary, or consequential damages (including, but not
limited to, procurement of substitute goods or services; loss of use,
data, or profits; or business interruption) however caused and on any
theory of liability, whether in contract, strict liability, or tort
(including negligence or otherwise) arising in any way out of the use
of this software, even if advised of the possibility of such damage.
.. _John Gruber: http://daringfireball.net/
.. _Chad Miller: http://web.chad.org/
.. _Pyblosxom: http://pyblosxom.bluesock.org/
.. _SmartyPants: http://daringfireball.net/projects/smartypants/
.. _Movable Type: http://www.movabletype.org/
.. _2-Clause BSD license: http://www.spdx.org/licenses/BSD-2-Clause
.. _Docutils: http://docutils.sf.net/
Description
===========
SmartyPants can perform the following transformations:
- Straight quotes ( " and ' ) into "curly" quote characters
- Backticks-style quotes (\`\`like this'') into "curly" quote characters
- Dashes (``--`` and ``---``) into en- and em-dash entities
- Three consecutive dots (``...`` or ``. . .``) into an ellipsis entity
This means you can write, edit, and save your posts using plain old
ASCII straight quotes, plain dashes, and plain dots, but your published
posts (and final HTML output) will appear with smart quotes, em-dashes,
and proper ellipses.
SmartyPants does not modify characters within ``<pre>``, ``<code>``, ``<kbd>``,
``<math>`` or ``<script>`` tag blocks. Typically, these tags are used to
display text where smart quotes and other "smart punctuation" would not be
appropriate, such as source code or example markup.
Backslash Escapes
=================
If you need to use literal straight quotes (or plain hyphens and
periods), SmartyPants accepts the following backslash escape sequences
to force non-smart punctuation. It does so by transforming the escape
sequence into a character:
======== ===== =========
Escape Value Character
======== ===== =========
``\\\\`` \ \\
\\" " "
\\' ' '
\\. . .
\\- - \-
\\` ` \`
======== ===== =========
This is useful, for example, when you want to use straight quotes as
foot and inch marks: 6\\'2\\" tall; a 17\\" iMac.
Options
=======
For Pyblosxom users, the ``smartypants_attributes`` attribute is where you
specify configuration options.
Numeric values are the easiest way to configure SmartyPants' behavior:
"0"
Suppress all transformations. (Do nothing.)
"1"
Performs default SmartyPants transformations: quotes (including
\`\`backticks'' -style), em-dashes, and ellipses. "``--``" (dash dash)
is used to signify an em-dash; there is no support for en-dashes.
"2"
Same as smarty_pants="1", except that it uses the old-school typewriter
shorthand for dashes: "``--``" (dash dash) for en-dashes, "``---``"
(dash dash dash)
for em-dashes.
"3"
Same as smarty_pants="2", but inverts the shorthand for dashes:
"``--``" (dash dash) for em-dashes, and "``---``" (dash dash dash) for
en-dashes.
"-1"
Stupefy mode. Reverses the SmartyPants transformation process, turning
the characters produced by SmartyPants into their ASCII equivalents.
E.g. "“" is turned into a simple double-quote (\"), "—" is
turned into two dashes, etc.
The following single-character attribute values can be combined to toggle
individual transformations from within the smarty_pants attribute. For
example, to educate normal quotes and em-dashes, but not ellipses or
\`\`backticks'' -style quotes:
``py['smartypants_attributes'] = "1"``
"q"
Educates normal quote characters: (") and (').
"b"
Educates \`\`backticks'' -style double quotes.
"B"
Educates \`\`backticks'' -style double quotes and \`single' quotes.
"d"
Educates em-dashes.
"D"
Educates em-dashes and en-dashes, using old-school typewriter shorthand:
(dash dash) for en-dashes, (dash dash dash) for em-dashes.
"i"
Educates em-dashes and en-dashes, using inverted old-school typewriter
shorthand: (dash dash) for em-dashes, (dash dash dash) for en-dashes.
"e"
Educates ellipses.
"w"
Translates any instance of ``"`` into a normal double-quote character.
This should be of no interest to most people, but of particular interest
to anyone who writes their posts using Dreamweaver, as Dreamweaver
inexplicably uses this entity to represent a literal double-quote
character. SmartyPants only educates normal quotes, not entities (because
ordinarily, entities are used for the explicit purpose of representing the
specific character they represent). The "w" option must be used in
conjunction with one (or both) of the other quote options ("q" or "b").
Thus, if you wish to apply all SmartyPants transformations (quotes, en-
and em-dashes, and ellipses) and also translate ``"`` entities into
regular quotes so SmartyPants can educate them, you should pass the
following to the smarty_pants attribute:
Caveats
=======
Why You Might Not Want to Use Smart Quotes in Your Weblog
---------------------------------------------------------
For one thing, you might not care.
Most normal, mentally stable individuals do not take notice of proper
typographic punctuation. Many design and typography nerds, however, break
out in a nasty rash when they encounter, say, a restaurant sign that uses
a straight apostrophe to spell "Joe's".
If you're the sort of person who just doesn't care, you might well want to
continue not caring. Using straight quotes -- and sticking to the 7-bit
ASCII character set in general -- is certainly a simpler way to live.
Even if you I *do* care about accurate typography, you still might want to
think twice before educating the quote characters in your weblog. One side
effect of publishing curly quote characters is that it makes your
weblog a bit harder for others to quote from using copy-and-paste. What
happens is that when someone copies text from your blog, the copied text
contains the 8-bit curly quote characters (as well as the 8-bit characters
for em-dashes and ellipses, if you use these options). These characters
are not standard across different text encoding methods, which is why they
need to be encoded as characters.
People copying text from your weblog, however, may not notice that you're
using curly quotes, and they'll go ahead and paste the unencoded 8-bit
characters copied from their browser into an email message or their own
weblog. When pasted as raw "smart quotes", these characters are likely to
get mangled beyond recognition.
That said, my own opinion is that any decent text editor or email client
makes it easy to stupefy smart quote characters into their 7-bit
equivalents, and I don't consider it my problem if you're using an
indecent text editor or email client.
Algorithmic Shortcomings
------------------------
One situation in which quotes will get curled the wrong way is when
apostrophes are used at the start of leading contractions. For example:
``'Twas the night before Christmas.``
In the case above, SmartyPants will turn the apostrophe into an opening
single-quote, when in fact it should be a closing one. I don't think
this problem can be solved in the general case -- every word processor
I've tried gets this wrong as well. In such cases, it's best to use the
proper character for closing single-quotes (``’``) by hand.
Version History
===============
1.7 2012-11-19
- Internationalization: language-dependent quotes.
1.6.1: 2012-11-06
- Refactor code, code cleanup,
- `educate_tokens()` generator as interface for Docutils.
1.6: 2010-08-26
- Adaption to Docutils:
- Use Unicode instead of HTML entities,
- Remove code special to pyblosxom.
1.5_1.6: Fri, 27 Jul 2007 07:06:40 -0400
- Fixed bug where blocks of precious unalterable text was instead
interpreted. Thanks to Le Roux and Dirk van Oosterbosch.
1.5_1.5: Sat, 13 Aug 2005 15:50:24 -0400
- Fix bogus magical quotation when there is no hint that the
user wants it, e.g., in "21st century". Thanks to Nathan Hamblen.
- Be smarter about quotes before terminating numbers in an en-dash'ed
range.
1.5_1.4: Thu, 10 Feb 2005 20:24:36 -0500
- Fix a date-processing bug, as reported by jacob childress.
- Begin a test-suite for ensuring correct output.
- Removed import of "string", since I didn't really need it.
(This was my first ever Python program. Sue me!)
1.5_1.3: Wed, 15 Sep 2004 18:25:58 -0400
- Abort processing if the flavour is in forbidden-list. Default of
[ "rss" ] (Idea of Wolfgang SCHNERRING.)
- Remove stray virgules from en-dashes. Patch by Wolfgang SCHNERRING.
1.5_1.2: Mon, 24 May 2004 08:14:54 -0400
- Some single quotes weren't replaced properly. Diff-tesuji played
by Benjamin GEIGER.
1.5_1.1: Sun, 14 Mar 2004 14:38:28 -0500
- Support upcoming pyblosxom 0.9 plugin verification feature.
1.5_1.0: Tue, 09 Mar 2004 08:08:35 -0500
- Initial release
"""
default_smartypants_attr = "1"
import re
class smartchars(object):
    """Smart quotes and dashes.

    Class attributes hold the language-independent characters; the four
    quote characters are language-specific and set in __init__().
    """
    endash = u'–' # "–" EN DASH
    emdash = u'—' # "—" EM DASH
    ellipsis = u'…' # "…" HORIZONTAL ELLIPSIS
    # quote characters (language-specific, set in __init__())
    #
    # English smart quotes (open primary, close primary, open secondary, close
    # secondary) are:
    # opquote = u'“' # "“" LEFT DOUBLE QUOTATION MARK
    # cpquote = u'”' # "”" RIGHT DOUBLE QUOTATION MARK
    # osquote = u'‘' # "‘" LEFT SINGLE QUOTATION MARK
    # csquote = u'’' # "’" RIGHT SINGLE QUOTATION MARK
    # For other languages see:
    # http://en.wikipedia.org/wiki/Non-English_usage_of_quotation_marks
    # http://de.wikipedia.org/wiki/Anf%C3%BChrungszeichen#Andere_Sprachen
    quotes = {'af': u'“”‘’',
              'af-x-altquot': u'„”‚’',
              'ca': u'«»“”',
              'ca-x-altquot': u'“”‘’',
              'cs': u'„“‚‘',
              'cs-x-altquot': u'»«›‹',
              'da': u'»«‘’',
              'da-x-altquot': u'„“‚‘',
              'de': u'„“‚‘',
              'de-x-altquot': u'»«›‹',
              'de-CH': u'«»‹›',
              'el': u'«»“”',
              'en': u'“”‘’',
              'en-UK': u'‘’“”',
              'eo': u'“”‘’',
              'es': u'«»“”',
              'et': u'„“‚‘', # no secondary quote listed in
              'et-x-altquot': u'»«›‹', # the sources above (wikipedia.org)
              'eu': u'«»‹›',
              'es-x-altquot': u'“”‘’',
              'fi': u'””’’',
              'fi-x-altquot': u'»»’’',
              # French quotes carry a NARROW NO-BREAK SPACE (U+202F) inside
              # the guillemets; spelled as an escape so it can't silently be
              # replaced by an ordinary space.
              'fr': (u'\u00ab\u202f', u'\u202f\u00bb',
                     u'\u2039\u202f', u'\u202f\u203a'),
              'fr-x-altquot': u'«»‹›', # for use with manually set spaces
              # 'fr-x-altquot': (u'“ ', u' ”', u'‘ ', u' ’'), # rarely used
              'fr-CH': u'«»‹›',
              'gl': u'«»“”',
              'he': u'”“»«',
              'he-x-altquot': u'„”‚’',
              # NOTE: 'it' appeared twice in the original mapping (with the
              # same value); the duplicate key has been removed.
              'it': u'«»“”',
              'it-CH': u'«»‹›',
              'it-x-altquot': u'“”‘’',
              'ja': u'「」『』',
              'lt': u'„“‚‘',
              'nl': u'“”‘’',
              'nl-x-altquot': u'„”‚’',
              'pl': u'„”«»',
              'pl-x-altquot': u'«»“”',
              'pt': u'«»“”',
              'pt-BR': u'“”‘’',
              'ro': u'„”«»',
              'ro-x-altquot': u'«»„”',
              'ru': u'«»„“',
              'sk': u'„“‚‘',
              'sk-x-altquot': u'»«›‹',
              'sv': u'„“‚‘',
              'sv-x-altquot': u'»«›‹',
              'zh-CN': u'“”‘’',
              'zh-TW': u'「」『』',
              }
    def __init__(self, language='en'):
        """Select the quote set for `language` (a BCP 47 tag)."""
        self.language = language
        try:
            (self.opquote, self.cpquote,
             self.osquote, self.csquote) = self.quotes[language]
        except KeyError:
            # Unknown language: fall back to plain ASCII straight quotes.
            self.opquote, self.cpquote, self.osquote, self.csquote = u'""\'\''
def smartyPants(text, attr=default_smartypants_attr, language='en'):
    """Main function for "traditional" use: educate `text` and return it
    as a single string.  See educate_tokens() for the `attr` syntax.
    """
    # join() consumes the generator directly; the intermediate list
    # comprehension in the original added nothing.
    return "".join(educate_tokens(tokenize(text), attr, language))
def educate_tokens(text_tokens, attr=default_smartypants_attr, language='en'):
    """Return iterator that "educates" the items of `text_tokens`.

    `text_tokens` is an iterable of (type, text) pairs as produced by
    tokenize(); 'tag' and 'literal' tokens pass through untouched while
    'text' tokens receive the transformations selected by `attr`.
    """
    # Parse attributes:
    # 0 : do nothing
    # 1 : set all
    # 2 : set all, using old school en- and em- dash shortcuts
    # 3 : set all, using inverted old school en and em- dash shortcuts
    #
    # q : quotes
    # b : backtick quotes (``double'' only)
    # B : backtick quotes (``double'' and `single')
    # d : dashes
    # D : old school dashes
    # i : inverted old school dashes
    # e : ellipses
    # w : convert &quot; entities to " for Dreamweaver users
    convert_quot = False # translate &quot; entities into normal quotes?
    do_dashes = False
    do_backticks = False
    do_quotes = False
    do_ellipses = False
    do_stupefy = False
    if attr == "0": # Do nothing: pass every token's text through unchanged.
        # (Bugfix: the original yielded the unbound name `text` here, which
        # raised NameError, and then fell through into the loop below.)
        for (ttype, text) in text_tokens:
            yield text
        return
    elif attr == "1": # Do everything, turn all options on.
        do_quotes = True
        do_backticks = True
        do_dashes = 1
        do_ellipses = True
    elif attr == "2":
        # Do everything, turn all options on, use old school dash shorthand.
        do_quotes = True
        do_backticks = True
        do_dashes = 2
        do_ellipses = True
    elif attr == "3":
        # Do everything, use inverted old school dash shorthand.
        do_quotes = True
        do_backticks = True
        do_dashes = 3
        do_ellipses = True
    elif attr == "-1": # Special "stupefy" mode.
        do_stupefy = True
    else:
        if "q" in attr: do_quotes = True
        if "b" in attr: do_backticks = True
        if "B" in attr: do_backticks = 2
        if "d" in attr: do_dashes = 1
        if "D" in attr: do_dashes = 2
        if "i" in attr: do_dashes = 3
        if "e" in attr: do_ellipses = True
        if "w" in attr: convert_quot = True
    prev_token_last_char = " "
    # Last character of the previous text token. Used as
    # context to curl leading quote characters correctly.
    for (ttype, text) in text_tokens:
        # skip HTML and/or XML tags as well as empty text tokens
        # without updating the last character
        if ttype == 'tag' or not text:
            yield text
            continue
        # skip literal text (math, literal, raw, ...)
        if ttype == 'literal':
            prev_token_last_char = text[-1:]
            yield text
            continue
        last_char = text[-1:] # Remember last char before processing.
        text = processEscapes(text)
        if convert_quot:
            # Dreamweaver writes literal double quotes as &quot; entities;
            # decode them so they can be educated below.  (The original
            # pattern had been entity-decoded into a no-op.)
            text = re.sub('&quot;', '"', text)
        if do_dashes == 1:
            text = educateDashes(text)
        elif do_dashes == 2:
            text = educateDashesOldSchool(text)
        elif do_dashes == 3:
            text = educateDashesOldSchoolInverted(text)
        if do_ellipses:
            text = educateEllipses(text)
        # Note: backticks need to be processed before quotes.
        if do_backticks:
            text = educateBackticks(text, language)
        if do_backticks == 2:
            text = educateSingleBackticks(text, language)
        if do_quotes:
            # Prepend the previous token's last character as context so a
            # leading quote curls correctly, then strip it off again.
            text = educateQuotes(prev_token_last_char+text, language)[1:]
        if do_stupefy:
            text = stupefyEntities(text, language)
        # Remember last char as context for the next token
        prev_token_last_char = last_char
        text = processEscapes(text, restore=True)
        yield text
def educateQuotes(text, language='en'):
    """
    Parameter:  - text string (unicode or bytes).
                - language (`BCP 47` language tag.)
    Returns:    The `text`, with "educated" curly quote characters.

    Example input:  "Isn't this fun?"
    Example output: “Isn’t this fun?”
    """
    smart = smartchars(language)
    # oldtext = text
    punct_class = r"""[!"#\$\%'()*+,-.\/:;<=>?\@\[\\\]\^_`{|}~]"""
    # Special case if the very first character is a quote
    # followed by punctuation at a non-word-break.
    # Close the quotes by brute force.
    # (Bugfix: inside a raw string the original "\\B" matched a literal
    # backslash followed by "B"; \B is the intended "not a word boundary"
    # assertion.)
    text = re.sub(r"""^'(?=%s\B)""" % (punct_class,), smart.csquote, text)
    text = re.sub(r"""^"(?=%s\B)""" % (punct_class,), smart.cpquote, text)
    # Special case for double sets of quotes, e.g.:
    #   <p>He said, "'Quoted' words in a larger quote."</p>
    text = re.sub(r""""'(?=\w)""", smart.opquote+smart.osquote, text)
    text = re.sub(r"""'"(?=\w)""", smart.osquote+smart.opquote, text)
    # Special case for decade abbreviations (the '80s):
    text = re.sub(r"""\b'(?=\d{2}s)""", smart.csquote, text)
    close_class = r"""[^\ \t\r\n\[\{\(\-]"""
    # Decimal en/em-dash entities (restored -- entity decoding had turned
    # these into literal dash characters, contradicting the comments below).
    dec_dashes = r"""&#8211;|&#8212;"""
    # Get most opening single quotes:
    opening_single_quotes_regex = re.compile(r"""
            (
                \s           |  # a whitespace char, or
                &nbsp;       |  # a non-breaking space entity, or
                --           |  # dashes, or
                &[mn]dash;   |  # named dash entities
                %s           |  # or decimal entities
                &\#x201[34];    # or hex
            )
            '            # the quote
            (?=\w)       # followed by a word character
            """ % (dec_dashes,), re.VERBOSE)
    text = opening_single_quotes_regex.sub(r'\1'+smart.osquote, text)
    closing_single_quotes_regex = re.compile(r"""
            (%s)
            '
            (?!\s | s\b | \d)
            """ % (close_class,), re.VERBOSE)
    text = closing_single_quotes_regex.sub(r'\1'+smart.csquote, text)
    closing_single_quotes_regex = re.compile(r"""
            (%s)
            '
            (\s | s\b)
            """ % (close_class,), re.VERBOSE)
    text = closing_single_quotes_regex.sub(r'\1%s\2' % smart.csquote, text)
    # Any remaining single quotes should be opening ones:
    text = re.sub(r"""'""", smart.osquote, text)
    # Get most opening double quotes:
    opening_double_quotes_regex = re.compile(r"""
            (
                \s           |  # a whitespace char, or
                &nbsp;       |  # a non-breaking space entity, or
                --           |  # dashes, or
                &[mn]dash;   |  # named dash entities
                %s           |  # or decimal entities
                &\#x201[34];    # or hex
            )
            "            # the quote
            (?=\w)       # followed by a word character
            """ % (dec_dashes,), re.VERBOSE)
    text = opening_double_quotes_regex.sub(r'\1'+smart.opquote, text)
    # Double closing quotes:
    closing_double_quotes_regex = re.compile(r"""
            #(%s)?          # character that indicates the quote should be closing
            "
            (?=\s)
            """ % (close_class,), re.VERBOSE)
    text = closing_double_quotes_regex.sub(smart.cpquote, text)
    closing_double_quotes_regex = re.compile(r"""
            (%s)            # character that indicates the quote should be closing
            "
            """ % (close_class,), re.VERBOSE)
    text = closing_double_quotes_regex.sub(r'\1'+smart.cpquote, text)
    # Any remaining quotes should be opening ones.
    text = re.sub(r'"', smart.opquote, text)
    return text
def educateBackticks(text, language='en'):
    """
    Parameter:  String (unicode or bytes).
    Returns:    The `text`, with ``backticks'' -style double quotes
                translated into curly quote characters.

    Example input:  ``Isn't this fun?''
    Example output: “Isn't this fun?”
    """
    smart = smartchars(language)
    # Double backtick opens, doubled straight apostrophe closes.
    text = re.sub(r"""``""", smart.opquote, text)
    text = re.sub(r"""''""", smart.cpquote, text)
    return text
def educateSingleBackticks(text, language='en'):
    """
    Parameter:  String (unicode or bytes).
    Returns:    The `text`, with `backticks' -style single quotes
                translated into curly quote characters.

    Example input:  `Isn't this fun?'
    Example output: ‘Isn’t this fun?’
    """
    smart = smartchars(language)
    # A lone backtick opens; a lone straight apostrophe closes.
    for pattern, curly in ((r"""`""", smart.osquote),
                           (r"""'""", smart.csquote)):
        text = re.sub(pattern, curly, text)
    return text
def educateDashes(text):
    """
    Parameter:  String (unicode or bytes).
    Returns:    The `text`, with "--" turned into an em-dash and "---"
                into an en-dash -- deliberately the reverse of the
                "old school" convention (see educateDashesOldSchool).
    """
    # Replace the longer run first so "---" isn't consumed as "--" + "-".
    text = text.replace('---', smartchars.endash)   # en (yes, backwards)
    text = text.replace('--', smartchars.emdash)    # em (yes, backwards)
    return text
def educateDashesOldSchool(text):
    """
    Parameter:  String (unicode or bytes).
    Returns:    The `text`, with each instance of "--" translated to
                an en-dash character, and each "---" translated to
                an em-dash character.
    """
    # Longer run first so "---" isn't consumed as "--" + "-".
    text = text.replace('---', smartchars.emdash)
    text = text.replace('--', smartchars.endash)
    return text
def educateDashesOldSchoolInverted(text):
    """
    Parameter:  String (unicode or bytes).
    Returns:    The `text`, with each instance of "--" translated to
                an em-dash character, and each "---" translated to
                an en-dash character. Two reasons why: First, unlike the
                en- and em-dash syntax supported by
                EducateDashesOldSchool(), it's compatible with existing
                entries written before SmartyPants 1.1, back when "--" was
                only used for em-dashes. Second, em-dashes are more
                common than en-dashes, and so it sort of makes sense that
                the shortcut should be shorter to type. (Thanks to Aaron
                Swartz for the idea.)
    """
    text = re.sub(r"""---""", smartchars.endash, text) # "---" (old-school em) -> en dash
    text = re.sub(r"""--""", smartchars.emdash, text) # "--" (old-school en) -> em dash
    return text
def educateEllipses(text):
    """
    Parameter:  String (unicode or bytes).
    Returns:    The `text`, with each instance of "..." translated to
                an ellipsis character.

    Example input:  Huh...?
    Example output: Huh…?
    """
    # Both the compact and the spaced spelling collapse to one ellipsis.
    for dots in ('...', '. . .'):
        text = text.replace(dots, smartchars.ellipsis)
    return text
def stupefyEntities(text, language='en'):
    """
    Parameter:  String (unicode or bytes).
    Returns:    The `text`, with each SmartyPants character translated to
                its ASCII counterpart.

    Example input:  “Hello — world.”
    Example output: "Hello -- world."
    """
    smart = smartchars(language)
    # Map each "smart" character back to its plain-ASCII spelling, in the
    # same order as the original implementation.
    ascii_map = ((smart.endash, '-'),       # en-dash
                 (smart.emdash, '--'),      # em-dash
                 (smart.osquote, "'"),      # open single quote
                 (smart.csquote, "'"),      # close single quote
                 (smart.opquote, '"'),      # open double quote
                 (smart.cpquote, '"'),      # close double quote
                 (smart.ellipsis, '...'))   # ellipsis
    for smart_char, plain in ascii_map:
        text = re.sub(smart_char, plain, text)
    return text
def processEscapes(text, restore=False):
    r"""
    Parameter:  String (unicode or bytes).
    Returns:    The `text`, after processing the following backslash
                escape sequences. This is useful if you want to force a "dumb"
                quote or other character to appear.  With restore=True the
                placeholders are turned back into the plain character
                (without the backslash).

                Escape  Value
                ------  -----
                \\      &#92;
                \"      &#34;
                \'      &#39;
                \.      &#46;
                \-      &#45;
                \`      &#96;
    """
    # Placeholder entities restored: entity decoding had collapsed them to
    # single characters, leaving syntactically invalid raw strings (r'\').
    # The restore branch relies on ch[1] being the bare escaped character.
    replacements = ((r'\\', r'&#92;'),
                    (r'\"', r'&#34;'),
                    (r"\'", r'&#39;'),
                    (r'\.', r'&#46;'),
                    (r'\-', r'&#45;'),
                    (r'\`', r'&#96;'))
    if restore:
        for (ch, rep) in replacements:
            text = text.replace(rep, ch[1])
    else:
        for (ch, rep) in replacements:
            text = text.replace(ch, rep)
    return text
def tokenize(text):
    """
    Parameter:  String containing HTML markup.
    Returns:    An iterator that yields the tokens comprising the input
                string. Each yielded element is a two-element tuple; the
                first is either 'tag' or 'text'; the second is the actual
                value.

    Based on the _tokenize() subroutine from Brad Choate's MTRegex plugin.
    <http://www.bradchoate.com/past/mtregex.php>
    """
    # (Cleanup: the original computed an unused nested-tags pattern on every
    # call; only this simple tag-soup pattern was ever applied.)
    tag_soup = re.compile(r"""([^<]*)(<[^>]*>)""")
    previous_end = 0
    for match in tag_soup.finditer(text):
        leading_text, tag = match.group(1), match.group(2)
        if leading_text:
            yield ('text', leading_text)
        yield ('tag', tag)
        previous_end = match.end()
    # Trailing text after the last tag (or the whole string if no tags).
    if previous_end < len(text):
        yield ('text', text[previous_end:])
if __name__ == "__main__":
    import locale
    try:
        # Honor the user's default locale (affects docutils output);
        # silently fall back if the locale is unsupported.
        locale.setlocale(locale.LC_ALL, '')
    except locale.Error:
        pass
    from docutils.core import publish_string
    # Render this module's docstring as an HTML document.
    docstring_html = publish_string(__doc__, writer_name='html')
    # print(...) with a single argument behaves identically on Python 2 and 3
    # (the original used the Python-2-only statement form).
    print(docstring_html)
    # Unit test output goes out stderr.
    import unittest
    sp = smartyPants
    class TestSmartypantsAllAttributes(unittest.TestCase):
        # the default attribute is "1", which means "all".
        def test_dates(self):
            self.assertEqual(sp("1440-80's"), u"1440-80’s")
            self.assertEqual(sp("1440-'80s"), u"1440-‘80s")
            self.assertEqual(sp("1440---'80s"), u"1440–‘80s")
            self.assertEqual(sp("1960s"), "1960s") # no effect.
            self.assertEqual(sp("1960's"), u"1960’s")
            self.assertEqual(sp("one two '60s"), u"one two ‘60s")
            self.assertEqual(sp("'60s"), u"‘60s")
        def test_ordinal_numbers(self):
            self.assertEqual(sp("21st century"), "21st century") # no effect.
            self.assertEqual(sp("3rd"), "3rd") # no effect.
        def test_educated_quotes(self):
            self.assertEqual(sp('''"Isn't this fun?"'''), u'“Isn’t this fun?”')
        def test_html_tags(self):
            text = '<a src="foo">more</a>'
            self.assertEqual(sp(text), text)
    unittest.main()
# Legacy metadata kept from the original SmartyPants.py pyblosxom plugin.
__author__ = "Chad Miller <smartypantspy@chad.org>"
__version__ = "1.5_1.6: Fri, 27 Jul 2007 07:06:40 -0400"
__url__ = "http://wiki.chad.org/SmartyPantsPy"
__description__ = "Smart-quotes, smart-ellipses, and smart-dashes for weblog entries in pyblosxom"
| apache-2.0 |
jtackaberry/stagehand | stagehand/searchers/base.py | 1 | 16378 | import os
import re
import asyncio
import functools
import logging
from ..config import config
from ..utils import remove_stop_words
# Shared logger for all searcher plugins in this package.
log = logging.getLogger('stagehand.searchers')
class SearcherError(Exception):
    """Raised when a searcher plugin operation fails (see SearcherBase)."""
    pass
class SearcherBase:
    """Abstract base class for search plugins.

    Subclasses locate candidate downloads (SearchResult objects) for
    episodes; they must at minimum override _search() and supply the
    class-level metadata attributes below.
    """
    # Values must be supplied by subclasses.
    # The internal name of the plugin (lowercase, no spaces).
    NAME = None
    # The human-readable name for the plugin.
    PRINTABLE_NAME = None
    # The type of retriever plugin required to fetch search results of this
    # searcher.
    TYPE = None
    # False if the user may disable the plugin, or True if it is always active.
    ALWAYS_ENABLED = False
    # Constants for clean_title()
    CLEAN_APOSTROPHE_LEAVE = 0
    CLEAN_APOSTROPHE_REMOVE = 1
    CLEAN_APOSTROPHE_REGEXP = 2
    def __init__(self, loop=None):
        """Initialize the searcher; `loop` defaults to the current event loop."""
        super().__init__()
        # Event loop used for asynchronous work by subclasses.
        self._loop = loop or asyncio.get_event_loop()
def _parse_hsize(self, size):
if isinstance(size, int):
return size
parts = size.lower().split()
if not parts:
return 0
sz = float(parts[0].replace(',', ''))
if len(parts) == 2:
mult = {
'gib': 1024*1024*1024,
'gb': 1000*1000*1000,
'mib': 1024*1024,
'mb': 1000*1000,
'kib': 1024,
'kb': 1000
}.get(parts[1], 1)
return int(sz * mult)
return int(sz)
    def _cmp_result(self, ep, ideal_size, a, b):
        """Comparator ordering two SearchResults for episode `ep`, best first.

        Returns -1 if `a` is better, 1 if `b` is better, 0 if tied.
        Criteria in priority order: filename matches the episode, container
        extension, A/V codec, closeness to `ideal_size`, resolution, release
        modifiers, and finally date.  Side effect: results with a
        blacklisted extension get their `disqualified` flag set.
        """
        # Hideous and improperly hardcoded logic follows.
        inf = float('inf')
        # Extension scores; -inf means "never accept" (sets disqualified).
        exts = {
            # Want.
            'mkv': 3, 'mp4': 2, 'avi': 1,
            # Don't want.
            'wmv': -inf, 'mpg': -inf, 'ts': -inf, 'rar': -inf, 'r\d\d': -inf,
        }
        # (video regexp, audio regexp) -> score; None means "don't care".
        av = {
            (r'[xh]\.?26[45]', r'(ac-?3|dts|dd5\.?1)'): 10,
            (r'[xh]\.?26[45]', None): 9,
            (None, r'(ac-?3|dts)'): 8,
            (None, r'aac\.?2?'): -1
        }
        res = {'1080p': 2, '720p': 1}
        mods = {r'blu-?ray': 10, 'proper': 9, r're-?pack': 7, 'immerse': 6,
                'dimension': 5, 'nlsubs': 4, 'web-?dl': 3}
        aname = a.filename.lower()
        bname = b.filename.lower()
        aext = os.path.splitext(aname)[-1].lstrip('.')
        bext = os.path.splitext(bname)[-1].lstrip('.')
        # Prefer results that match filename over subject.
        ascore = self._is_name_for_episode(a.filename, ep)
        bscore = self._is_name_for_episode(b.filename, ep)
        if ascore != bscore:
            return 1 if bscore else -1
        # Sort by extension
        ascore = bscore = 0
        for ext, score in exts.items():
            if re.match(ext, aext):
                ascore = score
            if re.match(ext, bext):
                bscore = score
        if ascore == -inf:
            a.disqualified = True
        if bscore == -inf:
            b.disqualified = True
        if ascore != bscore:
            return 1 if bscore > ascore else -1
        # Sort by A/V format
        ascore = bscore = 0
        for (vformat, aformat), score in av.items():
            # bool stands in as an always-true predicate for a None pattern.
            vsearch = re.compile(r'[-. ]%s[-. $]' % vformat).search if vformat else bool
            asearch = re.compile(r'[-. ]%s[-. $]' % aformat).search if aformat else bool
            # Negative scores stick, but positive scores are replaced with
            # higher positive scores.
            if ascore >= 0 and vsearch(aname) and asearch(aname):
                ascore = score if score > ascore or score < 0 else ascore
            if bscore >= 0 and vsearch(bname) and asearch(bname):
                bscore = score if score > bscore or score < 0 else bscore
        if ascore != bscore:
            return 1 if bscore > ascore else -1
        # Sort by ideal size (if specified).
        if ideal_size:
            aratio = a.size / float(ideal_size)
            bratio = b.size / float(ideal_size)
            # If both sizes are within 20% of each other, treat them the same.
            if 0.8 < (a.size / float(b.size)) < 1.2:
                pass
            # If both sizes are no worse than 60% of ideal size, or up to 4x ideal size,
            # prefer the larger one
            elif 0.6 < aratio < 4 and 0.6 < bratio < 4:
                return 1 if b.size > a.size else -1
            # Otherwise prefer the one closest to ideal.
            else:
                return 1 if abs(1-aratio) > abs(1-bratio) else -1
        def score_by_search(items):
            # Score both names against each (substring regexp, score) pair.
            ascore = bscore = 0
            for substr, score in items:
                restr = re.compile(r'[-. ]%s[-. $]' % substr)
                if restr.search(aname):
                    ascore = score
                if restr.search(bname):
                    bscore = score
            return ascore, bscore
        # Sort by resolution
        ascore, bscore = score_by_search(res.items())
        if ascore != bscore:
            return 1 if bscore > ascore else -1
        # Sort by other modifiers
        ascore, bscore = score_by_search(mods.items())
        if ascore != bscore:
            return 1 if bscore > ascore else -1
        # Sort by date, preferring the newest (or the one which actually has a date)
        if a.date != b.date:
            return 1 if b.date and not a.date or (b.date and a.date and b.date > a.date) else -1
        return 0
    def _get_episode_codes_regexp_list(self, episodes, codes=True, dates=True):
        """Return a list of regexp fragments matching the given episodes.

        With codes=True each episode contributes its standard code (ep.code)
        plus the SxEE "1x02" form; with dates=True an air-date pattern such
        as 2012[-.]?01[-.]?31 is added when the episode has an air date.
        """
        parts = []
        for ep in episodes or ():
            if codes:
                parts.append(ep.code)
                parts.append('{0}x{1:02}'.format(ep.season.number, ep.number))
            if dates:
                dt = ep.airdatetime
                if dt:
                    parts.append(r'{0}[-.]?{1:02}[-.]?{2:02}'.format(dt.year, dt.month, dt.day))
        return parts
def _get_episode_codes_regexp(self, episodes, codes=True, dates=True):
parts = self._get_episode_codes_regexp_list(episodes, codes, dates)
if not parts:
return ''
elif len(parts) == 1:
return parts[0]
else:
return '(%s)' % '|'.join(parts)
    def _is_name_for_episode(self, name, ep):
        """Return True if `name` looks like a release of episode `ep`.

        The name must contain the episode's code (bounded by word breaks or
        underscores) AND every word of the cleaned series title, in any order.
        """
        recode = re.compile(r'(\b|_){0}(\b|_)'.format(self._get_episode_codes_regexp([ep])), re.I)
        if recode.search(name):
            # Epcode matches, check for title.
            title = ep.series.cfg.search_string or ep.series.name
            title = self.clean_title(title, apostrophe=self.CLEAN_APOSTROPHE_REGEXP)
            # Ensure each word in the title matches, but don't require them to be in
            # the right order.
            for word in title.split():
                if not re.search(r'(\b|_)%s(\b|_)' % word, name, re.I):
                    break
            else:
                # Loop completed without a break: every title word matched.
                return True
        return False
    def clean_title(self, title, apostrophe=CLEAN_APOSTROPHE_LEAVE, parens=True):
        """
        Strips punctuation and (optionally) parentheticals from a title to
        improve searching.

        :param title: the string to massage
        :param apostrophe: one of the CLEAN_APOSTROPHE_* constants (below)
        :param parens: if True, remove anything inside round parens. Otherwise,
                       the parens will be stripped but the contents left.
        :returns: the cleaned title, whitespace-normalized

        *apostrophe* can be:
            * CLEAN_APOSTROPHE_LEAVE: don't do anything: foo's -> foo's
            * CLEAN_APOSTROPHE_REMOVE: strip them: foo's -> foos
            * CLEAN_APOSTROPHE_REGEXP: convert to regexp: foo's -> (foos|foo's)
        """
        if parens:
            # Remove anything in parens from the title (e.g. "The Office (US)")
            title = re.sub(r'\s*\([^)]*\)', '', title)
        # Substitute certain punctuation with spaces
        title = re.sub(r'[&()\[\]*+,-./:;<=>?@\\^_{|}"]', ' ', title)
        # And outright remove others
        title = re.sub(r'[!"#$%:;<=>`]', '', title)
        # Treat apostrophe separately
        if apostrophe == self.CLEAN_APOSTROPHE_REMOVE:
            title = title.replace("'", '')
        elif apostrophe == self.CLEAN_APOSTROPHE_REGEXP:
            # Replace "foo's" with "(foos|foo's)"
            def replace_apostrophe(match):
                return '(%s|%s)' % (match.group(1).replace("'", ''), match.group(1))
            title = re.sub(r"(\S+'\S*)", replace_apostrophe, title)
        # Drop common stop words ("the", "a", ...) to loosen matching.
        title = remove_stop_words(title)
        # Clean up multiple and trailing spaces.
        return re.sub(r'\s+', ' ', title).strip()
    @asyncio.coroutine
    def _search(self, title, episodes, date, min_size, quality):
        """
        Must return a dict of episode -> [list of SearchResult objects]. A
        special key of None means the SearchResult list is not yet mapped
        to an episode object, and it will be up to the caller (i.e. the main
        search() method) to determine that.

        Subclasses must override this method.

        :raises NotImplementedError: always, in this base implementation
        """
        raise NotImplementedError
@asyncio.coroutine
def search(self, series, episodes, date=None, min_size=None, ideal_size=None, quality='HD'):
results = yield from self._search(series, episodes, date, min_size, quality)
# Categorize SearchResults not assigned to episodes.
if None in results:
for result in results[None]:
for ep in episodes:
if self._is_name_for_episode(result.filename, ep):
results.setdefault(ep, []).append(result)
break
else:
# We couldn't match the filename for this result against any
# episode. Try matching against subject. FIXME: we need to
# be careful because subject may include other codes (e.g.
# "Some Show s01e01-s01e23" in the case of an archive
# bundle)
if result.subject:
for ep in episodes:
if self._is_name_for_episode(result.subject, ep):
results.setdefault(ep, []).append(result)
break
del results[None]
# Sort, remove disqualified results, and set common result attributes.
for ep, l in list(results.items()):
# Sorting also sets the disqualified attribute on the bad
# results.
cmpfunc = functools.partial(self._cmp_result, ep, ideal_size)
l.sort(key=functools.cmp_to_key(cmpfunc))
for result in l[:]:
if result.disqualified or result.size < min_size:
log.info('disqualifying result %s, size=%d min_size=%d', result, result.size, min_size)
l.remove(result)
else:
result.searcher = self.NAME
# We str(quality) because it may be a config Var object which can't
# be pickled, and we do need to be able to pickle SearchResult objects.
result.quality = str(quality)
if not l:
# We ended up disqualifying all the results. So remove this episode
# from the result set.
del results[ep]
else:
for n, result in enumerate(l, 1):
log.info('result: %s. %s', n, result)
return results
    @asyncio.coroutine
    def _get_retriever_data(self, search_result):
        """
        Returns type-specific retriever data for the given search result.

        See :meth:`SearchResult.get_retriever_data`

        Subclasses must override this method.
        """
        raise NotImplementedError
    def _check_results_equal(self, a, b):
        """Return True if results `a` and `b` refer to the same item.

        Plugin-specific; used by SearchResult.__eq__().  Subclasses must
        override this method.
        """
        raise NotImplementedError
class SearchResult:
    """A single candidate download produced by a searcher plugin.

    Instances are pickled and stored in the database, so they must remain
    plain data containers (see _get_searcher() for the rationale).
    """
    # Type of search result. Only retrievers that support results of this
    # type will be used.
    type = None
    # This is the name of the plugin that provided the result.
    searcher = None
    filename = None
    subject = None
    # Size is in bytes
    size = None
    date = None
    newsgroup = None
    # The quality level expected for this result (retrievers may verify).
    quality = None
    disqualified = False
    def __init__(self, searcher, **kwargs):
        """Create a result for the given searcher; extra keyword arguments
        are set directly as instance attributes."""
        self.type = searcher.TYPE
        self.searcher = searcher.NAME
        # Plain loop rather than a list comprehension: setattr() is called
        # purely for its side effect, so building a throwaway list of Nones
        # was just noise.
        for k, v in kwargs.items():
            setattr(self, k, v)
        # The cached entity from get_retriever_data(). This must not be
        # pickled, since it could reference data that is not accessible between
        # invocations. Just use NotImplemented as a sentinel to indicate it
        # has not been populated.
        self._rdata = NotImplemented
def __repr__(self):
return '<%s %s at 0x%x>' % (self.__class__.__name__, self.filename, id(self))
def __getstate__(self):
# Return all attributes except _rdata which mustn't be pickled.
d = self.__dict__.copy()
del d['_rdata']
return d
    def __setstate__(self, state):
        """Unpickle support: restore attributes and re-seed the _rdata
        sentinel dropped by __getstate__()."""
        self.__dict__.update(state)
        self._rdata = NotImplemented
    def _get_searcher(self):
        """
        Return a new instance of the searcher plugin that provided this search
        result.

        It's possible that the plugin that provided the search result is no
        longer available (because, e.g. the SearchResult object was pickled and
        unpickled between invocations of Stagehand where the searcher plugin
        has since failed to load).

        It might be tempting to have searcher plugins subclass SearchResult and
        implement the result-specific logic there rather than taking this
        approach. But because the SearchResults are pickled and stored in the
        database, and because plugins can fail (and so must be considered
        transient), unpickling would fail. So we must only ever pickle core
        SearchResult objects.

        :raises SearcherError: if the originating plugin is not loaded
        """
        # We commit this cardinal sin of importing inside a function in order
        # to prevent an import loop, since __init__ imports us for SearcherError.
        # It's safe from the usual pitfalls (i.e. importing inside a thread) since
        # the module is guaranteed to already be loaded.
        from . import plugins
        if self.searcher not in plugins:
            raise SearcherError('search result for unknown searcher plugin %s' % self.searcher)
        return plugins[self.searcher].Searcher()
    def __eq__(self, other):
        """Delegate equality to the originating searcher plugin.

        Non-SearchResult objects and results of a different type are never
        equal.  NOTE(review): defining __eq__ without __hash__ makes
        instances unhashable on Python 3 -- confirm no caller keeps
        SearchResults in sets or as dict keys.
        """
        if not isinstance(other, SearchResult) or self.type != other.type:
            return False
        return self._get_searcher()._check_results_equal(self, other)
    @asyncio.coroutine
    def get_retriever_data(self, force=False):
        """
        Fetch whatever data is needed for a retriever to fetch this result.

        The actual return value is dependent on the searcher type, and no
        format is assumed or enforced here. It is a contract between the
        searcher plugin and a retriever plugin.

        :param force: if False (default), the data is cached so that subsequent
                      invocations don't call out to the plugin. If True,
                      it wil ask the plugin regardless of whether the value
                      was cached.
        :returns: a type-specific object from the searcher plugin, guaranteed
                  to be non-zero
        :raises SearcherError: if the plugin returned no data
        """
        if self._rdata is NotImplemented or force:
            # Fetch the data and cache it for subsequent calls. We cache because
            # retrievers may call get_retriever_data() multiple times (for
            # multiple retriever plugins) but the actual operation could be
            # expensive (e.g. fetching a torrent or nzb file off the network).
            self._rdata = yield from self._get_searcher()._get_retriever_data(self)
            if not self._rdata:
                # This shouldn't happen. It's a bug in the searcher, which should
                # have raised SearcherError instead.
                raise SearcherError('searcher plugin did not provide retriever data for this result')
        return self._rdata
def get_searcher_bind_address():
    """Return the address searchers should bind to, or None for no binding.

    The searcher-specific setting wins when set; its special value '*'
    means "any interface" and yields None.  Otherwise the global
    misc.bind_address is used when set.
    """
    searcher_addr = config.searchers.bind_address
    if searcher_addr:
        return searcher_addr if searcher_addr != '*' else None
    if config.misc.bind_address:
        return config.misc.bind_address
    return None
| mit |
zephirefaith/AI_Fall15_Assignments | A2/lib/networkx/algorithms/centrality/tests/test_communicability.py | 85 | 5421 | from collections import defaultdict
from nose.tools import *
from nose import SkipTest
import networkx as nx
from networkx.algorithms.centrality.communicability_alg import *
class TestCommunicability:
    """Tests for networkx.algorithms.centrality.communicability_alg."""
    @classmethod
    def setupClass(cls):
        # Run once before any test in the class (nose fixture); skip the
        # whole class if the optional numeric backends are unavailable.
        global numpy
        global scipy
        try:
            import numpy
        except ImportError:
            raise SkipTest('NumPy not available.')
        try:
            import scipy
        except ImportError:
            raise SkipTest('SciPy not available.')
    def test_communicability_centrality(self):
        # Two-node path: both endpoints have centrality cosh(1) ~= 1.5430806.
        answer={0: 1.5430806348152433, 1: 1.5430806348152433}
        result=communicability_centrality(nx.path_graph(2))
        for k,v in result.items():
            assert_almost_equal(answer[k],result[k],places=7)
        # Small named graph with precomputed reference values.
        answer1={'1': 1.6445956054135658,
                 'Albert': 2.4368257358712189,
                 'Aric': 2.4368257358712193,
                 'Dan':3.1306328496328168,
                 'Franck': 2.3876142275231915}
        G1=nx.Graph([('Franck','Aric'),('Aric','Dan'),('Dan','Albert'),
                     ('Albert','Franck'),('Dan','1'),('Franck','Albert')])
        result1=communicability_centrality(G1)
        for k,v in result1.items():
            assert_almost_equal(answer1[k],result1[k],places=7)
        # The matrix-exponential implementation must agree with the spectral one.
        result1=communicability_centrality_exp(G1)
        for k,v in result1.items():
            assert_almost_equal(answer1[k],result1[k],places=7)
    def test_communicability_betweenness_centrality(self):
        # Four-node path: interior nodes carry most of the communicability flow.
        answer={0: 0.07017447951484615, 1: 0.71565598701107991,
                2: 0.71565598701107991, 3: 0.07017447951484615}
        result=communicability_betweenness_centrality(nx.path_graph(4))
        for k,v in result.items():
            assert_almost_equal(answer[k],result[k],places=7)
        # Small named graph with precomputed reference values.
        answer1={'1': 0.060039074193949521,
                 'Albert': 0.315470761661372,
                 'Aric': 0.31547076166137211,
                 'Dan': 0.68297778678316201,
                 'Franck': 0.21977926617449497}
        G1=nx.Graph([('Franck','Aric'),
                     ('Aric','Dan'),('Dan','Albert'),('Albert','Franck'),
                     ('Dan','1'),('Franck','Albert')])
        result1=communicability_betweenness_centrality(G1)
        for k,v in result1.items():
            assert_almost_equal(answer1[k],result1[k],places=7)
def test_communicability_betweenness_centrality_small(self):
G = nx.Graph([(1,2)])
result=communicability_betweenness_centrality(G)
assert_equal(result, {1:0,2:0})
    def test_communicability(self):
        # Pairwise communicability of the 2-node path graph (entries of
        # expm(A) for A=[[0,1],[1,0]]): cosh(1) on the diagonal, sinh(1) off.
        answer={0 :{0: 1.5430806348152435,
                    1: 1.1752011936438012
                    },
                1 :{0: 1.1752011936438012,
                    1: 1.5430806348152435
                    }
                }
        # answer={(0, 0): 1.5430806348152435,
        #         (0, 1): 1.1752011936438012,
        #         (1, 0): 1.1752011936438012,
        #         (1, 1): 1.5430806348152435}
        result=communicability(nx.path_graph(2))
        for k1,val in result.items():
            for k2 in val:
                assert_almost_equal(answer[k1][k2],result[k1][k2],places=7)
def test_communicability2(self):
    """Check pairwise communicability on a small 5-node graph, for both the
    spectral implementation and the matrix-exponential implementation."""
    # Reference values keyed by (source, target) node pair.
    answer_orig = {('1', '1'): 1.6445956054135658,
                   ('1', 'Albert'): 0.7430186221096251,
                   ('1', 'Aric'): 0.7430186221096251,
                   ('1', 'Dan'): 1.6208126320442937,
                   ('1', 'Franck'): 0.42639707170035257,
                   ('Albert', '1'): 0.7430186221096251,
                   ('Albert', 'Albert'): 2.4368257358712189,
                   ('Albert', 'Aric'): 1.4368257358712191,
                   ('Albert', 'Dan'): 2.0472097037446453,
                   ('Albert', 'Franck'): 1.8340111678944691,
                   ('Aric', '1'): 0.7430186221096251,
                   ('Aric', 'Albert'): 1.4368257358712191,
                   ('Aric', 'Aric'): 2.4368257358712193,
                   ('Aric', 'Dan'): 2.0472097037446457,
                   ('Aric', 'Franck'): 1.8340111678944691,
                   ('Dan', '1'): 1.6208126320442937,
                   ('Dan', 'Albert'): 2.0472097037446453,
                   ('Dan', 'Aric'): 2.0472097037446457,
                   ('Dan', 'Dan'): 3.1306328496328168,
                   ('Dan', 'Franck'): 1.4860372442192515,
                   ('Franck', '1'): 0.42639707170035257,
                   ('Franck', 'Albert'): 1.8340111678944691,
                   ('Franck', 'Aric'): 1.8340111678944691,
                   ('Franck', 'Dan'): 1.4860372442192515,
                   ('Franck', 'Franck'): 2.3876142275231915}
    # Re-shape the flat (u, v) -> value table into the nested dict-of-dicts
    # form that communicability() returns, so lookups below line up.
    answer = defaultdict(dict)
    for (k1, k2), v in answer_orig.items():
        answer[k1][k2] = v
    G1 = nx.Graph([('Franck', 'Aric'), ('Aric', 'Dan'), ('Dan', 'Albert'),
                   ('Albert', 'Franck'), ('Dan', '1'), ('Franck', 'Albert')])
    result = communicability(G1)
    for k1, val in result.items():
        for k2 in val:
            assert_almost_equal(answer[k1][k2], result[k1][k2], places=7)
    # The matrix-exponential variant must produce the same values.
    result = communicability_exp(G1)
    for k1, val in result.items():
        for k2 in val:
            assert_almost_equal(answer[k1][k2], result[k1][k2], places=7)
def test_estrada_index(self):
    """Estrada index of Zachary's karate club graph matches the reference value."""
    expected = 1041.2470334195475
    assert_almost_equal(expected, estrada_index(nx.karate_club_graph()), places=7)
| mit |
javrasya/luigi | luigi/parameter.py | 1 | 31866 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
''' Parameters are one of the core concepts of Luigi.
All Parameters sit on :class:`~luigi.task.Task` classes.
See :ref:`Parameter` for more info on how to define parameters.
'''
import abc
import datetime
import warnings
import json
from json import JSONEncoder
from collections import OrderedDict, Mapping
import operator
import functools
from ast import literal_eval
try:
from ConfigParser import NoOptionError, NoSectionError
except ImportError:
from configparser import NoOptionError, NoSectionError
from luigi import task_register
from luigi import six
from luigi import configuration
from luigi.cmdline_parser import CmdlineParser
_no_value = object()
class ParameterException(Exception):
    """Root of the parameter exception hierarchy."""
class MissingParameterException(ParameterException):
    """Raised when a required Parameter could not be resolved to any value."""
class UnknownParameterException(ParameterException):
    """Raised when a Parameter that was never declared is supplied."""
class DuplicateParameterException(ParameterException):
    """Raised when the same Parameter is specified more than once."""
class Parameter(object):
    """
    An untyped Parameter

    Parameters are objects set on the Task class level to make it possible to parameterize tasks.
    For instance:

    .. code:: python

        class MyTask(luigi.Task):
            foo = luigi.Parameter()

        class RequiringTask(luigi.Task):
            def requires(self):
                return MyTask(foo="hello")

            def run(self):
                print(self.requires().foo)  # prints "hello"

    This makes it possible to instantiate multiple tasks, eg ``MyTask(foo='bar')`` and
    ``MyTask(foo='baz')``. The task will then have the ``foo`` attribute set appropriately.

    When a task is instantiated, it will first use any argument as the value of the parameter, eg.
    if you instantiate ``a = TaskA(x=44)`` then ``a.x == 44``. When the value is not provided, the
    value will be resolved in this order of falling priority:

    * Any value provided on the command line:

      - To the root task (eg. ``--param xyz``)

      - Then to the class, using the qualified task name syntax (eg. ``--TaskA-param xyz``).

    * With ``[TASK_NAME]>PARAM_NAME: <serialized value>`` syntax. See :ref:`ParamConfigIngestion`

    * Any default value set using the ``default`` flag.

    There are subclasses of ``Parameter`` that define what type the parameter has. This is not
    enforced within Python, but are used for command line interaction.

    Parameter objects may be reused, but you must then set the ``positional=False`` flag.
    """
    _counter = 0  # non-atomically increasing counter used for ordering parameters.

    def __init__(self, default=_no_value, is_global=False, significant=True, description=None,
                 config_path=None, positional=True, always_in_help=False):
        """
        :param default: the default value for this parameter. This should match the type of the
                        Parameter, i.e. ``datetime.date`` for ``DateParameter`` or ``int`` for
                        ``IntParameter``. By default, no default is stored and
                        the value must be specified at runtime.
        :param bool is_global: DEPRECATED. Accepted for backward compatibility; only
                               forces ``positional=False`` with a deprecation warning.
        :param bool significant: specify ``False`` if the parameter should not be treated as part of
                                 the unique identifier for a Task. An insignificant Parameter might
                                 also be used to specify a password or other sensitive information
                                 that should not be made public via the scheduler. Default:
                                 ``True``.
        :param str description: A human-readable string describing the purpose of this Parameter.
                                For command-line invocations, this will be used as the `help` string
                                shown to users. Default: ``None``.
        :param dict config_path: a dictionary with entries ``section`` and ``name``
                                 specifying a config file entry from which to read the
                                 default value for this parameter. DEPRECATED.
                                 Default: ``None``.
        :param bool positional: If true, you can set the argument as a
                                positional argument. It's true by default but we recommend
                                ``positional=False`` for abstract base classes and similar cases.
        :param bool always_in_help: For the --help option in the command line
                                    parsing. Set true to always show in --help.
        """
        self._default = default
        if is_global:
            warnings.warn("is_global support is removed. Assuming positional=False",
                          DeprecationWarning,
                          stacklevel=2)
            positional = False
        self.significant = significant  # Whether different values for this parameter will differentiate otherwise equal tasks
        self.positional = positional

        self.description = description
        self.always_in_help = always_in_help

        if config_path is not None and ('section' not in config_path or 'name' not in config_path):
            raise ParameterException('config_path must be a hash containing entries for section and name')
        self._config_path = config_path

        self._counter = Parameter._counter  # We need to keep track of this to get the order right (see Task class)
        Parameter._counter += 1

    def _get_value_from_config(self, section, name):
        """Loads the default from the config. Returns _no_value if it doesn't exist"""
        conf = configuration.get_config()

        try:
            value = conf.get(section, name)
        except (NoSectionError, NoOptionError):
            return _no_value

        return self.parse(value)

    def _get_value(self, task_name, param_name):
        """Return the first resolvable value for this parameter, or ``_no_value``."""
        # NOTE: _no_value is a sentinel object, so it must be compared by
        # identity. Using ==/!= would invoke the candidate value's (possibly
        # overloaded) __eq__ -- e.g. a numpy array returns an array from ==,
        # which is not usable in a boolean context.
        for value, warn in self._value_iterator(task_name, param_name):
            if value is not _no_value:
                if warn:
                    warnings.warn(warn, DeprecationWarning)
                return value
        return _no_value

    def _value_iterator(self, task_name, param_name):
        """
        Yield the parameter values, with optional deprecation warning as second tuple value.

        The parameter value will be whatever non-_no_value that is yielded first.
        """
        cp_parser = CmdlineParser.get_instance()
        if cp_parser:
            dest = self._parser_global_dest(param_name, task_name)
            found = getattr(cp_parser.known_args, dest, None)
            yield (self._parse_or_no_value(found), None)
        yield (self._get_value_from_config(task_name, param_name), None)
        # Dashed names are accepted for backward compatibility but deprecated.
        yield (self._get_value_from_config(task_name, param_name.replace('_', '-')),
               'Configuration [{}] {} (with dashes) should be avoided. Please use underscores.'.format(
                   task_name, param_name))
        if self._config_path:
            yield (self._get_value_from_config(self._config_path['section'], self._config_path['name']),
                   'The use of the configuration [{}] {} is deprecated. Please use [{}] {}'.format(
                       self._config_path['section'], self._config_path['name'], task_name, param_name))
        yield (self._default, None)

    def has_task_value(self, task_name, param_name):
        """Return True if a value for this parameter can be resolved for the given task."""
        return self._get_value(task_name, param_name) is not _no_value

    def task_value(self, task_name, param_name):
        """Resolve and normalize the value for the given task.

        :raises MissingParameterException: if no value could be resolved.
        """
        value = self._get_value(task_name, param_name)
        if value is _no_value:
            raise MissingParameterException("No default specified")
        else:
            return self.normalize(value)

    def parse(self, x):
        """
        Parse an individual value from the input.

        The default implementation is the identity function, but subclasses should override
        this method for specialized parsing.

        :param str x: the value to parse.
        :return: the parsed value.
        """
        return x  # default impl

    def serialize(self, x):
        """
        Opposite of :py:meth:`parse`.

        Converts the value ``x`` to a string.

        :param x: the value to serialize.
        """
        if not isinstance(x, six.string_types) and self.__class__ == Parameter:
            warnings.warn("Parameter {0} is not of type string.".format(str(x)))
        return str(x)

    def normalize(self, x):
        """
        Given a parsed parameter value, normalizes it.

        The value can either be the result of parse(), the default value or
        arguments passed into the task's constructor by instantiation.

        This is very implementation defined, but can be used to validate/clamp
        valid values. For example, if you wanted to only accept even integers,
        and "correct" odd values to the nearest integer, you can implement
        normalize as ``x // 2 * 2``.
        """
        return x  # default impl

    def next_in_enumeration(self, _value):
        """
        If your Parameter type has an enumerable ordering of values. You can
        choose to override this method. This method is used by the
        :py:mod:`luigi.execution_summary` module for pretty printing
        purposes. Enabling it to pretty print tasks like ``MyTask(num=1),
        MyTask(num=2), MyTask(num=3)`` to ``MyTask(num=1..3)``.

        :param value: The value
        :return: The next value, like "value + 1". Or ``None`` if there's no enumerable ordering.
        """
        return None

    def _parse_or_no_value(self, x):
        # Falsy command-line values (None, empty string) are treated as "not
        # provided" so resolution falls through to config/default.
        if not x:
            return _no_value
        else:
            return self.parse(x)

    @staticmethod
    def _parser_global_dest(param_name, task_name):
        """Argparse destination name for this parameter's fully-qualified flag."""
        return task_name + '_' + param_name

    @staticmethod
    def _parser_action():
        """Argparse action used for this parameter type."""
        return "store"
_UNIX_EPOCH = datetime.datetime.utcfromtimestamp(0)
class _DateParameterBase(Parameter):
    """
    Base class Parameter for date (not datetime).

    Subclasses supply :py:attr:`date_format`; ``interval`` and ``start``
    control how values are clamped/enumerated by the concrete subclasses.
    """

    def __init__(self, interval=1, start=None, **kwargs):
        super(_DateParameterBase, self).__init__(**kwargs)
        self.interval = interval
        # Default the clamping origin to the Unix epoch date.
        self.start = start if start is not None else _UNIX_EPOCH.date()

    @abc.abstractproperty
    def date_format(self):
        """
        Override me with a :py:meth:`~datetime.date.strftime` string.
        """
        pass

    def parse(self, s):
        """
        Parses a date string formatted like ``YYYY-MM-DD``.
        """
        return datetime.datetime.strptime(s, self.date_format).date()

    def serialize(self, dt):
        """
        Converts the date to a string using the :py:attr:`~_DateParameterBase.date_format`.
        """
        if dt is None:
            return str(dt)
        return dt.strftime(self.date_format)
class DateParameter(_DateParameterBase):
    """
    Parameter whose value is a :py:class:`~datetime.date`.

    A DateParameter is a Date string formatted ``YYYY-MM-DD``. For example,
    ``2013-07-10`` specifies July 10, 2013.

    Date parameters are most often interpolated into file-system paths, e.g.:

    .. code:: python

        class MyTask(luigi.Task):
            date = luigi.DateParameter()

            def run(self):
                templated_path = "/my/path/to/my/dataset/{date:%Y/%m/%d}/"
                instantiated_path = templated_path.format(date=self.date)
                # instantiated_path --> /my/path/to/my/dataset/2016/06/09/
    """

    date_format = '%Y-%m-%d'

    def next_in_enumeration(self, value):
        """Return the date one interval (in days) after ``value``."""
        step = datetime.timedelta(days=self.interval)
        return value + step

    def normalize(self, value):
        """Round ``value`` down to the nearest interval boundary from ``self.start``."""
        if value is None:
            return None
        if isinstance(value, datetime.datetime):
            value = value.date()  # drop the time-of-day component
        excess_days = (value - self.start).days % self.interval
        return value - datetime.timedelta(days=excess_days)
class MonthParameter(DateParameter):
    """
    Parameter whose value is a :py:class:`~datetime.date`, specified to the month
    (day of :py:class:`~datetime.date` is "rounded" to first of the month).

    A MonthParameter is a Date string formatted ``YYYY-MM``. For example, ``2013-07`` specifies
    July of 2013.
    """

    date_format = '%Y-%m'

    def _add_months(self, date, months):
        """
        Add ``months`` months to ``date``.

        Unfortunately we can't use timedeltas to add months because timedelta counts in days
        and there's no foolproof way to add N months in days without counting the number of
        days per month.
        """
        # Integer arithmetic on a zero-based month index; day is always 1.
        year = date.year + (date.month + months - 1) // 12
        month = (date.month + months - 1) % 12 + 1
        return datetime.date(year=year, month=month, day=1)

    def next_in_enumeration(self, value):
        # Step one interval (in months) forward.
        return self._add_months(value, self.interval)

    def normalize(self, value):
        """Round ``value`` down to the first of the nearest interval-aligned month."""
        if value is None:
            return None
        months_since_start = (value.year - self.start.year) * 12 + (value.month - self.start.month)
        months_since_start -= months_since_start % self.interval
        return self._add_months(self.start, months_since_start)
class YearParameter(DateParameter):
    """
    Parameter whose value is a :py:class:`~datetime.date`, specified to the year
    (day and month of :py:class:`~datetime.date` is "rounded" to first day of the year).

    A YearParameter is a Date string formatted ``YYYY``.
    """

    date_format = '%Y'

    def next_in_enumeration(self, value):
        """Return the same date in the year one interval later."""
        return value.replace(year=value.year + self.interval)

    def normalize(self, value):
        """Round ``value`` down to January 1st of the nearest interval-aligned year."""
        if value is None:
            return None
        years_past_boundary = (value.year - self.start.year) % self.interval
        return datetime.date(year=value.year - years_past_boundary, month=1, day=1)
class _DatetimeParameterBase(Parameter):
    """
    Base class Parameter for datetime

    Subclasses supply :py:attr:`date_format` and :py:attr:`_timedelta`;
    ``interval`` and ``start`` control the clamping in :py:meth:`normalize`.
    """

    def __init__(self, interval=1, start=None, **kwargs):
        super(_DatetimeParameterBase, self).__init__(**kwargs)
        self.interval = interval
        # Default the clamping origin to the Unix epoch.
        self.start = start if start is not None else _UNIX_EPOCH

    @abc.abstractproperty
    def date_format(self):
        """
        Override me with a :py:meth:`~datetime.date.strftime` string.
        """
        pass

    @abc.abstractproperty
    def _timedelta(self):
        """
        How to move one interval of this type forward (i.e. not counting self.interval).
        """
        pass

    def parse(self, s):
        """
        Parses a string to a :py:class:`~datetime.datetime`.
        """
        return datetime.datetime.strptime(s, self.date_format)

    def serialize(self, dt):
        """
        Converts the date to a string using the :py:attr:`~_DatetimeParameterBase.date_format`.
        """
        if dt is None:
            return str(dt)
        return dt.strftime(self.date_format)

    def normalize(self, dt):
        """
        Clamp dt to every Nth :py:attr:`~_DatetimeParameterBase.interval` starting at
        :py:attr:`~_DatetimeParameterBase.start`.
        """
        if dt is None:
            return None

        dt = dt.replace(microsecond=0)  # remove microseconds, to avoid float rounding issues.
        # Subtract the remainder of the elapsed seconds modulo one granule.
        delta = (dt - self.start).total_seconds()
        granularity = (self._timedelta * self.interval).total_seconds()
        return dt - datetime.timedelta(seconds=delta % granularity)

    def next_in_enumeration(self, value):
        # Step one interval forward using the subclass-defined unit.
        return value + self._timedelta * self.interval
class DateHourParameter(_DatetimeParameterBase):
    """
    Parameter whose value is a :py:class:`~datetime.datetime` specified to the hour.

    A DateHourParameter is a `ISO 8601 <http://en.wikipedia.org/wiki/ISO_8601>`_ formatted
    date and time specified to the hour. For example, ``2013-07-10T19`` specifies July 10, 2013 at
    19:00.
    """

    date_format = '%Y-%m-%dT%H'  # ISO 8601 is to use 'T'
    # One enumeration step is one hour (scaled by ``interval`` in the base class).
    _timedelta = datetime.timedelta(hours=1)
class DateMinuteParameter(_DatetimeParameterBase):
    """
    Parameter whose value is a :py:class:`~datetime.datetime` specified to the minute.

    A DateMinuteParameter is a `ISO 8601 <http://en.wikipedia.org/wiki/ISO_8601>`_ formatted
    date and time specified to the minute. For example, ``2013-07-10T1907`` specifies July 10, 2013 at
    19:07.

    The interval parameter can be used to clamp this parameter to every N minutes, instead of every minute.
    """

    date_format = '%Y-%m-%dT%H%M'
    _timedelta = datetime.timedelta(minutes=1)
    deprecated_date_format = '%Y-%m-%dT%HH%M'

    def parse(self, s):
        """Parse ``s``, still accepting the deprecated 'H'-separated form with a warning."""
        try:
            parsed = datetime.datetime.strptime(s, self.deprecated_date_format)
        except ValueError:
            # Not in the deprecated form; fall back to the standard format.
            return super(DateMinuteParameter, self).parse(s)
        warnings.warn(
            'Using "H" between hours and minutes is deprecated, omit it instead.',
            DeprecationWarning,
            stacklevel=2
        )
        return parsed
class DateSecondParameter(_DatetimeParameterBase):
    """
    Parameter whose value is a :py:class:`~datetime.datetime` specified to the second.

    A DateSecondParameter is a `ISO 8601 <http://en.wikipedia.org/wiki/ISO_8601>`_ formatted
    date and time specified to the second. For example, ``2013-07-10T190738`` specifies July 10, 2013 at
    19:07:38.

    The interval parameter can be used to clamp this parameter to every N seconds, instead of every second.
    """

    date_format = '%Y-%m-%dT%H%M%S'
    # One enumeration step is one second (scaled by ``interval`` in the base class).
    _timedelta = datetime.timedelta(seconds=1)
class IntParameter(Parameter):
    """
    Parameter whose value is an ``int``.
    """

    def parse(self, s):
        """Convert the command-line string ``s`` to an ``int`` via ``int()``."""
        return int(s)

    def next_in_enumeration(self, value):
        """Integers enumerate by successor."""
        return value + 1
class FloatParameter(Parameter):
    """
    Parameter whose value is a ``float``.
    """

    def parse(self, s):
        """Convert the command-line string ``s`` to a ``float`` via ``float()``."""
        return float(s)
class BoolParameter(Parameter):
    """
    A Parameter whose value is a ``bool``. This parameter has an implicit
    default value of ``False``.
    """

    def __init__(self, *args, **kwargs):
        """Set the implicit ``False`` default when no default was supplied."""
        super(BoolParameter, self).__init__(*args, **kwargs)
        # _no_value is a sentinel object: compare by identity, not equality,
        # so a default whose type overloads __eq__ can never falsely match it.
        if self._default is _no_value:
            self._default = False

    def parse(self, s):
        """
        Parses a ``bool`` from the string, matching 'true' or 'false' ignoring case.

        :raises KeyError: if the string is neither 'true' nor 'false'.
        """
        return {'true': True, 'false': False}[str(s).lower()]

    def normalize(self, value):
        # Coerce anything truthy to True; preserve None as "no value".
        return bool(value) if value is not None else None

    @staticmethod
    def _parser_action():
        # Presence of the flag on the command line means True.
        return 'store_true'
class BooleanParameter(BoolParameter):
    """
    DEPRECATED. Use :py:class:`~BoolParameter`
    """

    def __init__(self, *args, **kwargs):
        """Warn about the deprecated name, then delegate to BoolParameter."""
        message = 'BooleanParameter is deprecated, use BoolParameter instead'
        warnings.warn(message, DeprecationWarning, stacklevel=2)
        super(BooleanParameter, self).__init__(*args, **kwargs)
class DateIntervalParameter(Parameter):
    """
    A Parameter whose value is a :py:class:`~luigi.date_interval.DateInterval`.

    Date Intervals are specified using the ISO 8601 date notation for dates
    (eg. "2015-11-04"), months (eg. "2015-05"), years (eg. "2015"), or weeks
    (eg. "2015-W35"). In addition, it also supports arbitrary date intervals
    provided as two dates separated with a dash (eg. "2015-11-04-2015-12-04").
    """

    def parse(self, s):
        """
        Parses a :py:class:`~luigi.date_interval.DateInterval` from the input.

        see :py:mod:`luigi.date_interval`
          for details on the parsing of DateIntervals.

        :raises ValueError: if no interval class accepts the string.
        """
        # TODO: can we use xml.utils.iso8601 or something similar?

        from luigi import date_interval as d

        # Try each interval notation in turn; the first class whose parse()
        # returns a truthy result wins.
        for cls in [d.Year, d.Month, d.Week, d.Date, d.Custom]:
            i = cls.parse(s)
            if i:
                return i

        raise ValueError('Invalid date interval - could not be parsed')
class TimeDeltaParameter(Parameter):
    """
    Class that maps to timedelta using strings in any of the following forms:

    * ``n {w[eek[s]]|d[ay[s]]|h[our[s]]|m[inute[s]|s[second[s]]}`` (e.g. "1 week 2 days" or "1 h")
      Note: multiple arguments must be supplied in longest to shortest unit order

    * ISO 8601 duration ``PnDTnHnMnS`` (each field optional, years and months not supported)

    * ISO 8601 duration ``PnW``

    See https://en.wikipedia.org/wiki/ISO_8601#Durations
    """

    def _apply_regex(self, regex, input):
        """Match ``regex`` against ``input``; return a timedelta built from the
        named groups, or None if nothing matched or every field was zero."""
        import re
        re_match = re.match(regex, input)
        if re_match:
            kwargs = {}
            has_val = False
            # Missing groups default to "0"; the group names double as
            # datetime.timedelta keyword arguments.
            for k, v in six.iteritems(re_match.groupdict(default="0")):
                val = int(v)
                has_val = has_val or val != 0
                kwargs[k] = val
            # An all-zero match is treated as "no match" so the caller can
            # fall through to the next format.
            if has_val:
                return datetime.timedelta(**kwargs)

    def _parseIso8601(self, input):
        """Try to parse ``input`` as an ISO 8601 duration (PnW or PnDTnHnMnS)."""
        def field(key):
            # e.g. field("days") -> r"(?P<days>\d+)D"
            return r"(?P<%s>\d+)%s" % (key, key[0].upper())

        def optional_field(key):
            return "(%s)?" % field(key)
        # A little loose: ISO 8601 does not allow weeks in combination with other fields, but this regex does (as does python timedelta)
        regex = "P(%s|%s(T%s)?)" % (field("weeks"), optional_field("days"), "".join([optional_field(key) for key in ["hours", "minutes", "seconds"]]))
        return self._apply_regex(regex, input)

    def _parseSimple(self, input):
        """Try to parse ``input`` in the human form, e.g. "1 week 2 days" or "1 h"."""
        keys = ["weeks", "days", "hours", "minutes", "seconds"]
        # Give the digits a regex group name from the keys, then look for text with the first letter of the key,
        # optionally followed by the rest of the word, with final char (the "s") optional
        regex = "".join([r"((?P<%s>\d+) ?%s(%s)?(%s)? ?)?" % (k, k[0], k[1:-1], k[-1]) for k in keys])
        return self._apply_regex(regex, input)

    def parse(self, input):
        """
        Parses a time delta from the input.

        See :py:class:`TimeDeltaParameter` for details on supported formats.

        :raises ParameterException: if neither format matches.
        """
        # ISO 8601 first, then the human-readable form.
        result = self._parseIso8601(input)
        if not result:
            result = self._parseSimple(input)
        if result:
            return result
        else:
            raise ParameterException("Invalid time delta - could not parse %s" % input)
class TaskParameter(Parameter):
    """
    A parameter that takes another luigi task class.

    When used programmatically, the parameter should be specified
    directly with the :py:class:`luigi.task.Task` (sub) class. Like
    ``MyMetaTask(my_task_param=my_tasks.MyTask)``. On the command line,
    you specify the :py:attr:`luigi.task.Task.task_family`. Like

    .. code-block:: console

            $ luigi --module my_tasks MyMetaTask --my_task_param my_namespace.MyTask

    Where ``my_namespace.MyTask`` is defined in the ``my_tasks`` python module.

    When the :py:class:`luigi.task.Task` class is instantiated to an object.
    The value will always be a task class (and not a string).
    """

    def parse(self, input):
        """
        Parse a task_family using the :class:`~luigi.task_register.Register`
        """
        return task_register.Register.get_task_cls(input)

    def serialize(self, cls):
        """
        Converts the :py:class:`luigi.task.Task` (sub) class to its family name.
        """
        return cls.task_family
class EnumParameter(Parameter):
    """
    A parameter whose value is a member of an :class:`~enum.Enum`.

    In the task definition, use

    .. code-block:: python

        class Model(enum.Enum):
          Honda = 1
          Volvo = 2

        class MyTask(luigi.Task):
          my_param = luigi.EnumParameter(enum=Model)

    At the command line, use,

    .. code-block:: console

        $ luigi --module my_tasks MyTask --my-param Honda
    """

    def __init__(self, *args, **kwargs):
        """Require an ``enum`` keyword naming the Enum class to parse against."""
        if 'enum' not in kwargs:
            raise ParameterException('An enum class must be specified.')
        self._enum = kwargs.pop('enum')
        super(EnumParameter, self).__init__(*args, **kwargs)

    def parse(self, s):
        """Look up the enum member named ``s``; raise ValueError if unknown."""
        members = self._enum.__members__
        if s not in members:
            raise ValueError('Invalid enum value - could not be parsed')
        return members[s]

    def serialize(self, e):
        """Serialize an enum member as its name."""
        return e.name
class FrozenOrderedDict(Mapping):
    """
    An immutable wrapper around an ordered dictionary.

    Implements the complete :py:class:`collections.Mapping` interface and can
    be used as a drop-in replacement for dictionaries where immutability and
    ordering are desired.
    """

    def __init__(self, *args, **kwargs):
        self.__dict = OrderedDict(*args, **kwargs)
        self.__hash = None  # computed lazily on first __hash__ call

    def __getitem__(self, key):
        return self.__dict[key]

    def __iter__(self):
        return iter(self.__dict)

    def __len__(self):
        return len(self.__dict)

    def __repr__(self):
        return '<FrozenOrderedDict %s>' % repr(self.__dict)

    def __hash__(self):
        # Cache the hash: an XOR fold of the item hashes (seeded with 0).
        if self.__hash is None:
            acc = 0
            for item in self.items():
                acc ^= hash(item)
            self.__hash = acc
        return self.__hash

    def get_wrapped(self):
        """Return the underlying (mutable) OrderedDict."""
        return self.__dict
class DictParameter(Parameter):
    """
    Parameter whose value is a ``dict``.

    In the task definition, use

    .. code-block:: python

        class MyTask(luigi.Task):
          tags = luigi.DictParameter()

            def run(self):
                logging.info("Find server with role: %s", self.tags['role'])
                server = aws.ec2.find_my_resource(self.tags)

    At the command line, use

    .. code-block:: console

        $ luigi --module my_tasks MyTask --tags <JSON string>

    Simple example with two tags:

    .. code-block:: console

        $ luigi --module my_tasks MyTask --tags '{"role": "web", "env": "staging"}'

    It can be used to define dynamic parameters, when you do not know the exact list of your parameters (e.g. list of
    tags, that are dynamically constructed outside Luigi), or you have a complex parameter containing logically related
    values (like a database connection config).
    """

    class DictParamEncoder(JSONEncoder):
        """
        JSON encoder for :py:class:`~DictParameter`, which makes :py:class:`~FrozenOrderedDict` JSON serializable.
        """

        def default(self, obj):
            # Unwrap FrozenOrderedDict to its underlying OrderedDict; anything
            # else is delegated to the stock encoder (which raises TypeError).
            if isinstance(obj, FrozenOrderedDict):
                return obj.get_wrapped()
            return json.JSONEncoder.default(self, obj)

    def normalize(self, value):
        """
        Ensure that dictionary parameter is converted to a FrozenOrderedDict so it can be hashed.
        """
        return FrozenOrderedDict(value)

    def parse(self, s):
        """
        Parses an immutable and ordered ``dict`` from a JSON string using standard JSON library.

        We need to use an immutable dictionary, to create a hashable parameter and also preserve the internal structure
        of parsing. The traversal order of standard ``dict`` is undefined, which can result various string
        representations of this parameter, and therefore a different task id for the task containing this parameter.
        This is because task id contains the hash of parameters' JSON representation.

        :param s: String to be parse
        """
        # object_pairs_hook makes every parsed JSON object (at any depth) a
        # FrozenOrderedDict, preserving key order.
        return json.loads(s, object_pairs_hook=FrozenOrderedDict)

    def serialize(self, x):
        """Serialize the (possibly frozen) dict back to a JSON string."""
        return json.dumps(x, cls=DictParameter.DictParamEncoder)
class ListParameter(Parameter):
    """
    Parameter whose value is a ``list``.

    In the task definition, use

    .. code-block:: python

        class MyTask(luigi.Task):
          grades = luigi.ListParameter()

            def run(self):
                sum = 0
                for element in self.grades:
                    sum += element
                avg = sum / len(self.grades)

    At the command line, use

    .. code-block:: console

        $ luigi --module my_tasks MyTask --grades <JSON string>

    Simple example with two grades:

    .. code-block:: console

        $ luigi --module my_tasks MyTask --grades '[100,70]'
    """

    def normalize(self, x):
        """
        Ensure that the list parameter is converted to a tuple so it can be hashed.

        :param str x: the value to parse.
        :return: the normalized (hashable/immutable) value.
        """
        return tuple(x)

    def parse(self, x):
        """
        Parse a JSON array string into a ``list``.

        :param str x: the value to parse.
        :return: the parsed value.
        """
        return list(json.loads(x))

    def serialize(self, x):
        """
        Opposite of :py:meth:`parse`: convert the value ``x`` to a JSON string.

        :param x: the value to serialize.
        """
        return json.dumps(x)
class TupleParameter(Parameter):
    """
    Parameter whose value is a ``tuple`` or ``tuple`` of tuples.

    In the task definition, use

    .. code-block:: python

        class MyTask(luigi.Task):
          book_locations = luigi.TupleParameter()

            def run(self):
                for location in self.book_locations:
                    print("Go to page %d, line %d" % (location[0], location[1]))

    At the command line, use

    .. code-block:: console

        $ luigi --module my_tasks MyTask --book_locations <JSON string>

    Simple example with two grades:

    .. code-block:: console

        $ luigi --module my_tasks MyTask --book_locations '((12,3),(4,15),(52,1))'
    """

    def parse(self, x):
        """
        Parse an individual value from the input.

        :param str x: the value to parse.
        :return: the parsed value.
        """
        # Since the result of json.dumps(tuple) differs from a tuple string, we must handle either case.
        # A tuple string may come from a config file or from cli execution.
        # t = ((1, 2), (3, 4))
        # t_str = '((1,2),(3,4))'
        # t_json_str = json.dumps(t)
        # t_json_str == '[[1, 2], [3, 4]]'
        # json.loads(t_json_str) == t
        # json.loads(t_str) == ValueError: No JSON object could be decoded
        #
        # TypeError is also caught: a *flat* JSON array such as '[1, 2]'
        # parses fine, but the inner tuple() call then raises TypeError
        # ('int' object is not iterable).  Falling back to literal_eval gives
        # such inputs a chance to be interpreted instead of crashing with a
        # confusing error.
        try:
            return tuple(tuple(x) for x in json.loads(x))  # loop required to parse tuple of tuples
        except (ValueError, TypeError):
            return literal_eval(x)  # if this causes an error, let that error be raised.

    def serialize(self, x):
        """
        Opposite of :py:meth:`parse`.

        Converts the value ``x`` to a string.

        :param x: the value to serialize.
        """
        return json.dumps(x)
| apache-2.0 |
dmeadows013/furry-hipster | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
    # Called once by perf before any events are processed.
    print "Press control+C to stop and show the summary"
def trace_end():
    # Called once by perf after the last event; emit the aggregated table.
    print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, args):
    # Per-event hook for the raw_syscalls:sys_enter tracepoint.
    # Skip events that do not match the optional [comm]/[pid] filter.
    if (for_comm and common_comm != for_comm) or \
       (for_pid and common_pid != for_pid ):
        return
    # The autodict creates nested levels on demand; the first increment of a
    # brand-new leaf raises TypeError, which seeds the counter at 1 instead.
    try:
        syscalls[common_comm][common_pid][id] += 1
    except TypeError:
        syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
    # Emit the aggregated counts: a single header when filtered to one comm,
    # otherwise grouped by comm and pid.
    if for_comm is not None:
        print "\nsyscall events for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall events by comm/pid:\n\n",
    print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
        "----------"),
    comm_keys = syscalls.keys()
    for comm in comm_keys:
        pid_keys = syscalls[comm].keys()
        for pid in pid_keys:
            print "\n%s [%d]\n" % (comm, pid),
            id_keys = syscalls[comm][pid].keys()
            # Sort each pid's syscalls by descending count (count, id).
            for id, val in sorted(syscalls[comm][pid].iteritems(), \
                key = lambda(k, v): (v, k), reverse = True):
                print " %-38s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
googleapis/python-test-utils | test_utils/vpcsc_config.py | 10 | 4173 | # -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
INSIDE_VPCSC_ENVVAR = "GOOGLE_CLOUD_TESTS_IN_VPCSC"
PROJECT_INSIDE_ENVVAR = "PROJECT_ID"
PROJECT_OUTSIDE_ENVVAR = "GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT"
BUCKET_OUTSIDE_ENVVAR = "GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_BUCKET"
class VPCSCTestConfig(object):
    """System test utility for VPCSC detection.

    See: https://cloud.google.com/vpc-service-controls/docs/
    """

    @property
    def inside_vpcsc(self):
        """Test whether the test environment is configured to run inside VPCSC.

        Returns:
            bool:
                true if the environment is configured to run inside VPCSC,
                else false.
        """
        return INSIDE_VPCSC_ENVVAR in os.environ

    @property
    def project_inside(self):
        """Project ID for testing outside access.

        Returns:
            str: project ID used for testing outside access; None if undefined.
        """
        return os.environ.get(PROJECT_INSIDE_ENVVAR, None)

    @property
    def project_outside(self):
        """Project ID for testing inside access.

        Returns:
            str: project ID used for testing inside access; None if undefined.
        """
        return os.environ.get(PROJECT_OUTSIDE_ENVVAR, None)

    @property
    def bucket_outside(self):
        """GCS bucket for testing inside access.

        Returns:
            str: bucket ID used for testing inside access; None if undefined.
        """
        return os.environ.get(BUCKET_OUTSIDE_ENVVAR, None)

    def _skip_if(self, condition, reason, testcase):
        """Apply ``pytest.mark.skipif(condition, reason=reason)`` to *testcase*.

        Shared implementation for all the skip_* decorators below.
        """
        return pytest.mark.skipif(condition, reason=reason)(testcase)

    def skip_if_inside_vpcsc(self, testcase):
        """Test decorator: skip if running inside VPCSC."""
        reason = (
            "Running inside VPCSC. "
            "Unset the {} environment variable to enable this test."
        ).format(INSIDE_VPCSC_ENVVAR)
        return self._skip_if(self.inside_vpcsc, reason, testcase)

    def skip_unless_inside_vpcsc(self, testcase):
        """Test decorator: skip if running outside VPCSC."""
        reason = (
            "Running outside VPCSC. "
            "Set the {} environment variable to enable this test."
        ).format(INSIDE_VPCSC_ENVVAR)
        return self._skip_if(not self.inside_vpcsc, reason, testcase)

    def skip_unless_inside_project(self, testcase):
        """Test decorator: skip if inside project env var not set."""
        reason = (
            "Project ID for running inside VPCSC not set. "
            "Set the {} environment variable to enable this test."
        ).format(PROJECT_INSIDE_ENVVAR)
        return self._skip_if(self.project_inside is None, reason, testcase)

    def skip_unless_outside_project(self, testcase):
        """Test decorator: skip if outside project env var not set."""
        reason = (
            "Project ID for running outside VPCSC not set. "
            "Set the {} environment variable to enable this test."
        ).format(PROJECT_OUTSIDE_ENVVAR)
        return self._skip_if(self.project_outside is None, reason, testcase)

    def skip_unless_outside_bucket(self, testcase):
        """Test decorator: skip if outside bucket env var not set."""
        reason = (
            "Bucket ID for running outside VPCSC not set. "
            "Set the {} environment variable to enable this test."
        ).format(BUCKET_OUTSIDE_ENVVAR)
        return self._skip_if(self.bucket_outside is None, reason, testcase)
vpcsc_config = VPCSCTestConfig()
| apache-2.0 |
graalvm/fastr | mx.fastr/suite.py | 1 | 13602 | suite = {
"mxversion" : "5.282.0",
"name" : "fastr",
"versionConflictResolution" : "latest",
"imports" : {
"suites" : [
{
"name" : "truffle",
"subdir" : True,
# The version must be the same as the version of Sulong
# TRUFFLE REVISION (note: this is a marker for script that can update this)
"version" : "bb6a38440f0315ceb0e2f052c69d5b97d00c8798",
"urls" : [
{"url" : "https://github.com/graalvm/graal", "kind" : "git"},
{"url" : "https://curio.ssw.jku.at/nexus/content/repositories/snapshots", "kind" : "binary"},
]
},
{
"name" : "sulong",
"subdir" : True,
# The version must be the same as the version of Truffle
# TRUFFLE REVISION (note: this is a marker for script that can update this)
"version" : "bb6a38440f0315ceb0e2f052c69d5b97d00c8798",
"urls" : [
{"url" : "https://github.com/graalvm/graal", "kind" : "git"},
{"url" : "https://curio.ssw.jku.at/nexus/content/repositories/snapshots", "kind" : "binary"},
]
},
],
},
"repositories" : {
"snapshots" : {
"url" : "https://curio.ssw.jku.at/nexus/content/repositories/snapshots",
"licenses" : ["GPLv3"]
}
},
"licenses" : {
"GPLv3" : {
"name" : "GNU General Public License, version 3",
"url" : "https://www.gnu.org/licenses/gpl-3.0.html"
},
},
"defaultLicense" : "GPLv3",
# libraries that we depend on
"libraries" : {
"GNUR" : {
"path" : "libdownloads/R-4.0.3.tar.gz", # keep in sync with the GraalVM support distribution
"urls" : ["https://cran.rstudio.com/src/base/R-4/R-4.0.3.tar.gz"],
"sha1" : "5daba2d63e07a9f39d9b69b68f0642d71213ec5c",
"resource" : "true"
},
"F2C" : {
"path" : "libdownloads/f2c/src.tgz",
"urls" : ["https://lafo.ssw.uni-linz.ac.at/pub/graal-external-deps/f2c/20191129/src.tgz"],
"sha1" : "8a26107bf9f82a2dcfa597f15549a412be75e0ee",
"resource" : "true"
},
"LIBF2C" : {
"path" : "libdownloads/f2c/libf2c.zip",
"urls" : ["https://lafo.ssw.uni-linz.ac.at/pub/graal-external-deps/f2c/20191129/libf2c.zip"],
"sha1" : "e39a00f425f8fc41dde434686080a94e94884f30",
"resource" : "true"
},
"XZ-1.8" : {
"sha1" : "c4f7d054303948eb6a4066194253886c8af07128",
"maven" : {
"groupId" : "org.tukaani",
"artifactId" : "xz",
"version" : "1.8",
},
},
"BATIK-ALL-1.14" : {
"sha1" : "a8d228e4ae2c21efb833fdfcdfe5446fa672974a",
"maven" : {
"groupId" : "org.apache.xmlgraphics",
"artifactId" : "batik-all",
"version" : "1.14",
},
},
},
"projects" : {
"com.oracle.truffle.r.parser" : {
"sourceDirs" : ["src"],
"dependencies" : [
"com.oracle.truffle.r.runtime",
"truffle:ANTLR4",
],
"checkstyle" : "com.oracle.truffle.r.runtime",
"javaCompliance" : "8+",
"spotbugsIgnoresGenerated" : True,
"workingSets" : "Truffle,FastR",
"jacoco" : "include",
},
"com.oracle.truffle.r.nodes" : {
"sourceDirs" : ["src"],
"dependencies" : [
"com.oracle.truffle.r.runtime",
],
"checkstyle" : "com.oracle.truffle.r.runtime",
"javaCompliance" : "8+",
"annotationProcessors" : [
"truffle:TRUFFLE_DSL_PROCESSOR",
],
"spotbugsIgnoresGenerated" : True,
"workingSets" : "Truffle,FastR",
"jacoco" : "include",
},
"com.oracle.truffle.r.nodes.builtin" : {
"sourceDirs" : ["src"],
"dependencies" : [
"com.oracle.truffle.r.library",
"sulong:SULONG_API",
],
"checkstyle" : "com.oracle.truffle.r.runtime",
"javaCompliance" : "8+",
"annotationProcessors" : [
"truffle:TRUFFLE_DSL_PROCESSOR",
],
"spotbugsIgnoresGenerated" : True,
"workingSets" : "Truffle,FastR",
"jacoco" : "include",
},
"com.oracle.truffle.r.nodes.test" : {
"sourceDirs" : ["src"],
"dependencies" : [
"com.oracle.truffle.r.test",
],
"checkstyle" : "com.oracle.truffle.r.runtime",
"javaCompliance" : "8+",
"workingSets" : "Truffle,FastR,Test",
"jacoco" : "include",
"spotbugsIgnoresGenerated" : True,
},
"com.oracle.truffle.r.test" : {
"sourceDirs" : ["src"],
"dependencies" : [
"mx:JUNIT",
"truffle:TRUFFLE_TCK",
"com.oracle.truffle.r.engine",
],
"annotationProcessors" : [
"truffle:TRUFFLE_DSL_PROCESSOR",
],
"checkstyle" : "com.oracle.truffle.r.runtime",
"javaCompliance" : "8+",
"workingSets" : "Truffle,FastR,Test",
"jacoco" : "include",
"spotbugsIgnoresGenerated" : True,
},
"com.oracle.truffle.r.test.native" : {
"native" : True,
"sourceDirs" : [],
"dependencies" : ["com.oracle.truffle.r.native"],
"platformDependent" : True,
"output" : "com.oracle.truffle.r.test.native",
"buildEnv" : {
"LABS_LLVM_CC": "<toolchainGetToolPath:native,CC>",
"LABS_LLVM_CXX": "<toolchainGetToolPath:native,CXX>",
},
"results" :[
"urand/lib/liburand.so",
],
"workingSets" : "FastR",
"spotbugsIgnoresGenerated" : True,
},
"com.oracle.truffle.r.test.packages" : {
"sourceDirs" : ["r"],
"javaCompliance" : "8+",
"workingSets" : "FastR",
},
"com.oracle.truffle.r.test.packages.analyzer" : {
"sourceDirs" : ["src"],
"dependencies" : [
"mx:JUNIT"
],
"checkstyle" : "com.oracle.truffle.r.runtime",
"javaCompliance" : "8+",
"workingSets" : "FastR",
},
"com.oracle.truffle.r.engine" : {
"sourceDirs" : ["src"],
"dependencies" : [
"com.oracle.truffle.r.nodes.builtin",
"com.oracle.truffle.r.parser",
"sdk:JLINE3",
"truffle:TRUFFLE_NFI",
],
"generatedDependencies" : [
"com.oracle.truffle.r.parser",
],
"annotationProcessors" : [
"truffle:TRUFFLE_DSL_PROCESSOR",
],
"checkstyle" : "com.oracle.truffle.r.runtime",
"javaCompliance" : "8+",
"workingSets" : "Truffle,FastR",
"jacoco" : "include",
"spotbugsIgnoresGenerated" : True,
},
"com.oracle.truffle.r.runtime" : {
"sourceDirs" : ["src"],
"dependencies" : [
"com.oracle.truffle.r.launcher",
"truffle:TRUFFLE_API",
"sulong:SULONG_API",
"XZ-1.8",
],
"checkstyle" : "com.oracle.truffle.r.runtime",
"checkstyleVersion": "8.8",
"javaCompliance" : "8+",
"annotationProcessors" : [
"truffle:TRUFFLE_DSL_PROCESSOR",
],
"workingSets" : "Truffle,FastR",
"jacoco" : "include",
"spotbugsIgnoresGenerated" : True,
},
"com.oracle.truffle.r.launcher" : {
"sourceDirs" : ["src"],
"dependencies" : [
"sdk:GRAAL_SDK",
"sdk:LAUNCHER_COMMON",
"sdk:JLINE3",
],
"checkstyle" : "com.oracle.truffle.r.runtime",
"javaCompliance" : "8+",
"annotationProcessors" : [
],
"workingSets" : "Truffle,FastR",
"jacoco" : "include",
},
"com.oracle.truffle.r.ffi.impl" : {
"sourceDirs" : ["src"],
"dependencies" : [
"com.oracle.truffle.r.ffi.processor",
"com.oracle.truffle.r.nodes",
"org.rosuda.javaGD",
'BATIK-ALL-1.14',
],
"checkstyle" : "com.oracle.truffle.r.runtime",
"javaCompliance" : "8+",
"annotationProcessors" : [
"truffle:TRUFFLE_DSL_PROCESSOR",
"R_FFI_PROCESSOR",
],
"workingSets" : "Truffle,FastR",
"jacoco" : "include",
"spotbugsIgnoresGenerated" : True,
},
"com.oracle.truffle.r.ffi.codegen" : {
"sourceDirs" : ["src"],
"checkstyle" : "com.oracle.truffle.r.runtime",
"dependencies" : [
"com.oracle.truffle.r.ffi.impl"
],
"javaCompliance" : "8+",
"workingSets" : "FastR",
},
"com.oracle.truffle.r.ffi.processor" : {
"sourceDirs" : ["src"],
"checkstyle" : "com.oracle.truffle.r.runtime",
"javaCompliance" : "8+",
"workingSets" : "FastR",
},
"com.oracle.truffle.r.native" : {
"sourceDirs" : [],
"dependencies" : [
"GNUR",
"F2C",
"LIBF2C",
"truffle:TRUFFLE_NFI_NATIVE",
"sulong:SULONG_BOOTSTRAP_TOOLCHAIN",
"sulong:SULONG_HOME",
"sulong:SULONG_LEGACY",
],
"native" : True,
"single_job" : True,
"workingSets" : "FastR",
"buildEnv" : {
"NFI_INCLUDES" : "-I<path:truffle:TRUFFLE_NFI_NATIVE>/include",
"LLVM_INCLUDES" : "-I<path:sulong:SULONG_LEGACY>/include -I<path:sulong:SULONG_HOME>/include",
"LLVM_LIBS_DIR" : "<path:sulong:SULONG_HOME>",
# If FASTR_RFFI=='llvm', then this is set as CC/CXX in c.o.t.r.native/Makefile
"LABS_LLVM_CC": "<toolchainGetToolPath:native,CC>",
"LABS_LLVM_CXX": "<toolchainGetToolPath:native,CXX>",
"GRAALVM_VERSION": "<graalvm_version>",
},
},
"com.oracle.truffle.r.library" : {
"sourceDirs" : ["src"],
"dependencies" : [
"com.oracle.truffle.r.ffi.impl",
],
"annotationProcessors" : [
"truffle:TRUFFLE_DSL_PROCESSOR",
],
"checkstyle" : "com.oracle.truffle.r.runtime",
"javaCompliance" : "8+",
"workingSets" : "FastR",
"jacoco" : "include",
"spotbugsIgnoresGenerated" : True,
},
"org.rosuda.javaGD" : {
"sourceDirs" : ["src"],
"dependencies" : [],
"checkstyle" : "org.rosuda.javaGD",
"javaCompliance" : "8+",
"workingSets" : "FastR",
"jacoco" : "include",
},
"com.oracle.truffle.r.release" : {
"sourceDirs" : ["src"],
"buildDependencies" : ["com.oracle.truffle.r.native.recommended"],
"class" : "FastRReleaseProject",
"output" : "com.oracle.truffle.r.release",
},
"com.oracle.truffle.r.native.recommended" : {
"dependencies" : [
"com.oracle.truffle.r.native",
"com.oracle.truffle.r.engine",
"com.oracle.truffle.r.ffi.impl",
"com.oracle.truffle.r.launcher"
],
"max_jobs" : "8",
"native" : True,
"vpath": True,
"workingSets" : "FastR",
"buildDependencies" : ["FASTR"],
},
"com.oracle.truffle.r.test.tck" : {
"sourceDirs" : ["src"],
"dependencies" : [
"mx:JUNIT",
"sdk:POLYGLOT_TCK",
],
"checkstyle" : "com.oracle.truffle.r.runtime",
"javaCompliance" : "8+",
"workingSets" : "FastR,Test",
"spotbugsIgnoresGenerated" : True,
},
},
"distributions" : {
"R_FFI_PROCESSOR" : {
"description" : "internal support for generating FFI classes",
"dependencies" : ["com.oracle.truffle.r.ffi.processor"],
"maven" : "False",
# FASTR and R_FFI_PROCESSOR share the actual annotations
# This could be refactored so that we have one project with just the annotations and FASTR would depend only on that
"overlaps": ["FASTR"],
},
"FASTR_LAUNCHER" : {
"description" : "launcher for the GraalVM (at the moment used only when native image is installed)",
"dependencies" : ["com.oracle.truffle.r.launcher"],
"distDependencies" : [
"sdk:GRAAL_SDK"
],
# FASTR and FASTR_LAUNCHER share one common helper class RCmdOptions
# This could be refactored in the future
"overlaps": ["FASTR"],
},
"FASTR" : {
"description" : "class files for compiling against FastR in a separate suite",
"dependencies" : [
"com.oracle.truffle.r.engine",
"com.oracle.truffle.r.launcher",
"com.oracle.truffle.r.ffi.impl"
],
"mainClass" : "com.oracle.truffle.r.launcher.RCommand",
"exclude" : [
"sdk:JLINE3",
"truffle:ANTLR4",
"GNUR",
"XZ-1.8",
],
"distDependencies" : [
"truffle:TRUFFLE_API",
"truffle:TRUFFLE_NFI",
"truffle:TRUFFLE_NFI_NATIVE",
"sulong:SULONG_API",
],
# TODO: is this intentional that we embed things from LAUNCHER_COMMON?
"overlaps": ["sdk:LAUNCHER_COMMON"],
},
"FASTR_UNIT_TESTS" : {
"description" : "unit tests",
"dependencies" : [
"com.oracle.truffle.r.test",
"com.oracle.truffle.r.nodes.test"
],
"exclude": ["mx:HAMCREST", "mx:JUNIT"],
"distDependencies" : [
"FASTR",
"truffle:TRUFFLE_API",
"truffle:TRUFFLE_TCK",
],
},
"FASTR_UNIT_TESTS_NATIVE" : {
"description" : "unit tests support (from test.native project)",
"native" : True,
"platformDependent" : True,
"dependencies" : [
"com.oracle.truffle.r.test.native",
],
},
"TRUFFLE_R_TCK" : {
"description" : "TCK tests provider",
"dependencies" : [
"com.oracle.truffle.r.test.tck"
],
"exclude" : [
"mx:JUNIT",
],
"distDependencies" : [
"sdk:POLYGLOT_TCK",
],
"maven" : False
},
# see mx_fastr_dists.mx_register_dynamic_suite_constituents for the definitions of some RFFI-dependent distributions
},
}
| gpl-2.0 |
xfournet/intellij-community | python/helpers/pydev/pydevd_attach_to_process/winappdbg/win32/wtsapi32.py | 102 | 11164 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2014, Mario Vilas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Wrapper for wtsapi32.dll in ctypes.
"""
__revision__ = "$Id$"
from winappdbg.win32.defines import *
from winappdbg.win32.advapi32 import *
#==============================================================================
# This is used later on to calculate the list of exported symbols.
# Snapshot the names defined so far ("_all" itself is assigned first so it is
# part of the snapshot and therefore excluded from the export list).  A second
# snapshot at the bottom of the module is diffed against this one to compute
# __all__ automatically.
_all = None
_all = set(vars().keys())
#==============================================================================
#--- Constants ----------------------------------------------------------------
# Pseudo-handle meaning "the terminal server the calling process runs on".
WTS_CURRENT_SERVER_HANDLE = 0
# NOTE(review): the Windows SDK defines WTS_CURRENT_SESSION as ((DWORD)-1);
# the value 1 here looks suspicious -- confirm against upstream winappdbg.
WTS_CURRENT_SESSION = 1
#--- WTS_PROCESS_INFO structure -----------------------------------------------
# typedef struct _WTS_PROCESS_INFO {
# DWORD SessionId;
# DWORD ProcessId;
# LPTSTR pProcessName;
# PSID pUserSid;
# } WTS_PROCESS_INFO, *PWTS_PROCESS_INFO;
# ctypes mirror of the WTS_PROCESS_INFO structure (ANSI variant); see the C
# typedef reproduced in the comment above.
class WTS_PROCESS_INFOA(Structure):
    _fields_ = [
        ("SessionId", DWORD),
        ("ProcessId", DWORD),
        ("pProcessName", LPSTR),
        ("pUserSid", PSID),
    ]
# Pointer type used as the output parameter of WTSEnumerateProcessesA.
PWTS_PROCESS_INFOA = POINTER(WTS_PROCESS_INFOA)
# Unicode variant of the same structure (wide-character process name).
class WTS_PROCESS_INFOW(Structure):
    _fields_ = [
        ("SessionId", DWORD),
        ("ProcessId", DWORD),
        ("pProcessName", LPWSTR),
        ("pUserSid", PSID),
    ]
# Pointer type used as the output parameter of WTSEnumerateProcessesW.
PWTS_PROCESS_INFOW = POINTER(WTS_PROCESS_INFOW)
#--- WTSQuerySessionInformation enums and structures --------------------------
# typedef enum _WTS_INFO_CLASS {
# WTSInitialProgram = 0,
# WTSApplicationName = 1,
# WTSWorkingDirectory = 2,
# WTSOEMId = 3,
# WTSSessionId = 4,
# WTSUserName = 5,
# WTSWinStationName = 6,
# WTSDomainName = 7,
# WTSConnectState = 8,
# WTSClientBuildNumber = 9,
# WTSClientName = 10,
# WTSClientDirectory = 11,
# WTSClientProductId = 12,
# WTSClientHardwareId = 13,
# WTSClientAddress = 14,
# WTSClientDisplay = 15,
# WTSClientProtocolType = 16,
# WTSIdleTime = 17,
# WTSLogonTime = 18,
# WTSIncomingBytes = 19,
# WTSOutgoingBytes = 20,
# WTSIncomingFrames = 21,
# WTSOutgoingFrames = 22,
# WTSClientInfo = 23,
# WTSSessionInfo = 24,
# WTSSessionInfoEx = 25,
# WTSConfigInfo = 26,
# WTSValidationInfo = 27,
# WTSSessionAddressV4 = 28,
# WTSIsRemoteSession = 29
# } WTS_INFO_CLASS;
WTSInitialProgram = 0
WTSApplicationName = 1
WTSWorkingDirectory = 2
WTSOEMId = 3
WTSSessionId = 4
WTSUserName = 5
WTSWinStationName = 6
WTSDomainName = 7
WTSConnectState = 8
WTSClientBuildNumber = 9
WTSClientName = 10
WTSClientDirectory = 11
WTSClientProductId = 12
WTSClientHardwareId = 13
WTSClientAddress = 14
WTSClientDisplay = 15
WTSClientProtocolType = 16
WTSIdleTime = 17
WTSLogonTime = 18
WTSIncomingBytes = 19
WTSOutgoingBytes = 20
WTSIncomingFrames = 21
WTSOutgoingFrames = 22
WTSClientInfo = 23
WTSSessionInfo = 24
WTSSessionInfoEx = 25
WTSConfigInfo = 26
WTSValidationInfo = 27
WTSSessionAddressV4 = 28
WTSIsRemoteSession = 29
WTS_INFO_CLASS = ctypes.c_int
# typedef enum _WTS_CONNECTSTATE_CLASS {
# WTSActive,
# WTSConnected,
# WTSConnectQuery,
# WTSShadow,
# WTSDisconnected,
# WTSIdle,
# WTSListen,
# WTSReset,
# WTSDown,
# WTSInit
# } WTS_CONNECTSTATE_CLASS;
WTSActive = 0
WTSConnected = 1
WTSConnectQuery = 2
WTSShadow = 3
WTSDisconnected = 4
WTSIdle = 5
WTSListen = 6
WTSReset = 7
WTSDown = 8
WTSInit = 9
WTS_CONNECTSTATE_CLASS = ctypes.c_int
# typedef struct _WTS_CLIENT_DISPLAY {
# DWORD HorizontalResolution;
# DWORD VerticalResolution;
# DWORD ColorDepth;
# } WTS_CLIENT_DISPLAY, *PWTS_CLIENT_DISPLAY;
# ctypes mirror of WTS_CLIENT_DISPLAY (client display geometry), matching the
# C typedef reproduced in the comment above.
class WTS_CLIENT_DISPLAY(Structure):
    _fields_ = [
        ("HorizontalResolution", DWORD),
        ("VerticalResolution", DWORD),
        ("ColorDepth", DWORD),
    ]
# Pointer type for WTSQuerySessionInformation(WTSClientDisplay) results.
PWTS_CLIENT_DISPLAY = POINTER(WTS_CLIENT_DISPLAY)
# typedef struct _WTS_CLIENT_ADDRESS {
# DWORD AddressFamily;
# BYTE Address[20];
# } WTS_CLIENT_ADDRESS, *PWTS_CLIENT_ADDRESS;
# XXX TODO
# typedef struct _WTSCLIENT {
# WCHAR ClientName[CLIENTNAME_LENGTH + 1];
# WCHAR Domain[DOMAIN_LENGTH + 1 ];
# WCHAR UserName[USERNAME_LENGTH + 1];
# WCHAR WorkDirectory[MAX_PATH + 1];
# WCHAR InitialProgram[MAX_PATH + 1];
# BYTE EncryptionLevel;
# ULONG ClientAddressFamily;
# USHORT ClientAddress[CLIENTADDRESS_LENGTH + 1];
# USHORT HRes;
# USHORT VRes;
# USHORT ColorDepth;
# WCHAR ClientDirectory[MAX_PATH + 1];
# ULONG ClientBuildNumber;
# ULONG ClientHardwareId;
# USHORT ClientProductId;
# USHORT OutBufCountHost;
# USHORT OutBufCountClient;
# USHORT OutBufLength;
# WCHAR DeviceId[MAX_PATH + 1];
# } WTSCLIENT, *PWTSCLIENT;
# XXX TODO
# typedef struct _WTSINFO {
# WTS_CONNECTSTATE_CLASS State;
# DWORD SessionId;
# DWORD IncomingBytes;
# DWORD OutgoingBytes;
# DWORD IncomingCompressedBytes;
# DWORD OutgoingCompressedBytes;
# WCHAR WinStationName;
# WCHAR Domain;
# WCHAR UserName;
# LARGE_INTEGER ConnectTime;
# LARGE_INTEGER DisconnectTime;
# LARGE_INTEGER LastInputTime;
# LARGE_INTEGER LogonTime;
# LARGE_INTEGER CurrentTime;
# } WTSINFO, *PWTSINFO;
# XXX TODO
# typedef struct _WTSINFOEX {
# DWORD Level;
# WTSINFOEX_LEVEL Data;
# } WTSINFOEX, *PWTSINFOEX;
# XXX TODO
#--- wtsapi32.dll -------------------------------------------------------------
# void WTSFreeMemory(
# __in PVOID pMemory
# );
def WTSFreeMemory(pMemory):
    """Release a buffer allocated by a WTS* enumeration or query call.

    Wraps ``wtsapi32!WTSFreeMemory``.  ``pMemory`` must be a pointer
    previously returned by the API (e.g. by WTSEnumerateProcesses*).
    """
    _WTSFreeMemory = windll.wtsapi32.WTSFreeMemory
    _WTSFreeMemory.argtypes = [PVOID]
    _WTSFreeMemory.restype = None
    _WTSFreeMemory(pMemory)
# BOOL WTSEnumerateProcesses(
# __in HANDLE hServer,
# __in DWORD Reserved,
# __in DWORD Version,
# __out PWTS_PROCESS_INFO *ppProcessInfo,
# __out DWORD *pCount
# );
def WTSEnumerateProcessesA(hServer = WTS_CURRENT_SERVER_HANDLE):
    """List the processes running on a terminal server (ANSI variant).

    Returns a ``(pProcessInfo, count)`` pair, where ``pProcessInfo`` points
    at ``count`` WTS_PROCESS_INFOA entries.  The buffer is allocated by the
    API; release it with WTSFreeMemory() when done.
    """
    _WTSEnumerateProcessesA = windll.wtsapi32.WTSEnumerateProcessesA
    _WTSEnumerateProcessesA.argtypes = [HANDLE, DWORD, DWORD, POINTER(PWTS_PROCESS_INFOA), PDWORD]
    _WTSEnumerateProcessesA.restype = bool
    _WTSEnumerateProcessesA.errcheck = RaiseIfZero
    pProcessInfo = PWTS_PROCESS_INFOA()
    Count = DWORD(0)
    # The Reserved argument must be 0 and Version must be 1 (Win32 contract).
    _WTSEnumerateProcessesA(hServer, 0, 1, byref(pProcessInfo), byref(Count))
    return pProcessInfo, Count.value
def WTSEnumerateProcessesW(hServer = WTS_CURRENT_SERVER_HANDLE):
    """Unicode variant of WTSEnumerateProcessesA (wide process names)."""
    _WTSEnumerateProcessesW = windll.wtsapi32.WTSEnumerateProcessesW
    _WTSEnumerateProcessesW.argtypes = [HANDLE, DWORD, DWORD, POINTER(PWTS_PROCESS_INFOW), PDWORD]
    _WTSEnumerateProcessesW.restype = bool
    _WTSEnumerateProcessesW.errcheck = RaiseIfZero
    pProcessInfo = PWTS_PROCESS_INFOW()
    Count = DWORD(0)
    # The Reserved argument must be 0 and Version must be 1 (Win32 contract).
    _WTSEnumerateProcessesW(hServer, 0, 1, byref(pProcessInfo), byref(Count))
    return pProcessInfo, Count.value
# Dispatch to the ANSI or Unicode variant according to the library-wide
# default string type (DefaultStringType comes from winappdbg.win32.defines).
WTSEnumerateProcesses = DefaultStringType(WTSEnumerateProcessesA, WTSEnumerateProcessesW)
# BOOL WTSTerminateProcess(
# __in HANDLE hServer,
# __in DWORD ProcessId,
# __in DWORD ExitCode
# );
def WTSTerminateProcess(hServer, ProcessId, ExitCode):
    """Terminate process *ProcessId* on *hServer* with exit code *ExitCode*.

    Wraps ``wtsapi32!WTSTerminateProcess``; a zero (FALSE) return is turned
    into an exception by the RaiseIfZero errcheck.
    """
    _WTSTerminateProcess = windll.wtsapi32.WTSTerminateProcess
    _WTSTerminateProcess.argtypes = [HANDLE, DWORD, DWORD]
    _WTSTerminateProcess.restype = bool
    _WTSTerminateProcess.errcheck = RaiseIfZero
    _WTSTerminateProcess(hServer, ProcessId, ExitCode)
# BOOL WTSQuerySessionInformation(
# __in HANDLE hServer,
# __in DWORD SessionId,
# __in WTS_INFO_CLASS WTSInfoClass,
# __out LPTSTR *ppBuffer,
# __out DWORD *pBytesReturned
# );
# XXX TODO
#--- kernel32.dll -------------------------------------------------------------
# I've no idea why these functions are in kernel32.dll instead of wtsapi32.dll
# BOOL ProcessIdToSessionId(
# __in DWORD dwProcessId,
# __out DWORD *pSessionId
# );
def ProcessIdToSessionId(dwProcessId):
    """Return the terminal session id that owns process *dwProcessId*.

    Wraps ``kernel32!ProcessIdToSessionId``; a zero (FALSE) return is treated
    as failure by the RaiseIfZero errcheck.
    """
    _ProcessIdToSessionId = windll.kernel32.ProcessIdToSessionId
    _ProcessIdToSessionId.argtypes = [DWORD, PDWORD]
    _ProcessIdToSessionId.restype = bool
    _ProcessIdToSessionId.errcheck = RaiseIfZero
    dwSessionId = DWORD(0)
    _ProcessIdToSessionId(dwProcessId, byref(dwSessionId))
    return dwSessionId.value
# DWORD WTSGetActiveConsoleSessionId(void);
def WTSGetActiveConsoleSessionId():
    """Return the id of the session attached to the physical console.

    Wraps ``kernel32!WTSGetActiveConsoleSessionId``.
    NOTE(review): the API reports failure with 0xFFFFFFFF, and session id 0
    can be a legitimate value, so the RaiseIfZero errcheck may misfire --
    confirm against upstream winappdbg.
    """
    _WTSGetActiveConsoleSessionId = windll.kernel32.WTSGetActiveConsoleSessionId
    _WTSGetActiveConsoleSessionId.argtypes = []
    _WTSGetActiveConsoleSessionId.restype = DWORD
    _WTSGetActiveConsoleSessionId.errcheck = RaiseIfZero
    return _WTSGetActiveConsoleSessionId()
#==============================================================================
# This calculates the list of exported symbols.
# Diff against the snapshot taken near the top of the module and export every
# non-underscore name defined in between (constants, structures, functions).
_all = set(vars().keys()).difference(_all)
__all__ = [_x for _x in _all if not _x.startswith('_')]
__all__.sort()
#==============================================================================
| apache-2.0 |
cmdelatorre/fractalisar | fractal_manager.py | 1 | 6479 | #-*- coding: utf-8 -*-
# FractalisAR: an augmented reality experiment with fractals
# Copyright (C) 2015 Carlos Matías de la Torre
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# Standard library
import bisect
import os

# Third-party
import cv2
import serial

# Local modules
from image_processing import mask_by_pixel_value, dark_mask, masked_merge
class BlendModes:
    """Enumeration of the supported blending strategies.

    MASK    -- cut a color-keyed (HSV) region out of the camera frame and
               show the fractal through it.
    OVERLAY -- paint the non-dark pixels of the fractal on top of the frame.
    """
    MASK, OVERLAY = range(2)


class FractalsDirectory(object):
    """
    Keep a sequence of images and provide a method to merge the current image
    into another one.
    """

    # Legacy string aliases for ``mode``.  Historically the default was the
    # *string* ``'mask'``, which never compared equal to ``BlendModes.MASK``
    # (the int 0), so such instances silently behaved as OVERLAY and the MASK
    # parameter validation never fired.  Normalizing here fixes that while
    # keeping string arguments working.
    _MODE_ALIASES = {'mask': BlendModes.MASK, 'overlay': BlendModes.OVERLAY}

    def __init__(self, data_src=None, mode='mask', filter_conf=None,
                 color_low=None, color_high=None):
        """
        data_src    -- directory containing the fractal frames (jpg/png).
        mode        -- a BlendModes value; the strings 'mask'/'overlay' are
                       accepted as aliases.
        filter_conf, color_low, color_high -- HSV masking parameters,
                       required when ``mode`` resolves to BlendModes.MASK.
        """
        super(FractalsDirectory, self).__init__()
        self.data_src = data_src
        # Normalize legacy string modes so the comparisons below and in
        # merge() behave as intended.
        self.mode = self._MODE_ALIASES.get(mode, mode)
        self.images = []
        self.height = None
        self.width = None
        self.current_index = 0
        self.current_image = None
        # Merge's specific parameters
        if self.mode == BlendModes.MASK and (
                filter_conf is None or color_low is None or color_high is None):
            raise Exception("If fractal mode is MASK, then filter_conf, "
                            "color_low and color_high arguments must be given")
        self.filter_conf = filter_conf
        self.color_low = color_low
        self.color_high = color_high

    def set_dimensions(self, height, width):
        """Set the size every loaded frame will be resized to."""
        self.height = height
        self.width = width

    def load(self):
        """
        Load all the jpg or png images in the source directory.
        If necessary, resize to the fractal's height and width.
        """
        for image_fname in os.listdir(self.data_src):
            fname = os.path.join(self.data_src, image_fname)
            _, file_extension = os.path.splitext(fname)
            if file_extension.lower() in ['.jpg', '.png']:
                image = cv2.imread(fname)
                height, width, _ = image.shape
                if self.height is not None and height != self.height:
                    height = self.height
                if self.width is not None and width != self.width:
                    width = self.width
                self.images.append(cv2.resize(image, (width, height)))
        self.current_image = self.images[0]
        return self

    def next_image(self):
        """Return the current frame and advance the cyclic frame index."""
        self.current_image = self.images[self.current_index]
        self.current_index = (self.current_index + 1) % len(self.images)
        return self.current_image

    def merge(self, image):
        """Blend the next fractal frame into ``image`` and return the result."""
        fractal = self.next_image()
        if self.mode == BlendModes.MASK:
            # Transform the BGR image to the HSV color space.
            hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
            # Create a Mask by subseting the HSV values of the image.
            mask = mask_by_pixel_value(
                hsv_image, self.color_low, self.color_high,
                filter_conf=self.filter_conf)
            invert_mask = False
        else:  # BlendModes.OVERLAY
            # Create a mask with the dark pixels of the fractal
            mask = dark_mask(fractal)
            invert_mask = True
        return masked_merge(image, fractal, mask, invert_mask=invert_mask)
def sorted_index(arr, item, delta=0):
    """
    Return the position that the item should occupy to keep the list sorted.

    Assumes that ``arr`` is a sorted list.  ``delta`` is added to the result
    (kept for backward compatibility; the original recursive implementation
    used it internally to offset positions of sub-slices).

    When ``item`` equals existing elements, the leftmost valid insertion
    point is returned; any such point keeps the list sorted, and callers in
    this module never store duplicate values (FractalManager.register raises
    on duplicates).
    """
    # The standard-library binary search replaces the hand-rolled recursion
    # and its redundant 0/1/2-element special cases; same O(log n) cost.
    return bisect.bisect_left(arr, item) + delta
# A FractalManager registers FractalGallery instances and implements a policy to
# decide which Fractal to return when get_current() is called.
# By sub-classing the FractalManager class, different fractal-selection
# policies can be implemented, using different inputs (sensors).
# The current implementation of the FractalManager defines a validity range
# for each registered Fractal. It uses a DistanceSensor instance to get a
# distance value and select a Fractal based on it.
class FractalManager(object):
    """Choose which fractal gallery to display based on a distance sensor.

    ``fractals`` and ``distances`` are parallel lists kept sorted by
    distance: the i-th distance corresponds to the i-th fractal.
    """

    def __init__(self, sensor):
        super(FractalManager, self).__init__()
        # Parallel, distance-sorted registries (matched by index).
        self.fractals = []
        self.distances = []
        self.sensor = sensor
        self.current_fractal = None

    def register(self, fractal, distance, height=480, width=640):
        """Load *fractal* at the given size and file it under *distance*."""
        fractal.set_dimensions(height=height, width=width)
        fractal.load()
        slot = sorted_index(self.distances, distance)
        already_taken = (slot < len(self.distances)
                         and self.distances[slot] == distance)
        if already_taken:
            raise Exception("There's another fractal at distance %d" % distance)
        self.fractals.insert(slot, fractal)
        self.distances.insert(slot, distance)
        if self.current_fractal is None:
            # The first registration becomes the initial selection.
            self.current_fractal = fractal

    def get_current(self):
        """Return the fractal corresponding to the sensed distance."""
        reading = self.sensor.get_data()
        if reading:
            slot = sorted_index(self.distances, reading)
            # Readings beyond the last registered distance clamp to the
            # farthest (last) fractal.
            self.current_fractal = self.fractals[min(slot, len(self.fractals) - 1)]
        return self.current_fractal
| gpl-2.0 |
datalogics-robb/scons | src/engine/SCons/Tool/ToolTests.py | 2 | 2727 | #
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import sys
import unittest
import SCons.Errors
import SCons.Tool
class ToolTestCase(unittest.TestCase):
    def test_Tool(self):
        """Test the Tool() function"""
        # Minimal stand-in for an SCons construction environment: just enough
        # dict-like behavior (plus Detect/Append) for Tool.__call__ to work.
        class Environment:
            def __init__(self):
                self.dict = {}
            def Detect(self, progs):
                # Pretend every requested program exists; return the first.
                # NOTE(review): SCons.Util is not imported in this file;
                # presumably made available via the SCons.Tool import -- confirm.
                if not SCons.Util.is_List(progs):
                    progs = [ progs ]
                return progs[0]
            def Append(self, **kw):
                self.dict.update(kw)
            def __getitem__(self, key):
                return self.dict[key]
            def __setitem__(self, key, val):
                self.dict[key] = val
            def has_key(self, key):
                # Python 2 only: dict.has_key() was removed in Python 3.
                return self.dict.has_key(key)
        env = Environment()
        env['BUILDERS'] = {}
        env['ENV'] = {}
        env['PLATFORM'] = 'test'
        # Applying the g++ tool should populate the C++ build settings.
        t = SCons.Tool.Tool('g++')
        t(env)
        assert (env['CXX'] == 'c++' or env['CXX'] == 'g++'), env['CXX']
        assert env['CXXFLAGS'] == ['$CCFLAGS'], env['CXXFLAGS']
        assert env['INCPREFIX'] == '-I', env['INCPREFIX']
        assert env['TOOLS'] == ['g++'], env['TOOLS']
        # Tool() with no name must fail with TypeError.
        try:
            SCons.Tool.Tool()
        except TypeError:
            pass
        else:
            # NOTE(review): bare `raise` with no active exception is an odd
            # way to fail the test (it itself raises TypeError in Python 2).
            raise
        # An unknown tool name must raise SCons.Errors.UserError.
        try:
            p = SCons.Tool.Tool('_does_not_exist_')
        except SCons.Errors.UserError:
            pass
        else:
            raise
# Allow running this module directly as a stand-alone test script.
if __name__ == "__main__":
    suite = unittest.makeSuite(ToolTestCase, 'test_')
    if not unittest.TextTestRunner().run(suite).wasSuccessful():
        sys.exit(1)
| mit |
mikecroucher/GPy | benchmarks/regression/outputs.py | 15 | 2463 | # Copyright (c) 2015, Zhenwen Dai
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from __future__ import print_function
import abc
import os
import numpy as np
class Output(object):
    """Abstract reporter for benchmark results.

    ``results`` is indexed as ``results[task, method, slot]`` where the last
    axis holds one slot per evaluation metric plus a final slot for the
    elapsed time (see the concrete subclasses below).
    """
    # Python 2 metaclass spelling; ignored under Python 3, where the
    # abstractmethod below is consequently not enforced.  Kept for
    # backward compatibility.
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def output(self, config, results):
        """Report *results* for the tasks/methods/evaluations in *config*."""
        # Original docstring ("Return the test data...") was a copy-paste
        # from an unrelated class and has been corrected.
        return None


class ScreenOutput(Output):
    """Pretty-print the aggregated results as a tab-separated table on stdout."""

    def output(self, config, results):
        print('='*10+'Report'+'='*10)
        # Header row: one column per (method, evaluation) pair plus time.
        print('\t'.join([' ']+[m.name+'('+e+')' for m in config['methods'] for e in [a.name for a in config['evaluations']]+['time']]))
        for task_i in range(len(config['tasks'])):
            print(config['tasks'][task_i].name+'\t', end='')
            outputs = []
            for method_i in range(len(config['methods'])):
                for ei in range(len(config['evaluations'])+1):
                    # Mean/std over the repetitions stored in this cell.
                    m,s = results[task_i, method_i, ei].mean(), results[task_i, method_i, ei].std()
                    outputs.append('%e(%e)'%(m,s))
            print('\t'.join(outputs))


class CSVOutput(Output):
    """Write the aggregated results to ``<outpath>/<prjname>.csv``."""

    def __init__(self, outpath, prjname):
        self.fname = os.path.join(outpath, prjname+'.csv')

    def output(self, config, results):
        # The context manager closes the file; the redundant f.close() that
        # used to sit inside the ``with`` block has been removed.
        with open(self.fname,'w') as f:
            f.write(','.join([' ']+[m.name+'('+e+')' for m in config['methods'] for e in [a.name for a in config['evaluations']]+['time']])+'\n')
            for task_i in range(len(config['tasks'])):
                f.write(config['tasks'][task_i].name+',')
                outputs = []
                for method_i in range(len(config['methods'])):
                    for ei in range(len(config['evaluations'])+1):
                        m,s = results[task_i, method_i, ei].mean(), results[task_i, method_i, ei].std()
                        outputs.append('%e (%e)'%(m,s))
                f.write(','.join(outputs)+'\n')


class H5Output(Output):
    """Write the raw results array to ``<outpath>/<prjname>.h5``."""

    def __init__(self, outpath, prjname):
        self.fname = os.path.join(outpath, prjname+'.h5')

    def output(self, config, results):
        # Import lazily so h5py remains an optional dependency.
        try:
            import h5py
        except ImportError:
            # The original code raised a *string* here, which is itself a
            # TypeError in Python 3; raise a proper exception instead.
            raise RuntimeError('h5py is required to write the results into a HDF5 file!')
        # The context manager guarantees the file is closed even if a write
        # fails; genuine I/O errors now propagate instead of being masked by
        # the original bare ``except``.
        with h5py.File(self.fname, 'w') as f:
            d = f.create_dataset('results', results.shape, dtype=results.dtype)
            d[:] = results
| bsd-3-clause |
clouddocx/boto | tests/integration/ec2/elb/test_cert_verification.py | 114 | 1577 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Check that all of the certs on all service endpoints validate.
"""
from tests.integration import ServiceCertVerificationTest
from tests.compat import unittest
import boto.ec2.elb
class ELBCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest):
    """Validate the SSL certificates of all ELB regional endpoints."""

    # Flag read by the ServiceCertVerificationTest mixin to select this service.
    elb = True
    # NOTE(review): evaluated at import time — requires boto's region metadata
    # to be loadable when this module is imported.
    regions = boto.ec2.elb.regions()

    def sample_service_call(self, conn):
        """Make one cheap authenticated call to exercise the TLS handshake."""
        conn.get_all_load_balancers()
| mit |
colinnewell/odoo | addons/payment_ogone/controllers/main.py | 389 | 1179 | # -*- coding: utf-8 -*-
import logging
import pprint
import werkzeug
from openerp import http, SUPERUSER_ID
from openerp.http import request
_logger = logging.getLogger(__name__)
class OgoneController(http.Controller):
    """HTTP endpoints receiving transaction feedback from the Ogone payment
    gateway, for both the production and test URL variants."""

    _accept_url = '/payment/ogone/test/accept'
    _decline_url = '/payment/ogone/test/decline'
    _exception_url = '/payment/ogone/test/exception'
    _cancel_url = '/payment/ogone/test/cancel'

    # auth='none': Ogone calls back without an Odoo session.
    @http.route([
        '/payment/ogone/accept', '/payment/ogone/test/accept',
        '/payment/ogone/decline', '/payment/ogone/test/decline',
        '/payment/ogone/exception', '/payment/ogone/test/exception',
        '/payment/ogone/cancel', '/payment/ogone/test/cancel',
    ], type='http', auth='none')
    def ogone_form_feedback(self, **post):
        """ Ogone contacts using GET, at least for accept """
        _logger.info('Ogone: entering form_feedback with post data %s', pprint.pformat(post)) # debug
        # Run as superuser: the unauthenticated gateway callback has no uid.
        cr, uid, context = request.cr, SUPERUSER_ID, request.context
        # Let the payment.transaction model validate and record the feedback.
        request.registry['payment.transaction'].form_feedback(cr, uid, post, 'ogone', context=context)
        # Send the customer back to the URL embedded in the feedback (home by default).
        return werkzeug.utils.redirect(post.pop('return_url', '/'))
| agpl-3.0 |
arbitrahj/django-timepiece | timepiece/utils/__init__.py | 2 | 3196 | import datetime
from dateutil.relativedelta import relativedelta
from django.conf import settings
from django.db.models import get_model
from django.utils import timezone
from timepiece.defaults import TimepieceDefaults
class ActiveEntryError(Exception):
    """A user should have no more than one active entry at a given time.

    Raised by get_active_entry() when the invariant is violated.
    """
    pass
def add_timezone(value, tz=None):
    """Return *value* as an aware datetime.

    Naive datetimes get *tz* attached; plain dates are promoted to midnight
    datetimes first. Already-aware values are returned untouched. When no
    timezone is given, timezone.get_current_timezone() is used.
    """
    zone = tz or timezone.get_current_timezone()
    try:
        naive = timezone.is_naive(value)
    except AttributeError:
        # A datetime.date has no tzinfo/utcoffset: promote it to midnight.
        promoted = datetime.datetime.combine(value, datetime.time())
        return timezone.make_aware(promoted, zone)
    return timezone.make_aware(value, zone) if naive else value
def get_active_entry(user, select_for_update=False):
    """Returns the user's currently-active entry, or None.

    Raises ActiveEntryError if more than one open entry exists.
    """
    queryset = get_model('entries', 'Entry').no_join
    if select_for_update:
        # Lock the rows for the duration of the surrounding transaction.
        queryset = queryset.select_for_update()
    # An "active" entry is one that has not been closed yet.
    queryset = queryset.filter(user=user, end_time__isnull=True)
    if not queryset.exists():
        return None
    if queryset.count() > 1:
        raise ActiveEntryError('Only one active entry is allowed.')
    return queryset[0]
def get_hours_summary(entries):
    """Sum entry hours into total / billable / non_billable buckets.

    Each entry is a mapping with at least 'hours' and a boolean 'billable'.
    A single pass is made over *entries*, so iterators are accepted.
    """
    totals = {'total': 0, 'billable': 0, 'non_billable': 0}
    for entry in entries:
        bucket = 'billable' if entry['billable'] else 'non_billable'
        totals[bucket] += entry['hours']
        totals['total'] += entry['hours']
    return totals
def get_last_billable_day(day=None):
    """Return the last billable day of *day*'s month (default: today).

    That is the day before the Monday of the week containing the first day
    of the following month.
    """
    if day is None:
        day = datetime.date.today()
    next_month = day + relativedelta(months=1)
    return get_week_start(get_month_start(next_month)) - relativedelta(days=1)
def get_month_start(day=None):
    """Returns the first day of the given month (timezone-aware)."""
    target = day or datetime.date.today()
    return add_timezone(target).replace(day=1)
defaults = TimepieceDefaults()

# Sentinel distinguishing "attribute missing" from a legitimate None value.
_UNSET = object()


def get_setting(name, **kwargs):
    """Returns the user-defined value for the setting, or a default value.

    Resolution order: project settings, the caller-supplied ``default``
    keyword, then the packaged TimepieceDefaults. Raises AttributeError
    when the setting is defined nowhere.
    """
    value = getattr(settings, name, _UNSET)  # user-defined settings win
    if value is not _UNSET:
        return value
    if 'default' in kwargs:  # explicit caller fallback comes next
        return kwargs['default']
    value = getattr(defaults, name, _UNSET)  # finally the defaults file
    if value is not _UNSET:
        return value
    raise AttributeError('{0} must be specified in your project settings.'.format(name))
def get_week_start(day=None):
    """Returns the Monday of the given week (timezone-aware)."""
    start = add_timezone(day or datetime.date.today())
    # weekday() is 0 for Monday, so the offset is the days to step back.
    offset = start.weekday()
    return start - relativedelta(days=offset) if offset else start
def get_year_start(day=None):
    """Returns January 1 of the given year (timezone-aware)."""
    target = add_timezone(day or datetime.date.today())
    # Jan 1 is always valid, so one combined replace() is safe.
    return target.replace(month=1, day=1)
def to_datetime(date):
    """Transform a date (or datetime) into a naive datetime at midnight.

    Doc fix: the previous docstring claimed this returned a ``date``
    object; it actually returns a ``datetime.datetime`` whose time
    component is 00:00:00 (any time on a datetime input is discarded).
    """
    return datetime.datetime(date.year, date.month, date.day)
| mit |
meredith-digops/ansible | lib/ansible/plugins/action/win_template.py | 269 | 1198 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
from ansible.plugins.action.template import ActionModule as TemplateActionModule
# Even though TemplateActionModule inherits from ActionBase, we still need to
# directly inherit from ActionBase to appease the plugin loader.
class ActionModule(TemplateActionModule, ActionBase):
    # Windows files use CRLF line endings; this overrides the POSIX
    # template action's default newline sequence.
    DEFAULT_NEWLINE_SEQUENCE = '\r\n'
| gpl-3.0 |
kennedyshead/home-assistant | homeassistant/components/zwave/util.py | 27 | 4070 | """Zwave util methods."""
import asyncio
import logging
import homeassistant.util.dt as dt_util
from . import const
_LOGGER = logging.getLogger(__name__)
def check_node_schema(node, schema):
    """Check if node matches the passed node schema.

    Every constraint present in *schema* must be satisfied; a missing key
    means "no constraint". Mismatches are logged at debug level.
    """
    if const.DISC_NODE_ID in schema:
        allowed_ids = schema[const.DISC_NODE_ID]
        if node.node_id not in allowed_ids:
            _LOGGER.debug(
                "node.node_id %s not in node_id %s",
                node.node_id,
                allowed_ids,
            )
            return False
    if const.DISC_GENERIC_DEVICE_CLASS in schema:
        allowed_generic = schema[const.DISC_GENERIC_DEVICE_CLASS]
        if node.generic not in allowed_generic:
            _LOGGER.debug(
                "node.generic %s not in generic_device_class %s",
                node.generic,
                allowed_generic,
            )
            return False
    if const.DISC_SPECIFIC_DEVICE_CLASS in schema:
        allowed_specific = schema[const.DISC_SPECIFIC_DEVICE_CLASS]
        if node.specific not in allowed_specific:
            _LOGGER.debug(
                "node.specific %s not in specific_device_class %s",
                node.specific,
                allowed_specific,
            )
            return False
    return True
def check_value_schema(value, schema):
    """Check if the value matches the passed value schema.

    Each present constraint must match; DISC_SCHEMAS passes when at least
    one sub-schema matches. Mismatches are logged at debug level.
    """
    if const.DISC_COMMAND_CLASS in schema:
        allowed = schema[const.DISC_COMMAND_CLASS]
        if value.command_class not in allowed:
            _LOGGER.debug(
                "value.command_class %s not in command_class %s",
                value.command_class,
                allowed,
            )
            return False
    if const.DISC_TYPE in schema:
        allowed = schema[const.DISC_TYPE]
        if value.type not in allowed:
            _LOGGER.debug(
                "value.type %s not in type %s", value.type, allowed
            )
            return False
    if const.DISC_GENRE in schema:
        allowed = schema[const.DISC_GENRE]
        if value.genre not in allowed:
            _LOGGER.debug(
                "value.genre %s not in genre %s", value.genre, allowed
            )
            return False
    if const.DISC_INDEX in schema:
        allowed = schema[const.DISC_INDEX]
        if value.index not in allowed:
            _LOGGER.debug(
                "value.index %s not in index %s", value.index, allowed
            )
            return False
    if const.DISC_INSTANCE in schema:
        allowed = schema[const.DISC_INSTANCE]
        if value.instance not in allowed:
            _LOGGER.debug(
                "value.instance %s not in instance %s",
                value.instance,
                allowed,
            )
            return False
    if const.DISC_SCHEMAS in schema:
        # any() short-circuits on the first matching sub-schema, just like
        # the original `found = found or check(...)` accumulator did.
        if not any(
            check_value_schema(value, sub) for sub in schema[const.DISC_SCHEMAS]
        ):
            return False
    return True
def node_name(node):
    """Return the name of the node."""
    if not is_node_parsed(node):
        # Nothing useful reported yet; fall back to the numeric node id.
        return f"Unknown Node {node.node_id}"
    return node.name or f"{node.manufacturer_name} {node.product_name}"
def node_device_id_and_name(node, instance=1):
    """Return the (device_id, name) pair for the given node instance.

    Instance 1 is the primary instance and keeps the plain name; other
    instances get the instance number appended and included in the id.
    """
    base_name = node_name(node)
    if instance == 1:
        return ((const.DOMAIN, node.node_id), base_name)
    return ((const.DOMAIN, node.node_id, instance), f"{base_name} ({instance})")
async def check_has_unique_id(entity, ready_callback, timeout_callback):
    """Poll until *entity* has a unique_id, or time out.

    Invokes ready_callback(seconds_waited) once the id appears, or
    timeout_callback(seconds_waited) after NODE_READY_WAIT_SECS seconds.
    """
    started = dt_util.utcnow()
    while True:
        elapsed = int((dt_util.utcnow() - started).total_seconds())
        if entity.unique_id:
            ready_callback(elapsed)
            return
        if elapsed >= const.NODE_READY_WAIT_SECS:
            # Give up after NODE_READY_WAIT_SECS seconds of waiting.
            timeout_callback(elapsed)
            return
        # Re-check once per second.
        await asyncio.sleep(1)
def is_node_parsed(node):
    """Check whether the node has been parsed or still waiting to be parsed.

    A node counts as parsed once it has an explicit name, or once both the
    manufacturer and product names have been reported.
    """
    if node.name:
        return True
    return bool(node.manufacturer_name and node.product_name)
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.