| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
pizzapanther/GAE-Bulk-Mailer | django/contrib/formtools/preview.py | 315 | 5754 | """
Formtools Preview application.
"""
from django.http import Http404
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from django.utils.crypto import constant_time_compare
from django.contrib.formtools.utils import form_hmac
AUTO_ID = 'formtools_%s' # Each form here uses this as its auto_id parameter.
class FormPreview(object):
preview_template = 'formtools/preview.html'
form_template = 'formtools/form.html'
# METHODS SUBCLASSES SHOULDN'T OVERRIDE ###################################
def __init__(self, form):
# form should be a Form class, not an instance.
self.form, self.state = form, {}
def __call__(self, request, *args, **kwargs):
stage = {'1': 'preview', '2': 'post'}.get(request.POST.get(self.unused_name('stage')), 'preview')
self.parse_params(*args, **kwargs)
try:
method = getattr(self, stage + '_' + request.method.lower())
except AttributeError:
raise Http404
return method(request)
def unused_name(self, name):
"""
Given a first-choice name, adds an underscore to the name until it
reaches a name that isn't claimed by any field in the form.
This is calculated rather than being hard-coded so that no field names
are off-limits for use in the form.
"""
while 1:
try:
f = self.form.base_fields[name]
except KeyError:
break # This field name isn't being used by the form.
name += '_'
return name
def preview_get(self, request):
"Displays the form"
f = self.form(auto_id=self.get_auto_id(), initial=self.get_initial(request))
return render_to_response(self.form_template,
self.get_context(request, f),
context_instance=RequestContext(request))
def preview_post(self, request):
"Validates the POST data. If valid, displays the preview page. Else, redisplays form."
f = self.form(request.POST, auto_id=self.get_auto_id())
context = self.get_context(request, f)
if f.is_valid():
self.process_preview(request, f, context)
context['hash_field'] = self.unused_name('hash')
context['hash_value'] = self.security_hash(request, f)
return render_to_response(self.preview_template, context, context_instance=RequestContext(request))
else:
return render_to_response(self.form_template, context, context_instance=RequestContext(request))
def _check_security_hash(self, token, request, form):
expected = self.security_hash(request, form)
return constant_time_compare(token, expected)
def post_post(self, request):
"Validates the POST data. If valid, calls done(). Else, redisplays form."
f = self.form(request.POST, auto_id=self.get_auto_id())
if f.is_valid():
if not self._check_security_hash(request.POST.get(self.unused_name('hash'), ''),
request, f):
return self.failed_hash(request) # Security hash failed.
return self.done(request, f.cleaned_data)
else:
return render_to_response(self.form_template,
self.get_context(request, f),
context_instance=RequestContext(request))
# METHODS SUBCLASSES MIGHT OVERRIDE IF APPROPRIATE ########################
def get_auto_id(self):
"""
Hook to override the ``auto_id`` kwarg for the form. Needed when
rendering two form previews in the same template.
"""
return AUTO_ID
def get_initial(self, request):
"""
Takes a request argument and returns a dictionary to pass to the form's
``initial`` kwarg when the form is being created from an HTTP get.
"""
return {}
def get_context(self, request, form):
"Context for template rendering."
return {'form': form, 'stage_field': self.unused_name('stage'), 'state': self.state}
def parse_params(self, *args, **kwargs):
"""
Given captured args and kwargs from the URLconf, saves something in
self.state and/or raises Http404 if necessary.
For example, this URLconf captures a user_id variable:
(r'^contact/(?P<user_id>\d{1,6})/$', MyFormPreview(MyForm)),
In this case, the kwargs variable in parse_params would be
{'user_id': 32} for a request to '/contact/32/'. You can use that
user_id to make sure it's a valid user and/or save it for later, for
use in done().
"""
pass
def process_preview(self, request, form, context):
"""
Given a validated form, performs any extra processing before displaying
the preview page, and saves any extra data in context.
"""
pass
def security_hash(self, request, form):
"""
Calculates the security hash for the given HttpRequest and Form instances.
Subclasses may want to take into account request-specific information,
such as the IP address.
"""
return form_hmac(form)
def failed_hash(self, request):
"Returns an HttpResponse in the case of an invalid security hash."
return self.preview_post(request)
# METHODS SUBCLASSES MUST OVERRIDE ########################################
def done(self, request, cleaned_data):
"""
Does something with the cleaned_data and returns an
HttpResponseRedirect.
"""
raise NotImplementedError('You must define a done() method on your %s subclass.' % self.__class__.__name__)
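# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal FormPreview subclass, assuming a hypothetical SomeForm form class.
# Only done() must be overridden; every other hook falls back to the defaults above.
from django import forms
from django.http import HttpResponseRedirect

class SomeForm(forms.Form):
    message = forms.CharField()

class SomeFormPreview(FormPreview):
    def done(self, request, cleaned_data):
        # Persist cleaned_data here, then redirect so a refresh cannot re-submit.
        return HttpResponseRedirect('/thanks/')

# Hypothetical URLconf entry: (r'^post/$', SomeFormPreview(SomeForm)),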
| bsd-2-clause |
Sabayon/anaconda | pyanaconda/ui/gui/spokes/advstorage/iscsi.py | 1 | 18143 | # iSCSI configuration dialog
#
# Copyright (C) 2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Chris Lumens <clumens@redhat.com>
#
from IPy import IP
from collections import namedtuple
from gi.repository import GLib
from pyanaconda import constants
from pyanaconda.threads import threadMgr, AnacondaThread
from pyanaconda.ui.gui import GUIObject
from pyanaconda import nm
__all__ = ["ISCSIDialog"]
STYLE_NONE = 0
STYLE_CHAP = 1
STYLE_REVERSE_CHAP = 2
Credentials = namedtuple("Credentials", ["style",
"targetIP", "initiator", "username",
"password", "rUsername", "rPassword"])
NodeStoreRow = namedtuple("NodeStoreRow", ["selected", "notLoggedIn", "name", "iface"])
def discover_no_credentials(builder):
return Credentials(STYLE_NONE,
builder.get_object("targetEntry").get_text(),
builder.get_object("initiatorEntry").get_text(),
"", "", "", "")
def discover_chap(builder):
return Credentials(STYLE_CHAP,
builder.get_object("targetEntry").get_text(),
builder.get_object("initiatorEntry").get_text(),
builder.get_object("chapUsernameEntry").get_text(),
builder.get_object("chapPasswordEntry").get_text(),
"", "")
def discover_reverse_chap(builder):
return Credentials(STYLE_REVERSE_CHAP,
builder.get_object("targetEntry").get_text(),
builder.get_object("initiatorEntry").get_text(),
builder.get_object("rchapUsernameEntry").get_text(),
builder.get_object("rchapPasswordEntry").get_text(),
builder.get_object("rchapReverseUsername").get_text(),
builder.get_object("rchapReversePassword").get_text())
# This list maps the current page from the authNotebook to a function to grab
# credentials out of the UI. This works as long as authNotebook keeps the
# filler page at the front.
discoverMap = [discover_no_credentials, discover_chap, discover_reverse_chap]
def login_no_credentials(builder):
return Credentials(STYLE_NONE,
"", "",
"", "", "", "")
def login_chap(builder):
return Credentials(STYLE_CHAP,
"", "",
builder.get_object("loginChapUsernameEntry").get_text(),
builder.get_object("loginChapPasswordEntry").get_text(),
"", "")
def login_reverse_chap(builder):
return Credentials(STYLE_REVERSE_CHAP,
"", "",
builder.get_object("loginRchapUsernameEntry").get_text(),
builder.get_object("loginRchapPasswordEntry").get_text(),
builder.get_object("loginRchapReverseUsername").get_text(),
builder.get_object("loginRchapReversePassword").get_text())
# And this list maps the current page from the loginAuthNotebook to a function
# to grab credentials out of the UI. This works as long as loginAuthNotebook
# keeps the filler page at the front, and we check to make sure "Use the
# credentials from discovery" is not selected first.
loginMap = [login_no_credentials, login_chap, login_reverse_chap]
def credentials_valid(credentials):
if credentials.style == STYLE_NONE:
return True
elif credentials.style == STYLE_CHAP:
return credentials.username.strip() != "" and credentials.password != ""
elif credentials.style == STYLE_REVERSE_CHAP:
return credentials.username.strip() != "" and credentials.password != "" and \
credentials.rUsername.strip() != "" and credentials.rPassword != ""
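# Example (illustrative, not in the original file): CHAP requires both a username
# and a password, so
#   credentials_valid(Credentials(STYLE_CHAP, "10.0.0.1", "iqn.2013-01.com.example", "user", "", "", ""))
# is False, while the same tuple with a non-empty password evaluates to True.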
class ISCSIDialog(GUIObject):
builderObjects = ["iscsiDialog", "nodeStore", "nodeStoreFiltered"]
mainWidgetName = "iscsiDialog"
uiFile = "spokes/advstorage/iscsi.glade"
def __init__(self, data, storage):
GUIObject.__init__(self, data)
self.storage = storage
self.iscsi = self.storage.iscsi()
self._discoveryError = None
self._loginError = False
self._discoveredNodes = []
self._update_devicetree = False
self._authTypeCombo = self.builder.get_object("authTypeCombo")
self._authNotebook = self.builder.get_object("authNotebook")
self._iscsiNotebook = self.builder.get_object("iscsiNotebook")
self._loginButton = self.builder.get_object("loginButton")
self._loginAuthTypeCombo = self.builder.get_object("loginAuthTypeCombo")
self._loginAuthNotebook = self.builder.get_object("loginAuthNotebook")
self._loginGrid = self.builder.get_object("loginGrid")
self._loginConditionNotebook = self.builder.get_object("loginConditionNotebook")
self._configureGrid = self.builder.get_object("configureGrid")
self._conditionNotebook = self.builder.get_object("conditionNotebook")
self._bindCheckbox = self.builder.get_object("bindCheckbutton")
self._startButton = self.builder.get_object("startButton")
self._okButton = self.builder.get_object("okButton")
self._cancelButton = self.builder.get_object("cancelButton")
self._initiatorEntry = self.builder.get_object("initiatorEntry")
self._store = self.builder.get_object("nodeStore")
def refresh(self):
self._bindCheckbox.set_active(bool(self.iscsi.ifaces))
self._bindCheckbox.set_sensitive(self.iscsi.mode == "none")
self._authTypeCombo.set_active(0)
self._startButton.set_sensitive(True)
self._loginAuthTypeCombo.set_active(0)
self.builder.get_object("nodeStoreFiltered").set_visible_column(1)
self._initiatorEntry.set_text(self.iscsi.initiator)
self._initiatorEntry.set_sensitive(not self.iscsi.initiatorSet)
@property
def selectedNames(self):
return [itr[2] for itr in self._store if itr[0]]
def run(self):
rc = self.window.run()
self.window.destroy()
# We need to call this to get the device nodes to show up
# in our devicetree.
if self._update_devicetree:
self.storage.devicetree.populate()
return rc
##
## DISCOVERY
##
def on_auth_type_changed(self, widget, *args):
self._authNotebook.set_current_page(widget.get_active())
# When we change the notebook, we also need to reverify the credentials
# in order to set the Start button sensitivity.
self.on_discover_field_changed()
def _discover(self, credentials, bind):
# This needs to be in its own thread, not marked with gtk_action_* because it's
# called from on_start_clicked, which is in the GTK main loop. Those decorators
# won't do anything special in that case.
if not self.iscsi.initiatorSet:
self.iscsi.initiator = credentials.initiator
# interfaces created here affect nodes that iscsi.discover would return
if self.iscsi.mode == "none" and not bind:
self.iscsi.delete_interfaces()
elif (self.iscsi.mode == "bind"
or self.iscsi.mode == "none" and bind):
activated = set(nm.nm_activated_devices())
created = set(self.iscsi.ifaces.values())
self.iscsi.create_interfaces(activated - created)
try:
self._discoveredNodes = self.iscsi.discover(credentials.targetIP,
username=credentials.username,
password=credentials.password,
r_username=credentials.rUsername,
r_password=credentials.rPassword)
except IOError as e:
self._discoveryError = str(e)
return
if len(self._discoveredNodes) == 0:
self._discoveryError = "No nodes discovered."
def _check_discover(self, *args):
if threadMgr.get(constants.THREAD_ISCSI_DISCOVER):
return True
# When iscsi discovery is done, update the UI. We don't need to worry
# about the user escaping from the dialog because all the buttons are
# marked insensitive.
spinner = self.builder.get_object("waitSpinner")
spinner.stop()
if self._discoveryError:
# Failure. Display some error message and leave the user on the
# dialog to try again.
self.builder.get_object("discoveryErrorLabel").set_text(self._discoveryError)
self._discoveryError = None
self._conditionNotebook.set_current_page(2)
self._set_configure_sensitive(True)
else:
# Success. Now populate the node store and kick the user on over to
# that subscreen.
self._add_nodes(self._discoveredNodes)
self._iscsiNotebook.set_current_page(1)
self._okButton.set_sensitive(True)
# If some form of login credentials were used for discovery,
# default to using the same for login.
if self._authTypeCombo.get_active() != 0:
self._loginAuthTypeCombo.set_active(3)
# We always want to enable this button, in case the user's had enough.
self._cancelButton.set_sensitive(True)
return False
def _set_configure_sensitive(self, sensitivity):
for child in self._configureGrid.get_children():
if child == self._initiatorEntry:
self._initiatorEntry.set_sensitive(not self.iscsi.initiatorSet)
elif child == self._bindCheckbox:
self._bindCheckbox.set_sensitive(sensitivity and self.iscsi.mode == "none")
elif child != self._conditionNotebook:
child.set_sensitive(sensitivity)
def on_start_clicked(self, *args):
# First, update some widgets to not be usable while discovery happens.
self._startButton.hide()
self._cancelButton.set_sensitive(False)
self._okButton.set_sensitive(False)
self._conditionNotebook.set_current_page(1)
self._set_configure_sensitive(False)
self._initiatorEntry.set_sensitive(False)
# Now get the node discovery credentials.
credentials = discoverMap[self._authNotebook.get_current_page()](self.builder)
discoveredLabel = self.builder.get_object("discoveredLabel")
discoveredLabel.set_markup(discoveredLabel.get_label() % {"initiatorName": credentials.initiator,
"targetAddress": credentials.targetIP})
bind = self._bindCheckbox.get_active()
spinner = self.builder.get_object("waitSpinner")
spinner.start()
threadMgr.add(AnacondaThread(name=constants.THREAD_ISCSI_DISCOVER, target=self._discover,
args=(credentials, bind)))
GLib.timeout_add(250, self._check_discover)
    # When the initiator name, IP address, and any auth fields are filled in
    # with valid values, only then should the Start button be made sensitive.
def _target_ip_valid(self):
widget = self.builder.get_object("targetEntry")
text = widget.get_text()
try:
IP(text)
return True
except ValueError:
return False
def _initiator_name_valid(self):
widget = self.builder.get_object("initiatorEntry")
text = widget.get_text()
stripped = text.strip()
return "." in stripped and ":" in stripped
def on_discover_field_changed(self, *args):
# Make up a credentials object so we can test if it's valid.
credentials = discoverMap[self._authNotebook.get_current_page()](self.builder)
sensitive = self._target_ip_valid() and self._initiator_name_valid() and credentials_valid(credentials)
self._startButton.set_sensitive(sensitive)
##
## LOGGING IN
##
def _add_nodes(self, nodes):
for node in nodes:
iface = self.iscsi.ifaces.get(node.iface, node.iface)
self._store.append([False, True, node.name, iface])
# We should select the first node by default.
self._store[0][0] = True
def on_login_type_changed(self, widget, *args):
self._loginAuthNotebook.set_current_page(widget.get_active())
# When we change the notebook, we also need to reverify the credentials
# in order to set the Log In button sensitivity.
self.on_login_field_changed()
def on_row_toggled(self, button, path):
if not path:
return
# Then, go back and mark just this row as selected.
itr = self._store.get_iter(path)
self._store[itr][0] = not self._store[itr][0]
def _login(self, credentials):
for row in self._store:
obj = NodeStoreRow(*row)
if not obj.selected:
continue
for node in self._discoveredNodes:
if obj.notLoggedIn and node.name == obj.name:
# when binding interfaces match also interface
if self.iscsi.ifaces and \
obj.iface != self.iscsi.ifaces[node.iface]:
continue
(rc, msg) = self.iscsi.log_into_node(node,
username=credentials.username,
password=credentials.password,
r_username=credentials.rUsername,
r_password=credentials.rPassword)
if not rc:
self._loginError = msg
return
self._update_devicetree = True
row[1] = False
def _check_login(self, *args):
if threadMgr.get(constants.THREAD_ISCSI_LOGIN):
return True
spinner = self.builder.get_object("loginSpinner")
spinner.stop()
spinner.hide()
if self._loginError:
self.builder.get_object("loginErrorLabel").set_text(self._loginError)
self._loginError = None
self._loginConditionNotebook.set_current_page(1)
self._cancelButton.set_sensitive(True)
self._loginButton.set_sensitive(True)
else:
anyLeft = False
self._loginConditionNotebook.set_current_page(0)
# Select the now-first target for the user in case they want to
# log into another one.
for row in self._store:
if row[1]:
row[0] = True
anyLeft = True
# And make the login button sensitive if there are any more
# nodes to login to.
self._loginButton.set_sensitive(True)
break
self._okButton.set_sensitive(True)
# Once a node has been logged into, it doesn't make much sense to let
# the user cancel. Cancel what, exactly?
self._cancelButton.set_sensitive(False)
if not anyLeft:
self.window.response(1)
self._set_login_sensitive(True)
return False
def _set_login_sensitive(self, sensitivity):
for child in self._loginGrid.get_children():
if child != self._loginConditionNotebook:
child.set_sensitive(sensitivity)
def on_login_clicked(self, *args):
        # Make the buttons insensitive while we work.
self._okButton.set_sensitive(False)
self._cancelButton.set_sensitive(False)
self._loginButton.set_sensitive(False)
self._loginConditionNotebook.set_current_page(0)
self._set_login_sensitive(False)
spinner = self.builder.get_object("loginSpinner")
spinner.start()
spinner.set_visible(True)
spinner.show()
# Are we reusing the credentials from the discovery step? If so, grab them
# out of the UI again here. They should still be there.
page = self._loginAuthNotebook.get_current_page()
if page == 3:
credentials = discoverMap[self._authNotebook.get_current_page()](self.builder)
else:
credentials = loginMap[page](self.builder)
threadMgr.add(AnacondaThread(name=constants.THREAD_ISCSI_LOGIN, target=self._login,
args=(credentials,)))
GLib.timeout_add(250, self._check_login)
def on_login_field_changed(self, *args):
# Make up a credentials object so we can test if it's valid.
page = self._loginAuthNotebook.get_current_page()
if page == 3:
credentials = discoverMap[self._authNotebook.get_current_page()](self.builder)
else:
credentials = loginMap[page](self.builder)
self._loginButton.set_sensitive(credentials_valid(credentials))
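# --- Illustrative usage sketch (assumption; not part of the original module) ---
# A storage spoke would drive this dialog roughly as follows:
#   dialog = ISCSIDialog(data, storage)
#   dialog.refresh()
#   rc = dialog.run()   # run() repopulates storage.devicetree after successful logins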
| gpl-2.0 |
AstroTech/workshop-python | django/solution/addressbook/addressbook/contact/admin.py | 1 | 3167 | from django.contrib import admin
from django.db import models
from django.forms import CheckboxSelectMultiple
from django.utils.translation import ugettext_lazy as _
from contact.models import Contact, Address
class AddressInline(admin.TabularInline):
model = Address
max_num = 10
min_num = 2
extra = 1
@admin.register(Address)
class AddressAdmin(admin.ModelAdmin):
autocomplete_fields = ['contact']
class AgeFilter(admin.SimpleListFilter):
# Human-readable title which will be displayed in the
# right admin sidebar just above the filter options.
title = _('Age')
# Parameter for the filter that will be used in the URL query.
parameter_name = 'age'
def lookups(self, request, model_admin):
return [
('None', _('Not Specified')),
('0-10', _('0-10')),
('11-20', _('11-20')),
('21-30', _('21-30')),
('31-40', _('31-40')),
('41-50', _('41-50')),
('51-60', _('51-60')),
('Older', _('Older')),
]
def queryset(self, request, queryset):
if self.value() == 'None':
return queryset.filter(date_of_birth=None)
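        # Note (added comment): only the 'None' lookup filters the queryset here;
        # the age-range lookups above fall through (return None), which leaves the
        # queryset unfiltered.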
@admin.register(Contact)
class ContactAdmin(admin.ModelAdmin):
list_display = ['last_name', 'first_name', 'date_of_birth', 'column_age']
list_display_links = ['last_name']
search_fields = ['^last_name']
list_filter = ['created', 'modified', AgeFilter]
inlines = [AddressInline]
exclude = ['reporter', 'created', 'updated']
ordering = ['last_name', 'first_name']
autocomplete_fields = ['friends']
# formfield_overrides = {models.ManyToManyField: {'widget': CheckboxSelectMultiple}}
radio_fields = {
'gender': admin.HORIZONTAL,
'status': admin.VERTICAL
}
fieldsets = [
(_('Personal Data'), {'fields': ['last_name', 'first_name', 'date_of_birth', 'gender']}),
(_('Additional Data'), {'fields': ['email', 'bio', 'image']}),
(_('Relations'), {'fields': ['status', 'friends']}),
]
def get_list_display(self, request):
list_display = super().get_list_display(request)
if request.user.is_superuser and 'is_deleted' not in list_display:
list_display += ['is_deleted']
return list_display
def get_queryset(self, request):
queryset = super().get_queryset(request)
if request.user.is_superuser:
return queryset
else:
return queryset.filter(is_deleted=False)
def column_age(self, obj):
age = obj.get_age()
return str(age) if age else ''
# https://docs.djangoproject.com/en/2.0/ref/contrib/admin/#django.contrib.admin.ModelAdmin.list_display
column_age.short_description = _('Age')
column_age.empty_value_display = ''
column_age.admin_order_field = 'date_of_birth'
def save_model(self, request, obj, form, change):
obj.reporter = request.user
super().save_model(request, obj, form, change)
class Media:
js = [
'contact/js/alert.js',
]
css = {'all': [
'contact/css/style.css',
]} | mit |
UTSA-ICS/python-keystoneclient-SID | keystoneclient/apiclient/exceptions.py | 6 | 1124 | # Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 Nebula, Inc.
# Copyright 2013 Alessio Ababilov
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Exception definitions.
Deprecated since v0.7.1. Use 'keystoneclient.exceptions' instead of
this module.
"""
import warnings
from keystoneclient.exceptions import * # noqa
warnings.warn("The 'keystoneclient.apiclient.exceptions' module is deprecated "
"since v.0.7.1. Use 'keystoneclient.exceptions' instead of this "
"module.", DeprecationWarning)
| apache-2.0 |
MediaSapiens/wavesf | django/contrib/gis/management/commands/inspectdb.py | 311 | 1553 | from optparse import make_option
from django.core.management.base import CommandError
from django.core.management.commands.inspectdb import Command as InspectDBCommand
class Command(InspectDBCommand):
db_module = 'django.contrib.gis.db'
gis_tables = {}
def get_field_type(self, connection, table_name, row):
field_type, field_params, field_notes = super(Command, self).get_field_type(connection, table_name, row)
if field_type == 'GeometryField':
geo_col = row[0]
# Getting a more specific field type and any additional parameters
# from the `get_geometry_type` routine for the spatial backend.
field_type, geo_params = connection.introspection.get_geometry_type(table_name, geo_col)
field_params.update(geo_params)
# Adding the table name and column to the `gis_tables` dictionary, this
# allows us to track which tables need a GeoManager.
if table_name in self.gis_tables:
self.gis_tables[table_name].append(geo_col)
else:
self.gis_tables[table_name] = [geo_col]
return field_type, field_params, field_notes
def get_meta(self, table_name):
meta_lines = super(Command, self).get_meta(table_name)
if table_name in self.gis_tables:
# If the table is a geographic one, then we need make
# GeoManager the default manager for the model.
meta_lines.insert(0, ' objects = models.GeoManager()')
return meta_lines
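# Illustrative note (assumption, not part of the original command): running
# `python manage.py inspectdb` with this module on the path emits model classes
# whose geometry columns use the specific field types resolved above and which
# include `objects = models.GeoManager()` for tables tracked in gis_tables.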
| bsd-3-clause |
x684867/nemesis | src/node/deps/v8/tools/testrunner/network/__init__.py | 651 | 1571 | # Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| mit |
PerplexedZombie/Actual-DnD-like-actual | CharacterBuilder/Races.py | 1 | 8400 | #Why is there so many races
#This is the holding file for information on all the races
#I hate this. So much
import Laws
#This means that it will have access to the laws set out in other files
#This shall be long winded and unfun
if Laws.user_race == 'Aarakocra':
print('Something about being BS')
Laws.user_stats['Dex'] += 2
Laws.user_stats['Wis'] += 1
Laws.user_other_traits.append('Flight')
Laws.user_other_traits.append('Talons')
if Laws.user_race == 'Aasimar':
print('I always say this wrong')
Laws.user_stats['Cha'] += 2
Laws.user_other_traits.append('Darkvision')
Laws.user_other_traits.append('Celestial')
if Laws.user_race == 'Bugbear':
print('More like HUGbear amirite!')
Laws.user_stats['Str'] += 2
Laws.user_stats['Dex'] += 1
Laws.user_other_traits.append('Darkvision')
Laws.user_other_traits.append('Long-Limbed')
Laws.user_other_traits.append('Powerful Build')
Laws.user_other_traits.append('Sneaky')
Laws.user_other_traits.append('Surprise Attack')
if Laws.user_race == 'Dragonborn':
print('1 of many edgelord races')
Laws.user_stats['Str'] += 2
Laws.user_stats['Cha'] += 1
Laws.user_other_traits.append('Draconic Ancestry')
Laws.user_other_traits.append('Breath Weapon')
Laws.user_other_traits.append('Damage Resistance')
if Laws.user_race == 'Dwarf':
print('Staples')
Laws.user_stats['Con'] += 2
Laws.user_other_traits.append('Darkvision')
Laws.user_other_traits.append('Dwarven Resilience')
Laws.user_other_traits.append('Dwarven Combat Training')
Laws.user_other_traits.append('Stonecunning')
if Laws.user_race == 'Elf':
print('Even more Staples')
Laws.user_stats['Dex'] += 2
Laws.user_other_traits.append('Darkvision')
Laws.user_other_traits.append('Keen Senses')
Laws.user_other_traits.append('Fey Ancestry')
Laws.user_other_traits.append('Trance')
if Laws.user_race == 'Feral Tiefling':
print('Why is there "Feral" at the front')
Laws.user_stats['Dex'] += 2
Laws.user_stats['Int'] += 1
Laws.user_other_traits.append('Darkvision')
Laws.user_other_traits.append('Hellish Resistance')
Laws.user_other_traits.append('Infernal')
if Laws.user_race == 'Firbolg':
print('I don\'t know what this is...')
Laws.user_stats['Str'] += 2
Laws.user_stats['Dex'] += 1
Laws.user_other_traits.append('Firbolg')
if Laws.user_race == 'Genasi':
print('I say this one wrong too')
Laws.user_stats['Con'] += 2
if Laws.user_race == 'Gnome':
print('Gnomeo oh Gnomeo where art thou my Gnomeo')
Laws.user_stats['Int'] += 2
Laws.user_other_traits.append('Darkvision')
Laws.user_other_traits.append('Gnome Cunning')
if Laws.user_race == 'Goblin':
print('This is an enemy not a race')
Laws.user_stats['Dex'] += 2
Laws.user_stats['Con'] += 1
Laws.user_other_traits.append('Darkvision')
Laws.user_other_traits.append('Fury of the Small')
Laws.user_other_traits.append('Nimble Escape')
if Laws.user_race == 'Goliath':
print('Large individual')
Laws.user_stats['Str'] += 2
Laws.user_stats['Con'] += 1
Laws.user_other_traits.append('Natural Athlete')
Laws.user_other_traits.append('Stone\'s Endurance')
Laws.user_other_traits.append('Powerful Build')
Laws.user_other_traits.append('Mountain Born')
if Laws.user_race == 'Half-Elf':
print('Half-this, half-that, whole lot of uselessness')
Laws.user_stats['Cha'] += 2
Laws.user_other_traits.append('Darkvision')
Laws.user_other_traits.append('Fey Ancestry')
Laws.user_other_traits.append('Skill Versatility')
if Laws.user_race == 'Halfling':
print('Half-a-thing!')
Laws.user_stats['Dex'] += 2
Laws.user_other_traits.append('Lucky')
Laws.user_other_traits.append('Brave')
Laws.user_other_traits.append('Halfling Nimbleness')
if Laws.user_race == 'Half-Orc':
print('Half the practicality, twice the angst')
Laws.user_stats['Str'] += 2
Laws.user_stats['Con'] += 1
Laws.user_other_traits.append('Darkvision')
Laws.user_other_traits.append('Menacing')
Laws.user_other_traits.append('Relentless')
Laws.user_other_traits.append('Endurance')
Laws.user_other_traits.append('Savage Attack')
if Laws.user_race == 'Hobgoblin':
print('Somehow better than Goblins')
Laws.user_stats['Con'] += 2
Laws.user_stats['Int'] += 1
Laws.user_other_traits.append('Darkvision')
Laws.user_other_traits.append('Martial Training')
Laws.user_other_traits.append('Saving Face')
if Laws.user_race == 'Human':
print('Also BS because science')
Laws.user_stats['Str'] += 1
Laws.user_stats['Dex'] += 1
Laws.user_other_traits.append('Extra Language')
if Laws.user_race == 'Kenku':
print('Like edgy but with more(?) RP')
Laws.user_stats['Dex'] += 2
Laws.user_stats['Wis'] += 1
Laws.user_other_traits.append('Expert Forgery')
Laws.user_other_traits.append('Kenku Training')
Laws.user_other_traits.append('Mimicry')
if Laws.user_race == 'Kobald':
print('Not what people think they are')
Laws.user_stats['Str'] -= 2
Laws.user_stats['Dex'] += 2
Laws.user_other_traits.append('Darkvision')
Laws.user_other_traits.append('Grovel, Cower, and Beg')
Laws.user_other_traits.append('Pack Tactics')
Laws.user_other_traits.append('Sunlight Sensitivity')
if Laws.user_race == 'Lizardfolk':
print('I\'m running out of creativity')
Laws.user_stats['Con'] += 2
Laws.user_stats['Wis'] += 1
Laws.user_other_traits.append('Bite')
Laws.user_other_traits.append('Cunning Artisan')
Laws.user_other_traits.append('Hold Breath')
    Laws.user_other_traits.append('Hunter\'s Lore')
Laws.user_other_traits.append('Natural Armour')
if Laws.user_race == 'Orc':
print('AngstAngstAngstRageAngst')
Laws.user_stats['Str'] += 2
Laws.user_stats['Con'] += 1
Laws.user_stats['Int'] -= 2
Laws.user_other_traits.append('Darkvision')
Laws.user_other_traits.append('Aggressive')
Laws.user_other_traits.append('Menacing')
Laws.user_other_traits.append('Powerful Build')
if Laws.user_race == 'Tabaxi':
print('Literally a race from Skyrim')
Laws.user_stats['Dex'] += 2
Laws.user_stats['Cha'] += 1
Laws.user_other_traits.append('Darkvision')
Laws.user_other_traits.append('Feline Agility')
Laws.user_other_traits.append('Cat\'s Claws')
Laws.user_other_traits.append('Cat\'s Talent')
if Laws.user_race == 'Tiefling':
print('I had this already?')
Laws.user_stats['Cha'] += 2
Laws.user_stats['Int'] += 1
Laws.user_other_traits.append('Darkvision')
Laws.user_other_traits.append('Hellish Resistance')
Laws.user_other_traits.append('Infernal Legacy')
if Laws.user_race == 'Tortle':
    print('What\'s not to love!')
Laws.user_stats['Str'] += 2
Laws.user_stats['Wis'] += 1
Laws.user_other_traits.append('Claws')
Laws.user_other_traits.append('Hold Breath')
Laws.user_other_traits.append('Natural Armour')
Laws.user_other_traits.append('Shell Defense')
Laws.user_other_traits.append('Survival Instinct')
if Laws.user_race == 'Triton':
print('Fish people?')
Laws.user_stats['Str'] += 1
Laws.user_stats['Con'] += 1
Laws.user_stats['Cha'] += 1
Laws.user_other_traits.append('Amphibious')
Laws.user_other_traits.append('Control Air and Water')
Laws.user_other_traits.append('Emissary of the Sea')
Laws.user_other_traits.append('Guardians of the Depths')
if Laws.user_race == 'Yuan-ti Pureblood':
print('Snake people')
Laws.user_stats['Cha'] += 2
Laws.user_stats['Int'] += 1
Laws.user_other_traits.append('Darkvision')
Laws.user_other_traits.append('Innate Spellcasting')
Laws.user_other_traits.append('Magic Resistance')
    Laws.user_other_traits.append('Poison Immunity')
#I have done it this way, mostly because I don't know any other way
#and because this is supposedly the correct way?
#My god it was long though
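# --- Illustrative alternative (sketch, not part of the original file) ---
# The long if-chain above could be table-driven instead; this hypothetical
# refactor covers two races only and reuses the same Laws module.
RACE_TABLE = {
    'Dwarf': ({'Con': 2},
              ['Darkvision', 'Dwarven Resilience',
               'Dwarven Combat Training', 'Stonecunning']),
    'Elf': ({'Dex': 2},
            ['Darkvision', 'Keen Senses', 'Fey Ancestry', 'Trance']),
}
def apply_race(race):
    """Apply the stat bonuses and traits for a race found in RACE_TABLE."""
    if race not in RACE_TABLE:
        return
    bonuses, traits = RACE_TABLE[race]
    for stat, bonus in bonuses.items():
        Laws.user_stats[stat] += bonus
    Laws.user_other_traits.extend(traits)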
| mit |
leviroth/praw | praw/models/auth.py | 1 | 5459 | """Provide the Auth class."""
from prawcore import (
Authorizer,
ImplicitAuthorizer,
UntrustedAuthenticator,
session,
)
from .base import PRAWBase
from ..exceptions import ClientException
class Auth(PRAWBase):
"""Auth provides an interface to Reddit's authorization."""
@property
def limits(self):
"""Return a dictionary containing the rate limit info.
The keys are:
:remaining: The number of requests remaining to be made in the
current rate limit window.
:reset_timestamp: A unix timestamp providing an upper bound on when the
rate limit counters will reset.
:used: The number of requests made in the current rate limit
window.
All values are initially ``None`` as these values are set in response
to issued requests.
The ``reset_timestamp`` value is an upper bound as the real timestamp
is computed on Reddit's end in preparation for sending the
response. This value may change slightly within a given window due to
slight changes in response times and rounding.
"""
data = self._reddit._core._rate_limiter
return {
"remaining": data.remaining,
"reset_timestamp": data.reset_timestamp,
"used": data.used,
}
def authorize(self, code):
"""Complete the web authorization flow and return the refresh token.
:param code: The code obtained through the request to the redirect uri.
:returns: The obtained refresh token, if available, otherwise ``None``.
The session's active authorization will be updated upon success.
"""
authenticator = self._reddit._read_only_core._authorizer._authenticator
authorizer = Authorizer(authenticator)
authorizer.authorize(code)
authorized_session = session(authorizer)
self._reddit._core = self._reddit._authorized_core = authorized_session
return authorizer.refresh_token
def implicit(self, access_token, expires_in, scope):
"""Set the active authorization to be an implicit authorization.
:param access_token: The access_token obtained from Reddit's callback.
:param expires_in: The number of seconds the ``access_token`` is valid
for. The origin of this value was returned from Reddit's callback.
You may need to subtract an offset before passing in this number to
account for a delay between when Reddit prepared the response, and
when you make this function call.
:param scope: A space-delimited string of Reddit OAuth2 scope names as
returned from Reddit's callback.
Raise :class:`.ClientException` if :class:`.Reddit` was initialized for
a non-installed application type.
"""
authenticator = self._reddit._read_only_core._authorizer._authenticator
if not isinstance(authenticator, UntrustedAuthenticator):
raise ClientException(
"implicit can only be used with installed apps."
)
implicit_session = session(
ImplicitAuthorizer(authenticator, access_token, expires_in, scope)
)
self._reddit._core = self._reddit._authorized_core = implicit_session
def scopes(self):
"""Return a set of scopes included in the current authorization.
For read-only authorizations this should return ``{'*'}``.
"""
authorizer = self._reddit._core._authorizer
if not authorizer.is_valid():
authorizer.refresh()
return authorizer.scopes
def url(self, scopes, state, duration="permanent", implicit=False):
"""Return the URL used out-of-band to grant access to your application.
:param scopes: A list of OAuth scopes to request authorization for.
:param state: A string that will be reflected in the callback to
``redirect_uri``. This value should be temporarily unique to the
client for whom the URL was generated for.
:param duration: Either ``permanent`` or ``temporary`` (default:
permanent). ``temporary`` authorizations generate access tokens
that last only 1 hour. ``permanent`` authorizations additionally
generate a refresh token that can be indefinitely used to generate
new hour-long access tokens. This value is ignored when
``implicit=True``.
:param implicit: For **installed** applications, this value can be set
to use the implicit, rather than the code flow. When True, the
``duration`` argument has no effect as only temporary tokens can be
retrieved.
"""
authenticator = self._reddit._read_only_core._authorizer._authenticator
if authenticator.redirect_uri is self._reddit.config.CONFIG_NOT_SET:
raise ClientException("redirect_uri must be provided")
if isinstance(authenticator, UntrustedAuthenticator):
return authenticator.authorize_url(
"temporary" if implicit else duration,
scopes,
state,
implicit=implicit,
)
if implicit:
raise ClientException(
"implicit can only be set for installed applications"
)
return authenticator.authorize_url(duration, scopes, state)
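# --- Illustrative usage sketch (assumption; not part of the original module) ---
# Typical code-flow usage from a praw.Reddit instance configured as a web app,
# using only the methods defined above:
#   auth_url = reddit.auth.url(["identity"], state="unique-state")
#   # ...send the user to auth_url, then handle the callback carrying `code`...
#   refresh_token = reddit.auth.authorize(code)
#   print(reddit.auth.scopes())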
| bsd-2-clause |
jgcaaprom/android_external_chromium_org | build/android/pylib/device/adb_wrapper.py | 36 | 12777 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module wraps Android's adb tool.
This is a thin wrapper around the adb interface. Any additional complexity
should be delegated to a higher level (ex. DeviceUtils).
"""
import errno
import os
from pylib import cmd_helper
from pylib.device import decorators
from pylib.device import device_errors
_DEFAULT_TIMEOUT = 30
_DEFAULT_RETRIES = 2
def _VerifyLocalFileExists(path):
"""Verifies a local file exists.
Args:
path: Path to the local file.
Raises:
IOError: If the file doesn't exist.
"""
if not os.path.exists(path):
raise IOError(errno.ENOENT, os.strerror(errno.ENOENT), path)
class AdbWrapper(object):
"""A wrapper around a local Android Debug Bridge executable."""
def __init__(self, device_serial):
"""Initializes the AdbWrapper.
Args:
device_serial: The device serial number as a string.
"""
self._device_serial = str(device_serial)
# pylint: disable=W0613
@classmethod
@decorators.WithTimeoutAndRetries
def _RunAdbCmd(cls, arg_list, timeout=None, retries=None, check_error=True):
cmd = ['adb'] + arg_list
exit_code, output = cmd_helper.GetCmdStatusAndOutput(cmd)
if exit_code != 0:
raise device_errors.AdbCommandFailedError(
cmd, 'returned non-zero exit code %s, output: %s' %
(exit_code, output))
# This catches some errors, including when the device drops offline;
# unfortunately adb is very inconsistent with error reporting so many
# command failures present differently.
if check_error and output[:len('error:')] == 'error:':
raise device_errors.AdbCommandFailedError(arg_list, output)
return output
# pylint: enable=W0613
def _DeviceAdbCmd(self, arg_list, timeout, retries, check_error=True):
"""Runs an adb command on the device associated with this object.
Args:
arg_list: A list of arguments to adb.
timeout: Timeout in seconds.
retries: Number of retries.
check_error: Check that the command doesn't return an error message. This
does NOT check the return code of shell commands.
Returns:
The output of the command.
"""
return self._RunAdbCmd(
['-s', self._device_serial] + arg_list, timeout=timeout,
retries=retries, check_error=check_error)
def __eq__(self, other):
"""Consider instances equal if they refer to the same device.
Args:
other: The instance to compare equality with.
Returns:
True if the instances are considered equal, false otherwise.
"""
return self._device_serial == str(other)
def __str__(self):
"""The string representation of an instance.
Returns:
The device serial number as a string.
"""
return self._device_serial
def __repr__(self):
return '%s(\'%s\')' % (self.__class__.__name__, self)
# TODO(craigdh): Determine the filter criteria that should be supported.
@classmethod
def GetDevices(cls, timeout=_DEFAULT_TIMEOUT, retries=_DEFAULT_RETRIES):
"""Get the list of active attached devices.
Args:
timeout: (optional) Timeout per try in seconds.
retries: (optional) Number of retries to attempt.
Yields:
AdbWrapper instances.
"""
output = cls._RunAdbCmd(['devices'], timeout=timeout, retries=retries)
lines = [line.split() for line in output.split('\n')]
return [AdbWrapper(line[0]) for line in lines
if len(line) == 2 and line[1] == 'device']
def GetDeviceSerial(self):
"""Gets the device serial number associated with this object.
Returns:
Device serial number as a string.
"""
return self._device_serial
def Push(self, local, remote, timeout=60*5, retries=_DEFAULT_RETRIES):
"""Pushes a file from the host to the device.
Args:
local: Path on the host filesystem.
remote: Path on the device filesystem.
timeout: (optional) Timeout per try in seconds.
retries: (optional) Number of retries to attempt.
"""
_VerifyLocalFileExists(local)
self._DeviceAdbCmd(['push', local, remote], timeout, retries)
def Pull(self, remote, local, timeout=60*5, retries=_DEFAULT_RETRIES):
"""Pulls a file from the device to the host.
Args:
remote: Path on the device filesystem.
local: Path on the host filesystem.
timeout: (optional) Timeout per try in seconds.
retries: (optional) Number of retries to attempt.
"""
self._DeviceAdbCmd(['pull', remote, local], timeout, retries)
_VerifyLocalFileExists(local)
def Shell(self, command, expect_rc=None, timeout=_DEFAULT_TIMEOUT,
retries=_DEFAULT_RETRIES):
"""Runs a shell command on the device.
Args:
command: The shell command to run.
expect_rc: (optional) If set checks that the command's return code matches
this value.
timeout: (optional) Timeout per try in seconds.
retries: (optional) Number of retries to attempt.
Returns:
The output of the shell command as a string.
Raises:
device_errors.AdbCommandFailedError: If the return code doesn't match
|expect_rc|.
"""
if expect_rc is None:
actual_command = command
else:
actual_command = '%s; echo $?;' % command
output = self._DeviceAdbCmd(
['shell', actual_command], timeout, retries, check_error=False)
if expect_rc is not None:
output_end = output.rstrip().rfind('\n') + 1
rc = output[output_end:].strip()
output = output[:output_end]
if int(rc) != expect_rc:
raise device_errors.AdbCommandFailedError(
['shell', command],
'shell command exited with code: %s' % rc,
self._device_serial)
return output
def Logcat(self, filter_spec=None, timeout=_DEFAULT_TIMEOUT,
retries=_DEFAULT_RETRIES):
"""Get the logcat output.
Args:
filter_spec: (optional) Spec to filter the logcat.
timeout: (optional) Timeout per try in seconds.
retries: (optional) Number of retries to attempt.
Returns:
logcat output as a string.
"""
cmd = ['logcat']
if filter_spec is not None:
cmd.append(filter_spec)
return self._DeviceAdbCmd(cmd, timeout, retries, check_error=False)
def Forward(self, local, remote, timeout=_DEFAULT_TIMEOUT,
retries=_DEFAULT_RETRIES):
"""Forward socket connections from the local socket to the remote socket.
Sockets are specified by one of:
tcp:<port>
localabstract:<unix domain socket name>
localreserved:<unix domain socket name>
localfilesystem:<unix domain socket name>
dev:<character device name>
jdwp:<process pid> (remote only)
Args:
local: The host socket.
remote: The device socket.
timeout: (optional) Timeout per try in seconds.
retries: (optional) Number of retries to attempt.
"""
self._DeviceAdbCmd(['forward', str(local), str(remote)], timeout, retries)
def JDWP(self, timeout=_DEFAULT_TIMEOUT, retries=_DEFAULT_RETRIES):
"""List of PIDs of processes hosting a JDWP transport.
Args:
timeout: (optional) Timeout per try in seconds.
retries: (optional) Number of retries to attempt.
Returns:
A list of PIDs as strings.
"""
return [a.strip() for a in
self._DeviceAdbCmd(['jdwp'], timeout, retries).split('\n')]
def Install(self, apk_path, forward_lock=False, reinstall=False,
sd_card=False, timeout=60*2, retries=_DEFAULT_RETRIES):
"""Install an apk on the device.
Args:
apk_path: Host path to the APK file.
forward_lock: (optional) If set forward-locks the app.
reinstall: (optional) If set reinstalls the app, keeping its data.
sd_card: (optional) If set installs on the SD card.
timeout: (optional) Timeout per try in seconds.
retries: (optional) Number of retries to attempt.
"""
_VerifyLocalFileExists(apk_path)
cmd = ['install']
if forward_lock:
cmd.append('-l')
if reinstall:
cmd.append('-r')
if sd_card:
cmd.append('-s')
cmd.append(apk_path)
output = self._DeviceAdbCmd(cmd, timeout, retries)
if 'Success' not in output:
raise device_errors.AdbCommandFailedError(cmd, output)
def Uninstall(self, package, keep_data=False, timeout=_DEFAULT_TIMEOUT,
retries=_DEFAULT_RETRIES):
"""Remove the app |package| from the device.
Args:
package: The package to uninstall.
keep_data: (optional) If set keep the data and cache directories.
timeout: (optional) Timeout per try in seconds.
retries: (optional) Number of retries to attempt.
"""
cmd = ['uninstall']
if keep_data:
cmd.append('-k')
cmd.append(package)
output = self._DeviceAdbCmd(cmd, timeout, retries)
if 'Failure' in output:
raise device_errors.AdbCommandFailedError(cmd, output)
def Backup(self, path, packages=None, apk=False, shared=False,
nosystem=True, include_all=False, timeout=_DEFAULT_TIMEOUT,
retries=_DEFAULT_RETRIES):
"""Write an archive of the device's data to |path|.
Args:
path: Local path to store the backup file.
packages: List of to packages to be backed up.
apk: (optional) If set include the .apk files in the archive.
      shared: (optional) If set back up the device's SD card.
nosystem: (optional) If set exclude system applications.
include_all: (optional) If set back up all installed applications and
|packages| is optional.
timeout: (optional) Timeout per try in seconds.
retries: (optional) Number of retries to attempt.
"""
cmd = ['backup', path]
if apk:
cmd.append('-apk')
if shared:
cmd.append('-shared')
if nosystem:
cmd.append('-nosystem')
if include_all:
cmd.append('-all')
if packages:
cmd.extend(packages)
assert bool(packages) ^ bool(include_all), (
'Provide \'packages\' or set \'include_all\' but not both.')
ret = self._DeviceAdbCmd(cmd, timeout, retries)
_VerifyLocalFileExists(path)
return ret
def Restore(self, path, timeout=_DEFAULT_TIMEOUT, retries=_DEFAULT_RETRIES):
"""Restore device contents from the backup archive.
Args:
path: Host path to the backup archive.
timeout: (optional) Timeout per try in seconds.
retries: (optional) Number of retries to attempt.
"""
_VerifyLocalFileExists(path)
self._DeviceAdbCmd(['restore'] + [path], timeout, retries)
def WaitForDevice(self, timeout=60*5, retries=_DEFAULT_RETRIES):
"""Block until the device is online.
Args:
timeout: (optional) Timeout per try in seconds.
retries: (optional) Number of retries to attempt.
"""
self._DeviceAdbCmd(['wait-for-device'], timeout, retries)
def GetState(self, timeout=_DEFAULT_TIMEOUT, retries=_DEFAULT_RETRIES):
"""Get device state.
Args:
timeout: (optional) Timeout per try in seconds.
retries: (optional) Number of retries to attempt.
Returns:
One of 'offline', 'bootloader', or 'device'.
"""
return self._DeviceAdbCmd(['get-state'], timeout, retries).strip()
def GetDevPath(self, timeout=_DEFAULT_TIMEOUT, retries=_DEFAULT_RETRIES):
"""Gets the device path.
Args:
timeout: (optional) Timeout per try in seconds.
retries: (optional) Number of retries to attempt.
Returns:
The device path (e.g. usb:3-4)
"""
return self._DeviceAdbCmd(['get-devpath'], timeout, retries)
def Remount(self, timeout=_DEFAULT_TIMEOUT, retries=_DEFAULT_RETRIES):
"""Remounts the /system partition on the device read-write."""
self._DeviceAdbCmd(['remount'], timeout, retries)
def Reboot(self, to_bootloader=False, timeout=60*5,
retries=_DEFAULT_RETRIES):
"""Reboots the device.
Args:
to_bootloader: (optional) If set reboots to the bootloader.
timeout: (optional) Timeout per try in seconds.
retries: (optional) Number of retries to attempt.
"""
if to_bootloader:
cmd = ['reboot-bootloader']
else:
cmd = ['reboot']
self._DeviceAdbCmd(cmd, timeout, retries)
def Root(self, timeout=_DEFAULT_TIMEOUT, retries=_DEFAULT_RETRIES):
"""Restarts the adbd daemon with root permissions, if possible.
Args:
timeout: (optional) Timeout per try in seconds.
retries: (optional) Number of retries to attempt.
"""
output = self._DeviceAdbCmd(['root'], timeout, retries)
if 'cannot' in output:
raise device_errors.AdbCommandFailedError(['root'], output)
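# --- Illustrative usage sketch (assumption; not part of the original module) ---
# Exercises only methods defined above; the serial number and paths are hypothetical.
def _example_usage(serial='0123456789ABCDEF'):
  adb = AdbWrapper(serial)
  adb.WaitForDevice()
  # Run a shell command and require a zero exit code.
  version = adb.Shell('getprop ro.build.version.release', expect_rc=0)
  adb.Push('local/file.txt', '/data/local/tmp/file.txt')
  return version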
| bsd-3-clause |
kurikuri99/xen_study | tools/xm-test/lib/XmTestLib/XenDevice.py | 38 | 8958 | #!/usr/bin/python
"""
Copyright (C) International Business Machines Corp., 2005, 2006
Authors: Dan Smith <danms@us.ibm.com>
Daniel Stekloff <dsteklof@us.ibm.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; under version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import sys
import commands
import os
import re
import time
from Xm import *
from Test import *
from config import *
from XenDomain import *
from NetConfig import *
from XmTestLib import *
from __init__ import *
class XenNetDevCmd:
def __init__(self, netDevice, addCmd, removeCmd):
"""Object representing a network device command"""
self.addcmd = addCmd
self.removecmd = removeCmd
self.addhasrun = False
self.rmvhasrun = False
self.netdevice = netDevice
def getAddCmd(self):
return self.addcmd
def getRemoveCmd(self):
return self.removecmd
def hasAddRun(self):
return self.addhasrun
def hasRemoveRun(self):
        return self.rmvhasrun
def runAddCmd(self, runOnDom0=False):
        # Runs on the domU console by default; pass runOnDom0=True to run the command on dom0 instead.
if runOnDom0 == False:
dom = self.netdevice.getDomain()
console = dom.getConsole()
console.runCmd(self.addcmd)
else:
status, output = traceCommand(self.addcmd)
if status:
raise NetworkError("Device add cmd failed: %s Status: %d"
% (self.addcmd, status))
self.addhasrun = True
def runRemoveCmd(self, runOnDom0=False):
        # Runs on the domU console by default; pass runOnDom0=True to run the command on dom0 instead.
if runOnDom0 == False:
dom = self.netdevice.getDomain()
console = dom.getConsole()
console.runCmd(self.removecmd)
else:
status, output = traceCommand(self.removecmd)
if status:
raise NetworkError("Device remove cmd failed: %s Status: %d"
% (self.removecmd, status))
        self.rmvhasrun = True
class XenDevice:
def __init__(self, domain, id, devConfig=None):
"""An object to represent Xen Devices like network and block
@param domain: Domain the device will be added to
@param id: Device identifier
@param devConfig: Initial configuration dictionary for XenDevice
"""
        if devConfig:
self.config = devConfig
else:
self.config = {}
self.id = id
self.domain = domain
self.configNode = None
# Commands run when domain is started or devices added and removed.
self.dom0_cmds = []
self.domU_cmds = []
def __str__(self):
"""Convert device config to XenConfig node compatible string"""
confstr = ''
for k, v in self.config.items():
if len(confstr) > 0:
confstr += ', '
if isinstance(v, int):
confstr += "%s=%i" % (k, v)
elif isinstance(v, list) and v:
confstr += "%s=%s" % (k, v)
elif isinstance(v, str) and v:
confstr += "%s=%s" % (k, v)
return confstr
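        # Example of the rendering above (added comment): a config of
        # {'bridge': 'xenbr0', 'ip': '10.0.0.1'} becomes "bridge=xenbr0, ip=10.0.0.1".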
def execAddCmds(self):
# Cmds for when a device is added to the system
if len(self.dom0_cmds) > 0:
for i in range(0, len(self.dom0_cmds)):
if self.dom0_cmds[i].getAddCmd():
self.dom0_cmds[i].runAddCmd(runOnDom0=True)
if len(self.domU_cmds) > 0:
for i in range(0, len(self.domU_cmds)):
if self.domU_cmds[i].getAddCmd():
self.domU_cmds[i].runAddCmd()
def execRemoveCmds(self):
# Cmds for when a device is removed from the system
if len(self.dom0_cmds) > 0:
for i in range(0, len(self.dom0_cmds)):
if (self.dom0_cmds[i].getRemoveCmd()
and self.dom0_cmds[i].hasAddRun() == True):
self.dom0_cmds[i].runRemoveCmd(runOnDom0=True)
if len(self.domU_cmds) > 0:
for i in range(0, len(self.domU_cmds)):
if (self.domU_cmds[i].getRemoveCmd()
and self.domU_cmds[i].hasAddRun() == True):
self.domU_cmds[i].runRemoveCmd()
def removeDevice(self):
self.execRemoveCmds()
def getId(self):
return self.id
def getConfigOpt(self):
return self.configNode
def getDomain(self):
return self.domain
class XenNetDevice(XenDevice):
def __init__(self, domain, id, devConfig=None):
"""An object to represent Xen Network Device
@param domain: Domain the device is being added to
@param id: Network device identifier, interface name like eth0
@param devConfig: Initial dictionary configuration for XenNetDevice
"""
if devConfig:
self.config = devConfig
else:
self.config = {}
self.id = id
self.domain = domain
self.configNode = "vif"
self.dom0_cmds = []
self.domU_cmds = []
self.network = None
self.netmask = None
self.ip = None
self.dom0_alias_ip = None
if domain.getDomainType() == "HVM":
self.config["type"] = "ioemu"
if not self.config.has_key('bridge'):
self.config["bridge"] = "xenbr0"
if self.config.has_key("ip"):
self.setNetDevIP(ip=self.config["ip"])
else:
if NETWORK_IP_RANGE != "dhcp":
self.setNetDevIP()
def __del__(self):
# Make sure we clean up NetConfig's list of ips, so the ip can be
# reused
self.releaseNetDevIP()
def addIfconfigCmd(self, domU=True):
# Method to add start and remove ifconfig functions
if domU == True:
locmd = XenNetDevCmd(self, addCmd="ifconfig lo 127.0.0.1", removeCmd=None)
ifcmd = []
# Start or Add cmd
acmd = 'ifconfig %s inet %s netmask %s up' % (self.id, self.ip, self.netmask)
rcmd = 'ifconfig %s down' % self.id
ifcmd = XenNetDevCmd(self, addCmd=acmd, removeCmd=rcmd)
if domU == True:
self.domU_cmds.append(locmd)
self.domU_cmds.append(ifcmd)
else:
self.dom0_cmds.append(ifcmd)
def removeDevice(self):
self.releaseNetDevIP()
def addDom0AliasCmd(self, dev=DOM0_INTF):
# Method to add start and remove dom0 alias cmds
acmd = 'ip addr add %s dev %s' % (self.dom0_alias_ip, dev)
rcmd = 'ip addr del %s/32 dev %s' % (self.dom0_alias_ip, dev)
aliascmd = XenNetDevCmd(self, addCmd=acmd, removeCmd=rcmd)
self.dom0_cmds.append(aliascmd)
def releaseNetDevIP(self):
# Must remove start cmds for ip configuration and then release from
# NetConfig
self.execRemoveCmds()
self.dom0_cmds = []
self.domU_cmds = []
if self.config.has_key("ip"):
del self.config["ip"]
if self.dom0_alias_ip:
xmtest_netconf.releaseIP("domain0", self.domain.getName(), self.dom0_alias_ip)
xmtest_netconf.releaseIP(self.domain.getName(), self.id, self.ip)
def getNetDevIP(self):
return self.ip
def getDom0AliasIP(self):
return self.dom0_alias_ip
def getNetwork(self):
return self.network
def get_netmask(self):
return self.netmask
def setNetDevIP(self, ip=None):
# Function to set a new IP for NetDevice.
if NETWORK_IP_RANGE == "dhcp":
raise NetworkError("System configured for dhcp, cannot set new ip.")
if (self.ip and not ip) or ((self.ip and ip) and (self.ip != ip)):
self.releaseNetDevIP()
if not self.netmask:
self.netmask = xmtest_netconf.getNetMask()
if not self.network:
self.network = xmtest_netconf.getNetwork()
if ip:
xmtest_netconf.setIP(self.domain.getName(), self.id, ip)
self.ip = ip
else:
self.ip = xmtest_netconf.getIP(self.domain.getName(), self.id)
self.addIfconfigCmd()
self.config["ip"] = str(self.ip)
# Setup an alias for Dom0
self.dom0_alias_ip = xmtest_netconf.getIP("domain0", self.domain.getName())
self.addDom0AliasCmd()
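    # Editor's sketch of typical use (hedged; assumes a configured xm-test domain
    # object and a non-dhcp NETWORK_IP_RANGE):
    #   netdev = XenNetDevice(domain, "eth0")   # __init__ leases an IP via setNetDevIP()
    #   netdev.getNetDevIP()                    # address taken from xmtest_netconf
    #   netdev.releaseNetDevIP()                # runs the remove cmds, frees the domU and dom0 alias IPs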
| gpl-2.0 |
kevin-coder/tensorflow-fork | tensorflow/contrib/checkpoint/python/python_state_test.py | 8 | 4053 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy
from tensorflow.contrib.checkpoint.python import python_state
from tensorflow.python.client import session
from tensorflow.python.eager import test
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variables
from tensorflow.python.training.tracking import util
class NumpyStateTests(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testSaveRestoreNumpyState(self):
directory = self.get_temp_dir()
prefix = os.path.join(directory, "ckpt")
save_state = python_state.NumpyState()
saver = util.Checkpoint(numpy=save_state)
save_state.a = numpy.ones([2, 2])
save_state.b = numpy.ones([2, 2])
save_state.b = numpy.zeros([2, 2])
save_state.c = numpy.int64(3)
self.assertAllEqual(numpy.ones([2, 2]), save_state.a)
self.assertAllEqual(numpy.zeros([2, 2]), save_state.b)
self.assertEqual(3, save_state.c)
first_save_path = saver.save(prefix)
save_state.a[1, 1] = 2.
save_state.c = numpy.int64(4)
second_save_path = saver.save(prefix)
load_state = python_state.NumpyState()
loader = util.Checkpoint(numpy=load_state)
loader.restore(first_save_path).initialize_or_restore()
self.assertAllEqual(numpy.ones([2, 2]), load_state.a)
self.assertAllEqual(numpy.zeros([2, 2]), load_state.b)
self.assertEqual(3, load_state.c)
load_state.a[0, 0] = 42.
self.assertAllEqual([[42., 1.], [1., 1.]], load_state.a)
loader.restore(first_save_path).run_restore_ops()
self.assertAllEqual(numpy.ones([2, 2]), load_state.a)
loader.restore(second_save_path).run_restore_ops()
self.assertAllEqual([[1., 1.], [1., 2.]], load_state.a)
self.assertAllEqual(numpy.zeros([2, 2]), load_state.b)
self.assertEqual(4, load_state.c)
def testNoGraphPollution(self):
graph = ops.Graph()
with graph.as_default(), session.Session():
directory = self.get_temp_dir()
prefix = os.path.join(directory, "ckpt")
save_state = python_state.NumpyState()
saver = util.Checkpoint(numpy=save_state)
save_state.a = numpy.ones([2, 2])
save_path = saver.save(prefix)
saver.restore(save_path)
graph.finalize()
saver.save(prefix)
save_state.a = numpy.zeros([2, 2])
saver.save(prefix)
saver.restore(save_path)
@test_util.run_in_graph_and_eager_modes
def testNoMixedNumpyStateTF(self):
save_state = python_state.NumpyState()
save_state.a = numpy.ones([2, 2])
with self.assertRaises(NotImplementedError):
save_state.v = variables.Variable(1.)
@test_util.run_in_graph_and_eager_modes
def testDocstringExample(self):
arrays = python_state.NumpyState()
checkpoint = util.Checkpoint(numpy_arrays=arrays)
arrays.x = numpy.zeros([3, 4])
save_path = checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
arrays.x[1, 1] = 4.
checkpoint.restore(save_path)
self.assertAllEqual(numpy.zeros([3, 4]), arrays.x)
second_checkpoint = util.Checkpoint(numpy_arrays=python_state.NumpyState())
second_checkpoint.restore(save_path)
self.assertAllEqual(numpy.zeros([3, 4]), second_checkpoint.numpy_arrays.x)
if __name__ == "__main__":
test.main()
| apache-2.0 |
zhuwenping/python-for-android | python3-alpha/python3-src/Lib/collections.py | 109 | 40821 | __all__ = ['deque', 'defaultdict', 'namedtuple', 'UserDict', 'UserList',
'UserString', 'Counter', 'OrderedDict']
# For bootstrapping reasons, the collection ABCs are defined in _abcoll.py.
# They should however be considered an integral part of collections.py.
from _abcoll import *
import _abcoll
__all__ += _abcoll.__all__
from _collections import deque, defaultdict
from operator import itemgetter as _itemgetter
from keyword import iskeyword as _iskeyword
import sys as _sys
import heapq as _heapq
from weakref import proxy as _proxy
from itertools import repeat as _repeat, chain as _chain, starmap as _starmap
from reprlib import recursive_repr as _recursive_repr
################################################################################
### OrderedDict
################################################################################
class _Link(object):
__slots__ = 'prev', 'next', 'key', '__weakref__'
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as regular dictionaries.
# The internal self.__map dict maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# The sentinel is in self.__hardroot with a weakref proxy in self.__root.
# The prev/next links are weakref proxies (to prevent circular references).
# Individual links are kept alive by the hard reference in self.__map.
# Those hard references disappear when a key is deleted from an OrderedDict.
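    # Editor's illustration of the structure described above (hedged sketch):
    #   od = OrderedDict.fromkeys('abc') builds root <-> a <-> b <-> c <-> root,
    #   with self.__map = {key: link}; "del od['b']" pops the link from __map and
    #   splices a <-> c, so deletion and move_to_end() stay O(1).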
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. The signature is the same as
regular dictionaries, but keyword arguments are not recommended because
their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__hardroot = _Link()
self.__root = root = _proxy(self.__hardroot)
root.prev = root.next = root
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value,
dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link at the end of the linked list,
# and the inherited dictionary is updated with the new key/value pair.
if key not in self:
self.__map[key] = link = Link()
root = self.__root
last = root.prev
link.prev, link.next, link.key = last, root, key
last.next = link
root.prev = proxy(link)
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which gets
# removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link = self.__map.pop(key)
link_prev = link.prev
link_next = link.next
link_prev.next = link_next
link_next.prev = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
# Traverse the linked list in order.
root = self.__root
curr = root.next
while curr is not root:
yield curr.key
curr = curr.next
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
# Traverse the linked list in reverse order.
root = self.__root
curr = root.prev
while curr is not root:
yield curr.key
curr = curr.prev
def clear(self):
'od.clear() -> None. Remove all items from od.'
root = self.__root
root.prev = root.next = root
self.__map.clear()
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root.prev
link_prev = link.prev
link_prev.next = root
root.prev = link_prev
else:
link = root.next
link_next = link.next
root.next = link_next
link_next.prev = root
key = link.key
del self.__map[key]
value = dict.pop(self, key)
return key, value
def move_to_end(self, key, last=True):
'''Move an existing element to the end (or beginning if last==False).
Raises KeyError if the element does not exist.
When last=True, acts like a fast version of self[key]=self.pop(key).
'''
link = self.__map[key]
link_prev = link.prev
link_next = link.next
link_prev.next = link_next
link_next.prev = link_prev
root = self.__root
if last:
last = root.prev
link.prev = last
link.next = root
last.next = root.prev = link
else:
first = root.next
link.prev = root
link.next = first
root.next = first.prev = link
def __sizeof__(self):
sizeof = _sys.getsizeof
n = len(self) + 1 # number of links including root
size = sizeof(self.__dict__) # instance dictionary
size += sizeof(self.__map) * 2 # internal dict and inherited dict
size += sizeof(self.__hardroot) * n # link objects
size += sizeof(self.__root) * n # proxy objects
return size
update = __update = MutableMapping.update
keys = MutableMapping.keys
values = MutableMapping.values
items = MutableMapping.items
__ne__ = MutableMapping.__ne__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding
value. If key is not found, d is returned if given, otherwise KeyError
is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
@_recursive_repr()
def __repr__(self):
'od.__repr__() <==> repr(od)'
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self.items()))
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S.
If not specified, the value defaults to None.
'''
self = cls()
for key in iterable:
self[key] = value
return self
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and \
all(p==q for p, q in zip(self.items(), other.items()))
return dict.__eq__(self, other)
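    # Editor's illustration of the comparison rule above (hedged):
    #   OrderedDict([('a', 1), ('b', 2)]) == OrderedDict([('b', 2), ('a', 1)])  -> False
    #   OrderedDict([('a', 1), ('b', 2)]) == {'b': 2, 'a': 1}                   -> True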
################################################################################
### namedtuple
################################################################################
_class_template = '''\
from builtins import property as _property, tuple as _tuple
from operator import itemgetter as _itemgetter
from collections import OrderedDict
class {typename}(tuple):
'{typename}({arg_list})'
__slots__ = ()
_fields = {field_names!r}
def __new__(_cls, {arg_list}):
'Create new instance of {typename}({arg_list})'
return _tuple.__new__(_cls, ({arg_list}))
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
'Make a new {typename} object from a sequence or iterable'
result = new(cls, iterable)
if len(result) != {num_fields:d}:
raise TypeError('Expected {num_fields:d} arguments, got %d' % len(result))
return result
def __repr__(self):
'Return a nicely formatted representation string'
return self.__class__.__name__ + '({repr_fmt})' % self
def _asdict(self):
'Return a new OrderedDict which maps field names to their values'
return OrderedDict(zip(self._fields, self))
__dict__ = property(_asdict)
def _replace(_self, **kwds):
'Return a new {typename} object replacing specified fields with new values'
result = _self._make(map(kwds.pop, {field_names!r}, _self))
if kwds:
raise ValueError('Got unexpected field names: %r' % list(kwds))
return result
def __getnewargs__(self):
'Return self as a plain tuple. Used by copy and pickle.'
return tuple(self)
{field_defs}
'''
_repr_template = '{name}=%r'
_field_template = '''\
{name} = _property(_itemgetter({index:d}), doc='Alias for field number {index:d}')
'''
def namedtuple(typename, field_names, verbose=False, rename=False):
"""Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', ['x', 'y'])
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
    >>> p.x + p.y # fields also accessible by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
# Parse and validate the field names. Validation serves two purposes,
# generating informative error messages and preventing template injection attacks.
if isinstance(field_names, str):
field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas
field_names = list(map(str, field_names))
if rename:
seen = set()
for index, name in enumerate(field_names):
if (not all(c.isalnum() or c=='_' for c in name)
or _iskeyword(name)
or not name
or name[0].isdigit()
or name.startswith('_')
or name in seen):
field_names[index] = '_%d' % index
seen.add(name)
for name in [typename] + field_names:
if not all(c.isalnum() or c=='_' for c in name):
raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name)
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a keyword: %r' % name)
if name[0].isdigit():
raise ValueError('Type names and field names cannot start with a number: %r' % name)
seen = set()
for name in field_names:
if name.startswith('_') and not rename:
raise ValueError('Field names cannot start with an underscore: %r' % name)
if name in seen:
raise ValueError('Encountered duplicate field name: %r' % name)
seen.add(name)
# Fill-in the class template
class_definition = _class_template.format(
typename = typename,
field_names = tuple(field_names),
num_fields = len(field_names),
arg_list = repr(tuple(field_names)).replace("'", "")[1:-1],
repr_fmt = ', '.join(_repr_template.format(name=name) for name in field_names),
field_defs = '\n'.join(_field_template.format(index=index, name=name)
for index, name in enumerate(field_names))
)
# Execute the template string in a temporary namespace and
# support tracing utilities by setting a value for frame.f_globals['__name__']
namespace = dict(__name__='namedtuple_%s' % typename)
try:
exec(class_definition, namespace)
except SyntaxError as e:
raise SyntaxError(e.msg + ':\n\n' + class_definition)
result = namespace[typename]
if verbose:
print(class_definition)
# For pickling to work, the __module__ variable needs to be set to the frame
    # where the named tuple is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython).
try:
result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
return result
########################################################################
### Counter
########################################################################
def _count_elements(mapping, iterable):
'Tally elements from the iterable.'
mapping_get = mapping.get
for elem in iterable:
mapping[elem] = mapping_get(elem, 0) + 1
try: # Load C helper function if available
from _collections import _count_elements
except ImportError:
pass
class Counter(dict):
'''Dict subclass for counting hashable items. Sometimes called a bag
or multiset. Elements are stored as dictionary keys and their counts
are stored as dictionary values.
>>> c = Counter('abcdeabcdabcaba') # count elements from a string
>>> c.most_common(3) # three most common elements
[('a', 5), ('b', 4), ('c', 3)]
>>> sorted(c) # list all unique elements
['a', 'b', 'c', 'd', 'e']
>>> ''.join(sorted(c.elements())) # list elements with repetitions
'aaaaabbbbcccdde'
>>> sum(c.values()) # total of all counts
15
>>> c['a'] # count of letter 'a'
5
>>> for elem in 'shazam': # update counts from an iterable
... c[elem] += 1 # by adding 1 to each element's count
>>> c['a'] # now there are seven 'a'
7
>>> del c['b'] # remove all 'b'
>>> c['b'] # now there are zero 'b'
0
>>> d = Counter('simsalabim') # make another counter
>>> c.update(d) # add in the second counter
>>> c['a'] # now there are nine 'a'
9
>>> c.clear() # empty the counter
>>> c
Counter()
Note: If a count is set to zero or reduced to zero, it will remain
in the counter until the entry is deleted or the counter is cleared:
>>> c = Counter('aaabbc')
>>> c['b'] -= 2 # reduce the count of 'b' by two
>>> c.most_common() # 'b' is still in, but its count is zero
[('a', 3), ('c', 1), ('b', 0)]
'''
# References:
# http://en.wikipedia.org/wiki/Multiset
# http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html
# http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm
# http://code.activestate.com/recipes/259174/
# Knuth, TAOCP Vol. II section 4.6.3
def __init__(self, iterable=None, **kwds):
'''Create a new, empty Counter object. And if given, count elements
from an input iterable. Or, initialize the count from another mapping
of elements to their counts.
>>> c = Counter() # a new, empty counter
>>> c = Counter('gallahad') # a new counter from an iterable
>>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
>>> c = Counter(a=4, b=2) # a new counter from keyword args
'''
super().__init__()
self.update(iterable, **kwds)
def __missing__(self, key):
'The count of elements not in the Counter is zero.'
# Needed so that self[missing_item] does not raise KeyError
return 0
def most_common(self, n=None):
'''List the n most common elements and their counts from the most
common to the least. If n is None, then list all element counts.
>>> Counter('abcdeabcdabcaba').most_common(3)
[('a', 5), ('b', 4), ('c', 3)]
'''
# Emulate Bag.sortedByCount from Smalltalk
if n is None:
return sorted(self.items(), key=_itemgetter(1), reverse=True)
return _heapq.nlargest(n, self.items(), key=_itemgetter(1))
def elements(self):
'''Iterator over elements repeating each as many times as its count.
>>> c = Counter('ABCABC')
>>> sorted(c.elements())
['A', 'A', 'B', 'B', 'C', 'C']
# Knuth's example for prime factors of 1836: 2**2 * 3**3 * 17**1
>>> prime_factors = Counter({2: 2, 3: 3, 17: 1})
>>> product = 1
>>> for factor in prime_factors.elements(): # loop over factors
... product *= factor # and multiply them
>>> product
1836
Note, if an element's count has been set to zero or is a negative
number, elements() will ignore it.
'''
# Emulate Bag.do from Smalltalk and Multiset.begin from C++.
return _chain.from_iterable(_starmap(_repeat, self.items()))
# Override dict methods where necessary
@classmethod
def fromkeys(cls, iterable, v=None):
# There is no equivalent method for counters because setting v=1
# means that no element can have a count greater than one.
raise NotImplementedError(
'Counter.fromkeys() is undefined. Use Counter(iterable) instead.')
def update(self, iterable=None, **kwds):
'''Like dict.update() but add counts instead of replacing them.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.update('witch') # add elements from another iterable
>>> d = Counter('watch')
>>> c.update(d) # add elements from another counter
>>> c['h'] # four 'h' in which, witch, and watch
4
'''
# The regular dict.update() operation makes no sense here because the
        # replace behavior results in some of the original untouched counts
        # being mixed in with all of the other counts for a mishmash that
        # doesn't have a straightforward interpretation in most counting
# contexts. Instead, we implement straight-addition. Both the inputs
# and outputs are allowed to contain zero and negative counts.
if iterable is not None:
if isinstance(iterable, Mapping):
if self:
self_get = self.get
for elem, count in iterable.items():
self[elem] = count + self_get(elem, 0)
else:
super().update(iterable) # fast path when counter is empty
else:
_count_elements(self, iterable)
if kwds:
self.update(kwds)
def subtract(self, iterable=None, **kwds):
'''Like dict.update() but subtracts counts instead of replacing them.
Counts can be reduced below zero. Both the inputs and outputs are
allowed to contain zero and negative counts.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.subtract('witch') # subtract elements from another iterable
>>> c.subtract(Counter('watch')) # subtract elements from another counter
>>> c['h'] # 2 in which, minus 1 in witch, minus 1 in watch
0
>>> c['w'] # 1 in which, minus 1 in witch, minus 1 in watch
-1
'''
if iterable is not None:
self_get = self.get
if isinstance(iterable, Mapping):
for elem, count in iterable.items():
self[elem] = self_get(elem, 0) - count
else:
for elem in iterable:
self[elem] = self_get(elem, 0) - 1
if kwds:
self.subtract(kwds)
def copy(self):
'Return a shallow copy.'
return self.__class__(self)
def __reduce__(self):
return self.__class__, (dict(self),)
def __delitem__(self, elem):
'Like dict.__delitem__() but does not raise KeyError for missing values.'
if elem in self:
super().__delitem__(elem)
def __repr__(self):
if not self:
return '%s()' % self.__class__.__name__
items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
return '%s({%s})' % (self.__class__.__name__, items)
# Multiset-style mathematical operations discussed in:
# Knuth TAOCP Volume II section 4.6.3 exercise 19
# and at http://en.wikipedia.org/wiki/Multiset
#
# Outputs guaranteed to only include positive counts.
#
# To strip negative and zero counts, add-in an empty counter:
# c += Counter()
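    # Editor's illustration of the note above (hedged):
    #   >>> c = Counter(a=2, b=-1)
    #   >>> c + Counter()                 # drops the non-positive 'b' entry
    #   Counter({'a': 2})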
def __add__(self, other):
'''Add counts from two counters.
>>> Counter('abbb') + Counter('bcc')
Counter({'b': 4, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
newcount = count + other[elem]
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count > 0:
result[elem] = count
return result
def __sub__(self, other):
''' Subtract count, but keep only results with positive counts.
>>> Counter('abbbc') - Counter('bccd')
Counter({'b': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
newcount = count - other[elem]
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count < 0:
result[elem] = 0 - count
return result
def __or__(self, other):
        '''Union is the maximum of the values in either of the input counters.
>>> Counter('abbb') | Counter('bcc')
Counter({'b': 3, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
other_count = other[elem]
newcount = other_count if count < other_count else count
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count > 0:
result[elem] = count
return result
def __and__(self, other):
''' Intersection is the minimum of corresponding counts.
>>> Counter('abbb') & Counter('bcc')
Counter({'b': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
other_count = other[elem]
newcount = count if count < other_count else other_count
if newcount > 0:
result[elem] = newcount
return result
########################################################################
### ChainMap (helper for configparser)
########################################################################
class _ChainMap(MutableMapping):
''' A ChainMap groups multiple dicts (or other mappings) together
to create a single, updateable view.
The underlying mappings are stored in a list. That list is public and can
    be accessed or updated using the *maps* attribute. There is no other state.
Lookups search the underlying mappings successively until a key is found.
In contrast, writes, updates, and deletions only operate on the first
mapping.
'''
def __init__(self, *maps):
'''Initialize a ChainMap by setting *maps* to the given mappings.
If no mappings are provided, a single empty dictionary is used.
'''
self.maps = list(maps) or [{}] # always at least one map
def __missing__(self, key):
raise KeyError(key)
def __getitem__(self, key):
for mapping in self.maps:
try:
return mapping[key] # can't use 'key in mapping' with defaultdict
except KeyError:
pass
return self.__missing__(key) # support subclasses that define __missing__
def get(self, key, default=None):
return self[key] if key in self else default
def __len__(self):
return len(set().union(*self.maps)) # reuses stored hash values if possible
def __iter__(self):
return iter(set().union(*self.maps))
def __contains__(self, key):
return any(key in m for m in self.maps)
def __bool__(self):
return any(self.maps)
@_recursive_repr()
def __repr__(self):
return '{0.__class__.__name__}({1})'.format(
self, ', '.join(map(repr, self.maps)))
@classmethod
def fromkeys(cls, iterable, *args):
'Create a ChainMap with a single dict created from the iterable.'
return cls(dict.fromkeys(iterable, *args))
def copy(self):
'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]'
return self.__class__(self.maps[0].copy(), *self.maps[1:])
__copy__ = copy
def new_child(self): # like Django's Context.push()
'New ChainMap with a new dict followed by all previous maps.'
return self.__class__({}, *self.maps)
@property
def parents(self): # like Django's Context.pop()
'New ChainMap from maps[1:].'
return self.__class__(*self.maps[1:])
def __setitem__(self, key, value):
self.maps[0][key] = value
def __delitem__(self, key):
try:
del self.maps[0][key]
except KeyError:
raise KeyError('Key not found in the first mapping: {!r}'.format(key))
def popitem(self):
        'Remove and return an item pair from maps[0]. Raise KeyError if maps[0] is empty.'
try:
return self.maps[0].popitem()
except KeyError:
raise KeyError('No keys found in the first mapping.')
def pop(self, key, *args):
'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].'
try:
return self.maps[0].pop(key, *args)
except KeyError:
raise KeyError('Key not found in the first mapping: {!r}'.format(key))
def clear(self):
'Clear maps[0], leaving maps[1:] intact.'
self.maps[0].clear()
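    # Editor's sketch of the semantics in the class docstring (hedged; _ChainMap is
    # an internal helper, shown here only for illustration):
    #   cm = _ChainMap({'x': 1}, {'x': 2, 'y': 3})
    #   cm['x']  -> 1      # first mapping wins on lookup
    #   cm['y']  -> 3      # later mappings are searched in order
    #   cm['z'] = 9        # writes, updates and deletions hit maps[0] only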
################################################################################
### UserDict
################################################################################
class UserDict(MutableMapping):
# Start by filling-out the abstract methods
def __init__(self, dict=None, **kwargs):
self.data = {}
if dict is not None:
self.update(dict)
if len(kwargs):
self.update(kwargs)
def __len__(self): return len(self.data)
def __getitem__(self, key):
if key in self.data:
return self.data[key]
if hasattr(self.__class__, "__missing__"):
return self.__class__.__missing__(self, key)
raise KeyError(key)
def __setitem__(self, key, item): self.data[key] = item
def __delitem__(self, key): del self.data[key]
def __iter__(self):
return iter(self.data)
# Modify __contains__ to work correctly when __missing__ is present
def __contains__(self, key):
return key in self.data
# Now, add the methods in dicts but not in MutableMapping
def __repr__(self): return repr(self.data)
def copy(self):
if self.__class__ is UserDict:
return UserDict(self.data.copy())
import copy
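        # Editor's note: self.data is swapped out before copy.copy() so the
        # subclass copy does not share the original dict; the items are then
        # copied back in via c.update(self).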
data = self.data
try:
self.data = {}
c = copy.copy(self)
finally:
self.data = data
c.update(self)
return c
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
################################################################################
### UserList
################################################################################
class UserList(MutableSequence):
"""A more or less complete user-defined wrapper around list objects."""
def __init__(self, initlist=None):
self.data = []
if initlist is not None:
# XXX should this accept an arbitrary sequence?
if type(initlist) == type(self.data):
self.data[:] = initlist
elif isinstance(initlist, UserList):
self.data[:] = initlist.data[:]
else:
self.data = list(initlist)
def __repr__(self): return repr(self.data)
def __lt__(self, other): return self.data < self.__cast(other)
def __le__(self, other): return self.data <= self.__cast(other)
def __eq__(self, other): return self.data == self.__cast(other)
def __ne__(self, other): return self.data != self.__cast(other)
def __gt__(self, other): return self.data > self.__cast(other)
def __ge__(self, other): return self.data >= self.__cast(other)
def __cast(self, other):
return other.data if isinstance(other, UserList) else other
def __contains__(self, item): return item in self.data
def __len__(self): return len(self.data)
def __getitem__(self, i): return self.data[i]
def __setitem__(self, i, item): self.data[i] = item
def __delitem__(self, i): del self.data[i]
def __add__(self, other):
if isinstance(other, UserList):
return self.__class__(self.data + other.data)
elif isinstance(other, type(self.data)):
return self.__class__(self.data + other)
return self.__class__(self.data + list(other))
def __radd__(self, other):
if isinstance(other, UserList):
return self.__class__(other.data + self.data)
elif isinstance(other, type(self.data)):
return self.__class__(other + self.data)
return self.__class__(list(other) + self.data)
def __iadd__(self, other):
if isinstance(other, UserList):
self.data += other.data
elif isinstance(other, type(self.data)):
self.data += other
else:
self.data += list(other)
return self
def __mul__(self, n):
return self.__class__(self.data*n)
__rmul__ = __mul__
def __imul__(self, n):
self.data *= n
return self
def append(self, item): self.data.append(item)
def insert(self, i, item): self.data.insert(i, item)
def pop(self, i=-1): return self.data.pop(i)
def remove(self, item): self.data.remove(item)
def count(self, item): return self.data.count(item)
def index(self, item, *args): return self.data.index(item, *args)
def reverse(self): self.data.reverse()
def sort(self, *args, **kwds): self.data.sort(*args, **kwds)
def extend(self, other):
if isinstance(other, UserList):
self.data.extend(other.data)
else:
self.data.extend(other)
################################################################################
### UserString
################################################################################
class UserString(Sequence):
def __init__(self, seq):
if isinstance(seq, str):
self.data = seq
elif isinstance(seq, UserString):
self.data = seq.data[:]
else:
self.data = str(seq)
def __str__(self): return str(self.data)
def __repr__(self): return repr(self.data)
def __int__(self): return int(self.data)
def __float__(self): return float(self.data)
def __complex__(self): return complex(self.data)
def __hash__(self): return hash(self.data)
def __eq__(self, string):
if isinstance(string, UserString):
return self.data == string.data
return self.data == string
def __ne__(self, string):
if isinstance(string, UserString):
return self.data != string.data
return self.data != string
def __lt__(self, string):
if isinstance(string, UserString):
return self.data < string.data
return self.data < string
def __le__(self, string):
if isinstance(string, UserString):
return self.data <= string.data
return self.data <= string
def __gt__(self, string):
if isinstance(string, UserString):
return self.data > string.data
return self.data > string
def __ge__(self, string):
if isinstance(string, UserString):
return self.data >= string.data
return self.data >= string
def __contains__(self, char):
if isinstance(char, UserString):
char = char.data
return char in self.data
def __len__(self): return len(self.data)
def __getitem__(self, index): return self.__class__(self.data[index])
def __add__(self, other):
if isinstance(other, UserString):
return self.__class__(self.data + other.data)
elif isinstance(other, str):
return self.__class__(self.data + other)
return self.__class__(self.data + str(other))
def __radd__(self, other):
if isinstance(other, str):
return self.__class__(other + self.data)
return self.__class__(str(other) + self.data)
def __mul__(self, n):
return self.__class__(self.data*n)
__rmul__ = __mul__
def __mod__(self, args):
return self.__class__(self.data % args)
# the following methods are defined in alphabetical order:
def capitalize(self): return self.__class__(self.data.capitalize())
def center(self, width, *args):
return self.__class__(self.data.center(width, *args))
def count(self, sub, start=0, end=_sys.maxsize):
if isinstance(sub, UserString):
sub = sub.data
return self.data.count(sub, start, end)
def encode(self, encoding=None, errors=None): # XXX improve this?
if encoding:
if errors:
return self.__class__(self.data.encode(encoding, errors))
return self.__class__(self.data.encode(encoding))
return self.__class__(self.data.encode())
def endswith(self, suffix, start=0, end=_sys.maxsize):
return self.data.endswith(suffix, start, end)
def expandtabs(self, tabsize=8):
return self.__class__(self.data.expandtabs(tabsize))
def find(self, sub, start=0, end=_sys.maxsize):
if isinstance(sub, UserString):
sub = sub.data
return self.data.find(sub, start, end)
def format(self, *args, **kwds):
return self.data.format(*args, **kwds)
def index(self, sub, start=0, end=_sys.maxsize):
return self.data.index(sub, start, end)
def isalpha(self): return self.data.isalpha()
def isalnum(self): return self.data.isalnum()
def isdecimal(self): return self.data.isdecimal()
def isdigit(self): return self.data.isdigit()
def isidentifier(self): return self.data.isidentifier()
def islower(self): return self.data.islower()
def isnumeric(self): return self.data.isnumeric()
def isspace(self): return self.data.isspace()
def istitle(self): return self.data.istitle()
def isupper(self): return self.data.isupper()
def join(self, seq): return self.data.join(seq)
def ljust(self, width, *args):
return self.__class__(self.data.ljust(width, *args))
def lower(self): return self.__class__(self.data.lower())
def lstrip(self, chars=None): return self.__class__(self.data.lstrip(chars))
def partition(self, sep):
return self.data.partition(sep)
def replace(self, old, new, maxsplit=-1):
if isinstance(old, UserString):
old = old.data
if isinstance(new, UserString):
new = new.data
return self.__class__(self.data.replace(old, new, maxsplit))
def rfind(self, sub, start=0, end=_sys.maxsize):
if isinstance(sub, UserString):
sub = sub.data
return self.data.rfind(sub, start, end)
def rindex(self, sub, start=0, end=_sys.maxsize):
return self.data.rindex(sub, start, end)
def rjust(self, width, *args):
return self.__class__(self.data.rjust(width, *args))
def rpartition(self, sep):
return self.data.rpartition(sep)
def rstrip(self, chars=None):
return self.__class__(self.data.rstrip(chars))
def split(self, sep=None, maxsplit=-1):
return self.data.split(sep, maxsplit)
def rsplit(self, sep=None, maxsplit=-1):
return self.data.rsplit(sep, maxsplit)
def splitlines(self, keepends=0): return self.data.splitlines(keepends)
def startswith(self, prefix, start=0, end=_sys.maxsize):
return self.data.startswith(prefix, start, end)
def strip(self, chars=None): return self.__class__(self.data.strip(chars))
def swapcase(self): return self.__class__(self.data.swapcase())
def title(self): return self.__class__(self.data.title())
def translate(self, *args):
return self.__class__(self.data.translate(*args))
def upper(self): return self.__class__(self.data.upper())
def zfill(self, width): return self.__class__(self.data.zfill(width))
################################################################################
### Simple tests
################################################################################
if __name__ == '__main__':
# verify that instances can be pickled
from pickle import loads, dumps
Point = namedtuple('Point', 'x, y', True)
p = Point(x=10, y=20)
assert p == loads(dumps(p))
# test and demonstrate ability to override methods
class Point(namedtuple('Point', 'x y')):
__slots__ = ()
@property
def hypot(self):
return (self.x ** 2 + self.y ** 2) ** 0.5
def __str__(self):
return 'Point: x=%6.3f y=%6.3f hypot=%6.3f' % (self.x, self.y, self.hypot)
for p in Point(3, 4), Point(14, 5/7.):
print (p)
class Point(namedtuple('Point', 'x y')):
'Point class with optimized _make() and _replace() without error-checking'
__slots__ = ()
_make = classmethod(tuple.__new__)
def _replace(self, _map=map, **kwds):
return self._make(_map(kwds.get, ('x', 'y'), self))
print(Point(11, 22)._replace(x=100))
Point3D = namedtuple('Point3D', Point._fields + ('z',))
print(Point3D.__doc__)
import doctest
TestResults = namedtuple('TestResults', 'failed attempted')
print(TestResults(*doctest.testmod()))
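    # Editor's addition: a minimal, hedged demonstration of the OrderedDict and
    # Counter behaviour documented above; not part of the original self-test.
    od = OrderedDict.fromkeys('abcde')
    od.move_to_end('b')
    assert ''.join(od) == 'acdeb'
    od.move_to_end('b', last=False)
    assert ''.join(od) == 'bacde'
    assert Counter('abbb') & Counter('bcc') == Counter({'b': 1})
    assert Counter('abbb') | Counter('bcc') == Counter({'b': 3, 'c': 2, 'a': 1})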
| apache-2.0 |
skelton/G5_444 | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
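# Editor's note -- example invocations for the usage string above (hedged; exact
# paths depend on the local perf installation):
#   perf script -s sctop.py              # all processes, refresh every 3 seconds
#   perf script -s sctop.py firefox 5    # only syscalls from "firefox", every 5 seconds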
| gpl-2.0 |
Carrotsmile/CS428 | steerstats/tools/plotting/animating/anim_scatter.py | 8 | 3367 | """
Matplotlib Animation Example
author: Jake Vanderplas
email: vanderplas@astro.washington.edu
website: http://jakevdp.github.com
license: BSD
Please feel free to use and modify this, but keep the above information. Thanks!
"""
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import animation
from mpl_toolkits.mplot3d import Axes3D
import matplotlib
import random
import sys
import csv
filename = sys.argv[1]
data = []
for i in range(1, int(sys.argv[2])):
tmp_filename = filename + str(i) + ".csv"
csvfile = open(tmp_filename, 'r')
spamreader = csv.reader(csvfile, delimiter=',')
tmp_data = []
for row in spamreader:
tmp_data.append([float(row[0]), float(row[1]), float(row[2])])
# tmp_data.append([float(row[0]), float(row[1]), float(row[2])])
data.append(tmp_data)
data = np.array(data)
up = 2
low = 0
# First set up the figure, the axis, and the plot element we want to animate
fig = plt.figure()
ax = fig.add_subplot(231)
ax.set_xlabel('Efficency Metric')
ax.set_ylabel('PLE Metric')
ax.set_xlim([0.9,1])
ax.set_ylim([0.1,0.9])
ax2 = fig.add_subplot(232)
ax2.set_xlabel('PLE Metric')
ax2.set_ylabel('Entropy Metric')
ax2.set_ylim([1,4])
ax2.set_xlim([0,1])
ax3 = fig.add_subplot(233)
ax3.set_xlabel('Efficency Metric')
ax3.set_ylabel('Entropy Metric')
ax3.set_ylim([1.5,3])
ax3.set_xlim([0.9,1])
ax4 = fig.add_subplot(212, projection='3d')
ax4.set_xlabel('Efficency Metric')
ax4.set_ylabel('PLE Metric')
ax4.set_zlabel('Entropy Metric')
ax4.set_xlim([0.9,1])
ax4.set_ylim([0.1,0.8])
ax4.set_zlim([1.0,4])
# ax = plt.axes(xlim=(low, up), ylim=(low, up))
# ax = plt.axes(xlim=(0.9, 1.0), ylim=(0, 1))
scat1 = ax.scatter([3], [4], c="b")
scat2 = ax2.scatter([3], [4], c="b")
scat3 = ax3.scatter([3], [4], c="b")
scat4 = ax4.scatter([3, 4], [4, 5], [5, 6], c="b")
# initialization function: plot the background of each frame
def init():
print "paths"
# print scat.get_paths()
# sys.exit()
# scat.set_paths(matplotlib.path.Path([[2, 3]]))
return scat1, scat2, scat3, scat4
# animation function. This is called sequentially
def animate(i):
tmp_data=data[i]
# print tmp_data[:, 1:3]
scat1.set_offsets(tmp_data[:, :2])
scat2.set_offsets(tmp_data[:, 1:3])
scat3.set_offsets(tmp_data[:, [0, 2]])
# scat4.set_offsets(tmp_data)
print scat4._offsets3d
# scat4._offsets3d = (np.ma.ravel(tmp_data[:, 0]), np.ma.ravel(tmp_data[:, 1]), np.ma.ravel(tmp_data[:, 2]))
scat4._offsets3d = (tuple(tmp_data[:, 0]), tuple(tmp_data[:, 1]), (tmp_data[:, 2]))
# scat4._offsets3d = tmp_data
# scat4.set_offsets(tmp_data[:,:2])
# scat4.set_3d_properties(tmp_data[:,2],'z')
plt.draw()
return scat1, scat2, scat3, scat4
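# Editor's note (hedged): Axes3D scatter artists expose no set_offsets() that takes
# three coordinates, which is why animate() assigns scat4._offsets3d directly; that
# private attribute holds the (xs, ys, zs) sequences mplot3d re-projects on each draw.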
# call the animator. blit=True means only re-draw the parts that have changed.
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=200, interval=100, blit=True)
# save the animation as an mp4. This requires ffmpeg or mencoder to be
# installed. The extra_args ensure that the x264 codec is used, so that
# the video can be embedded in html5. You may need to adjust this for
# your system: for more information, see
# http://matplotlib.sourceforge.net/api/animation_api.html
# anim.save('basic_animation.mp4', fps=30, extra_args=['-vcodec', 'libx264'])
plt.show() | gpl-3.0 |
kaleidos/intranet | backend/intranet/migrations/0004_auto__del_imputation__del_unique_imputation_project_part__del_intranet.py | 2 | 14177 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing unique constraint on 'IntranetProfile', fields ['user']
db.delete_unique(u'intranet_intranetprofile', ['user_id'])
# Removing unique constraint on 'Imputation', fields ['project', 'part']
db.delete_unique(u'intranet_imputation', ['project_id', 'part_id'])
# Deleting model 'Imputation'
db.delete_table(u'intranet_imputation')
# Deleting model 'IntranetProfile'
db.delete_table(u'intranet_intranetprofile')
def backwards(self, orm):
# Adding model 'Imputation'
db.create_table(u'intranet_imputation', (
('project', self.gf('django.db.models.fields.related.ForeignKey')(related_name='imputations', to=orm['intranet.Project'])),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=255, unique=True)),
('hours', self.gf('django.db.models.fields.CommaSeparatedIntegerField')(max_length=500)),
('part', self.gf('django.db.models.fields.related.ForeignKey')(related_name='imputations', to=orm['intranet.Part'])),
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal(u'intranet', ['Imputation'])
# Adding unique constraint on 'Imputation', fields ['project', 'part']
db.create_unique(u'intranet_imputation', ['project_id', 'part_id'])
# Adding model 'IntranetProfile'
db.create_table(u'intranet_intranetprofile', (
('chargeability_cost', self.gf('django.db.models.fields.FloatField')(default=11)),
('raw_cost', self.gf('django.db.models.fields.FloatField')(default=10)),
('is_kaleidos_team', self.gf('django.db.models.fields.BooleanField')(default=True)),
('profit_cost', self.gf('django.db.models.fields.FloatField')(default=12)),
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='intranet_profile', to=orm['intranet.User'])),
))
db.send_create_signal(u'intranet', ['IntranetProfile'])
# Adding unique constraint on 'IntranetProfile', fields ['user']
db.create_unique(u'intranet_intranetprofile', ['user_id'])
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'intranet.assignation': {
'Meta': {'unique_together': "(('employee', 'project'),)", 'object_name': 'Assignation'},
'cost': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'employee': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['intranet.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['intranet.Project']"})
},
u'intranet.client': {
'Meta': {'ordering': "['name']", 'object_name': 'Client'},
'contact_person': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'employees_number': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sector': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'clients'", 'to': u"orm['intranet.Sector']"}),
'ubication': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'})
},
u'intranet.holidaysrequest': {
'Meta': {'object_name': 'HolidaysRequest'},
'beginning': ('django.db.models.fields.DateField', [], {}),
'comments': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'employee': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'holidays_requests'", 'to': u"orm['intranet.User']"}),
'ending': ('django.db.models.fields.DateField', [], {}),
'flexible_dates': ('django.db.models.fields.BooleanField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'year': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'holidays_requests'", 'to': u"orm['intranet.HolidaysYear']"})
},
u'intranet.holidaysyear': {
'Meta': {'object_name': 'HolidaysYear'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
u'intranet.invoice': {
'Meta': {'ordering': "['estimated_through_date']", 'object_name': 'Invoice'},
'client': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'invoices'", 'to': u"orm['intranet.Client']"}),
'comments': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'concept': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'estimated_perception_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'estimated_through_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'iva': ('django.db.models.fields.FloatField', [], {'default': '0.21'}),
'number': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'payment': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'payment_conditions': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'perception_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'perception_state': ('django.db.models.fields.IntegerField', [], {'default': '-10'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'invoices'", 'to': u"orm['intranet.Project']"}),
'quantity': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'quantity_iva': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'through_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
u'intranet.part': {
'Meta': {'ordering': "['-year', '-month']", 'unique_together': "(('month', 'year', 'employee'),)", 'object_name': 'Part'},
'data': ('picklefield.fields.PickledObjectField', [], {'default': '{}'}),
'employee': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parts'", 'to': u"orm['intranet.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'month': ('django.db.models.fields.IntegerField', [], {}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'parts'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['intranet.Project']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
'state': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
u'intranet.project': {
'Meta': {'ordering': "['name']", 'object_name': 'Project'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'client': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'project'", 'to': u"orm['intranet.Client']"}),
'description': ('django.db.models.fields.TextField', [], {}),
'employees': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'projects'", 'default': 'None', 'to': u"orm['intranet.User']", 'through': u"orm['intranet.Assignation']", 'symmetrical': 'False', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'is_holidays': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_month_activity': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '600'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
'subscribers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'subscribed_projects'", 'default': 'None', 'to': u"orm['intranet.User']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'total_income': ('django.db.models.fields.FloatField', [], {'default': '0'})
},
u'intranet.sector': {
'Meta': {'ordering': "['name']", 'object_name': 'Sector'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'intranet.specialday': {
'Meta': {'ordering': "['date']", 'object_name': 'SpecialDay'},
'date': ('django.db.models.fields.DateField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'intranet.user': {
'Meta': {'object_name': 'User'},
'chargeability_cost': ('django.db.models.fields.FloatField', [], {'default': '11'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_company_team': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'profit_cost': ('django.db.models.fields.FloatField', [], {'default': '12'}),
'raw_cost': ('django.db.models.fields.FloatField', [], {'default': '10'}),
'reset_password_token': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '40', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
}
}
complete_apps = ['intranet'] | apache-2.0 |
mozilla/verbatim | vendor/lib/python/django/db/models/options.py | 92 | 21544 | import re
from bisect import bisect
from django.conf import settings
from django.db.models.related import RelatedObject
from django.db.models.fields.related import ManyToManyRel
from django.db.models.fields import AutoField, FieldDoesNotExist
from django.db.models.fields.proxy import OrderWrt
from django.db.models.loading import get_models, app_cache_ready
from django.utils.translation import activate, deactivate_all, get_language, string_concat
from django.utils.encoding import force_unicode, smart_str
from django.utils.datastructures import SortedDict
# Calculate the verbose_name by converting from InitialCaps to "lowercase with spaces".
get_verbose_name = lambda class_name: re.sub('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))', ' \\1', class_name).lower().strip()
DEFAULT_NAMES = ('verbose_name', 'verbose_name_plural', 'db_table', 'ordering',
'unique_together', 'permissions', 'get_latest_by',
'order_with_respect_to', 'app_label', 'db_tablespace',
'abstract', 'managed', 'proxy', 'auto_created')
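# Editor's illustration (hedged): these names correspond to attributes a model may
# set on its inner Meta class, for example
#   class Article(models.Model):
#       class Meta:
#           ordering = ['-pub_date']
#           unique_together = (('slug', 'site_id'),)
#           verbose_name_plural = 'articles'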
class Options(object):
def __init__(self, meta, app_label=None):
self.local_fields, self.local_many_to_many = [], []
self.virtual_fields = []
self.module_name, self.verbose_name = None, None
self.verbose_name_plural = None
self.db_table = ''
self.ordering = []
self.unique_together = []
self.permissions = []
self.object_name, self.app_label = None, app_label
self.get_latest_by = None
self.order_with_respect_to = None
self.db_tablespace = settings.DEFAULT_TABLESPACE
self.admin = None
self.meta = meta
self.pk = None
self.has_auto_field, self.auto_field = False, None
self.abstract = False
self.managed = True
self.proxy = False
# For any class that is a proxy (including automatically created
# classes for deferred object loading), proxy_for_model tells us
# which class this model is proxying. Note that proxy_for_model
# can create a chain of proxy models. For non-proxy models, the
# variable is always None.
self.proxy_for_model = None
# For any non-abstract class, the concrete class is the model
        # at the end of the proxy_for_model chain. In particular, for
# concrete models, the concrete_model is always the class itself.
self.concrete_model = None
self.parents = SortedDict()
self.duplicate_targets = {}
self.auto_created = False
# To handle various inheritance situations, we need to track where
# managers came from (concrete or abstract base classes).
self.abstract_managers = []
self.concrete_managers = []
# List of all lookups defined in ForeignKey 'limit_choices_to' options
# from *other* models. Needed for some admin checks. Internal use only.
self.related_fkey_lookups = []
def contribute_to_class(self, cls, name):
from django.db import connection
from django.db.backends.util import truncate_name
cls._meta = self
self.installed = re.sub('\.models$', '', cls.__module__) in settings.INSTALLED_APPS
# First, construct the default values for these options.
self.object_name = cls.__name__
self.module_name = self.object_name.lower()
self.verbose_name = get_verbose_name(self.object_name)
# Next, apply any overridden values from 'class Meta'.
if self.meta:
meta_attrs = self.meta.__dict__.copy()
for name in self.meta.__dict__:
# Ignore any private attributes that Django doesn't care about.
# NOTE: We can't modify a dictionary's contents while looping
# over it, so we loop over the *original* dictionary instead.
if name.startswith('_'):
del meta_attrs[name]
for attr_name in DEFAULT_NAMES:
if attr_name in meta_attrs:
setattr(self, attr_name, meta_attrs.pop(attr_name))
elif hasattr(self.meta, attr_name):
setattr(self, attr_name, getattr(self.meta, attr_name))
# unique_together can be either a tuple of tuples, or a single
# tuple of two strings. Normalize it to a tuple of tuples, so that
# calling code can uniformly expect that.
ut = meta_attrs.pop('unique_together', self.unique_together)
if ut and not isinstance(ut[0], (tuple, list)):
ut = (ut,)
self.unique_together = ut
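            # Illustration: a Meta declaration of unique_together = ('a', 'b') is
            # normalized here to (('a', 'b'),), while (('a', 'b'), ('c', 'd'))
            # already has the nested form and is left unchanged.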
# verbose_name_plural is a special case because it uses a 's'
# by default.
if self.verbose_name_plural is None:
self.verbose_name_plural = string_concat(self.verbose_name, 's')
# Any leftover attributes must be invalid.
if meta_attrs != {}:
raise TypeError("'class Meta' got invalid attribute(s): %s" % ','.join(meta_attrs.keys()))
else:
self.verbose_name_plural = string_concat(self.verbose_name, 's')
del self.meta
# If the db_table wasn't provided, use the app_label + module_name.
if not self.db_table:
self.db_table = "%s_%s" % (self.app_label, self.module_name)
self.db_table = truncate_name(self.db_table, connection.ops.max_name_length())
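        # Illustration (hypothetical app/model names): for app_label 'shop' and a
        # model named 'Product' the default db_table becomes 'shop_product', which
        # truncate_name() then trims to the backend's max identifier length if needed.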
def _prepare(self, model):
if self.order_with_respect_to:
self.order_with_respect_to = self.get_field(self.order_with_respect_to)
self.ordering = ('_order',)
model.add_to_class('_order', OrderWrt())
else:
self.order_with_respect_to = None
if self.pk is None:
if self.parents:
# Promote the first parent link in lieu of adding yet another
# field.
field = self.parents.value_for_index(0)
# Look for a local field with the same name as the
# first parent link. If a local field has already been
# created, use it instead of promoting the parent
already_created = [fld for fld in self.local_fields if fld.name == field.name]
if already_created:
field = already_created[0]
field.primary_key = True
self.setup_pk(field)
else:
auto = AutoField(verbose_name='ID', primary_key=True,
auto_created=True)
model.add_to_class('id', auto)
# Determine any sets of fields that are pointing to the same targets
# (e.g. two ForeignKeys to the same remote model). The query
# construction code needs to know this. At the end of this,
# self.duplicate_targets will map each duplicate field column to the
# columns it duplicates.
collections = {}
for column, target in self.duplicate_targets.iteritems():
try:
collections[target].add(column)
except KeyError:
collections[target] = set([column])
self.duplicate_targets = {}
for elt in collections.itervalues():
if len(elt) == 1:
continue
for column in elt:
self.duplicate_targets[column] = elt.difference(set([column]))
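        # Sketch of the duplicate-target bookkeeping above, with hypothetical
        # column/target names:
        #
        #     before: self.duplicate_targets == {'col_a': 't', 'col_b': 't', 'col_c': 'u'}
        #     after:  self.duplicate_targets == {'col_a': set(['col_b']),
        #                                        'col_b': set(['col_a'])}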
def add_field(self, field):
# Insert the given field in the order in which it was created, using
# the "creation_counter" attribute of the field.
# Move many-to-many related fields from self.fields into
# self.many_to_many.
if field.rel and isinstance(field.rel, ManyToManyRel):
self.local_many_to_many.insert(bisect(self.local_many_to_many, field), field)
if hasattr(self, '_m2m_cache'):
del self._m2m_cache
else:
self.local_fields.insert(bisect(self.local_fields, field), field)
self.setup_pk(field)
if hasattr(self, '_field_cache'):
del self._field_cache
del self._field_name_cache
if hasattr(self, '_name_map'):
del self._name_map
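        # The bisect() calls above keep local_fields/local_many_to_many ordered by
        # each field's creation_counter. A standalone sketch of the technique:
        #
        #     from bisect import bisect
        #     counters = [1, 3, 7]
        #     counters.insert(bisect(counters, 5), 5)   # counters -> [1, 3, 5, 7]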
def add_virtual_field(self, field):
self.virtual_fields.append(field)
def setup_pk(self, field):
if not self.pk and field.primary_key:
self.pk = field
field.serialize = False
def setup_proxy(self, target):
"""
Does the internal setup so that the current model is a proxy for
"target".
"""
self.pk = target._meta.pk
self.proxy_for_model = target
self.db_table = target._meta.db_table
def __repr__(self):
return '<Options for %s>' % self.object_name
def __str__(self):
return "%s.%s" % (smart_str(self.app_label), smart_str(self.module_name))
def verbose_name_raw(self):
"""
There are a few places where the untranslated verbose name is needed
(so that we get the same value regardless of currently active
locale).
"""
lang = get_language()
deactivate_all()
raw = force_unicode(self.verbose_name)
activate(lang)
return raw
verbose_name_raw = property(verbose_name_raw)
def _fields(self):
"""
The getter for self.fields. This returns the list of field objects
available to this model (including through parent models).
Callers are not permitted to modify this list, since it's a reference
to this instance (not a copy).
"""
try:
self._field_name_cache
except AttributeError:
self._fill_fields_cache()
return self._field_name_cache
fields = property(_fields)
def get_fields_with_model(self):
"""
Returns a sequence of (field, model) pairs for all fields. The "model"
element is None for fields on the current model. Mostly of use when
constructing queries so that we know which model a field belongs to.
"""
try:
self._field_cache
except AttributeError:
self._fill_fields_cache()
return self._field_cache
def _fill_fields_cache(self):
cache = []
for parent in self.parents:
for field, model in parent._meta.get_fields_with_model():
if model:
cache.append((field, model))
else:
cache.append((field, parent))
cache.extend([(f, None) for f in self.local_fields])
self._field_cache = tuple(cache)
self._field_name_cache = [x for x, _ in cache]
def _many_to_many(self):
try:
self._m2m_cache
except AttributeError:
self._fill_m2m_cache()
return self._m2m_cache.keys()
many_to_many = property(_many_to_many)
def get_m2m_with_model(self):
"""
The many-to-many version of get_fields_with_model().
"""
try:
self._m2m_cache
except AttributeError:
self._fill_m2m_cache()
return self._m2m_cache.items()
def _fill_m2m_cache(self):
cache = SortedDict()
for parent in self.parents:
for field, model in parent._meta.get_m2m_with_model():
if model:
cache[field] = model
else:
cache[field] = parent
for field in self.local_many_to_many:
cache[field] = None
self._m2m_cache = cache
def get_field(self, name, many_to_many=True):
"""
Returns the requested field by name. Raises FieldDoesNotExist on error.
"""
to_search = many_to_many and (self.fields + self.many_to_many) or self.fields
for f in to_search:
if f.name == name:
return f
raise FieldDoesNotExist('%s has no field named %r' % (self.object_name, name))
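        # Usage sketch (assuming a hypothetical Book model with a 'title' field):
        #
        #     Book._meta.get_field('title')    # -> the CharField instance
        #     Book._meta.get_field('missing')  # raises FieldDoesNotExist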
def get_field_by_name(self, name):
"""
Returns the (field_object, model, direct, m2m), where field_object is
the Field instance for the given name, model is the model containing
this field (None for local fields), direct is True if the field exists
on this model, and m2m is True for many-to-many relations. When
'direct' is False, 'field_object' is the corresponding RelatedObject
for this field (since the field doesn't have an instance associated
with it).
Uses a cache internally, so after the first access, this is very fast.
"""
try:
try:
return self._name_map[name]
except AttributeError:
cache = self.init_name_map()
return cache[name]
except KeyError:
raise FieldDoesNotExist('%s has no field named %r'
% (self.object_name, name))
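        # Sketch of the 4-tuple (assuming a hypothetical 'author' ForeignKey declared
        # directly on Book):
        #
        #     field, model, direct, m2m = Book._meta.get_field_by_name('author')
        #     # field -> the ForeignKey, model -> None (local), direct -> True, m2m -> False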
def get_all_field_names(self):
"""
Returns a list of all field names that are possible for this model
(including reverse relation names). This is used for pretty printing
debugging output (a list of choices), so any internal-only field names
are not included.
"""
try:
cache = self._name_map
except AttributeError:
cache = self.init_name_map()
names = cache.keys()
names.sort()
# Internal-only names end with "+" (symmetrical m2m related names being
# the main example). Trim them.
return [val for val in names if not val.endswith('+')]
def init_name_map(self):
"""
Initialises the field name -> field object mapping.
"""
cache = {}
# We intentionally handle related m2m objects first so that symmetrical
# m2m accessor names can be overridden, if necessary.
for f, model in self.get_all_related_m2m_objects_with_model():
cache[f.field.related_query_name()] = (f, model, False, True)
for f, model in self.get_all_related_objects_with_model():
cache[f.field.related_query_name()] = (f, model, False, False)
for f, model in self.get_m2m_with_model():
cache[f.name] = (f, model, True, True)
for f, model in self.get_fields_with_model():
cache[f.name] = (f, model, True, False)
if app_cache_ready():
self._name_map = cache
return cache
def get_add_permission(self):
return 'add_%s' % self.object_name.lower()
def get_change_permission(self):
return 'change_%s' % self.object_name.lower()
def get_delete_permission(self):
return 'delete_%s' % self.object_name.lower()
def get_all_related_objects(self, local_only=False, include_hidden=False,
include_proxy_eq=False):
return [k for k, v in self.get_all_related_objects_with_model(
local_only=local_only, include_hidden=include_hidden,
include_proxy_eq=include_proxy_eq)]
def get_all_related_objects_with_model(self, local_only=False,
include_hidden=False,
include_proxy_eq=False):
"""
Returns a list of (related-object, model) pairs. Similar to
get_fields_with_model().
"""
try:
self._related_objects_cache
except AttributeError:
self._fill_related_objects_cache()
predicates = []
if local_only:
predicates.append(lambda k, v: not v)
if not include_hidden:
predicates.append(lambda k, v: not k.field.rel.is_hidden())
cache = (self._related_objects_proxy_cache if include_proxy_eq
else self._related_objects_cache)
return filter(lambda t: all([p(*t) for p in predicates]), cache.items())
def _fill_related_objects_cache(self):
cache = SortedDict()
parent_list = self.get_parent_list()
for parent in self.parents:
for obj, model in parent._meta.get_all_related_objects_with_model(include_hidden=True):
if (obj.field.creation_counter < 0 or obj.field.rel.parent_link) and obj.model not in parent_list:
continue
if not model:
cache[obj] = parent
else:
cache[obj] = model
        # Also collect objects which are related to some proxy child/parent of self.
proxy_cache = cache.copy()
for klass in get_models(include_auto_created=True, only_installed=False):
for f in klass._meta.local_fields:
if f.rel and not isinstance(f.rel.to, basestring):
if self == f.rel.to._meta:
cache[RelatedObject(f.rel.to, klass, f)] = None
proxy_cache[RelatedObject(f.rel.to, klass, f)] = None
elif self.concrete_model == f.rel.to._meta.concrete_model:
proxy_cache[RelatedObject(f.rel.to, klass, f)] = None
self._related_objects_cache = cache
self._related_objects_proxy_cache = proxy_cache
def get_all_related_many_to_many_objects(self, local_only=False):
try:
cache = self._related_many_to_many_cache
except AttributeError:
cache = self._fill_related_many_to_many_cache()
if local_only:
return [k for k, v in cache.items() if not v]
return cache.keys()
def get_all_related_m2m_objects_with_model(self):
"""
Returns a list of (related-m2m-object, model) pairs. Similar to
get_fields_with_model().
"""
try:
cache = self._related_many_to_many_cache
except AttributeError:
cache = self._fill_related_many_to_many_cache()
return cache.items()
def _fill_related_many_to_many_cache(self):
cache = SortedDict()
parent_list = self.get_parent_list()
for parent in self.parents:
for obj, model in parent._meta.get_all_related_m2m_objects_with_model():
if obj.field.creation_counter < 0 and obj.model not in parent_list:
continue
if not model:
cache[obj] = parent
else:
cache[obj] = model
for klass in get_models(only_installed=False):
for f in klass._meta.local_many_to_many:
if f.rel and not isinstance(f.rel.to, basestring) and self == f.rel.to._meta:
cache[RelatedObject(f.rel.to, klass, f)] = None
if app_cache_ready():
self._related_many_to_many_cache = cache
return cache
def get_base_chain(self, model):
"""
        Returns a list of parent classes leading to 'model' (ordered from closest
        to most distant ancestor). This has to handle the case where 'model' is
        a grandparent or an even more distant relation.
"""
if not self.parents:
return
if model in self.parents:
return [model]
for parent in self.parents:
res = parent._meta.get_base_chain(model)
if res:
res.insert(0, parent)
return res
raise TypeError('%r is not an ancestor of this model'
% model._meta.module_name)
def get_parent_list(self):
"""
        Returns a set of all ancestors of this model. Useful for
determining if something is an ancestor, regardless of lineage.
"""
result = set()
for parent in self.parents:
result.add(parent)
result.update(parent._meta.get_parent_list())
return result
def get_ancestor_link(self, ancestor):
"""
Returns the field on the current model which points to the given
"ancestor". This is possible an indirect link (a pointer to a parent
model, which points, eventually, to the ancestor). Used when
constructing table joins for model inheritance.
Returns None if the model isn't an ancestor of this one.
"""
if ancestor in self.parents:
return self.parents[ancestor]
for parent in self.parents:
# Tries to get a link field from the immediate parent
parent_link = parent._meta.get_ancestor_link(ancestor)
if parent_link:
                # In case of a proxied model, the first link
                # in the chain to the ancestor is that parent's link.
return self.parents[parent] or parent_link
def get_ordered_objects(self):
"Returns a list of Options objects that are ordered with respect to this object."
if not hasattr(self, '_ordered_objects'):
objects = []
# TODO
#for klass in get_models(get_app(self.app_label)):
# opts = klass._meta
# if opts.order_with_respect_to and opts.order_with_respect_to.rel \
# and self == opts.order_with_respect_to.rel.to._meta:
# objects.append(opts)
self._ordered_objects = objects
return self._ordered_objects
def pk_index(self):
"""
Returns the index of the primary key field in the self.fields list.
"""
return self.fields.index(self.pk)
| gpl-2.0 |
dtschan/weblate | weblate/trans/data.py | 3 | 2408 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2016 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Data files helpers.
"""
import shutil
import os
from weblate import appsettings
def create_and_check_dir(path):
"""Ensure directory exists and is writable by us"""
if not os.path.exists(path):
os.makedirs(path)
else:
if not os.access(path, os.W_OK):
raise OSError(
'DATA_DIR {0} is not writable!'.format(path)
)
def check_data_writable():
"""
Check we can write to data dir.
"""
create_and_check_dir(appsettings.DATA_DIR)
create_and_check_dir(data_dir('home'))
create_and_check_dir(data_dir('whoosh'))
create_and_check_dir(data_dir('ssh'))
create_and_check_dir(data_dir('vcs'))
def data_dir(component):
"""
Returns path to data dir for given component.
"""
return os.path.join(appsettings.DATA_DIR, component)
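# Example (assuming DATA_DIR is set to '/home/weblate/data' in settings):
#
#     data_dir('ssh')  # -> '/home/weblate/data/ssh'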
def migrate_data_dirs():
"""
Migrate data directory from old locations to new consolidated data
directory.
"""
check_data_writable()
vcs = data_dir('vcs')
if os.path.exists(appsettings.GIT_ROOT) and not os.path.exists(vcs):
shutil.move(appsettings.GIT_ROOT, vcs)
whoosh = data_dir('whoosh')
if os.path.exists(appsettings.WHOOSH_INDEX) and not os.path.exists(whoosh):
shutil.move(appsettings.WHOOSH_INDEX, whoosh)
ssh_home = os.path.expanduser('~/.ssh')
ssh = data_dir('ssh')
for name in ('known_hosts', 'id_rsa', 'id_rsa.pub'):
source = os.path.join(ssh_home, name)
target = os.path.join(ssh, name)
if os.path.exists(source) and not os.path.exists(target):
shutil.copy(source, target)
| gpl-3.0 |
Visgean/django-autocomplete-light | test_remote_project/test_remote_project/wsgi.py | 6 | 1160 | """
WSGI config for test_remote_project project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test_remote_project.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| mit |
wuga214/Django-Wuga | env/lib/python2.7/site-packages/PIL/PsdImagePlugin.py | 15 | 7572 | #
# The Python Imaging Library
# $Id$
#
# Adobe PSD 2.5/3.0 file handling
#
# History:
# 1995-09-01 fl Created
# 1997-01-03 fl Read most PSD images
# 1997-01-18 fl Fixed P and CMYK support
# 2001-10-21 fl Added seek/tell support (for layers)
#
# Copyright (c) 1997-2001 by Secret Labs AB.
# Copyright (c) 1995-2001 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.4"
from . import Image, ImageFile, ImagePalette
from ._binary import i8, i16be as i16, i32be as i32
MODES = {
# (photoshop mode, bits) -> (pil mode, required channels)
(0, 1): ("1", 1),
(0, 8): ("L", 1),
(1, 8): ("L", 1),
(2, 8): ("P", 1),
(3, 8): ("RGB", 3),
(4, 8): ("CMYK", 4),
(7, 8): ("L", 1), # FIXME: multilayer
(8, 8): ("L", 1), # duotone
(9, 8): ("LAB", 3)
}
# --------------------------------------------------------------------.
# read PSD images
def _accept(prefix):
return prefix[:4] == b"8BPS"
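# Illustration: a PSD file starts with the magic bytes "8BPS", so
# _accept(b"8BPS\x00\x01....") is True while _accept(b"\x89PNG....") is False.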
##
# Image plugin for Photoshop images.
class PsdImageFile(ImageFile.ImageFile):
format = "PSD"
format_description = "Adobe Photoshop"
def _open(self):
read = self.fp.read
#
# header
s = read(26)
if s[:4] != b"8BPS" or i16(s[4:]) != 1:
raise SyntaxError("not a PSD file")
psd_bits = i16(s[22:])
psd_channels = i16(s[12:])
psd_mode = i16(s[24:])
mode, channels = MODES[(psd_mode, psd_bits)]
if channels > psd_channels:
raise IOError("not enough channels")
self.mode = mode
self.size = i32(s[18:]), i32(s[14:])
#
# color mode data
size = i32(read(4))
if size:
data = read(size)
if mode == "P" and size == 768:
self.palette = ImagePalette.raw("RGB;L", data)
#
# image resources
self.resources = []
size = i32(read(4))
if size:
# load resources
end = self.fp.tell() + size
while self.fp.tell() < end:
signature = read(4)
id = i16(read(2))
name = read(i8(read(1)))
if not (len(name) & 1):
read(1) # padding
data = read(i32(read(4)))
if (len(data) & 1):
read(1) # padding
self.resources.append((id, name, data))
if id == 1039: # ICC profile
self.info["icc_profile"] = data
#
# layer and mask information
self.layers = []
size = i32(read(4))
if size:
end = self.fp.tell() + size
size = i32(read(4))
if size:
self.layers = _layerinfo(self.fp)
self.fp.seek(end)
#
# image descriptor
self.tile = _maketile(self.fp, mode, (0, 0) + self.size, channels)
# keep the file open
self._fp = self.fp
self.frame = 0
@property
def n_frames(self):
return len(self.layers)
@property
def is_animated(self):
return len(self.layers) > 1
def seek(self, layer):
# seek to given layer (1..max)
if layer == self.frame:
return
try:
if layer <= 0:
raise IndexError
name, mode, bbox, tile = self.layers[layer-1]
self.mode = mode
self.tile = tile
self.frame = layer
self.fp = self._fp
return name, bbox
except IndexError:
raise EOFError("no such layer")
def tell(self):
# return layer number (0=image, 1..max=layers)
return self.frame
def load_prepare(self):
# create image memory if necessary
if not self.im or\
self.im.mode != self.mode or self.im.size != self.size:
self.im = Image.core.fill(self.mode, self.size, 0)
# create palette (optional)
if self.mode == "P":
Image.Image.load(self)
def _layerinfo(file):
# read layerinfo block
layers = []
read = file.read
for i in range(abs(i16(read(2)))):
# bounding box
y0 = i32(read(4))
x0 = i32(read(4))
y1 = i32(read(4))
x1 = i32(read(4))
# image info
info = []
mode = []
types = list(range(i16(read(2))))
if len(types) > 4:
continue
for i in types:
type = i16(read(2))
if type == 65535:
m = "A"
else:
m = "RGBA"[type]
mode.append(m)
size = i32(read(4))
info.append((m, size))
# figure out the image mode
mode.sort()
if mode == ["R"]:
mode = "L"
elif mode == ["B", "G", "R"]:
mode = "RGB"
elif mode == ["A", "B", "G", "R"]:
mode = "RGBA"
else:
mode = None # unknown
# skip over blend flags and extra information
filler = read(12)
name = ""
size = i32(read(4))
combined = 0
if size:
length = i32(read(4))
if length:
mask_y = i32(read(4))
mask_x = i32(read(4))
mask_h = i32(read(4)) - mask_y
mask_w = i32(read(4)) - mask_x
file.seek(length - 16, 1)
combined += length + 4
length = i32(read(4))
if length:
file.seek(length, 1)
combined += length + 4
length = i8(read(1))
if length:
# Don't know the proper encoding,
# Latin-1 should be a good guess
name = read(length).decode('latin-1', 'replace')
combined += length + 1
file.seek(size - combined, 1)
layers.append((name, mode, (x0, y0, x1, y1)))
# get tiles
i = 0
for name, mode, bbox in layers:
tile = []
for m in mode:
t = _maketile(file, m, bbox, 1)
if t:
tile.extend(t)
layers[i] = name, mode, bbox, tile
i += 1
return layers
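# Note on the mode detection above: the per-channel letters are sorted, so a layer
# with R, G and B channels yields ["B", "G", "R"] and maps to "RGB"; adding an
# alpha channel gives ["A", "B", "G", "R"] and maps to "RGBA".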
def _maketile(file, mode, bbox, channels):
tile = None
read = file.read
compression = i16(read(2))
xsize = bbox[2] - bbox[0]
ysize = bbox[3] - bbox[1]
offset = file.tell()
if compression == 0:
#
# raw compression
tile = []
for channel in range(channels):
layer = mode[channel]
if mode == "CMYK":
layer += ";I"
tile.append(("raw", bbox, offset, layer))
offset = offset + xsize*ysize
elif compression == 1:
#
# packbits compression
i = 0
tile = []
bytecount = read(channels * ysize * 2)
offset = file.tell()
for channel in range(channels):
layer = mode[channel]
if mode == "CMYK":
layer += ";I"
tile.append(
("packbits", bbox, offset, layer)
)
for y in range(ysize):
offset = offset + i16(bytecount[i:i+2])
i += 2
file.seek(offset)
if offset & 1:
read(1) # padding
return tile
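# Illustration of the raw-compression branch: each channel occupies xsize*ysize
# bytes, so for a hypothetical 10x10 RGB area the three tile entries start at
# offset, offset+100 and offset+200.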
# --------------------------------------------------------------------
# registry
Image.register_open(PsdImageFile.format, PsdImageFile, _accept)
Image.register_extension(PsdImageFile.format, ".psd")
| apache-2.0 |
bealdav/OpenUpgrade | addons/l10n_be_invoice_bba/partner.py | 379 | 2268 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Created by Luc De Meyer
# Copyright (c) 2010 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import time
from openerp.tools.translate import _
class res_partner(osv.osv):
""" add field to indicate default 'Communication Type' on customer invoices """
_inherit = 'res.partner'
def _get_comm_type(self, cr, uid, context=None):
res = self.pool.get('account.invoice')._get_reference_type(cr, uid,context=context)
return res
_columns = {
'out_inv_comm_type': fields.selection(_get_comm_type, 'Communication Type', change_default=True,
help='Select Default Communication Type for Outgoing Invoices.' ),
'out_inv_comm_algorithm': fields.selection([
('random','Random'),
('date','Date'),
('partner_ref','Customer Reference'),
], 'Communication Algorithm',
help='Select Algorithm to generate the Structured Communication on Outgoing Invoices.' ),
}
def _commercial_fields(self, cr, uid, context=None):
return super(res_partner, self)._commercial_fields(cr, uid, context=context) + \
['out_inv_comm_type', 'out_inv_comm_algorithm']
_default = {
'out_inv_comm_type': 'none',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
urandu/rethinkdb | external/v8_3.30.33.16/testing/gmock/scripts/generator/cpp/ast.py | 62 | 62426 | #!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate an Abstract Syntax Tree (AST) for C++."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
# TODO:
# * Tokens should never be exported, need to convert to Nodes
# (return types, parameters, etc.)
# * Handle static class data for templatized classes
# * Handle casts (both C++ and C-style)
# * Handle conditions and loops (if/else, switch, for, while/do)
#
# TODO much, much later:
# * Handle #define
# * exceptions
try:
# Python 3.x
import builtins
except ImportError:
# Python 2.x
import __builtin__ as builtins
import sys
import traceback
from cpp import keywords
from cpp import tokenize
from cpp import utils
if not hasattr(builtins, 'reversed'):
# Support Python 2.3 and earlier.
def reversed(seq):
for i in range(len(seq)-1, -1, -1):
yield seq[i]
if not hasattr(builtins, 'next'):
# Support Python 2.5 and earlier.
def next(obj):
return obj.next()
VISIBILITY_PUBLIC, VISIBILITY_PROTECTED, VISIBILITY_PRIVATE = range(3)
FUNCTION_NONE = 0x00
FUNCTION_CONST = 0x01
FUNCTION_VIRTUAL = 0x02
FUNCTION_PURE_VIRTUAL = 0x04
FUNCTION_CTOR = 0x08
FUNCTION_DTOR = 0x10
FUNCTION_ATTRIBUTE = 0x20
FUNCTION_UNKNOWN_ANNOTATION = 0x40
FUNCTION_THROW = 0x80
FUNCTION_OVERRIDE = 0x100
"""
These are currently unused. Should really handle these properly at some point.
TYPE_MODIFIER_INLINE = 0x010000
TYPE_MODIFIER_EXTERN = 0x020000
TYPE_MODIFIER_STATIC = 0x040000
TYPE_MODIFIER_CONST = 0x080000
TYPE_MODIFIER_REGISTER = 0x100000
TYPE_MODIFIER_VOLATILE = 0x200000
TYPE_MODIFIER_MUTABLE = 0x400000
TYPE_MODIFIER_MAP = {
'inline': TYPE_MODIFIER_INLINE,
'extern': TYPE_MODIFIER_EXTERN,
'static': TYPE_MODIFIER_STATIC,
'const': TYPE_MODIFIER_CONST,
'register': TYPE_MODIFIER_REGISTER,
'volatile': TYPE_MODIFIER_VOLATILE,
'mutable': TYPE_MODIFIER_MUTABLE,
}
"""
_INTERNAL_TOKEN = 'internal'
_NAMESPACE_POP = 'ns-pop'
# TODO(nnorwitz): use this as a singleton for templated_types, etc
# where we don't want to create a new empty dict each time. It is also const.
class _NullDict(object):
__contains__ = lambda self: False
keys = values = items = iterkeys = itervalues = iteritems = lambda self: ()
# TODO(nnorwitz): move AST nodes into a separate module.
class Node(object):
"""Base AST node."""
def __init__(self, start, end):
self.start = start
self.end = end
def IsDeclaration(self):
"""Returns bool if this node is a declaration."""
return False
def IsDefinition(self):
"""Returns bool if this node is a definition."""
return False
def IsExportable(self):
"""Returns bool if this node exportable from a header file."""
return False
def Requires(self, node):
"""Does this AST node require the definition of the node passed in?"""
return False
def XXX__str__(self):
return self._StringHelper(self.__class__.__name__, '')
def _StringHelper(self, name, suffix):
if not utils.DEBUG:
return '%s(%s)' % (name, suffix)
return '%s(%d, %d, %s)' % (name, self.start, self.end, suffix)
def __repr__(self):
return str(self)
class Define(Node):
def __init__(self, start, end, name, definition):
Node.__init__(self, start, end)
self.name = name
self.definition = definition
def __str__(self):
value = '%s %s' % (self.name, self.definition)
return self._StringHelper(self.__class__.__name__, value)
class Include(Node):
def __init__(self, start, end, filename, system):
Node.__init__(self, start, end)
self.filename = filename
self.system = system
def __str__(self):
fmt = '"%s"'
if self.system:
fmt = '<%s>'
return self._StringHelper(self.__class__.__name__, fmt % self.filename)
class Goto(Node):
def __init__(self, start, end, label):
Node.__init__(self, start, end)
self.label = label
def __str__(self):
return self._StringHelper(self.__class__.__name__, str(self.label))
class Expr(Node):
def __init__(self, start, end, expr):
Node.__init__(self, start, end)
self.expr = expr
def Requires(self, node):
# TODO(nnorwitz): impl.
return False
def __str__(self):
return self._StringHelper(self.__class__.__name__, str(self.expr))
class Return(Expr):
pass
class Delete(Expr):
pass
class Friend(Expr):
def __init__(self, start, end, expr, namespace):
Expr.__init__(self, start, end, expr)
self.namespace = namespace[:]
class Using(Node):
def __init__(self, start, end, names):
Node.__init__(self, start, end)
self.names = names
def __str__(self):
return self._StringHelper(self.__class__.__name__, str(self.names))
class Parameter(Node):
def __init__(self, start, end, name, parameter_type, default):
Node.__init__(self, start, end)
self.name = name
self.type = parameter_type
self.default = default
def Requires(self, node):
# TODO(nnorwitz): handle namespaces, etc.
return self.type.name == node.name
def __str__(self):
name = str(self.type)
suffix = '%s %s' % (name, self.name)
if self.default:
suffix += ' = ' + ''.join([d.name for d in self.default])
return self._StringHelper(self.__class__.__name__, suffix)
class _GenericDeclaration(Node):
def __init__(self, start, end, name, namespace):
Node.__init__(self, start, end)
self.name = name
self.namespace = namespace[:]
def FullName(self):
prefix = ''
if self.namespace and self.namespace[-1]:
prefix = '::'.join(self.namespace) + '::'
return prefix + self.name
def _TypeStringHelper(self, suffix):
if self.namespace:
names = [n or '<anonymous>' for n in self.namespace]
suffix += ' in ' + '::'.join(names)
return self._StringHelper(self.__class__.__name__, suffix)
# TODO(nnorwitz): merge with Parameter in some way?
class VariableDeclaration(_GenericDeclaration):
def __init__(self, start, end, name, var_type, initial_value, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
self.type = var_type
self.initial_value = initial_value
def Requires(self, node):
# TODO(nnorwitz): handle namespaces, etc.
return self.type.name == node.name
def ToString(self):
"""Return a string that tries to reconstitute the variable decl."""
suffix = '%s %s' % (self.type, self.name)
if self.initial_value:
suffix += ' = ' + self.initial_value
return suffix
def __str__(self):
return self._StringHelper(self.__class__.__name__, self.ToString())
class Typedef(_GenericDeclaration):
def __init__(self, start, end, name, alias, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
self.alias = alias
def IsDefinition(self):
return True
def IsExportable(self):
return True
def Requires(self, node):
# TODO(nnorwitz): handle namespaces, etc.
name = node.name
for token in self.alias:
if token is not None and name == token.name:
return True
return False
def __str__(self):
suffix = '%s, %s' % (self.name, self.alias)
return self._TypeStringHelper(suffix)
class _NestedType(_GenericDeclaration):
def __init__(self, start, end, name, fields, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
self.fields = fields
def IsDefinition(self):
return True
def IsExportable(self):
return True
def __str__(self):
suffix = '%s, {%s}' % (self.name, self.fields)
return self._TypeStringHelper(suffix)
class Union(_NestedType):
pass
class Enum(_NestedType):
pass
class Class(_GenericDeclaration):
def __init__(self, start, end, name, bases, templated_types, body, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
self.bases = bases
self.body = body
self.templated_types = templated_types
def IsDeclaration(self):
return self.bases is None and self.body is None
def IsDefinition(self):
return not self.IsDeclaration()
def IsExportable(self):
return not self.IsDeclaration()
def Requires(self, node):
# TODO(nnorwitz): handle namespaces, etc.
if self.bases:
for token_list in self.bases:
                # TODO(nnorwitz): bases are tokens, do name comparison.
for token in token_list:
if token.name == node.name:
return True
# TODO(nnorwitz): search in body too.
return False
def __str__(self):
name = self.name
if self.templated_types:
name += '<%s>' % self.templated_types
suffix = '%s, %s, %s' % (name, self.bases, self.body)
return self._TypeStringHelper(suffix)
class Struct(Class):
pass
class Function(_GenericDeclaration):
def __init__(self, start, end, name, return_type, parameters,
modifiers, templated_types, body, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
converter = TypeConverter(namespace)
self.return_type = converter.CreateReturnType(return_type)
self.parameters = converter.ToParameters(parameters)
self.modifiers = modifiers
self.body = body
self.templated_types = templated_types
def IsDeclaration(self):
return self.body is None
def IsDefinition(self):
return self.body is not None
def IsExportable(self):
if self.return_type and 'static' in self.return_type.modifiers:
return False
return None not in self.namespace
def Requires(self, node):
if self.parameters:
            # TODO(nnorwitz): parameters are tokens, do name comparison.
for p in self.parameters:
if p.name == node.name:
return True
# TODO(nnorwitz): search in body too.
return False
def __str__(self):
# TODO(nnorwitz): add templated_types.
suffix = ('%s %s(%s), 0x%02x, %s' %
(self.return_type, self.name, self.parameters,
self.modifiers, self.body))
return self._TypeStringHelper(suffix)
class Method(Function):
def __init__(self, start, end, name, in_class, return_type, parameters,
modifiers, templated_types, body, namespace):
Function.__init__(self, start, end, name, return_type, parameters,
modifiers, templated_types, body, namespace)
# TODO(nnorwitz): in_class could also be a namespace which can
# mess up finding functions properly.
self.in_class = in_class
class Type(_GenericDeclaration):
"""Type used for any variable (eg class, primitive, struct, etc)."""
def __init__(self, start, end, name, templated_types, modifiers,
reference, pointer, array):
"""
Args:
name: str name of main type
templated_types: [Class (Type?)] template type info between <>
modifiers: [str] type modifiers (keywords) eg, const, mutable, etc.
reference, pointer, array: bools
"""
_GenericDeclaration.__init__(self, start, end, name, [])
self.templated_types = templated_types
if not name and modifiers:
self.name = modifiers.pop()
self.modifiers = modifiers
self.reference = reference
self.pointer = pointer
self.array = array
def __str__(self):
prefix = ''
if self.modifiers:
prefix = ' '.join(self.modifiers) + ' '
name = str(self.name)
if self.templated_types:
name += '<%s>' % self.templated_types
suffix = prefix + name
if self.reference:
suffix += '&'
if self.pointer:
suffix += '*'
if self.array:
suffix += '[]'
return self._TypeStringHelper(suffix)
# By definition, Is* are always False. A Type can only exist in
# some sort of variable declaration, parameter, or return value.
def IsDeclaration(self):
return False
def IsDefinition(self):
return False
def IsExportable(self):
return False
class TypeConverter(object):
def __init__(self, namespace_stack):
self.namespace_stack = namespace_stack
def _GetTemplateEnd(self, tokens, start):
count = 1
end = start
while 1:
token = tokens[end]
end += 1
if token.name == '<':
count += 1
elif token.name == '>':
count -= 1
if count == 0:
break
return tokens[start:end-1], end
def ToType(self, tokens):
"""Convert [Token,...] to [Class(...), ] useful for base classes.
For example, code like class Foo : public Bar<x, y> { ... };
the "Bar<x, y>" portion gets converted to an AST.
Returns:
[Class(...), ...]
"""
result = []
name_tokens = []
reference = pointer = array = False
def AddType(templated_types):
# Partition tokens into name and modifier tokens.
names = []
modifiers = []
for t in name_tokens:
if keywords.IsKeyword(t.name):
modifiers.append(t.name)
else:
names.append(t.name)
name = ''.join(names)
result.append(Type(name_tokens[0].start, name_tokens[-1].end,
name, templated_types, modifiers,
reference, pointer, array))
del name_tokens[:]
i = 0
end = len(tokens)
while i < end:
token = tokens[i]
if token.name == '<':
new_tokens, new_end = self._GetTemplateEnd(tokens, i+1)
AddType(self.ToType(new_tokens))
# If there is a comma after the template, we need to consume
# that here otherwise it becomes part of the name.
i = new_end
reference = pointer = array = False
elif token.name == ',':
AddType([])
reference = pointer = array = False
elif token.name == '*':
pointer = True
elif token.name == '&':
reference = True
elif token.name == '[':
pointer = True
elif token.name == ']':
pass
else:
name_tokens.append(token)
i += 1
if name_tokens:
# No '<' in the tokens, just a simple name and no template.
AddType([])
return result
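    # Sketch of ToType() on tokens spelling "Bar<x, y>" (the docstring's own example):
    # the result is [Type(name='Bar', templated_types=[Type(name='x'), Type(name='y')])].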
def DeclarationToParts(self, parts, needs_name_removed):
name = None
default = []
if needs_name_removed:
# Handle default (initial) values properly.
for i, t in enumerate(parts):
if t.name == '=':
default = parts[i+1:]
name = parts[i-1].name
if name == ']' and parts[i-2].name == '[':
name = parts[i-3].name
i -= 1
parts = parts[:i-1]
break
else:
if parts[-1].token_type == tokenize.NAME:
name = parts.pop().name
else:
# TODO(nnorwitz): this is a hack that happens for code like
# Register(Foo<T>); where it thinks this is a function call
# but it's actually a declaration.
name = '???'
modifiers = []
type_name = []
other_tokens = []
templated_types = []
i = 0
end = len(parts)
while i < end:
p = parts[i]
if keywords.IsKeyword(p.name):
modifiers.append(p.name)
elif p.name == '<':
templated_tokens, new_end = self._GetTemplateEnd(parts, i+1)
templated_types = self.ToType(templated_tokens)
i = new_end - 1
# Don't add a spurious :: to data members being initialized.
next_index = i + 1
if next_index < end and parts[next_index].name == '::':
i += 1
elif p.name in ('[', ']', '='):
# These are handled elsewhere.
other_tokens.append(p)
elif p.name not in ('*', '&', '>'):
# Ensure that names have a space between them.
if (type_name and type_name[-1].token_type == tokenize.NAME and
p.token_type == tokenize.NAME):
type_name.append(tokenize.Token(tokenize.SYNTAX, ' ', 0, 0))
type_name.append(p)
else:
other_tokens.append(p)
i += 1
type_name = ''.join([t.name for t in type_name])
return name, type_name, templated_types, modifiers, default, other_tokens
def ToParameters(self, tokens):
if not tokens:
return []
result = []
name = type_name = ''
type_modifiers = []
pointer = reference = array = False
first_token = None
default = []
def AddParameter():
if default:
del default[0] # Remove flag.
end = type_modifiers[-1].end
parts = self.DeclarationToParts(type_modifiers, True)
(name, type_name, templated_types, modifiers,
unused_default, unused_other_tokens) = parts
parameter_type = Type(first_token.start, first_token.end,
type_name, templated_types, modifiers,
reference, pointer, array)
p = Parameter(first_token.start, end, name,
parameter_type, default)
result.append(p)
template_count = 0
for s in tokens:
if not first_token:
first_token = s
if s.name == '<':
template_count += 1
elif s.name == '>':
template_count -= 1
if template_count > 0:
type_modifiers.append(s)
continue
if s.name == ',':
AddParameter()
name = type_name = ''
type_modifiers = []
pointer = reference = array = False
first_token = None
default = []
elif s.name == '*':
pointer = True
elif s.name == '&':
reference = True
elif s.name == '[':
array = True
elif s.name == ']':
pass # Just don't add to type_modifiers.
elif s.name == '=':
# Got a default value. Add any value (None) as a flag.
default.append(None)
elif default:
default.append(s)
else:
type_modifiers.append(s)
AddParameter()
return result
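    # Illustration of ToParameters() on tokens spelling "const string& name": it
    # yields one Parameter with name='name' whose type has name='string',
    # modifiers=['const'] and reference=True.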
def CreateReturnType(self, return_type_seq):
if not return_type_seq:
return None
start = return_type_seq[0].start
end = return_type_seq[-1].end
_, name, templated_types, modifiers, default, other_tokens = \
self.DeclarationToParts(return_type_seq, False)
names = [n.name for n in other_tokens]
reference = '&' in names
pointer = '*' in names
array = '[' in names
return Type(start, end, name, templated_types, modifiers,
reference, pointer, array)
def GetTemplateIndices(self, names):
# names is a list of strings.
start = names.index('<')
end = len(names) - 1
while end > 0:
if names[end] == '>':
break
end -= 1
return start, end+1
class AstBuilder(object):
def __init__(self, token_stream, filename, in_class='', visibility=None,
namespace_stack=[]):
self.tokens = token_stream
self.filename = filename
# TODO(nnorwitz): use a better data structure (deque) for the queue.
# Switching directions of the "queue" improved perf by about 25%.
# Using a deque should be even better since we access from both sides.
self.token_queue = []
self.namespace_stack = namespace_stack[:]
self.in_class = in_class
if in_class is None:
self.in_class_name_only = None
else:
self.in_class_name_only = in_class.split('::')[-1]
self.visibility = visibility
self.in_function = False
self.current_token = None
# Keep the state whether we are currently handling a typedef or not.
self._handling_typedef = False
self.converter = TypeConverter(self.namespace_stack)
def HandleError(self, msg, token):
printable_queue = list(reversed(self.token_queue[-20:]))
sys.stderr.write('Got %s in %s @ %s %s\n' %
(msg, self.filename, token, printable_queue))
def Generate(self):
while 1:
token = self._GetNextToken()
if not token:
break
# Get the next token.
self.current_token = token
# Dispatch on the next token type.
if token.token_type == _INTERNAL_TOKEN:
if token.name == _NAMESPACE_POP:
self.namespace_stack.pop()
continue
try:
result = self._GenerateOne(token)
if result is not None:
yield result
except:
self.HandleError('exception', token)
raise
def _CreateVariable(self, pos_token, name, type_name, type_modifiers,
ref_pointer_name_seq, templated_types, value=None):
reference = '&' in ref_pointer_name_seq
pointer = '*' in ref_pointer_name_seq
array = '[' in ref_pointer_name_seq
var_type = Type(pos_token.start, pos_token.end, type_name,
templated_types, type_modifiers,
reference, pointer, array)
return VariableDeclaration(pos_token.start, pos_token.end,
name, var_type, value, self.namespace_stack)
def _GenerateOne(self, token):
if token.token_type == tokenize.NAME:
if (keywords.IsKeyword(token.name) and
not keywords.IsBuiltinType(token.name)):
method = getattr(self, 'handle_' + token.name)
return method()
elif token.name == self.in_class_name_only:
# The token name is the same as the class, must be a ctor if
# there is a paren. Otherwise, it's the return type.
# Peek ahead to get the next token to figure out which.
next = self._GetNextToken()
self._AddBackToken(next)
if next.token_type == tokenize.SYNTAX and next.name == '(':
return self._GetMethod([token], FUNCTION_CTOR, None, True)
# Fall through--handle like any other method.
# Handle data or function declaration/definition.
syntax = tokenize.SYNTAX
temp_tokens, last_token = \
self._GetVarTokensUpTo(syntax, '(', ';', '{', '[')
temp_tokens.insert(0, token)
if last_token.name == '(':
# If there is an assignment before the paren,
# this is an expression, not a method.
expr = bool([e for e in temp_tokens if e.name == '='])
if expr:
new_temp = self._GetTokensUpTo(tokenize.SYNTAX, ';')
temp_tokens.append(last_token)
temp_tokens.extend(new_temp)
last_token = tokenize.Token(tokenize.SYNTAX, ';', 0, 0)
if last_token.name == '[':
# Handle array, this isn't a method, unless it's an operator.
# TODO(nnorwitz): keep the size somewhere.
# unused_size = self._GetTokensUpTo(tokenize.SYNTAX, ']')
temp_tokens.append(last_token)
if temp_tokens[-2].name == 'operator':
temp_tokens.append(self._GetNextToken())
else:
temp_tokens2, last_token = \
self._GetVarTokensUpTo(tokenize.SYNTAX, ';')
temp_tokens.extend(temp_tokens2)
if last_token.name == ';':
# Handle data, this isn't a method.
parts = self.converter.DeclarationToParts(temp_tokens, True)
(name, type_name, templated_types, modifiers, default,
unused_other_tokens) = parts
t0 = temp_tokens[0]
names = [t.name for t in temp_tokens]
if templated_types:
start, end = self.converter.GetTemplateIndices(names)
names = names[:start] + names[end:]
default = ''.join([t.name for t in default])
return self._CreateVariable(t0, name, type_name, modifiers,
names, templated_types, default)
if last_token.name == '{':
self._AddBackTokens(temp_tokens[1:])
self._AddBackToken(last_token)
method_name = temp_tokens[0].name
method = getattr(self, 'handle_' + method_name, None)
if not method:
# Must be declaring a variable.
# TODO(nnorwitz): handle the declaration.
return None
return method()
return self._GetMethod(temp_tokens, 0, None, False)
elif token.token_type == tokenize.SYNTAX:
if token.name == '~' and self.in_class:
# Must be a dtor (probably not in method body).
token = self._GetNextToken()
# self.in_class can contain A::Name, but the dtor will only
# be Name. Make sure to compare against the right value.
if (token.token_type == tokenize.NAME and
token.name == self.in_class_name_only):
return self._GetMethod([token], FUNCTION_DTOR, None, True)
# TODO(nnorwitz): handle a lot more syntax.
elif token.token_type == tokenize.PREPROCESSOR:
# TODO(nnorwitz): handle more preprocessor directives.
# token starts with a #, so remove it and strip whitespace.
name = token.name[1:].lstrip()
if name.startswith('include'):
# Remove "include".
name = name[7:].strip()
assert name
# Handle #include \<newline> "header-on-second-line.h".
if name.startswith('\\'):
name = name[1:].strip()
assert name[0] in '<"', token
assert name[-1] in '>"', token
system = name[0] == '<'
filename = name[1:-1]
return Include(token.start, token.end, filename, system)
if name.startswith('define'):
# Remove "define".
name = name[6:].strip()
assert name
value = ''
for i, c in enumerate(name):
if c.isspace():
value = name[i:].lstrip()
name = name[:i]
break
return Define(token.start, token.end, name, value)
if name.startswith('if') and name[2:3].isspace():
condition = name[3:].strip()
if condition.startswith('0') or condition.startswith('(0)'):
self._SkipIf0Blocks()
return None
def _GetTokensUpTo(self, expected_token_type, expected_token):
return self._GetVarTokensUpTo(expected_token_type, expected_token)[0]
def _GetVarTokensUpTo(self, expected_token_type, *expected_tokens):
last_token = self._GetNextToken()
tokens = []
while (last_token.token_type != expected_token_type or
last_token.name not in expected_tokens):
tokens.append(last_token)
last_token = self._GetNextToken()
return tokens, last_token
    # TODO(nnorwitz): remove _IgnoreUpTo(); it shouldn't be necessary.
def _IgnoreUpTo(self, token_type, token):
unused_tokens = self._GetTokensUpTo(token_type, token)
def _SkipIf0Blocks(self):
count = 1
while 1:
token = self._GetNextToken()
if token.token_type != tokenize.PREPROCESSOR:
continue
name = token.name[1:].lstrip()
if name.startswith('endif'):
count -= 1
if count == 0:
break
elif name.startswith('if'):
count += 1
def _GetMatchingChar(self, open_paren, close_paren, GetNextToken=None):
if GetNextToken is None:
GetNextToken = self._GetNextToken
# Assumes the current token is open_paren and we will consume
# and return up to the close_paren.
count = 1
token = GetNextToken()
while 1:
if token.token_type == tokenize.SYNTAX:
if token.name == open_paren:
count += 1
elif token.name == close_paren:
count -= 1
if count == 0:
break
yield token
token = GetNextToken()
yield token
def _GetParameters(self):
return self._GetMatchingChar('(', ')')
def GetScope(self):
return self._GetMatchingChar('{', '}')
def _GetNextToken(self):
if self.token_queue:
return self.token_queue.pop()
return next(self.tokens)
def _AddBackToken(self, token):
if token.whence == tokenize.WHENCE_STREAM:
token.whence = tokenize.WHENCE_QUEUE
self.token_queue.insert(0, token)
else:
assert token.whence == tokenize.WHENCE_QUEUE, token
self.token_queue.append(token)
def _AddBackTokens(self, tokens):
if tokens:
if tokens[-1].whence == tokenize.WHENCE_STREAM:
for token in tokens:
token.whence = tokenize.WHENCE_QUEUE
self.token_queue[:0] = reversed(tokens)
else:
assert tokens[-1].whence == tokenize.WHENCE_QUEUE, tokens
self.token_queue.extend(reversed(tokens))
def GetName(self, seq=None):
"""Returns ([tokens], next_token_info)."""
GetNextToken = self._GetNextToken
if seq is not None:
it = iter(seq)
GetNextToken = lambda: next(it)
next_token = GetNextToken()
tokens = []
last_token_was_name = False
while (next_token.token_type == tokenize.NAME or
(next_token.token_type == tokenize.SYNTAX and
next_token.name in ('::', '<'))):
# Two NAMEs in a row means the identifier should terminate.
# It's probably some sort of variable declaration.
if last_token_was_name and next_token.token_type == tokenize.NAME:
break
last_token_was_name = next_token.token_type == tokenize.NAME
tokens.append(next_token)
# Handle templated names.
if next_token.name == '<':
tokens.extend(self._GetMatchingChar('<', '>', GetNextToken))
last_token_was_name = True
next_token = GetNextToken()
return tokens, next_token
def GetMethod(self, modifiers, templated_types):
return_type_and_name = self._GetTokensUpTo(tokenize.SYNTAX, '(')
assert len(return_type_and_name) >= 1
return self._GetMethod(return_type_and_name, modifiers, templated_types,
False)
def _GetMethod(self, return_type_and_name, modifiers, templated_types,
get_paren):
template_portion = None
if get_paren:
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX, token
if token.name == '<':
# Handle templatized dtors.
template_portion = [token]
template_portion.extend(self._GetMatchingChar('<', '>'))
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX, token
assert token.name == '(', token
name = return_type_and_name.pop()
# Handle templatized ctors.
if name.name == '>':
index = 1
while return_type_and_name[index].name != '<':
index += 1
template_portion = return_type_and_name[index:] + [name]
del return_type_and_name[index:]
name = return_type_and_name.pop()
elif name.name == ']':
rt = return_type_and_name
assert rt[-1].name == '[', return_type_and_name
assert rt[-2].name == 'operator', return_type_and_name
name_seq = return_type_and_name[-2:]
del return_type_and_name[-2:]
name = tokenize.Token(tokenize.NAME, 'operator[]',
name_seq[0].start, name.end)
# Get the open paren so _GetParameters() below works.
unused_open_paren = self._GetNextToken()
# TODO(nnorwitz): store template_portion.
return_type = return_type_and_name
indices = name
if return_type:
indices = return_type[0]
# Force ctor for templatized ctors.
if name.name == self.in_class and not modifiers:
modifiers |= FUNCTION_CTOR
parameters = list(self._GetParameters())
del parameters[-1] # Remove trailing ')'.
# Handling operator() is especially weird.
if name.name == 'operator' and not parameters:
token = self._GetNextToken()
assert token.name == '(', token
parameters = list(self._GetParameters())
del parameters[-1] # Remove trailing ')'.
token = self._GetNextToken()
while token.token_type == tokenize.NAME:
modifier_token = token
token = self._GetNextToken()
if modifier_token.name == 'const':
modifiers |= FUNCTION_CONST
elif modifier_token.name == '__attribute__':
# TODO(nnorwitz): handle more __attribute__ details.
modifiers |= FUNCTION_ATTRIBUTE
assert token.name == '(', token
# Consume everything between the (parens).
unused_tokens = list(self._GetMatchingChar('(', ')'))
token = self._GetNextToken()
elif modifier_token.name == 'throw':
modifiers |= FUNCTION_THROW
assert token.name == '(', token
# Consume everything between the (parens).
unused_tokens = list(self._GetMatchingChar('(', ')'))
token = self._GetNextToken()
elif modifier_token.name == 'override':
modifiers |= FUNCTION_OVERRIDE
elif modifier_token.name == modifier_token.name.upper():
# HACK(nnorwitz): assume that all upper-case names
# are some macro we aren't expanding.
modifiers |= FUNCTION_UNKNOWN_ANNOTATION
else:
self.HandleError('unexpected token', modifier_token)
assert token.token_type == tokenize.SYNTAX, token
# Handle ctor initializers.
if token.name == ':':
# TODO(nnorwitz): anything else to handle for initializer list?
while token.name != ';' and token.name != '{':
token = self._GetNextToken()
# Handle pointer to functions that are really data but look
# like method declarations.
if token.name == '(':
if parameters[0].name == '*':
# name contains the return type.
name = parameters.pop()
# parameters contains the name of the data.
modifiers = [p.name for p in parameters]
# Already at the ( to open the parameter list.
function_parameters = list(self._GetMatchingChar('(', ')'))
del function_parameters[-1] # Remove trailing ')'.
# TODO(nnorwitz): store the function_parameters.
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX, token
assert token.name == ';', token
return self._CreateVariable(indices, name.name, indices.name,
modifiers, '', None)
# At this point, we got something like:
# return_type (type::*name_)(params);
# This is a data member called name_ that is a function pointer.
# With this code: void (sq_type::*field_)(string&);
# We get: name=void return_type=[] parameters=sq_type ... field_
# TODO(nnorwitz): is return_type always empty?
# TODO(nnorwitz): this isn't even close to being correct.
# Just put in something so we don't crash and can move on.
real_name = parameters[-1]
modifiers = [p.name for p in self._GetParameters()]
del modifiers[-1] # Remove trailing ')'.
return self._CreateVariable(indices, real_name.name, indices.name,
modifiers, '', None)
if token.name == '{':
body = list(self.GetScope())
del body[-1] # Remove trailing '}'.
else:
body = None
if token.name == '=':
token = self._GetNextToken()
assert token.token_type == tokenize.CONSTANT, token
assert token.name == '0', token
modifiers |= FUNCTION_PURE_VIRTUAL
token = self._GetNextToken()
if token.name == '[':
# TODO(nnorwitz): store tokens and improve parsing.
# template <typename T, size_t N> char (&ASH(T (&seq)[N]))[N];
tokens = list(self._GetMatchingChar('[', ']'))
token = self._GetNextToken()
assert token.name == ';', (token, return_type_and_name, parameters)
# Looks like we got a method, not a function.
if len(return_type) > 2 and return_type[-1].name == '::':
return_type, in_class = \
self._GetReturnTypeAndClassName(return_type)
return Method(indices.start, indices.end, name.name, in_class,
return_type, parameters, modifiers, templated_types,
body, self.namespace_stack)
return Function(indices.start, indices.end, name.name, return_type,
parameters, modifiers, templated_types, body,
self.namespace_stack)
def _GetReturnTypeAndClassName(self, token_seq):
# Splitting the return type from the class name in a method
# can be tricky. For example, Return::Type::Is::Hard::To::Find().
# Where is the return type and where is the class name?
# The heuristic used is to pull the last name as the class name.
# This includes all the templated type info.
        # TODO(nnorwitz): if there is only one name, like in the
        # example above, punt and assume the last bit is the class name.
        # Ignore a :: prefix, if it exists, so we can find the first real name.
i = 0
if token_seq[0].name == '::':
i = 1
        # Ignore a :: suffix, if it exists.
end = len(token_seq) - 1
if token_seq[end-1].name == '::':
end -= 1
        # Make a copy of the sequence so we can append a sentinel
        # value. This is required because GetName has to have some
        # terminating condition beyond the last name.
seq_copy = token_seq[i:end]
seq_copy.append(tokenize.Token(tokenize.SYNTAX, '', 0, 0))
names = []
while i < end:
# Iterate through the sequence parsing out each name.
new_name, next = self.GetName(seq_copy[i:])
assert new_name, 'Got empty new_name, next=%s' % next
# We got a pointer or ref. Add it to the name.
if next and next.token_type == tokenize.SYNTAX:
new_name.append(next)
names.append(new_name)
i += len(new_name)
# Now that we have the names, it's time to undo what we did.
# Remove the sentinel value.
names[-1].pop()
# Flatten the token sequence for the return type.
return_type = [e for seq in names[:-1] for e in seq]
# The class name is the last name.
class_name = names[-1]
return return_type, class_name
def handle_bool(self):
pass
def handle_char(self):
pass
def handle_int(self):
pass
def handle_long(self):
pass
def handle_short(self):
pass
def handle_double(self):
pass
def handle_float(self):
pass
def handle_void(self):
pass
def handle_wchar_t(self):
pass
def handle_unsigned(self):
pass
def handle_signed(self):
pass
def _GetNestedType(self, ctor):
name = None
name_tokens, token = self.GetName()
if name_tokens:
name = ''.join([t.name for t in name_tokens])
# Handle forward declarations.
if token.token_type == tokenize.SYNTAX and token.name == ';':
return ctor(token.start, token.end, name, None,
self.namespace_stack)
if token.token_type == tokenize.NAME and self._handling_typedef:
self._AddBackToken(token)
return ctor(token.start, token.end, name, None,
self.namespace_stack)
# Must be the type declaration.
fields = list(self._GetMatchingChar('{', '}'))
del fields[-1] # Remove trailing '}'.
if token.token_type == tokenize.SYNTAX and token.name == '{':
next = self._GetNextToken()
new_type = ctor(token.start, token.end, name, fields,
self.namespace_stack)
# A name means this is an anonymous type and the name
# is the variable declaration.
if next.token_type != tokenize.NAME:
return new_type
name = new_type
token = next
# Must be variable declaration using the type prefixed with keyword.
assert token.token_type == tokenize.NAME, token
return self._CreateVariable(token, token.name, name, [], '', None)
def handle_struct(self):
# Special case the handling typedef/aliasing of structs here.
# It would be a pain to handle in the class code.
name_tokens, var_token = self.GetName()
if name_tokens:
next_token = self._GetNextToken()
is_syntax = (var_token.token_type == tokenize.SYNTAX and
var_token.name[0] in '*&')
is_variable = (var_token.token_type == tokenize.NAME and
next_token.name == ';')
variable = var_token
if is_syntax and not is_variable:
variable = next_token
temp = self._GetNextToken()
if temp.token_type == tokenize.SYNTAX and temp.name == '(':
# Handle methods declared to return a struct.
t0 = name_tokens[0]
struct = tokenize.Token(tokenize.NAME, 'struct',
t0.start-7, t0.start-2)
type_and_name = [struct]
type_and_name.extend(name_tokens)
type_and_name.extend((var_token, next_token))
return self._GetMethod(type_and_name, 0, None, False)
assert temp.name == ';', (temp, name_tokens, var_token)
if is_syntax or (is_variable and not self._handling_typedef):
modifiers = ['struct']
type_name = ''.join([t.name for t in name_tokens])
position = name_tokens[0]
return self._CreateVariable(position, variable.name, type_name,
modifiers, var_token.name, None)
name_tokens.extend((var_token, next_token))
self._AddBackTokens(name_tokens)
else:
self._AddBackToken(var_token)
return self._GetClass(Struct, VISIBILITY_PUBLIC, None)
def handle_union(self):
return self._GetNestedType(Union)
def handle_enum(self):
return self._GetNestedType(Enum)
def handle_auto(self):
# TODO(nnorwitz): warn about using auto? Probably not since it
# will be reclaimed and useful for C++0x.
pass
def handle_register(self):
pass
def handle_const(self):
pass
def handle_inline(self):
pass
def handle_extern(self):
pass
def handle_static(self):
pass
def handle_virtual(self):
# What follows must be a method.
token = token2 = self._GetNextToken()
if token.name == 'inline':
# HACK(nnorwitz): handle inline dtors by ignoring 'inline'.
token2 = self._GetNextToken()
if token2.token_type == tokenize.SYNTAX and token2.name == '~':
return self.GetMethod(FUNCTION_VIRTUAL + FUNCTION_DTOR, None)
assert token.token_type == tokenize.NAME or token.name == '::', token
return_type_and_name = self._GetTokensUpTo(tokenize.SYNTAX, '(') # )
return_type_and_name.insert(0, token)
if token2 is not token:
return_type_and_name.insert(1, token2)
return self._GetMethod(return_type_and_name, FUNCTION_VIRTUAL,
None, False)
def handle_volatile(self):
pass
def handle_mutable(self):
pass
def handle_public(self):
assert self.in_class
self.visibility = VISIBILITY_PUBLIC
def handle_protected(self):
assert self.in_class
self.visibility = VISIBILITY_PROTECTED
def handle_private(self):
assert self.in_class
self.visibility = VISIBILITY_PRIVATE
def handle_friend(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
assert tokens
t0 = tokens[0]
return Friend(t0.start, t0.end, tokens, self.namespace_stack)
def handle_static_cast(self):
pass
def handle_const_cast(self):
pass
def handle_dynamic_cast(self):
pass
def handle_reinterpret_cast(self):
pass
def handle_new(self):
pass
def handle_delete(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
assert tokens
return Delete(tokens[0].start, tokens[0].end, tokens)
def handle_typedef(self):
token = self._GetNextToken()
if (token.token_type == tokenize.NAME and
keywords.IsKeyword(token.name)):
# Token must be struct/enum/union/class.
method = getattr(self, 'handle_' + token.name)
self._handling_typedef = True
tokens = [method()]
self._handling_typedef = False
else:
tokens = [token]
# Get the remainder of the typedef up to the semi-colon.
tokens.extend(self._GetTokensUpTo(tokenize.SYNTAX, ';'))
# TODO(nnorwitz): clean all this up.
assert tokens
name = tokens.pop()
indices = name
if tokens:
indices = tokens[0]
if not indices:
indices = token
if name.name == ')':
# HACK(nnorwitz): Handle pointers to functions "properly".
if (len(tokens) >= 4 and
tokens[1].name == '(' and tokens[2].name == '*'):
tokens.append(name)
name = tokens[3]
elif name.name == ']':
# HACK(nnorwitz): Handle arrays properly.
if len(tokens) >= 2:
tokens.append(name)
name = tokens[1]
new_type = tokens
if tokens and isinstance(tokens[0], tokenize.Token):
new_type = self.converter.ToType(tokens)[0]
return Typedef(indices.start, indices.end, name.name,
new_type, self.namespace_stack)
def handle_typeid(self):
pass # Not needed yet.
def handle_typename(self):
pass # Not needed yet.
def _GetTemplatedTypes(self):
result = {}
tokens = list(self._GetMatchingChar('<', '>'))
len_tokens = len(tokens) - 1 # Ignore trailing '>'.
i = 0
while i < len_tokens:
key = tokens[i].name
i += 1
if keywords.IsKeyword(key) or key == ',':
continue
type_name = default = None
if i < len_tokens:
i += 1
if tokens[i-1].name == '=':
assert i < len_tokens, '%s %s' % (i, tokens)
default, unused_next_token = self.GetName(tokens[i:])
i += len(default)
else:
if tokens[i-1].name != ',':
# We got something like: Type variable.
# Re-adjust the key (variable) and type_name (Type).
key = tokens[i-1].name
type_name = tokens[i-2]
result[key] = (type_name, default)
return result
def handle_template(self):
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX, token
assert token.name == '<', token
templated_types = self._GetTemplatedTypes()
# TODO(nnorwitz): for now, just ignore the template params.
token = self._GetNextToken()
if token.token_type == tokenize.NAME:
if token.name == 'class':
return self._GetClass(Class, VISIBILITY_PRIVATE, templated_types)
elif token.name == 'struct':
return self._GetClass(Struct, VISIBILITY_PUBLIC, templated_types)
elif token.name == 'friend':
return self.handle_friend()
self._AddBackToken(token)
tokens, last = self._GetVarTokensUpTo(tokenize.SYNTAX, '(', ';')
tokens.append(last)
self._AddBackTokens(tokens)
if last.name == '(':
return self.GetMethod(FUNCTION_NONE, templated_types)
# Must be a variable definition.
return None
def handle_true(self):
pass # Nothing to do.
def handle_false(self):
pass # Nothing to do.
def handle_asm(self):
pass # Not needed yet.
def handle_class(self):
return self._GetClass(Class, VISIBILITY_PRIVATE, None)
def _GetBases(self):
# Get base classes.
bases = []
while 1:
token = self._GetNextToken()
assert token.token_type == tokenize.NAME, token
# TODO(nnorwitz): store kind of inheritance...maybe.
if token.name not in ('public', 'protected', 'private'):
# If inheritance type is not specified, it is private.
# Just put the token back so we can form a name.
# TODO(nnorwitz): it would be good to warn about this.
self._AddBackToken(token)
else:
# Check for virtual inheritance.
token = self._GetNextToken()
if token.name != 'virtual':
self._AddBackToken(token)
else:
# TODO(nnorwitz): store that we got virtual for this base.
pass
base, next_token = self.GetName()
bases_ast = self.converter.ToType(base)
assert len(bases_ast) == 1, bases_ast
bases.append(bases_ast[0])
assert next_token.token_type == tokenize.SYNTAX, next_token
if next_token.name == '{':
token = next_token
break
# Support multiple inheritance.
assert next_token.name == ',', next_token
return bases, token
def _GetClass(self, class_type, visibility, templated_types):
class_name = None
class_token = self._GetNextToken()
if class_token.token_type != tokenize.NAME:
assert class_token.token_type == tokenize.SYNTAX, class_token
token = class_token
else:
# Skip any macro (e.g. storage class specifiers) after the
# 'class' keyword.
next_token = self._GetNextToken()
if next_token.token_type == tokenize.NAME:
self._AddBackToken(next_token)
else:
self._AddBackTokens([class_token, next_token])
name_tokens, token = self.GetName()
class_name = ''.join([t.name for t in name_tokens])
bases = None
if token.token_type == tokenize.SYNTAX:
if token.name == ';':
# Forward declaration.
return class_type(class_token.start, class_token.end,
class_name, None, templated_types, None,
self.namespace_stack)
if token.name in '*&':
# Inline forward declaration. Could be method or data.
name_token = self._GetNextToken()
next_token = self._GetNextToken()
if next_token.name == ';':
# Handle data
modifiers = ['class']
return self._CreateVariable(class_token, name_token.name,
class_name,
modifiers, token.name, None)
else:
# Assume this is a method.
tokens = (class_token, token, name_token, next_token)
self._AddBackTokens(tokens)
return self.GetMethod(FUNCTION_NONE, None)
if token.name == ':':
bases, token = self._GetBases()
body = None
if token.token_type == tokenize.SYNTAX and token.name == '{':
assert token.token_type == tokenize.SYNTAX, token
assert token.name == '{', token
ast = AstBuilder(self.GetScope(), self.filename, class_name,
visibility, self.namespace_stack)
body = list(ast.Generate())
if not self._handling_typedef:
token = self._GetNextToken()
if token.token_type != tokenize.NAME:
assert token.token_type == tokenize.SYNTAX, token
assert token.name == ';', token
else:
new_class = class_type(class_token.start, class_token.end,
class_name, bases, None,
body, self.namespace_stack)
modifiers = []
return self._CreateVariable(class_token,
token.name, new_class,
modifiers, token.name, None)
else:
if not self._handling_typedef:
self.HandleError('non-typedef token', token)
self._AddBackToken(token)
return class_type(class_token.start, class_token.end, class_name,
bases, templated_types, body, self.namespace_stack)
def handle_namespace(self):
token = self._GetNextToken()
# Support anonymous namespaces.
name = None
if token.token_type == tokenize.NAME:
name = token.name
token = self._GetNextToken()
self.namespace_stack.append(name)
assert token.token_type == tokenize.SYNTAX, token
# Create an internal token that denotes when the namespace is complete.
internal_token = tokenize.Token(_INTERNAL_TOKEN, _NAMESPACE_POP,
None, None)
internal_token.whence = token.whence
if token.name == '=':
# TODO(nnorwitz): handle aliasing namespaces.
name, next_token = self.GetName()
assert next_token.name == ';', next_token
self._AddBackToken(internal_token)
else:
assert token.name == '{', token
tokens = list(self.GetScope())
# Replace the trailing } with the internal namespace pop token.
tokens[-1] = internal_token
# Handle namespace with nothing in it.
self._AddBackTokens(tokens)
return None
def handle_using(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
assert tokens
return Using(tokens[0].start, tokens[0].end, tokens)
def handle_explicit(self):
assert self.in_class
# Nothing much to do.
# TODO(nnorwitz): maybe verify the method name == class name.
# This must be a ctor.
return self.GetMethod(FUNCTION_CTOR, None)
def handle_this(self):
pass # Nothing to do.
def handle_operator(self):
# Pull off the next token(s?) and make that part of the method name.
pass
def handle_sizeof(self):
pass
def handle_case(self):
pass
def handle_switch(self):
pass
def handle_default(self):
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX
assert token.name == ':'
def handle_if(self):
pass
def handle_else(self):
pass
def handle_return(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
if not tokens:
return Return(self.current_token.start, self.current_token.end, None)
return Return(tokens[0].start, tokens[0].end, tokens)
def handle_goto(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
assert len(tokens) == 1, str(tokens)
return Goto(tokens[0].start, tokens[0].end, tokens[0].name)
def handle_try(self):
pass # Not needed yet.
def handle_catch(self):
pass # Not needed yet.
def handle_throw(self):
pass # Not needed yet.
def handle_while(self):
pass
def handle_do(self):
pass
def handle_for(self):
pass
def handle_break(self):
self._IgnoreUpTo(tokenize.SYNTAX, ';')
def handle_continue(self):
self._IgnoreUpTo(tokenize.SYNTAX, ';')
def BuilderFromSource(source, filename):
"""Utility method that returns an AstBuilder from source code.
Args:
source: 'C++ source code'
filename: 'file1'
Returns:
AstBuilder
"""
return AstBuilder(tokenize.GetTokens(source), filename)
def PrintIndentifiers(filename, should_print):
"""Prints all identifiers for a C++ source file.
Args:
filename: 'file1'
should_print: predicate with signature: bool Function(token)
"""
source = utils.ReadFile(filename, False)
if source is None:
sys.stderr.write('Unable to find: %s\n' % filename)
return
#print('Processing %s' % actual_filename)
builder = BuilderFromSource(source, filename)
try:
for node in builder.Generate():
if should_print(node):
print(node.name)
except KeyboardInterrupt:
return
except:
pass
def PrintAllIndentifiers(filenames, should_print):
"""Prints all identifiers for each C++ source file in filenames.
Args:
filenames: ['file1', 'file2', ...]
should_print: predicate with signature: bool Function(token)
"""
for path in filenames:
PrintIndentifiers(path, should_print)
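# ---------------------------------------------------------------------------
# Illustrative usage (not part of the original module): a minimal sketch of
# driving the parser directly through BuilderFromSource(). The C++ snippet,
# the function name and its local variables are made up for this example; it
# assumes Generate() can be iterated the same way PrintIndentifiers does it.
def _DemoPrintTopLevelNames():
    cpp_source = 'class Foo { public: int Bar(); };\nvoid Baz() {}\n'
    builder = BuilderFromSource(cpp_source, 'demo.cc')
    for node in builder.Generate():
        # Most node types (Class, Function, Typedef, ...) expose a .name.
        print(type(node).__name__, getattr(node, 'name', None))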
def main(argv):
for filename in argv[1:]:
source = utils.ReadFile(filename)
if source is None:
continue
print('Processing %s' % filename)
builder = BuilderFromSource(source, filename)
try:
entire_ast = filter(None, builder.Generate())
except KeyboardInterrupt:
return
except:
# Already printed a warning, print the traceback and continue.
traceback.print_exc()
else:
if utils.DEBUG:
for ast in entire_ast:
print(ast)
if __name__ == '__main__':
main(sys.argv)
| agpl-3.0 |
boyuegame/kbengine | kbe/src/lib/python/Lib/queue.py | 818 | 8835 | '''A multi-producer, multi-consumer queue.'''
try:
import threading
except ImportError:
import dummy_threading as threading
from collections import deque
from heapq import heappush, heappop
try:
from time import monotonic as time
except ImportError:
from time import time
__all__ = ['Empty', 'Full', 'Queue', 'PriorityQueue', 'LifoQueue']
class Empty(Exception):
'Exception raised by Queue.get(block=0)/get_nowait().'
pass
class Full(Exception):
'Exception raised by Queue.put(block=0)/put_nowait().'
pass
class Queue:
'''Create a queue object with a given maximum size.
If maxsize is <= 0, the queue size is infinite.
'''
def __init__(self, maxsize=0):
self.maxsize = maxsize
self._init(maxsize)
# mutex must be held whenever the queue is mutating. All methods
# that acquire mutex must release it before returning. mutex
# is shared between the three conditions, so acquiring and
# releasing the conditions also acquires and releases mutex.
self.mutex = threading.Lock()
# Notify not_empty whenever an item is added to the queue; a
# thread waiting to get is notified then.
self.not_empty = threading.Condition(self.mutex)
# Notify not_full whenever an item is removed from the queue;
# a thread waiting to put is notified then.
self.not_full = threading.Condition(self.mutex)
# Notify all_tasks_done whenever the number of unfinished tasks
# drops to zero; thread waiting to join() is notified to resume
self.all_tasks_done = threading.Condition(self.mutex)
self.unfinished_tasks = 0
def task_done(self):
'''Indicate that a formerly enqueued task is complete.
Used by Queue consumer threads. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.
If a join() is currently blocking, it will resume when all items
have been processed (meaning that a task_done() call was received
for every item that had been put() into the queue).
Raises a ValueError if called more times than there were items
placed in the queue.
'''
with self.all_tasks_done:
unfinished = self.unfinished_tasks - 1
if unfinished <= 0:
if unfinished < 0:
raise ValueError('task_done() called too many times')
self.all_tasks_done.notify_all()
self.unfinished_tasks = unfinished
def join(self):
'''Blocks until all items in the Queue have been gotten and processed.
The count of unfinished tasks goes up whenever an item is added to the
queue. The count goes down whenever a consumer thread calls task_done()
to indicate the item was retrieved and all work on it is complete.
When the count of unfinished tasks drops to zero, join() unblocks.
'''
with self.all_tasks_done:
while self.unfinished_tasks:
self.all_tasks_done.wait()
def qsize(self):
'''Return the approximate size of the queue (not reliable!).'''
with self.mutex:
return self._qsize()
def empty(self):
'''Return True if the queue is empty, False otherwise (not reliable!).
This method is likely to be removed at some point. Use qsize() == 0
as a direct substitute, but be aware that either approach risks a race
condition where a queue can grow before the result of empty() or
qsize() can be used.
To create code that needs to wait for all queued tasks to be
completed, the preferred technique is to use the join() method.
'''
with self.mutex:
return not self._qsize()
def full(self):
'''Return True if the queue is full, False otherwise (not reliable!).
This method is likely to be removed at some point. Use qsize() >= n
as a direct substitute, but be aware that either approach risks a race
condition where a queue can shrink before the result of full() or
qsize() can be used.
'''
with self.mutex:
return 0 < self.maxsize <= self._qsize()
def put(self, item, block=True, timeout=None):
'''Put an item into the queue.
If optional args 'block' is true and 'timeout' is None (the default),
block if necessary until a free slot is available. If 'timeout' is
a non-negative number, it blocks at most 'timeout' seconds and raises
the Full exception if no free slot was available within that time.
Otherwise ('block' is false), put an item on the queue if a free slot
is immediately available, else raise the Full exception ('timeout'
is ignored in that case).
'''
with self.not_full:
if self.maxsize > 0:
if not block:
if self._qsize() >= self.maxsize:
raise Full
elif timeout is None:
while self._qsize() >= self.maxsize:
self.not_full.wait()
elif timeout < 0:
raise ValueError("'timeout' must be a non-negative number")
else:
endtime = time() + timeout
while self._qsize() >= self.maxsize:
remaining = endtime - time()
if remaining <= 0.0:
raise Full
self.not_full.wait(remaining)
self._put(item)
self.unfinished_tasks += 1
self.not_empty.notify()
def get(self, block=True, timeout=None):
'''Remove and return an item from the queue.
If optional args 'block' is true and 'timeout' is None (the default),
block if necessary until an item is available. If 'timeout' is
a non-negative number, it blocks at most 'timeout' seconds and raises
the Empty exception if no item was available within that time.
Otherwise ('block' is false), return an item if one is immediately
available, else raise the Empty exception ('timeout' is ignored
in that case).
'''
with self.not_empty:
if not block:
if not self._qsize():
raise Empty
elif timeout is None:
while not self._qsize():
self.not_empty.wait()
elif timeout < 0:
raise ValueError("'timeout' must be a non-negative number")
else:
endtime = time() + timeout
while not self._qsize():
remaining = endtime - time()
if remaining <= 0.0:
raise Empty
self.not_empty.wait(remaining)
item = self._get()
self.not_full.notify()
return item
def put_nowait(self, item):
'''Put an item into the queue without blocking.
Only enqueue the item if a free slot is immediately available.
Otherwise raise the Full exception.
'''
return self.put(item, block=False)
def get_nowait(self):
'''Remove and return an item from the queue without blocking.
Only get an item if one is immediately available. Otherwise
raise the Empty exception.
'''
return self.get(block=False)
# Override these methods to implement other queue organizations
# (e.g. stack or priority queue).
# These will only be called with appropriate locks held
# Initialize the queue representation
def _init(self, maxsize):
self.queue = deque()
def _qsize(self):
return len(self.queue)
# Put a new item in the queue
def _put(self, item):
self.queue.append(item)
# Get an item from the queue
def _get(self):
return self.queue.popleft()
class PriorityQueue(Queue):
'''Variant of Queue that retrieves open entries in priority order (lowest first).
Entries are typically tuples of the form: (priority number, data).
'''
def _init(self, maxsize):
self.queue = []
def _qsize(self):
return len(self.queue)
def _put(self, item):
heappush(self.queue, item)
def _get(self):
return heappop(self.queue)
class LifoQueue(Queue):
'''Variant of Queue that retrieves most recently added entries first.'''
def _init(self, maxsize):
self.queue = []
def _qsize(self):
return len(self.queue)
def _put(self, item):
self.queue.append(item)
def _get(self):
return self.queue.pop()
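# ---------------------------------------------------------------------------
# Illustrative usage (not part of the original module): a minimal
# producer/consumer sketch built on the Queue defined above. The worker calls
# task_done() once per get(), so join() only returns after every enqueued
# item has been processed, as the task_done()/join() docstrings describe.
# (PriorityQueue and LifoQueue are drop-in replacements with different
# retrieval orders.)
def _demo_producer_consumer():
    q = Queue(maxsize=4)
    results = []
    def worker():
        while True:
            item = q.get()            # blocks until an item is available
            if item is None:          # sentinel value: shut the worker down
                q.task_done()
                break
            results.append(item * 2)
            q.task_done()
    t = threading.Thread(target=worker)
    t.start()
    for i in range(10):
        q.put(i)                      # blocks while maxsize items are pending
    q.put(None)
    q.join()                          # waits for a task_done() per put()
    t.join()
    return results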
| lgpl-3.0 |
richliu/devstack | tools/generate-devstack-plugins-list.py | 5 | 2049 | #! /usr/bin/env python
# Copyright 2016 Hewlett Packard Enterprise Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This script is intended to be run as part of a periodic proposal bot
# job in OpenStack infrastructure.
#
# In order to function correctly, the environment in which the
# script runs must have
# * network access to the review.openstack.org Gerrit API
# working directory
# * network access to https://git.openstack.org/cgit
import logging
import json
import requests
logging.basicConfig(level=logging.DEBUG)
url = 'https://review.openstack.org/projects/'
# This is what a project looks like
'''
"openstack-attic/akanda": {
"id": "openstack-attic%2Fakanda",
"state": "READ_ONLY"
},
'''
def is_in_openstack_namespace(proj):
# only interested in openstack namespace (e.g. not retired
# stackforge, etc)
return proj.startswith('openstack/')
# Check if this project has a plugin file
def has_devstack_plugin(proj):
# Don't link in the deb packaging repos
if "openstack/deb-" in proj:
return False
r = requests.get("https://git.openstack.org/cgit/%s/plain/devstack/plugin.sh" % proj)
return r.status_code == 200
logging.debug("Getting project list from %s" % url)
r = requests.get(url)
projects = sorted(filter(is_in_openstack_namespace, json.loads(r.text[4:])))
logging.debug("Found %d projects" % len(projects))
found_plugins = filter(has_devstack_plugin, projects)
for project in found_plugins:
# strip of openstack/
print(project[10:])
| apache-2.0 |
h3biomed/ansible | lib/ansible/plugins/doc_fragments/ingate.py | 38 | 1571 | # -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ingate Systems AB
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
DOCUMENTATION = r'''
options:
client:
description:
- A dict object containing connection details.
suboptions:
version:
description:
- REST API version.
type: str
choices: [ v1 ]
default: v1
scheme:
description:
- Which HTTP protocol to use.
type: str
required: true
choices: [ http, https ]
address:
description:
- The hostname or IP address to the unit.
type: str
required: true
username:
description:
- The username of the REST API user.
type: str
required: true
password:
description:
- The password for the REST API user.
type: str
required: true
port:
description:
- Which HTTP(S) port to connect to.
type: int
timeout:
description:
- The timeout (in seconds) for REST API requests.
type: int
validate_certs:
description:
- Verify the unit's HTTPS certificate.
type: bool
default: yes
aliases: [ verify_ssl ]
notes:
- This module requires that the Ingate Python SDK is installed on the
host. To install the SDK use the pip command from your shell
C(pip install ingatesdk).
requirements:
- ingatesdk >= 1.0.6
'''
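# ---------------------------------------------------------------------------
# Illustrative note (not part of the original fragment): a minimal sketch of
# the 'client' dict these options describe, with made-up credentials and a
# documentation IP address. It only mirrors the documented suboptions above
# and is not consumed by any module in this file.
EXAMPLE_CLIENT = {
    'version': 'v1',
    'scheme': 'https',
    'address': '192.0.2.10',
    'username': 'alice',
    'password': 'foobar',
    'port': 443,
    'timeout': 10,
    'validate_certs': True,
}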
| gpl-3.0 |
karyon/django | tests/servers/tests.py | 33 | 6903 | # -*- encoding: utf-8 -*-
"""
Tests for django.core.servers.
"""
from __future__ import unicode_literals
import contextlib
import errno
import os
import socket
from django.core.exceptions import ImproperlyConfigured
from django.test import LiveServerTestCase, override_settings
from django.utils._os import upath
from django.utils.http import urlencode
from django.utils.six import text_type
from django.utils.six.moves.urllib.error import HTTPError
from django.utils.six.moves.urllib.request import urlopen
from .models import Person
TEST_ROOT = os.path.dirname(upath(__file__))
TEST_SETTINGS = {
'MEDIA_URL': '/media/',
'MEDIA_ROOT': os.path.join(TEST_ROOT, 'media'),
'STATIC_URL': '/static/',
'STATIC_ROOT': os.path.join(TEST_ROOT, 'static'),
}
@override_settings(ROOT_URLCONF='servers.urls', **TEST_SETTINGS)
class LiveServerBase(LiveServerTestCase):
available_apps = [
'servers',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
]
fixtures = ['testdata.json']
def urlopen(self, url):
return urlopen(self.live_server_url + url)
class LiveServerAddress(LiveServerBase):
"""
Ensure that the address set in the environment variable is valid.
Refs #2879.
"""
@classmethod
def setUpClass(cls):
# Backup original environment variable
address_predefined = 'DJANGO_LIVE_TEST_SERVER_ADDRESS' in os.environ
old_address = os.environ.get('DJANGO_LIVE_TEST_SERVER_ADDRESS')
# Just the host is not accepted
cls.raises_exception('localhost', ImproperlyConfigured)
# The host must be valid
cls.raises_exception('blahblahblah:8081', socket.error)
# The list of ports must be in a valid format
cls.raises_exception('localhost:8081,', ImproperlyConfigured)
cls.raises_exception('localhost:8081,blah', ImproperlyConfigured)
cls.raises_exception('localhost:8081-', ImproperlyConfigured)
cls.raises_exception('localhost:8081-blah', ImproperlyConfigured)
cls.raises_exception('localhost:8081-8082-8083', ImproperlyConfigured)
# Restore original environment variable
if address_predefined:
os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = old_address
else:
del os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS']
# put it in a list to prevent descriptor lookups in test
cls.live_server_url_test = [cls.live_server_url]
@classmethod
def tearDownClass(cls):
# skip it, as setUpClass doesn't call its parent either
pass
@classmethod
def raises_exception(cls, address, exception):
os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = address
try:
super(LiveServerAddress, cls).setUpClass()
raise Exception("The line above should have raised an exception")
except exception:
pass
finally:
super(LiveServerAddress, cls).tearDownClass()
def test_live_server_url_is_class_property(self):
self.assertIsInstance(self.live_server_url_test[0], text_type)
self.assertEqual(self.live_server_url_test[0], self.live_server_url)
class LiveServerViews(LiveServerBase):
def test_404(self):
"""
Ensure that the LiveServerTestCase serves 404s.
Refs #2879.
"""
try:
self.urlopen('/')
except HTTPError as err:
self.assertEqual(err.code, 404, 'Expected 404 response')
else:
self.fail('Expected 404 response')
def test_view(self):
"""
Ensure that the LiveServerTestCase serves views.
Refs #2879.
"""
with contextlib.closing(self.urlopen('/example_view/')) as f:
self.assertEqual(f.read(), b'example view')
def test_static_files(self):
"""
Ensure that the LiveServerTestCase serves static files.
Refs #2879.
"""
with contextlib.closing(self.urlopen('/static/example_static_file.txt')) as f:
self.assertEqual(f.read().rstrip(b'\r\n'), b'example static file')
def test_no_collectstatic_emulation(self):
"""
Test that LiveServerTestCase reports a 404 status code when HTTP client
tries to access a static file that isn't explicitly put under
STATIC_ROOT.
"""
try:
self.urlopen('/static/another_app/another_app_static_file.txt')
except HTTPError as err:
self.assertEqual(err.code, 404, 'Expected 404 response')
else:
            self.fail('Expected 404 response')
def test_media_files(self):
"""
Ensure that the LiveServerTestCase serves media files.
Refs #2879.
"""
with contextlib.closing(self.urlopen('/media/example_media_file.txt')) as f:
self.assertEqual(f.read().rstrip(b'\r\n'), b'example media file')
def test_environ(self):
with contextlib.closing(self.urlopen('/environ_view/?%s' % urlencode({'q': 'тест'}))) as f:
self.assertIn(b"QUERY_STRING: 'q=%D1%82%D0%B5%D1%81%D1%82'", f.read())
class LiveServerDatabase(LiveServerBase):
def test_fixtures_loaded(self):
"""
Ensure that fixtures are properly loaded and visible to the
live server thread.
Refs #2879.
"""
with contextlib.closing(self.urlopen('/model_view/')) as f:
self.assertEqual(f.read().splitlines(), [b'jane', b'robert'])
def test_database_writes(self):
"""
Ensure that data written to the database by a view can be read.
Refs #2879.
"""
self.urlopen('/create_model_instance/')
self.assertQuerysetEqual(
Person.objects.all().order_by('pk'),
['jane', 'robert', 'emily'],
lambda b: b.name
)
class LiveServerPort(LiveServerBase):
def test_port_bind(self):
"""
Each LiveServerTestCase binds to a unique port or fails to start a
server thread when run concurrently (#26011).
"""
TestCase = type(str("TestCase"), (LiveServerBase,), {})
try:
TestCase.setUpClass()
except socket.error as e:
if e.errno == errno.EADDRINUSE:
# We're out of ports, LiveServerTestCase correctly fails with
# a socket error.
return
# Unexpected error.
raise
try:
# We've acquired a port, ensure our server threads acquired
# different addresses.
self.assertNotEqual(
self.live_server_url, TestCase.live_server_url,
"Acquired duplicate server addresses for server threads: %s" % self.live_server_url
)
finally:
TestCase.tearDownClass()
| bsd-3-clause |
lucifurtun/myquotes | apps/quotes/models.py | 1 | 2273 | from django.conf import settings
from django.db import models
from django.utils import timezone
class NameAsStrMixin(object):
name = None
def __str__(self, *args, **kwargs):
return self.name
class UserTimeStampedModel(models.Model):
created = models.DateTimeField(auto_now_add=True, editable=False)
modified = models.DateTimeField(auto_now=True)
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
def save(self, *args, **kwargs):
if not self.id:
self.created = timezone.now()
self.modified = timezone.now()
super(UserTimeStampedModel, self).save(*args, **kwargs)
class Meta:
get_latest_by = 'modified'
ordering = ('-modified', '-created',)
abstract = True
class Category(NameAsStrMixin, UserTimeStampedModel):
name = models.CharField(max_length=300)
class Meta:
verbose_name_plural = 'Categories'
unique_together = ('user', 'name')
class Tag(NameAsStrMixin, UserTimeStampedModel):
name = models.CharField(max_length=300)
class Meta:
unique_together = ('user', 'name')
class Author(NameAsStrMixin, UserTimeStampedModel):
name = models.CharField(max_length=300, unique=True)
users = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name='authors')
class Quote(UserTimeStampedModel):
title = models.CharField(max_length=400)
author = models.ForeignKey('Author', blank=True, null=True, on_delete=models.SET_NULL)
category = models.ForeignKey('Category', blank=True, null=True, on_delete=models.SET_NULL)
source = models.CharField(max_length=200, blank=True, null=True)
reference = models.CharField(max_length=100, blank=True, null=True)
tags = models.ManyToManyField('Tag', blank=True)
text = models.TextField()
    private = models.BooleanField(default=False)
class Meta:
unique_together = (('user', 'title'), ('user', 'text'))
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
if self.author and not self.author.users.filter(id=self.user.id).exists():
self.author.users.add(self.user)
def __str__(self):
return '{title} - {author}'.format(title=self.title, author=self.author)
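# ---------------------------------------------------------------------------
# Illustrative usage (not part of the original module): a minimal sketch of
# how Quote.save() links a quote's author to the quote's user. It assumes a
# configured Django project with these models migrated and an existing `user`
# instance; the names below (user, author, quote, 'Seneca') are only examples.
def _demo_author_linking(user):
    author, _ = Author.objects.get_or_create(name='Seneca',
                                             defaults={'user': user})
    quote = Quote.objects.create(
        user=user,
        title='On the Shortness of Life',
        text='It is not that we have a short time to live, '
             'but that we waste a lot of it.',
        author=author,
    )
    # Quote.save() (invoked by create()) adds quote.user to author.users
    # when the relation is missing, so this lookup succeeds.
    assert author.users.filter(id=user.id).exists()
    return quote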
| bsd-3-clause |
patriciolobos/desa8 | openerp/addons/mrp/res_config.py | 301 | 3684 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class mrp_config_settings(osv.osv_memory):
_name = 'mrp.config.settings'
_inherit = 'res.config.settings'
_columns = {
'module_mrp_repair': fields.boolean("Manage repairs of products ",
help='Allows to manage all product repairs.\n'
'* Add/remove products in the reparation\n'
'* Impact for stocks\n'
'* Invoicing (products and/or services)\n'
'* Warranty concept\n'
'* Repair quotation report\n'
'* Notes for the technician and for the final customer.\n'
'-This installs the module mrp_repair.'),
'module_mrp_operations': fields.boolean("Allow detailed planning of work order",
help='This allows to add state, date_start,date_stop in production order operation lines (in the "Work Centers" tab).\n'
'-This installs the module mrp_operations.'),
'module_mrp_byproduct': fields.boolean("Produce several products from one manufacturing order",
help='You can configure by-products in the bill of material.\n'
'Without this module: A + B + C -> D.\n'
'With this module: A + B + C -> D + E.\n'
'-This installs the module mrp_byproduct.'),
'group_mrp_routings': fields.boolean("Manage routings and work orders ",
implied_group='mrp.group_mrp_routings',
help='Routings allow you to create and manage the manufacturing operations that should be followed '
'within your work centers in order to produce a product. They are attached to bills of materials '
'that will define the required raw materials.'),
'group_mrp_properties': fields.boolean("Allow several bill of materials per products using properties",
implied_group='product.group_mrp_properties',
help="""The selection of the right Bill of Material to use will depend on the properties specified on the sales order and the Bill of Material."""),
#FIXME: Should be removed as module product_manufacturer has been removed
'module_product_manufacturer': fields.boolean("Define manufacturers on products ",
help='This allows you to define the following for a product:\n'
'* Manufacturer\n'
'* Manufacturer Product Name\n'
'* Manufacturer Product Code\n'
'* Product Attributes.\n'
'-This installs the module product_manufacturer.'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
whelan957/leetcode | python3/Array/leetcode048_RotateImage.py | 1 | 1351 | # You are given an n x n 2D matrix representing an image.
# Rotate the image by 90 degrees (clockwise).
# Note:
# You have to rotate the image in-place, which means you have to modify the input 2D matrix directly. DO NOT allocate another 2D matrix and do the rotation.
# Example 1:
# Given input matrix =
# [
# [1,2,3],
# [4,5,6],
# [7,8,9]
# ],
# rotate the input matrix in-place such that it becomes:
# [
# [7,4,1],
# [8,5,2],
# [9,6,3]
# ]
# Example 2:
# Given input matrix =
# [
# [ 5, 1, 9,11],
# [ 2, 4, 8,10],
# [13, 3, 6, 7],
# [15,14,12,16]
# ],
# rotate the input matrix in-place such that it becomes:
# [
# [15,13, 2, 5],
# [14, 3, 4, 1],
# [12, 6, 8, 9],
# [16, 7,10,11]
# ]
class Solution:
# score:99.46(36ms)
# time:O(n^2)
# space:O(1)
def rotate(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: void Do not return anything, modify matrix in-place instead.
"""
# Turn upside down
for l, r in zip(range(len(matrix)), range(len(matrix)-1, 0, -1)):
if l >= r:
break
matrix[l], matrix[r] = matrix[r], matrix[l]
# Swap symmetry
for i in range(len(matrix)):
for j in range(i+1, len(matrix)):
matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]
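# ---------------------------------------------------------------------------
# Illustrative usage (not part of the original solution): rotating the first
# example matrix from the problem statement in place and checking the result.
if __name__ == '__main__':
    matrix = [
        [1, 2, 3],
        [4, 5, 6],
        [7, 8, 9],
    ]
    Solution().rotate(matrix)
    # Reversing the rows and then transposing yields a 90-degree clockwise
    # rotation, so the rows become the former columns read bottom-up.
    assert matrix == [
        [7, 4, 1],
        [8, 5, 2],
        [9, 6, 3],
    ]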
| gpl-3.0 |
zhjunlang/kbengine | kbe/src/lib/python/Lib/test/test_descr.py | 60 | 172168 | import builtins
import copyreg
import gc
import itertools
import math
import pickle
import sys
import types
import unittest
import weakref
from copy import deepcopy
from test import support
class OperatorsTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
self.binops = {
'add': '+',
'sub': '-',
'mul': '*',
'div': '/',
'divmod': 'divmod',
'pow': '**',
'lshift': '<<',
'rshift': '>>',
'and': '&',
'xor': '^',
'or': '|',
'cmp': 'cmp',
'lt': '<',
'le': '<=',
'eq': '==',
'ne': '!=',
'gt': '>',
'ge': '>=',
}
for name, expr in list(self.binops.items()):
if expr.islower():
expr = expr + "(a, b)"
else:
expr = 'a %s b' % expr
self.binops[name] = expr
self.unops = {
'pos': '+',
'neg': '-',
'abs': 'abs',
'invert': '~',
'int': 'int',
'float': 'float',
'oct': 'oct',
'hex': 'hex',
}
for name, expr in list(self.unops.items()):
if expr.islower():
expr = expr + "(a)"
else:
expr = '%s a' % expr
self.unops[name] = expr
def unop_test(self, a, res, expr="len(a)", meth="__len__"):
d = {'a': a}
self.assertEqual(eval(expr, d), res)
t = type(a)
m = getattr(t, meth)
# Find method in parent class
while meth not in t.__dict__:
t = t.__bases__[0]
# in some implementations (e.g. PyPy), 'm' can be a regular unbound
# method object; the getattr() below obtains its underlying function.
self.assertEqual(getattr(m, 'im_func', m), t.__dict__[meth])
self.assertEqual(m(a), res)
bm = getattr(a, meth)
self.assertEqual(bm(), res)
def binop_test(self, a, b, res, expr="a+b", meth="__add__"):
d = {'a': a, 'b': b}
# XXX Hack so this passes before 2.3 when -Qnew is specified.
if meth == "__div__" and 1/2 == 0.5:
meth = "__truediv__"
if meth == '__divmod__': pass
self.assertEqual(eval(expr, d), res)
t = type(a)
m = getattr(t, meth)
while meth not in t.__dict__:
t = t.__bases__[0]
# in some implementations (e.g. PyPy), 'm' can be a regular unbound
# method object; the getattr() below obtains its underlying function.
self.assertEqual(getattr(m, 'im_func', m), t.__dict__[meth])
self.assertEqual(m(a, b), res)
bm = getattr(a, meth)
self.assertEqual(bm(b), res)
def sliceop_test(self, a, b, c, res, expr="a[b:c]", meth="__getitem__"):
d = {'a': a, 'b': b, 'c': c}
self.assertEqual(eval(expr, d), res)
t = type(a)
m = getattr(t, meth)
while meth not in t.__dict__:
t = t.__bases__[0]
# in some implementations (e.g. PyPy), 'm' can be a regular unbound
# method object; the getattr() below obtains its underlying function.
self.assertEqual(getattr(m, 'im_func', m), t.__dict__[meth])
self.assertEqual(m(a, slice(b, c)), res)
bm = getattr(a, meth)
self.assertEqual(bm(slice(b, c)), res)
def setop_test(self, a, b, res, stmt="a+=b", meth="__iadd__"):
d = {'a': deepcopy(a), 'b': b}
exec(stmt, d)
self.assertEqual(d['a'], res)
t = type(a)
m = getattr(t, meth)
while meth not in t.__dict__:
t = t.__bases__[0]
# in some implementations (e.g. PyPy), 'm' can be a regular unbound
# method object; the getattr() below obtains its underlying function.
self.assertEqual(getattr(m, 'im_func', m), t.__dict__[meth])
d['a'] = deepcopy(a)
m(d['a'], b)
self.assertEqual(d['a'], res)
d['a'] = deepcopy(a)
bm = getattr(d['a'], meth)
bm(b)
self.assertEqual(d['a'], res)
def set2op_test(self, a, b, c, res, stmt="a[b]=c", meth="__setitem__"):
d = {'a': deepcopy(a), 'b': b, 'c': c}
exec(stmt, d)
self.assertEqual(d['a'], res)
t = type(a)
m = getattr(t, meth)
while meth not in t.__dict__:
t = t.__bases__[0]
# in some implementations (e.g. PyPy), 'm' can be a regular unbound
# method object; the getattr() below obtains its underlying function.
self.assertEqual(getattr(m, 'im_func', m), t.__dict__[meth])
d['a'] = deepcopy(a)
m(d['a'], b, c)
self.assertEqual(d['a'], res)
d['a'] = deepcopy(a)
bm = getattr(d['a'], meth)
bm(b, c)
self.assertEqual(d['a'], res)
def setsliceop_test(self, a, b, c, d, res, stmt="a[b:c]=d", meth="__setitem__"):
dictionary = {'a': deepcopy(a), 'b': b, 'c': c, 'd': d}
exec(stmt, dictionary)
self.assertEqual(dictionary['a'], res)
t = type(a)
while meth not in t.__dict__:
t = t.__bases__[0]
m = getattr(t, meth)
# in some implementations (e.g. PyPy), 'm' can be a regular unbound
# method object; the getattr() below obtains its underlying function.
self.assertEqual(getattr(m, 'im_func', m), t.__dict__[meth])
dictionary['a'] = deepcopy(a)
m(dictionary['a'], slice(b, c), d)
self.assertEqual(dictionary['a'], res)
dictionary['a'] = deepcopy(a)
bm = getattr(dictionary['a'], meth)
bm(slice(b, c), d)
self.assertEqual(dictionary['a'], res)
def test_lists(self):
# Testing list operations...
# Asserts are within individual test methods
self.binop_test([1], [2], [1,2], "a+b", "__add__")
self.binop_test([1,2,3], 2, 1, "b in a", "__contains__")
self.binop_test([1,2,3], 4, 0, "b in a", "__contains__")
self.binop_test([1,2,3], 1, 2, "a[b]", "__getitem__")
self.sliceop_test([1,2,3], 0, 2, [1,2], "a[b:c]", "__getitem__")
self.setop_test([1], [2], [1,2], "a+=b", "__iadd__")
self.setop_test([1,2], 3, [1,2,1,2,1,2], "a*=b", "__imul__")
self.unop_test([1,2,3], 3, "len(a)", "__len__")
self.binop_test([1,2], 3, [1,2,1,2,1,2], "a*b", "__mul__")
self.binop_test([1,2], 3, [1,2,1,2,1,2], "b*a", "__rmul__")
self.set2op_test([1,2], 1, 3, [1,3], "a[b]=c", "__setitem__")
self.setsliceop_test([1,2,3,4], 1, 3, [5,6], [1,5,6,4], "a[b:c]=d",
"__setitem__")
def test_dicts(self):
# Testing dict operations...
self.binop_test({1:2,3:4}, 1, 1, "b in a", "__contains__")
self.binop_test({1:2,3:4}, 2, 0, "b in a", "__contains__")
self.binop_test({1:2,3:4}, 1, 2, "a[b]", "__getitem__")
d = {1:2, 3:4}
l1 = []
for i in list(d.keys()):
l1.append(i)
l = []
for i in iter(d):
l.append(i)
self.assertEqual(l, l1)
l = []
for i in d.__iter__():
l.append(i)
self.assertEqual(l, l1)
l = []
for i in dict.__iter__(d):
l.append(i)
self.assertEqual(l, l1)
d = {1:2, 3:4}
self.unop_test(d, 2, "len(a)", "__len__")
self.assertEqual(eval(repr(d), {}), d)
self.assertEqual(eval(d.__repr__(), {}), d)
self.set2op_test({1:2,3:4}, 2, 3, {1:2,2:3,3:4}, "a[b]=c",
"__setitem__")
# Tests for unary and binary operators
def number_operators(self, a, b, skip=[]):
dict = {'a': a, 'b': b}
for name, expr in list(self.binops.items()):
if name not in skip:
name = "__%s__" % name
if hasattr(a, name):
res = eval(expr, dict)
self.binop_test(a, b, res, expr, name)
for name, expr in list(self.unops.items()):
if name not in skip:
name = "__%s__" % name
if hasattr(a, name):
res = eval(expr, dict)
self.unop_test(a, res, expr, name)
def test_ints(self):
# Testing int operations...
self.number_operators(100, 3)
# The following crashes in Python 2.2
self.assertEqual((1).__bool__(), 1)
self.assertEqual((0).__bool__(), 0)
# This returns 'NotImplemented' in Python 2.2
class C(int):
def __add__(self, other):
return NotImplemented
self.assertEqual(C(5), 5)
try:
C() + ""
except TypeError:
pass
else:
self.fail("NotImplemented should have caused TypeError")
def test_floats(self):
# Testing float operations...
self.number_operators(100.0, 3.0)
def test_complexes(self):
# Testing complex operations...
self.number_operators(100.0j, 3.0j, skip=['lt', 'le', 'gt', 'ge',
'int', 'float',
'divmod', 'mod'])
class Number(complex):
__slots__ = ['prec']
def __new__(cls, *args, **kwds):
result = complex.__new__(cls, *args)
result.prec = kwds.get('prec', 12)
return result
def __repr__(self):
prec = self.prec
if self.imag == 0.0:
return "%.*g" % (prec, self.real)
if self.real == 0.0:
return "%.*gj" % (prec, self.imag)
return "(%.*g+%.*gj)" % (prec, self.real, prec, self.imag)
__str__ = __repr__
a = Number(3.14, prec=6)
self.assertEqual(repr(a), "3.14")
self.assertEqual(a.prec, 6)
a = Number(a, prec=2)
self.assertEqual(repr(a), "3.1")
self.assertEqual(a.prec, 2)
a = Number(234.5)
self.assertEqual(repr(a), "234.5")
self.assertEqual(a.prec, 12)
def test_explicit_reverse_methods(self):
# see issue 9930
self.assertEqual(complex.__radd__(3j, 4.0), complex(4.0, 3.0))
self.assertEqual(float.__rsub__(3.0, 1), -2.0)
@support.impl_detail("the module 'xxsubtype' is internal")
def test_spam_lists(self):
# Testing spamlist operations...
import copy, xxsubtype as spam
def spamlist(l, memo=None):
import xxsubtype as spam
return spam.spamlist(l)
# This is an ugly hack:
copy._deepcopy_dispatch[spam.spamlist] = spamlist
self.binop_test(spamlist([1]), spamlist([2]), spamlist([1,2]), "a+b",
"__add__")
self.binop_test(spamlist([1,2,3]), 2, 1, "b in a", "__contains__")
self.binop_test(spamlist([1,2,3]), 4, 0, "b in a", "__contains__")
self.binop_test(spamlist([1,2,3]), 1, 2, "a[b]", "__getitem__")
self.sliceop_test(spamlist([1,2,3]), 0, 2, spamlist([1,2]), "a[b:c]",
"__getitem__")
self.setop_test(spamlist([1]), spamlist([2]), spamlist([1,2]), "a+=b",
"__iadd__")
self.setop_test(spamlist([1,2]), 3, spamlist([1,2,1,2,1,2]), "a*=b",
"__imul__")
self.unop_test(spamlist([1,2,3]), 3, "len(a)", "__len__")
self.binop_test(spamlist([1,2]), 3, spamlist([1,2,1,2,1,2]), "a*b",
"__mul__")
self.binop_test(spamlist([1,2]), 3, spamlist([1,2,1,2,1,2]), "b*a",
"__rmul__")
self.set2op_test(spamlist([1,2]), 1, 3, spamlist([1,3]), "a[b]=c",
"__setitem__")
self.setsliceop_test(spamlist([1,2,3,4]), 1, 3, spamlist([5,6]),
spamlist([1,5,6,4]), "a[b:c]=d", "__setitem__")
# Test subclassing
class C(spam.spamlist):
def foo(self): return 1
a = C()
self.assertEqual(a, [])
self.assertEqual(a.foo(), 1)
a.append(100)
self.assertEqual(a, [100])
self.assertEqual(a.getstate(), 0)
a.setstate(42)
self.assertEqual(a.getstate(), 42)
@support.impl_detail("the module 'xxsubtype' is internal")
def test_spam_dicts(self):
# Testing spamdict operations...
import copy, xxsubtype as spam
def spamdict(d, memo=None):
import xxsubtype as spam
sd = spam.spamdict()
for k, v in list(d.items()):
sd[k] = v
return sd
# This is an ugly hack:
copy._deepcopy_dispatch[spam.spamdict] = spamdict
self.binop_test(spamdict({1:2,3:4}), 1, 1, "b in a", "__contains__")
self.binop_test(spamdict({1:2,3:4}), 2, 0, "b in a", "__contains__")
self.binop_test(spamdict({1:2,3:4}), 1, 2, "a[b]", "__getitem__")
d = spamdict({1:2,3:4})
l1 = []
for i in list(d.keys()):
l1.append(i)
l = []
for i in iter(d):
l.append(i)
self.assertEqual(l, l1)
l = []
for i in d.__iter__():
l.append(i)
self.assertEqual(l, l1)
l = []
for i in type(spamdict({})).__iter__(d):
l.append(i)
self.assertEqual(l, l1)
straightd = {1:2, 3:4}
spamd = spamdict(straightd)
self.unop_test(spamd, 2, "len(a)", "__len__")
self.unop_test(spamd, repr(straightd), "repr(a)", "__repr__")
self.set2op_test(spamdict({1:2,3:4}), 2, 3, spamdict({1:2,2:3,3:4}),
"a[b]=c", "__setitem__")
# Test subclassing
class C(spam.spamdict):
def foo(self): return 1
a = C()
self.assertEqual(list(a.items()), [])
self.assertEqual(a.foo(), 1)
a['foo'] = 'bar'
self.assertEqual(list(a.items()), [('foo', 'bar')])
self.assertEqual(a.getstate(), 0)
a.setstate(100)
self.assertEqual(a.getstate(), 100)
class ClassPropertiesAndMethods(unittest.TestCase):
def assertHasAttr(self, obj, name):
self.assertTrue(hasattr(obj, name),
'%r has no attribute %r' % (obj, name))
def assertNotHasAttr(self, obj, name):
self.assertFalse(hasattr(obj, name),
'%r has unexpected attribute %r' % (obj, name))
def test_python_dicts(self):
# Testing Python subclass of dict...
self.assertTrue(issubclass(dict, dict))
self.assertIsInstance({}, dict)
d = dict()
self.assertEqual(d, {})
self.assertIs(d.__class__, dict)
self.assertIsInstance(d, dict)
class C(dict):
state = -1
def __init__(self_local, *a, **kw):
if a:
self.assertEqual(len(a), 1)
self_local.state = a[0]
if kw:
for k, v in list(kw.items()):
self_local[v] = k
def __getitem__(self, key):
return self.get(key, 0)
def __setitem__(self_local, key, value):
self.assertIsInstance(key, type(0))
dict.__setitem__(self_local, key, value)
def setstate(self, state):
self.state = state
def getstate(self):
return self.state
self.assertTrue(issubclass(C, dict))
a1 = C(12)
self.assertEqual(a1.state, 12)
a2 = C(foo=1, bar=2)
self.assertEqual(a2[1] == 'foo' and a2[2], 'bar')
a = C()
self.assertEqual(a.state, -1)
self.assertEqual(a.getstate(), -1)
a.setstate(0)
self.assertEqual(a.state, 0)
self.assertEqual(a.getstate(), 0)
a.setstate(10)
self.assertEqual(a.state, 10)
self.assertEqual(a.getstate(), 10)
self.assertEqual(a[42], 0)
a[42] = 24
self.assertEqual(a[42], 24)
N = 50
for i in range(N):
a[i] = C()
for j in range(N):
a[i][j] = i*j
for i in range(N):
for j in range(N):
self.assertEqual(a[i][j], i*j)
def test_python_lists(self):
# Testing Python subclass of list...
class C(list):
def __getitem__(self, i):
if isinstance(i, slice):
return i.start, i.stop
return list.__getitem__(self, i) + 100
a = C()
a.extend([0,1,2])
self.assertEqual(a[0], 100)
self.assertEqual(a[1], 101)
self.assertEqual(a[2], 102)
self.assertEqual(a[100:200], (100,200))
def test_metaclass(self):
# Testing metaclasses...
class C(metaclass=type):
def __init__(self):
self.__state = 0
def getstate(self):
return self.__state
def setstate(self, state):
self.__state = state
a = C()
self.assertEqual(a.getstate(), 0)
a.setstate(10)
self.assertEqual(a.getstate(), 10)
class _metaclass(type):
def myself(cls): return cls
class D(metaclass=_metaclass):
pass
self.assertEqual(D.myself(), D)
d = D()
self.assertEqual(d.__class__, D)
class M1(type):
def __new__(cls, name, bases, dict):
dict['__spam__'] = 1
return type.__new__(cls, name, bases, dict)
class C(metaclass=M1):
pass
self.assertEqual(C.__spam__, 1)
c = C()
self.assertEqual(c.__spam__, 1)
class _instance(object):
pass
class M2(object):
@staticmethod
def __new__(cls, name, bases, dict):
self = object.__new__(cls)
self.name = name
self.bases = bases
self.dict = dict
return self
def __call__(self):
it = _instance()
# Early binding of methods
for key in self.dict:
if key.startswith("__"):
continue
setattr(it, key, self.dict[key].__get__(it, self))
return it
class C(metaclass=M2):
def spam(self):
return 42
self.assertEqual(C.name, 'C')
self.assertEqual(C.bases, ())
self.assertIn('spam', C.dict)
c = C()
self.assertEqual(c.spam(), 42)
# More metaclass examples
class autosuper(type):
# Automatically add __super to the class
# This trick only works for dynamic classes
def __new__(metaclass, name, bases, dict):
cls = super(autosuper, metaclass).__new__(metaclass,
name, bases, dict)
# Name mangling for __super removes leading underscores
while name[:1] == "_":
name = name[1:]
if name:
name = "_%s__super" % name
else:
name = "__super"
setattr(cls, name, super(cls))
return cls
class A(metaclass=autosuper):
def meth(self):
return "A"
class B(A):
def meth(self):
return "B" + self.__super.meth()
class C(A):
def meth(self):
return "C" + self.__super.meth()
class D(C, B):
def meth(self):
return "D" + self.__super.meth()
self.assertEqual(D().meth(), "DCBA")
class E(B, C):
def meth(self):
return "E" + self.__super.meth()
self.assertEqual(E().meth(), "EBCA")
class autoproperty(type):
# Automatically create property attributes when methods
# named _get_x and/or _set_x are found
def __new__(metaclass, name, bases, dict):
hits = {}
for key, val in dict.items():
if key.startswith("_get_"):
key = key[5:]
get, set = hits.get(key, (None, None))
get = val
hits[key] = get, set
elif key.startswith("_set_"):
key = key[5:]
get, set = hits.get(key, (None, None))
set = val
hits[key] = get, set
for key, (get, set) in hits.items():
dict[key] = property(get, set)
return super(autoproperty, metaclass).__new__(metaclass,
name, bases, dict)
class A(metaclass=autoproperty):
def _get_x(self):
return -self.__x
def _set_x(self, x):
self.__x = -x
a = A()
self.assertNotHasAttr(a, "x")
a.x = 12
self.assertEqual(a.x, 12)
self.assertEqual(a._A__x, -12)
class multimetaclass(autoproperty, autosuper):
# Merge of multiple cooperating metaclasses
pass
class A(metaclass=multimetaclass):
def _get_x(self):
return "A"
class B(A):
def _get_x(self):
return "B" + self.__super._get_x()
class C(A):
def _get_x(self):
return "C" + self.__super._get_x()
class D(C, B):
def _get_x(self):
return "D" + self.__super._get_x()
self.assertEqual(D().x, "DCBA")
# Make sure type(x) doesn't call x.__class__.__init__
class T(type):
counter = 0
def __init__(self, *args):
T.counter += 1
class C(metaclass=T):
pass
self.assertEqual(T.counter, 1)
a = C()
self.assertEqual(type(a), C)
self.assertEqual(T.counter, 1)
class C(object): pass
c = C()
try: c()
except TypeError: pass
else: self.fail("calling object w/o call method should raise "
"TypeError")
# Testing code to find most derived baseclass
class A(type):
def __new__(*args, **kwargs):
return type.__new__(*args, **kwargs)
class B(object):
pass
class C(object, metaclass=A):
pass
# The most derived metaclass of D is A rather than type.
class D(B, C):
pass
self.assertIs(A, type(D))
# issue1294232: correct metaclass calculation
new_calls = [] # to check the order of __new__ calls
class AMeta(type):
@staticmethod
def __new__(mcls, name, bases, ns):
new_calls.append('AMeta')
return super().__new__(mcls, name, bases, ns)
@classmethod
def __prepare__(mcls, name, bases):
return {}
class BMeta(AMeta):
@staticmethod
def __new__(mcls, name, bases, ns):
new_calls.append('BMeta')
return super().__new__(mcls, name, bases, ns)
@classmethod
def __prepare__(mcls, name, bases):
ns = super().__prepare__(name, bases)
ns['BMeta_was_here'] = True
return ns
class A(metaclass=AMeta):
pass
self.assertEqual(['AMeta'], new_calls)
new_calls.clear()
class B(metaclass=BMeta):
pass
# BMeta.__new__ calls AMeta.__new__ with super:
self.assertEqual(['BMeta', 'AMeta'], new_calls)
new_calls.clear()
class C(A, B):
pass
# The most derived metaclass is BMeta:
self.assertEqual(['BMeta', 'AMeta'], new_calls)
new_calls.clear()
# BMeta.__prepare__ should've been called:
self.assertIn('BMeta_was_here', C.__dict__)
# The order of the bases shouldn't matter:
class C2(B, A):
pass
self.assertEqual(['BMeta', 'AMeta'], new_calls)
new_calls.clear()
self.assertIn('BMeta_was_here', C2.__dict__)
# Check correct metaclass calculation when a metaclass is declared:
class D(C, metaclass=type):
pass
self.assertEqual(['BMeta', 'AMeta'], new_calls)
new_calls.clear()
self.assertIn('BMeta_was_here', D.__dict__)
class E(C, metaclass=AMeta):
pass
self.assertEqual(['BMeta', 'AMeta'], new_calls)
new_calls.clear()
self.assertIn('BMeta_was_here', E.__dict__)
# Special case: the given metaclass isn't a class,
# so there is no metaclass calculation.
marker = object()
def func(*args, **kwargs):
return marker
class X(metaclass=func):
pass
class Y(object, metaclass=func):
pass
class Z(D, metaclass=func):
pass
self.assertIs(marker, X)
self.assertIs(marker, Y)
self.assertIs(marker, Z)
# The given metaclass is a class,
# but not a descendant of type.
prepare_calls = [] # to track __prepare__ calls
class ANotMeta:
def __new__(mcls, *args, **kwargs):
new_calls.append('ANotMeta')
return super().__new__(mcls)
@classmethod
def __prepare__(mcls, name, bases):
prepare_calls.append('ANotMeta')
return {}
class BNotMeta(ANotMeta):
def __new__(mcls, *args, **kwargs):
new_calls.append('BNotMeta')
return super().__new__(mcls)
@classmethod
def __prepare__(mcls, name, bases):
prepare_calls.append('BNotMeta')
return super().__prepare__(name, bases)
class A(metaclass=ANotMeta):
pass
self.assertIs(ANotMeta, type(A))
self.assertEqual(['ANotMeta'], prepare_calls)
prepare_calls.clear()
self.assertEqual(['ANotMeta'], new_calls)
new_calls.clear()
class B(metaclass=BNotMeta):
pass
self.assertIs(BNotMeta, type(B))
self.assertEqual(['BNotMeta', 'ANotMeta'], prepare_calls)
prepare_calls.clear()
self.assertEqual(['BNotMeta', 'ANotMeta'], new_calls)
new_calls.clear()
class C(A, B):
pass
self.assertIs(BNotMeta, type(C))
self.assertEqual(['BNotMeta', 'ANotMeta'], new_calls)
new_calls.clear()
self.assertEqual(['BNotMeta', 'ANotMeta'], prepare_calls)
prepare_calls.clear()
class C2(B, A):
pass
self.assertIs(BNotMeta, type(C2))
self.assertEqual(['BNotMeta', 'ANotMeta'], new_calls)
new_calls.clear()
self.assertEqual(['BNotMeta', 'ANotMeta'], prepare_calls)
prepare_calls.clear()
# This is a TypeError, because of a metaclass conflict:
# BNotMeta is neither a subclass, nor a superclass of type
with self.assertRaises(TypeError):
class D(C, metaclass=type):
pass
class E(C, metaclass=ANotMeta):
pass
self.assertIs(BNotMeta, type(E))
self.assertEqual(['BNotMeta', 'ANotMeta'], new_calls)
new_calls.clear()
self.assertEqual(['BNotMeta', 'ANotMeta'], prepare_calls)
prepare_calls.clear()
class F(object(), C):
pass
self.assertIs(BNotMeta, type(F))
self.assertEqual(['BNotMeta', 'ANotMeta'], new_calls)
new_calls.clear()
self.assertEqual(['BNotMeta', 'ANotMeta'], prepare_calls)
prepare_calls.clear()
class F2(C, object()):
pass
self.assertIs(BNotMeta, type(F2))
self.assertEqual(['BNotMeta', 'ANotMeta'], new_calls)
new_calls.clear()
self.assertEqual(['BNotMeta', 'ANotMeta'], prepare_calls)
prepare_calls.clear()
# TypeError: BNotMeta is neither a
# subclass, nor a superclass of int
with self.assertRaises(TypeError):
class X(C, int()):
pass
with self.assertRaises(TypeError):
class X(int(), C):
pass
def test_module_subclasses(self):
# Testing Python subclass of module...
log = []
MT = type(sys)
class MM(MT):
def __init__(self, name):
MT.__init__(self, name)
def __getattribute__(self, name):
log.append(("getattr", name))
return MT.__getattribute__(self, name)
def __setattr__(self, name, value):
log.append(("setattr", name, value))
MT.__setattr__(self, name, value)
def __delattr__(self, name):
log.append(("delattr", name))
MT.__delattr__(self, name)
a = MM("a")
a.foo = 12
x = a.foo
del a.foo
self.assertEqual(log, [("setattr", "foo", 12),
("getattr", "foo"),
("delattr", "foo")])
# http://python.org/sf/1174712
try:
class Module(types.ModuleType, str):
pass
except TypeError:
pass
else:
self.fail("inheriting from ModuleType and str at the same time "
"should fail")
def test_multiple_inheritance(self):
# Testing multiple inheritance...
class C(object):
def __init__(self):
self.__state = 0
def getstate(self):
return self.__state
def setstate(self, state):
self.__state = state
a = C()
self.assertEqual(a.getstate(), 0)
a.setstate(10)
self.assertEqual(a.getstate(), 10)
class D(dict, C):
def __init__(self):
type({}).__init__(self)
C.__init__(self)
d = D()
self.assertEqual(list(d.keys()), [])
d["hello"] = "world"
self.assertEqual(list(d.items()), [("hello", "world")])
self.assertEqual(d["hello"], "world")
self.assertEqual(d.getstate(), 0)
d.setstate(10)
self.assertEqual(d.getstate(), 10)
self.assertEqual(D.__mro__, (D, dict, C, object))
# SF bug #442833
class Node(object):
def __int__(self):
return int(self.foo())
def foo(self):
return "23"
class Frag(Node, list):
def foo(self):
return "42"
self.assertEqual(Node().__int__(), 23)
self.assertEqual(int(Node()), 23)
self.assertEqual(Frag().__int__(), 42)
self.assertEqual(int(Frag()), 42)
def test_diamond_inheritance(self):
# Testing multiple inheritance special cases...
class A(object):
def spam(self): return "A"
self.assertEqual(A().spam(), "A")
class B(A):
def boo(self): return "B"
def spam(self): return "B"
self.assertEqual(B().spam(), "B")
self.assertEqual(B().boo(), "B")
class C(A):
def boo(self): return "C"
self.assertEqual(C().spam(), "A")
self.assertEqual(C().boo(), "C")
class D(B, C): pass
self.assertEqual(D().spam(), "B")
self.assertEqual(D().boo(), "B")
self.assertEqual(D.__mro__, (D, B, C, A, object))
class E(C, B): pass
self.assertEqual(E().spam(), "B")
self.assertEqual(E().boo(), "C")
self.assertEqual(E.__mro__, (E, C, B, A, object))
# MRO order disagreement
try:
class F(D, E): pass
except TypeError:
pass
else:
self.fail("expected MRO order disagreement (F)")
try:
class G(E, D): pass
except TypeError:
pass
else:
self.fail("expected MRO order disagreement (G)")
# see thread python-dev/2002-October/029035.html
def test_ex5_from_c3_switch(self):
# Testing ex5 from C3 switch discussion...
class A(object): pass
class B(object): pass
class C(object): pass
class X(A): pass
class Y(A): pass
class Z(X,B,Y,C): pass
self.assertEqual(Z.__mro__, (Z, X, B, Y, A, C, object))
# see "A Monotonic Superclass Linearization for Dylan",
# by Kim Barrett et al. (OOPSLA 1996)
def test_monotonicity(self):
# Testing MRO monotonicity...
class Boat(object): pass
class DayBoat(Boat): pass
class WheelBoat(Boat): pass
class EngineLess(DayBoat): pass
class SmallMultihull(DayBoat): pass
class PedalWheelBoat(EngineLess,WheelBoat): pass
class SmallCatamaran(SmallMultihull): pass
class Pedalo(PedalWheelBoat,SmallCatamaran): pass
self.assertEqual(PedalWheelBoat.__mro__,
(PedalWheelBoat, EngineLess, DayBoat, WheelBoat, Boat, object))
self.assertEqual(SmallCatamaran.__mro__,
(SmallCatamaran, SmallMultihull, DayBoat, Boat, object))
self.assertEqual(Pedalo.__mro__,
(Pedalo, PedalWheelBoat, EngineLess, SmallCatamaran,
SmallMultihull, DayBoat, WheelBoat, Boat, object))
# see "A Monotonic Superclass Linearization for Dylan",
# by Kim Barrett et al. (OOPSLA 1996)
def test_consistency_with_epg(self):
# Testing consistency with EPG...
class Pane(object): pass
class ScrollingMixin(object): pass
class EditingMixin(object): pass
class ScrollablePane(Pane,ScrollingMixin): pass
class EditablePane(Pane,EditingMixin): pass
class EditableScrollablePane(ScrollablePane,EditablePane): pass
self.assertEqual(EditableScrollablePane.__mro__,
(EditableScrollablePane, ScrollablePane, EditablePane, Pane,
ScrollingMixin, EditingMixin, object))
def test_mro_disagreement(self):
# Testing error messages for MRO disagreement...
mro_err_msg = """Cannot create a consistent method resolution
order (MRO) for bases """
def raises(exc, expected, callable, *args):
try:
callable(*args)
except exc as msg:
# the exact msg is generally considered an impl detail
if support.check_impl_detail():
if not str(msg).startswith(expected):
self.fail("Message %r, expected %r" %
(str(msg), expected))
else:
self.fail("Expected %s" % exc)
class A(object): pass
class B(A): pass
class C(object): pass
# Test some very simple errors
raises(TypeError, "duplicate base class A",
type, "X", (A, A), {})
raises(TypeError, mro_err_msg,
type, "X", (A, B), {})
raises(TypeError, mro_err_msg,
type, "X", (A, C, B), {})
# Test a slightly more complex error
class GridLayout(object): pass
class HorizontalGrid(GridLayout): pass
class VerticalGrid(GridLayout): pass
class HVGrid(HorizontalGrid, VerticalGrid): pass
class VHGrid(VerticalGrid, HorizontalGrid): pass
raises(TypeError, mro_err_msg,
type, "ConfusedGrid", (HVGrid, VHGrid), {})
def test_object_class(self):
# Testing object class...
a = object()
self.assertEqual(a.__class__, object)
self.assertEqual(type(a), object)
b = object()
self.assertNotEqual(a, b)
self.assertNotHasAttr(a, "foo")
try:
a.foo = 12
except (AttributeError, TypeError):
pass
else:
self.fail("object() should not allow setting a foo attribute")
self.assertNotHasAttr(object(), "__dict__")
class Cdict(object):
pass
x = Cdict()
self.assertEqual(x.__dict__, {})
x.foo = 1
self.assertEqual(x.foo, 1)
self.assertEqual(x.__dict__, {'foo': 1})
def test_slots(self):
# Testing __slots__...
class C0(object):
__slots__ = []
x = C0()
self.assertNotHasAttr(x, "__dict__")
self.assertNotHasAttr(x, "foo")
class C1(object):
__slots__ = ['a']
x = C1()
self.assertNotHasAttr(x, "__dict__")
self.assertNotHasAttr(x, "a")
x.a = 1
self.assertEqual(x.a, 1)
x.a = None
self.assertEqual(x.a, None)
del x.a
self.assertNotHasAttr(x, "a")
class C3(object):
__slots__ = ['a', 'b', 'c']
x = C3()
self.assertNotHasAttr(x, "__dict__")
self.assertNotHasAttr(x, 'a')
self.assertNotHasAttr(x, 'b')
self.assertNotHasAttr(x, 'c')
x.a = 1
x.b = 2
x.c = 3
self.assertEqual(x.a, 1)
self.assertEqual(x.b, 2)
self.assertEqual(x.c, 3)
class C4(object):
"""Validate name mangling"""
__slots__ = ['__a']
def __init__(self, value):
self.__a = value
def get(self):
return self.__a
x = C4(5)
self.assertNotHasAttr(x, '__dict__')
self.assertNotHasAttr(x, '__a')
self.assertEqual(x.get(), 5)
try:
x.__a = 6
except AttributeError:
pass
else:
self.fail("Double underscored names not mangled")
# Make sure slot names are proper identifiers
try:
class C(object):
__slots__ = [None]
except TypeError:
pass
else:
self.fail("[None] slots not caught")
try:
class C(object):
__slots__ = ["foo bar"]
except TypeError:
pass
else:
self.fail("['foo bar'] slots not caught")
try:
class C(object):
__slots__ = ["foo\0bar"]
except TypeError:
pass
else:
self.fail("['foo\\0bar'] slots not caught")
try:
class C(object):
__slots__ = ["1"]
except TypeError:
pass
else:
self.fail("['1'] slots not caught")
try:
class C(object):
__slots__ = [""]
except TypeError:
pass
else:
self.fail("[''] slots not caught")
class C(object):
__slots__ = ["a", "a_b", "_a", "A0123456789Z"]
# XXX(nnorwitz): was there supposed to be something tested
# from the class above?
# Test a single string is not expanded as a sequence.
class C(object):
__slots__ = "abc"
c = C()
c.abc = 5
self.assertEqual(c.abc, 5)
# Test unicode slot names
# Test a single unicode string is not expanded as a sequence.
class C(object):
__slots__ = "abc"
c = C()
c.abc = 5
self.assertEqual(c.abc, 5)
# _unicode_to_string used to modify slots in certain circumstances
slots = ("foo", "bar")
class C(object):
__slots__ = slots
x = C()
x.foo = 5
self.assertEqual(x.foo, 5)
self.assertIs(type(slots[0]), str)
# this used to leak references
try:
class C(object):
__slots__ = [chr(128)]
except (TypeError, UnicodeEncodeError):
pass
else:
self.fail("[chr(128)] slots not caught")
# Test leaks
class Counted(object):
counter = 0 # counts the number of instances alive
def __init__(self):
Counted.counter += 1
def __del__(self):
Counted.counter -= 1
class C(object):
__slots__ = ['a', 'b', 'c']
x = C()
x.a = Counted()
x.b = Counted()
x.c = Counted()
self.assertEqual(Counted.counter, 3)
del x
support.gc_collect()
self.assertEqual(Counted.counter, 0)
class D(C):
pass
x = D()
x.a = Counted()
x.z = Counted()
self.assertEqual(Counted.counter, 2)
del x
support.gc_collect()
self.assertEqual(Counted.counter, 0)
class E(D):
__slots__ = ['e']
x = E()
x.a = Counted()
x.z = Counted()
x.e = Counted()
self.assertEqual(Counted.counter, 3)
del x
support.gc_collect()
self.assertEqual(Counted.counter, 0)
# Test cyclical leaks [SF bug 519621]
class F(object):
__slots__ = ['a', 'b']
s = F()
s.a = [Counted(), s]
self.assertEqual(Counted.counter, 1)
s = None
support.gc_collect()
self.assertEqual(Counted.counter, 0)
# Test lookup leaks [SF bug 572567]
if hasattr(gc, 'get_objects'):
class G(object):
def __eq__(self, other):
return False
g = G()
orig_objects = len(gc.get_objects())
for i in range(10):
g==g
new_objects = len(gc.get_objects())
self.assertEqual(orig_objects, new_objects)
class H(object):
__slots__ = ['a', 'b']
def __init__(self):
self.a = 1
self.b = 2
def __del__(self_):
self.assertEqual(self_.a, 1)
self.assertEqual(self_.b, 2)
with support.captured_output('stderr') as s:
h = H()
del h
self.assertEqual(s.getvalue(), '')
class X(object):
__slots__ = "a"
with self.assertRaises(AttributeError):
del X().a
def test_slots_special(self):
# Testing __dict__ and __weakref__ in __slots__...
class D(object):
__slots__ = ["__dict__"]
a = D()
self.assertHasAttr(a, "__dict__")
self.assertNotHasAttr(a, "__weakref__")
a.foo = 42
self.assertEqual(a.__dict__, {"foo": 42})
class W(object):
__slots__ = ["__weakref__"]
a = W()
self.assertHasAttr(a, "__weakref__")
self.assertNotHasAttr(a, "__dict__")
try:
a.foo = 42
except AttributeError:
pass
else:
self.fail("shouldn't be allowed to set a.foo")
class C1(W, D):
__slots__ = []
a = C1()
self.assertHasAttr(a, "__dict__")
self.assertHasAttr(a, "__weakref__")
a.foo = 42
self.assertEqual(a.__dict__, {"foo": 42})
class C2(D, W):
__slots__ = []
a = C2()
self.assertHasAttr(a, "__dict__")
self.assertHasAttr(a, "__weakref__")
a.foo = 42
self.assertEqual(a.__dict__, {"foo": 42})
def test_slots_descriptor(self):
# Issue2115: slot descriptors did not correctly check
# the type of the given object
import abc
class MyABC(metaclass=abc.ABCMeta):
__slots__ = "a"
class Unrelated(object):
pass
MyABC.register(Unrelated)
u = Unrelated()
self.assertIsInstance(u, MyABC)
# This used to crash
self.assertRaises(TypeError, MyABC.a.__set__, u, 3)
def test_dynamics(self):
# Testing class attribute propagation...
class D(object):
pass
class E(D):
pass
class F(D):
pass
D.foo = 1
self.assertEqual(D.foo, 1)
# Test that dynamic attributes are inherited
self.assertEqual(E.foo, 1)
self.assertEqual(F.foo, 1)
# Test dynamic instances
class C(object):
pass
a = C()
self.assertNotHasAttr(a, "foobar")
C.foobar = 2
self.assertEqual(a.foobar, 2)
C.method = lambda self: 42
self.assertEqual(a.method(), 42)
C.__repr__ = lambda self: "C()"
self.assertEqual(repr(a), "C()")
C.__int__ = lambda self: 100
self.assertEqual(int(a), 100)
self.assertEqual(a.foobar, 2)
self.assertNotHasAttr(a, "spam")
def mygetattr(self, name):
if name == "spam":
return "spam"
raise AttributeError
C.__getattr__ = mygetattr
self.assertEqual(a.spam, "spam")
a.new = 12
self.assertEqual(a.new, 12)
def mysetattr(self, name, value):
if name == "spam":
raise AttributeError
return object.__setattr__(self, name, value)
C.__setattr__ = mysetattr
try:
a.spam = "not spam"
except AttributeError:
pass
else:
self.fail("expected AttributeError")
self.assertEqual(a.spam, "spam")
class D(C):
pass
d = D()
d.foo = 1
self.assertEqual(d.foo, 1)
# Test handling of int*seq and seq*int
class I(int):
pass
self.assertEqual("a"*I(2), "aa")
self.assertEqual(I(2)*"a", "aa")
self.assertEqual(2*I(3), 6)
self.assertEqual(I(3)*2, 6)
self.assertEqual(I(3)*I(2), 6)
# Test comparison of classes with dynamic metaclasses
class dynamicmetaclass(type):
pass
class someclass(metaclass=dynamicmetaclass):
pass
self.assertNotEqual(someclass, object)
def test_errors(self):
# Testing errors...
try:
class C(list, dict):
pass
except TypeError:
pass
else:
self.fail("inheritance from both list and dict should be illegal")
try:
class C(object, None):
pass
except TypeError:
pass
else:
self.fail("inheritance from non-type should be illegal")
class Classic:
pass
try:
class C(type(len)):
pass
except TypeError:
pass
else:
self.fail("inheritance from CFunction should be illegal")
try:
class C(object):
__slots__ = 1
except TypeError:
pass
else:
self.fail("__slots__ = 1 should be illegal")
try:
class C(object):
__slots__ = [1]
except TypeError:
pass
else:
self.fail("__slots__ = [1] should be illegal")
class M1(type):
pass
class M2(type):
pass
class A1(object, metaclass=M1):
pass
class A2(object, metaclass=M2):
pass
try:
class B(A1, A2):
pass
except TypeError:
pass
else:
self.fail("finding the most derived metaclass should have failed")
def test_classmethods(self):
# Testing class methods...
class C(object):
def foo(*a): return a
goo = classmethod(foo)
c = C()
self.assertEqual(C.goo(1), (C, 1))
self.assertEqual(c.goo(1), (C, 1))
self.assertEqual(c.foo(1), (c, 1))
class D(C):
pass
d = D()
self.assertEqual(D.goo(1), (D, 1))
self.assertEqual(d.goo(1), (D, 1))
self.assertEqual(d.foo(1), (d, 1))
self.assertEqual(D.foo(d, 1), (d, 1))
# Test for a specific crash (SF bug 528132)
def f(cls, arg): return (cls, arg)
ff = classmethod(f)
self.assertEqual(ff.__get__(0, int)(42), (int, 42))
self.assertEqual(ff.__get__(0)(42), (int, 42))
# Test super() with classmethods (SF bug 535444)
self.assertEqual(C.goo.__self__, C)
self.assertEqual(D.goo.__self__, D)
self.assertEqual(super(D,D).goo.__self__, D)
self.assertEqual(super(D,d).goo.__self__, D)
self.assertEqual(super(D,D).goo(), (D,))
self.assertEqual(super(D,d).goo(), (D,))
# Verify that a non-callable will raise
meth = classmethod(1).__get__(1)
self.assertRaises(TypeError, meth)
# Verify that classmethod() doesn't allow keyword args
try:
classmethod(f, kw=1)
except TypeError:
pass
else:
self.fail("classmethod shouldn't accept keyword args")
cm = classmethod(f)
self.assertEqual(cm.__dict__, {})
cm.x = 42
self.assertEqual(cm.x, 42)
self.assertEqual(cm.__dict__, {"x" : 42})
del cm.x
self.assertNotHasAttr(cm, "x")
@support.impl_detail("the module 'xxsubtype' is internal")
def test_classmethods_in_c(self):
# Testing C-based class methods...
import xxsubtype as spam
a = (1, 2, 3)
d = {'abc': 123}
x, a1, d1 = spam.spamlist.classmeth(*a, **d)
self.assertEqual(x, spam.spamlist)
self.assertEqual(a, a1)
self.assertEqual(d, d1)
x, a1, d1 = spam.spamlist().classmeth(*a, **d)
self.assertEqual(x, spam.spamlist)
self.assertEqual(a, a1)
self.assertEqual(d, d1)
spam_cm = spam.spamlist.__dict__['classmeth']
x2, a2, d2 = spam_cm(spam.spamlist, *a, **d)
self.assertEqual(x2, spam.spamlist)
self.assertEqual(a2, a1)
self.assertEqual(d2, d1)
class SubSpam(spam.spamlist): pass
x2, a2, d2 = spam_cm(SubSpam, *a, **d)
self.assertEqual(x2, SubSpam)
self.assertEqual(a2, a1)
self.assertEqual(d2, d1)
with self.assertRaises(TypeError):
spam_cm()
with self.assertRaises(TypeError):
spam_cm(spam.spamlist())
with self.assertRaises(TypeError):
spam_cm(list)
def test_staticmethods(self):
# Testing static methods...
class C(object):
def foo(*a): return a
goo = staticmethod(foo)
c = C()
self.assertEqual(C.goo(1), (1,))
self.assertEqual(c.goo(1), (1,))
self.assertEqual(c.foo(1), (c, 1,))
class D(C):
pass
d = D()
self.assertEqual(D.goo(1), (1,))
self.assertEqual(d.goo(1), (1,))
self.assertEqual(d.foo(1), (d, 1))
self.assertEqual(D.foo(d, 1), (d, 1))
sm = staticmethod(None)
self.assertEqual(sm.__dict__, {})
sm.x = 42
self.assertEqual(sm.x, 42)
self.assertEqual(sm.__dict__, {"x" : 42})
del sm.x
self.assertNotHasAttr(sm, "x")
@support.impl_detail("the module 'xxsubtype' is internal")
def test_staticmethods_in_c(self):
# Testing C-based static methods...
import xxsubtype as spam
a = (1, 2, 3)
d = {"abc": 123}
x, a1, d1 = spam.spamlist.staticmeth(*a, **d)
self.assertEqual(x, None)
self.assertEqual(a, a1)
self.assertEqual(d, d1)
x, a1, d1 = spam.spamlist().staticmeth(*a, **d)
self.assertEqual(x, None)
self.assertEqual(a, a1)
self.assertEqual(d, d1)
def test_classic(self):
# Testing classic classes...
class C:
def foo(*a): return a
goo = classmethod(foo)
c = C()
self.assertEqual(C.goo(1), (C, 1))
self.assertEqual(c.goo(1), (C, 1))
self.assertEqual(c.foo(1), (c, 1))
class D(C):
pass
d = D()
self.assertEqual(D.goo(1), (D, 1))
self.assertEqual(d.goo(1), (D, 1))
self.assertEqual(d.foo(1), (d, 1))
self.assertEqual(D.foo(d, 1), (d, 1))
class E: # *not* subclassing from C
foo = C.foo
self.assertEqual(E().foo.__func__, C.foo) # i.e., unbound
self.assertTrue(repr(C.foo.__get__(C())).startswith("<bound method "))
def test_compattr(self):
# Testing computed attributes...
class C(object):
class computed_attribute(object):
def __init__(self, get, set=None, delete=None):
self.__get = get
self.__set = set
self.__delete = delete
def __get__(self, obj, type=None):
return self.__get(obj)
def __set__(self, obj, value):
return self.__set(obj, value)
def __delete__(self, obj):
return self.__delete(obj)
def __init__(self):
self.__x = 0
def __get_x(self):
x = self.__x
self.__x = x+1
return x
def __set_x(self, x):
self.__x = x
def __delete_x(self):
del self.__x
x = computed_attribute(__get_x, __set_x, __delete_x)
a = C()
self.assertEqual(a.x, 0)
self.assertEqual(a.x, 1)
a.x = 10
self.assertEqual(a.x, 10)
self.assertEqual(a.x, 11)
del a.x
self.assertNotHasAttr(a, 'x')
def test_newslots(self):
# Testing __new__ slot override...
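# __new__ sets foo to 1 before __init__ runs, and __init__ adds 2,
# so instances end up with foo == 3.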
class C(list):
def __new__(cls):
self = list.__new__(cls)
self.foo = 1
return self
def __init__(self):
self.foo = self.foo + 2
a = C()
self.assertEqual(a.foo, 3)
self.assertEqual(a.__class__, C)
class D(C):
pass
b = D()
self.assertEqual(b.foo, 3)
self.assertEqual(b.__class__, D)
def test_altmro(self):
# Testing mro() and overriding it...
class A(object):
def f(self): return "A"
class B(A):
pass
class C(A):
def f(self): return "C"
class D(B, C):
pass
self.assertEqual(D.mro(), [D, B, C, A, object])
self.assertEqual(D.__mro__, (D, B, C, A, object))
self.assertEqual(D().f(), "C")
class PerverseMetaType(type):
def mro(cls):
L = type.mro(cls)
L.reverse()
return L
class X(D,B,C,A, metaclass=PerverseMetaType):
pass
self.assertEqual(X.__mro__, (object, A, C, B, D, X))
self.assertEqual(X().f(), "A")
try:
class _metaclass(type):
def mro(self):
return [self, dict, object]
class X(object, metaclass=_metaclass):
pass
# In CPython, the class creation above already raises
# TypeError, as a protection against the fact that
# instances of X would segfault it. In other Python
# implementations it would be ok to let the class X
# be created, but instead get a clean TypeError on the
# __setitem__ below.
x = object.__new__(X)
x[5] = 6
except TypeError:
pass
else:
self.fail("devious mro() return not caught")
try:
class _metaclass(type):
def mro(self):
return [1]
class X(object, metaclass=_metaclass):
pass
except TypeError:
pass
else:
self.fail("non-class mro() return not caught")
try:
class _metaclass(type):
def mro(self):
return 1
class X(object, metaclass=_metaclass):
pass
except TypeError:
pass
else:
self.fail("non-sequence mro() return not caught")
def test_overloading(self):
# Testing operator overloading...
class B(object):
"Intermediate class because object doesn't have a __setattr__"
class C(B):
def __getattr__(self, name):
if name == "foo":
return ("getattr", name)
else:
raise AttributeError
def __setattr__(self, name, value):
if name == "foo":
self.setattr = (name, value)
else:
return B.__setattr__(self, name, value)
def __delattr__(self, name):
if name == "foo":
self.delattr = name
else:
return B.__delattr__(self, name)
def __getitem__(self, key):
return ("getitem", key)
def __setitem__(self, key, value):
self.setitem = (key, value)
def __delitem__(self, key):
self.delitem = key
a = C()
self.assertEqual(a.foo, ("getattr", "foo"))
a.foo = 12
self.assertEqual(a.setattr, ("foo", 12))
del a.foo
self.assertEqual(a.delattr, "foo")
self.assertEqual(a[12], ("getitem", 12))
a[12] = 21
self.assertEqual(a.setitem, (12, 21))
del a[12]
self.assertEqual(a.delitem, 12)
self.assertEqual(a[0:10], ("getitem", slice(0, 10)))
a[0:10] = "foo"
self.assertEqual(a.setitem, (slice(0, 10), "foo"))
del a[0:10]
self.assertEqual(a.delitem, (slice(0, 10)))
def test_methods(self):
# Testing methods...
class C(object):
def __init__(self, x):
self.x = x
def foo(self):
return self.x
c1 = C(1)
self.assertEqual(c1.foo(), 1)
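# boo gets the plain function, so it rebinds to whichever instance calls it;
# goo is a method already bound to c1, so d2.goo() still returns c1's value.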
class D(C):
boo = C.foo
goo = c1.foo
d2 = D(2)
self.assertEqual(d2.foo(), 2)
self.assertEqual(d2.boo(), 2)
self.assertEqual(d2.goo(), 1)
class E(object):
foo = C.foo
self.assertEqual(E().foo.__func__, C.foo) # i.e., unbound
self.assertTrue(repr(C.foo.__get__(C(1))).startswith("<bound method "))
def test_special_method_lookup(self):
# The lookup of special methods bypasses __getattr__ and
# __getattribute__, but they still can be descriptors.
def run_context(manager):
with manager:
pass
def iden(self):
return self
def hello(self):
return b"hello"
def empty_seq(self):
return []
def zero(self):
return 0
def complex_num(self):
return 1j
def stop(self):
raise StopIteration
def return_true(self, thing=None):
return True
def do_isinstance(obj):
return isinstance(int, obj)
def do_issubclass(obj):
return issubclass(int, obj)
def do_dict_missing(checker):
class DictSub(checker.__class__, dict):
pass
self.assertEqual(DictSub()["hi"], 4)
def some_number(self_, key):
self.assertEqual(key, "hi")
return 4
def swallow(*args): pass
def format_impl(self, spec):
return "hello"
# It would be nice to have every special method tested here, but I'm
# only listing the ones I can remember outside of typeobject.c, since it
# does it right.
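# Each entry is (name, runner, meth_impl, ok, env): the special method name,
# a callable that triggers its implicit lookup, the implementation to install,
# attribute names __getattribute__ may legitimately see, and extra attributes
# the protocol needs.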
specials = [
("__bytes__", bytes, hello, set(), {}),
("__reversed__", reversed, empty_seq, set(), {}),
("__length_hint__", list, zero, set(),
{"__iter__" : iden, "__next__" : stop}),
("__sizeof__", sys.getsizeof, zero, set(), {}),
("__instancecheck__", do_isinstance, return_true, set(), {}),
("__missing__", do_dict_missing, some_number,
set(("__class__",)), {}),
("__subclasscheck__", do_issubclass, return_true,
set(("__bases__",)), {}),
("__enter__", run_context, iden, set(), {"__exit__" : swallow}),
("__exit__", run_context, swallow, set(), {"__enter__" : iden}),
("__complex__", complex, complex_num, set(), {}),
("__format__", format, format_impl, set(), {}),
("__floor__", math.floor, zero, set(), {}),
("__trunc__", math.trunc, zero, set(), {}),
("__trunc__", int, zero, set(), {}),
("__ceil__", math.ceil, zero, set(), {}),
("__dir__", dir, empty_seq, set(), {}),
("__round__", round, zero, set(), {}),
]
class Checker(object):
def __getattr__(self, attr, test=self):
test.fail("__getattr__ called with {0}".format(attr))
def __getattribute__(self, attr, test=self):
if attr not in ok:
test.fail("__getattribute__ called with {0}".format(attr))
return object.__getattribute__(self, attr)
class SpecialDescr(object):
def __init__(self, impl):
self.impl = impl
def __get__(self, obj, owner):
record.append(1)
return self.impl.__get__(obj, owner)
class MyException(Exception):
pass
class ErrDescr(object):
def __get__(self, obj, owner):
raise MyException
for name, runner, meth_impl, ok, env in specials:
class X(Checker):
pass
for attr, obj in env.items():
setattr(X, attr, obj)
setattr(X, name, meth_impl)
runner(X())
record = []
class X(Checker):
pass
for attr, obj in env.items():
setattr(X, attr, obj)
setattr(X, name, SpecialDescr(meth_impl))
runner(X())
self.assertEqual(record, [1], name)
class X(Checker):
pass
for attr, obj in env.items():
setattr(X, attr, obj)
setattr(X, name, ErrDescr())
self.assertRaises(MyException, runner, X())
def test_specials(self):
# Testing special operators...
# Test operators like __hash__ for which a built-in default exists
# Test the default behavior for static classes
class C(object):
def __getitem__(self, i):
if 0 <= i < 10: return i
raise IndexError
c1 = C()
c2 = C()
self.assertFalse(not c1)
self.assertNotEqual(id(c1), id(c2))
hash(c1)
hash(c2)
self.assertEqual(c1, c1)
self.assertTrue(c1 != c2)
self.assertFalse(c1 != c1)
self.assertFalse(c1 == c2)
# Note that the module name appears in str/repr, and that varies
# depending on whether this test is run standalone or from a framework.
self.assertGreaterEqual(str(c1).find('C object at '), 0)
self.assertEqual(str(c1), repr(c1))
self.assertNotIn(-1, c1)
for i in range(10):
self.assertIn(i, c1)
self.assertNotIn(10, c1)
# Test the default behavior for dynamic classes
class D(object):
def __getitem__(self, i):
if 0 <= i < 10: return i
raise IndexError
d1 = D()
d2 = D()
self.assertFalse(not d1)
self.assertNotEqual(id(d1), id(d2))
hash(d1)
hash(d2)
self.assertEqual(d1, d1)
self.assertNotEqual(d1, d2)
self.assertFalse(d1 != d1)
self.assertFalse(d1 == d2)
# Note that the module name appears in str/repr, and that varies
# depending on whether this test is run standalone or from a framework.
self.assertGreaterEqual(str(d1).find('D object at '), 0)
self.assertEqual(str(d1), repr(d1))
self.assertNotIn(-1, d1)
for i in range(10):
self.assertIn(i, d1)
self.assertNotIn(10, d1)
# Test overridden behavior
class Proxy(object):
def __init__(self, x):
self.x = x
def __bool__(self):
return not not self.x
def __hash__(self):
return hash(self.x)
def __eq__(self, other):
return self.x == other
def __ne__(self, other):
return self.x != other
def __ge__(self, other):
return self.x >= other
def __gt__(self, other):
return self.x > other
def __le__(self, other):
return self.x <= other
def __lt__(self, other):
return self.x < other
def __str__(self):
return "Proxy:%s" % self.x
def __repr__(self):
return "Proxy(%r)" % self.x
def __contains__(self, value):
return value in self.x
p0 = Proxy(0)
p1 = Proxy(1)
p_1 = Proxy(-1)
self.assertFalse(p0)
self.assertFalse(not p1)
self.assertEqual(hash(p0), hash(0))
self.assertEqual(p0, p0)
self.assertNotEqual(p0, p1)
self.assertFalse(p0 != p0)
self.assertEqual(not p0, p1)
self.assertTrue(p0 < p1)
self.assertTrue(p0 <= p1)
self.assertTrue(p1 > p0)
self.assertTrue(p1 >= p0)
self.assertEqual(str(p0), "Proxy:0")
self.assertEqual(repr(p0), "Proxy(0)")
p10 = Proxy(range(10))
self.assertNotIn(-1, p10)
for i in range(10):
self.assertIn(i, p10)
self.assertNotIn(10, p10)
def test_weakrefs(self):
# Testing weak references...
import weakref
class C(object):
pass
c = C()
r = weakref.ref(c)
self.assertEqual(r(), c)
del c
support.gc_collect()
self.assertEqual(r(), None)
del r
class NoWeak(object):
__slots__ = ['foo']
no = NoWeak()
try:
weakref.ref(no)
except TypeError as msg:
self.assertIn("weak reference", str(msg))
else:
self.fail("weakref.ref(no) should be illegal")
class Weak(object):
__slots__ = ['foo', '__weakref__']
yes = Weak()
r = weakref.ref(yes)
self.assertEqual(r(), yes)
del yes
support.gc_collect()
self.assertEqual(r(), None)
del r
def test_properties(self):
# Testing property...
class C(object):
def getx(self):
return self.__x
def setx(self, value):
self.__x = value
def delx(self):
del self.__x
x = property(getx, setx, delx, doc="I'm the x property.")
a = C()
self.assertNotHasAttr(a, "x")
a.x = 42
self.assertEqual(a._C__x, 42)
self.assertEqual(a.x, 42)
del a.x
self.assertNotHasAttr(a, "x")
self.assertNotHasAttr(a, "_C__x")
C.x.__set__(a, 100)
self.assertEqual(C.x.__get__(a), 100)
C.x.__delete__(a)
self.assertNotHasAttr(a, "x")
raw = C.__dict__['x']
self.assertIsInstance(raw, property)
attrs = dir(raw)
self.assertIn("__doc__", attrs)
self.assertIn("fget", attrs)
self.assertIn("fset", attrs)
self.assertIn("fdel", attrs)
self.assertEqual(raw.__doc__, "I'm the x property.")
self.assertIs(raw.fget, C.__dict__['getx'])
self.assertIs(raw.fset, C.__dict__['setx'])
self.assertIs(raw.fdel, C.__dict__['delx'])
for attr in "__doc__", "fget", "fset", "fdel":
try:
setattr(raw, attr, 42)
except AttributeError as msg:
if str(msg).find('readonly') < 0:
self.fail("when setting readonly attr %r on a property, "
"got unexpected AttributeError msg %r" % (attr, str(msg)))
else:
self.fail("expected AttributeError from trying to set readonly %r "
"attr on a property" % attr)
class D(object):
__getitem__ = property(lambda s: 1/0)
d = D()
try:
for i in d:
str(i)
except ZeroDivisionError:
pass
else:
self.fail("expected ZeroDivisionError from bad property")
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_properties_doc_attrib(self):
class E(object):
def getter(self):
"getter method"
return 0
def setter(self_, value):
"setter method"
pass
prop = property(getter)
self.assertEqual(prop.__doc__, "getter method")
prop2 = property(fset=setter)
self.assertEqual(prop2.__doc__, None)
@support.cpython_only
def test_testcapi_no_segfault(self):
# this segfaulted in 2.5b2
try:
import _testcapi
except ImportError:
pass
else:
class X(object):
p = property(_testcapi.test_with_docstring)
def test_properties_plus(self):
class C(object):
foo = property(doc="hello")
@foo.getter
def foo(self):
return self._foo
@foo.setter
def foo(self, value):
self._foo = abs(value)
@foo.deleter
def foo(self):
del self._foo
c = C()
self.assertEqual(C.foo.__doc__, "hello")
self.assertNotHasAttr(c, "foo")
c.foo = -42
self.assertHasAttr(c, '_foo')
self.assertEqual(c._foo, 42)
self.assertEqual(c.foo, 42)
del c.foo
self.assertNotHasAttr(c, '_foo')
self.assertNotHasAttr(c, "foo")
class D(C):
@C.foo.deleter
def foo(self):
try:
del self._foo
except AttributeError:
pass
d = D()
d.foo = 24
self.assertEqual(d.foo, 24)
del d.foo
del d.foo
class E(object):
@property
def foo(self):
return self._foo
@foo.setter
def foo(self, value):
raise RuntimeError
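# The setter below replaces the one above, so the RuntimeError version
# must never be reachable.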
@foo.setter
def foo(self, value):
self._foo = abs(value)
@foo.deleter
def foo(self, value=None):
del self._foo
e = E()
e.foo = -42
self.assertEqual(e.foo, 42)
del e.foo
class F(E):
@E.foo.deleter
def foo(self):
del self._foo
@foo.setter
def foo(self, value):
self._foo = max(0, value)
f = F()
f.foo = -10
self.assertEqual(f.foo, 0)
del f.foo
def test_dict_constructors(self):
# Testing dict constructor ...
d = dict()
self.assertEqual(d, {})
d = dict({})
self.assertEqual(d, {})
d = dict({1: 2, 'a': 'b'})
self.assertEqual(d, {1: 2, 'a': 'b'})
self.assertEqual(d, dict(list(d.items())))
self.assertEqual(d, dict(iter(d.items())))
d = dict({'one':1, 'two':2})
self.assertEqual(d, dict(one=1, two=2))
self.assertEqual(d, dict(**d))
self.assertEqual(d, dict({"one": 1}, two=2))
self.assertEqual(d, dict([("two", 2)], one=1))
self.assertEqual(d, dict([("one", 100), ("two", 200)], **d))
self.assertEqual(d, dict(**d))
for badarg in 0, 0j, "0", [0], (0,):
try:
dict(badarg)
except TypeError:
pass
except ValueError:
if badarg == "0":
# It's a sequence, and its elements are also sequences (gotta
# love strings <wink>), but they aren't of length 2, so this
# one seemed better as a ValueError than a TypeError.
pass
else:
self.fail("no TypeError from dict(%r)" % badarg)
else:
self.fail("no TypeError from dict(%r)" % badarg)
try:
dict({}, {})
except TypeError:
pass
else:
self.fail("no TypeError from dict({}, {})")
class Mapping:
# Lacks a .keys() method; will be added later.
dict = {1:2, 3:4, 'a':1j}
try:
dict(Mapping())
except TypeError:
pass
else:
self.fail("no TypeError from dict(incomplete mapping)")
Mapping.keys = lambda self: list(self.dict.keys())
Mapping.__getitem__ = lambda self, i: self.dict[i]
d = dict(Mapping())
self.assertEqual(d, Mapping.dict)
# Init from sequence of iterable objects, each producing a 2-sequence.
class AddressBookEntry:
def __init__(self, first, last):
self.first = first
self.last = last
def __iter__(self):
return iter([self.first, self.last])
d = dict([AddressBookEntry('Tim', 'Warsaw'),
AddressBookEntry('Barry', 'Peters'),
AddressBookEntry('Tim', 'Peters'),
AddressBookEntry('Barry', 'Warsaw')])
self.assertEqual(d, {'Barry': 'Warsaw', 'Tim': 'Peters'})
d = dict(zip(range(4), range(1, 5)))
self.assertEqual(d, dict([(i, i+1) for i in range(4)]))
# Bad sequence lengths.
for bad in [('tooshort',)], [('too', 'long', 'by 1')]:
try:
dict(bad)
except ValueError:
pass
else:
self.fail("no ValueError from dict(%r)" % bad)
def test_dir(self):
# Testing dir() ...
junk = 12
self.assertEqual(dir(), ['junk', 'self'])
del junk
# Just make sure these don't blow up!
for arg in 2, 2j, 2e0, [2], "2", b"2", (2,), {2:2}, type, self.test_dir:
dir(arg)
# Test dir on new-style classes. Since these have object as a
# base class, a lot more gets sucked in.
def interesting(strings):
return [s for s in strings if not s.startswith('_')]
class C(object):
Cdata = 1
def Cmethod(self): pass
cstuff = ['Cdata', 'Cmethod']
self.assertEqual(interesting(dir(C)), cstuff)
c = C()
self.assertEqual(interesting(dir(c)), cstuff)
## self.assertIn('__self__', dir(C.Cmethod))
c.cdata = 2
c.cmethod = lambda self: 0
self.assertEqual(interesting(dir(c)), cstuff + ['cdata', 'cmethod'])
## self.assertIn('__self__', dir(c.Cmethod))
class A(C):
Adata = 1
def Amethod(self): pass
astuff = ['Adata', 'Amethod'] + cstuff
self.assertEqual(interesting(dir(A)), astuff)
## self.assertIn('__self__', dir(A.Amethod))
a = A()
self.assertEqual(interesting(dir(a)), astuff)
a.adata = 42
a.amethod = lambda self: 3
self.assertEqual(interesting(dir(a)), astuff + ['adata', 'amethod'])
## self.assertIn('__self__', dir(a.Amethod))
# Try a module subclass.
class M(type(sys)):
pass
minstance = M("m")
minstance.b = 2
minstance.a = 1
default_attributes = ['__name__', '__doc__', '__package__',
'__loader__', '__spec__']
names = [x for x in dir(minstance) if x not in default_attributes]
self.assertEqual(names, ['a', 'b'])
class M2(M):
def getdict(self):
return "Not a dict!"
__dict__ = property(getdict)
m2instance = M2("m2")
m2instance.b = 2
m2instance.a = 1
self.assertEqual(m2instance.__dict__, "Not a dict!")
try:
dir(m2instance)
except TypeError:
pass
# Two essentially featureless objects, just inheriting stuff from
# object.
self.assertEqual(dir(NotImplemented), dir(Ellipsis))
# Nasty test case for proxied objects
class Wrapper(object):
def __init__(self, obj):
self.__obj = obj
def __repr__(self):
return "Wrapper(%s)" % repr(self.__obj)
def __getitem__(self, key):
return Wrapper(self.__obj[key])
def __len__(self):
return len(self.__obj)
def __getattr__(self, name):
return Wrapper(getattr(self.__obj, name))
class C(object):
def __getclass(self):
return Wrapper(type(self))
__class__ = property(__getclass)
dir(C()) # This used to segfault
def test_supers(self):
# Testing super...
class A(object):
def meth(self, a):
return "A(%r)" % a
self.assertEqual(A().meth(1), "A(1)")
class B(A):
def __init__(self):
self.__super = super(B, self)
def meth(self, a):
return "B(%r)" % a + self.__super.meth(a)
self.assertEqual(B().meth(2), "B(2)A(2)")
class C(A):
def meth(self, a):
return "C(%r)" % a + self.__super.meth(a)
C._C__super = super(C)
self.assertEqual(C().meth(3), "C(3)A(3)")
class D(C, B):
def meth(self, a):
return "D(%r)" % a + super(D, self).meth(a)
self.assertEqual(D().meth(4), "D(4)C(4)B(4)A(4)")
# Test for subclassing super
class mysuper(super):
def __init__(self, *args):
return super(mysuper, self).__init__(*args)
class E(D):
def meth(self, a):
return "E(%r)" % a + mysuper(E, self).meth(a)
self.assertEqual(E().meth(5), "E(5)D(5)C(5)B(5)A(5)")
class F(E):
def meth(self, a):
s = self.__super # == mysuper(F, self)
return "F(%r)[%s]" % (a, s.__class__.__name__) + s.meth(a)
F._F__super = mysuper(F)
self.assertEqual(F().meth(6), "F(6)[mysuper]E(6)D(6)C(6)B(6)A(6)")
# Make sure certain errors are raised
try:
super(D, 42)
except TypeError:
pass
else:
self.fail("shouldn't allow super(D, 42)")
try:
super(D, C())
except TypeError:
pass
else:
self.fail("shouldn't allow super(D, C())")
try:
super(D).__get__(12)
except TypeError:
pass
else:
self.fail("shouldn't allow super(D).__get__(12)")
try:
super(D).__get__(C())
except TypeError:
pass
else:
self.fail("shouldn't allow super(D).__get__(C())")
# Make sure data descriptors can be overridden and accessed via super
# (new feature in Python 2.3)
class DDbase(object):
def getx(self): return 42
x = property(getx)
class DDsub(DDbase):
def getx(self): return "hello"
x = property(getx)
dd = DDsub()
self.assertEqual(dd.x, "hello")
self.assertEqual(super(DDsub, dd).x, 42)
# Ensure that super() lookup of descriptor from classmethod
# works (SF ID# 743627)
class Base(object):
aProp = property(lambda self: "foo")
class Sub(Base):
@classmethod
def test(klass):
return super(Sub,klass).aProp
self.assertEqual(Sub.test(), Base.aProp)
# Verify that super() doesn't allow keyword args
try:
super(Base, kw=1)
except TypeError:
pass
else:
self.assertEqual("super shouldn't accept keyword args")
def test_basic_inheritance(self):
# Testing inheritance from basic types...
class hexint(int):
def __repr__(self):
return hex(self)
def __add__(self, other):
return hexint(int.__add__(self, other))
# (Note that overriding __radd__ doesn't work,
# because the int type gets first dibs.)
self.assertEqual(repr(hexint(7) + 9), "0x10")
self.assertEqual(repr(hexint(1000) + 7), "0x3ef")
a = hexint(12345)
self.assertEqual(a, 12345)
self.assertEqual(int(a), 12345)
self.assertIs(int(a).__class__, int)
self.assertEqual(hash(a), hash(12345))
self.assertIs((+a).__class__, int)
self.assertIs((a >> 0).__class__, int)
self.assertIs((a << 0).__class__, int)
self.assertIs((hexint(0) << 12).__class__, int)
self.assertIs((hexint(0) >> 12).__class__, int)
class octlong(int):
__slots__ = []
def __str__(self):
return oct(self)
def __add__(self, other):
return self.__class__(super(octlong, self).__add__(other))
__radd__ = __add__
self.assertEqual(str(octlong(3) + 5), "0o10")
# (Note that overriding __radd__ here only seems to work
# because the example uses a short int left argument.)
self.assertEqual(str(5 + octlong(3000)), "0o5675")
a = octlong(12345)
self.assertEqual(a, 12345)
self.assertEqual(int(a), 12345)
self.assertEqual(hash(a), hash(12345))
self.assertIs(int(a).__class__, int)
self.assertIs((+a).__class__, int)
self.assertIs((-a).__class__, int)
self.assertIs((-octlong(0)).__class__, int)
self.assertIs((a >> 0).__class__, int)
self.assertIs((a << 0).__class__, int)
self.assertIs((a - 0).__class__, int)
self.assertIs((a * 1).__class__, int)
self.assertIs((a ** 1).__class__, int)
self.assertIs((a // 1).__class__, int)
self.assertIs((1 * a).__class__, int)
self.assertIs((a | 0).__class__, int)
self.assertIs((a ^ 0).__class__, int)
self.assertIs((a & -1).__class__, int)
self.assertIs((octlong(0) << 12).__class__, int)
self.assertIs((octlong(0) >> 12).__class__, int)
self.assertIs(abs(octlong(0)).__class__, int)
# Because octlong overrides __add__, we can't check the absence of +0
# optimizations using octlong.
class longclone(int):
pass
a = longclone(1)
self.assertIs((a + 0).__class__, int)
self.assertIs((0 + a).__class__, int)
# Check that negative clones don't segfault
a = longclone(-1)
self.assertEqual(a.__dict__, {})
self.assertEqual(int(a), -1) # make sure PyNumber_Long() copies the sign bit
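# float is immutable, so the numeric value is set by float.__new__;
# __init__ below only records the display precision.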
class precfloat(float):
__slots__ = ['prec']
def __init__(self, value=0.0, prec=12):
self.prec = int(prec)
def __repr__(self):
return "%.*g" % (self.prec, self)
self.assertEqual(repr(precfloat(1.1)), "1.1")
a = precfloat(12345)
self.assertEqual(a, 12345.0)
self.assertEqual(float(a), 12345.0)
self.assertIs(float(a).__class__, float)
self.assertEqual(hash(a), hash(12345.0))
self.assertIs((+a).__class__, float)
class madcomplex(complex):
def __repr__(self):
return "%.17gj%+.17g" % (self.imag, self.real)
a = madcomplex(-3, 4)
self.assertEqual(repr(a), "4j-3")
base = complex(-3, 4)
self.assertEqual(base.__class__, complex)
self.assertEqual(a, base)
self.assertEqual(complex(a), base)
self.assertEqual(complex(a).__class__, complex)
a = madcomplex(a) # just trying another form of the constructor
self.assertEqual(repr(a), "4j-3")
self.assertEqual(a, base)
self.assertEqual(complex(a), base)
self.assertEqual(complex(a).__class__, complex)
self.assertEqual(hash(a), hash(base))
self.assertEqual((+a).__class__, complex)
self.assertEqual((a + 0).__class__, complex)
self.assertEqual(a + 0, base)
self.assertEqual((a - 0).__class__, complex)
self.assertEqual(a - 0, base)
self.assertEqual((a * 1).__class__, complex)
self.assertEqual(a * 1, base)
self.assertEqual((a / 1).__class__, complex)
self.assertEqual(a / 1, base)
class madtuple(tuple):
_rev = None
def rev(self):
if self._rev is not None:
return self._rev
L = list(self)
L.reverse()
self._rev = self.__class__(L)
return self._rev
a = madtuple((1,2,3,4,5,6,7,8,9,0))
self.assertEqual(a, (1,2,3,4,5,6,7,8,9,0))
self.assertEqual(a.rev(), madtuple((0,9,8,7,6,5,4,3,2,1)))
self.assertEqual(a.rev().rev(), madtuple((1,2,3,4,5,6,7,8,9,0)))
for i in range(512):
t = madtuple(range(i))
u = t.rev()
v = u.rev()
self.assertEqual(v, t)
a = madtuple((1,2,3,4,5))
self.assertEqual(tuple(a), (1,2,3,4,5))
self.assertIs(tuple(a).__class__, tuple)
self.assertEqual(hash(a), hash((1,2,3,4,5)))
self.assertIs(a[:].__class__, tuple)
self.assertIs((a * 1).__class__, tuple)
self.assertIs((a * 0).__class__, tuple)
self.assertIs((a + ()).__class__, tuple)
a = madtuple(())
self.assertEqual(tuple(a), ())
self.assertIs(tuple(a).__class__, tuple)
self.assertIs((a + a).__class__, tuple)
self.assertIs((a * 0).__class__, tuple)
self.assertIs((a * 1).__class__, tuple)
self.assertIs((a * 2).__class__, tuple)
self.assertIs(a[:].__class__, tuple)
class madstring(str):
_rev = None
def rev(self):
if self._rev is not None:
return self._rev
L = list(self)
L.reverse()
self._rev = self.__class__("".join(L))
return self._rev
s = madstring("abcdefghijklmnopqrstuvwxyz")
self.assertEqual(s, "abcdefghijklmnopqrstuvwxyz")
self.assertEqual(s.rev(), madstring("zyxwvutsrqponmlkjihgfedcba"))
self.assertEqual(s.rev().rev(), madstring("abcdefghijklmnopqrstuvwxyz"))
for i in range(256):
s = madstring("".join(map(chr, range(i))))
t = s.rev()
u = t.rev()
self.assertEqual(u, s)
s = madstring("12345")
self.assertEqual(str(s), "12345")
self.assertIs(str(s).__class__, str)
base = "\x00" * 5
s = madstring(base)
self.assertEqual(s, base)
self.assertEqual(str(s), base)
self.assertIs(str(s).__class__, str)
self.assertEqual(hash(s), hash(base))
self.assertEqual({s: 1}[base], 1)
self.assertEqual({base: 1}[s], 1)
self.assertIs((s + "").__class__, str)
self.assertEqual(s + "", base)
self.assertIs(("" + s).__class__, str)
self.assertEqual("" + s, base)
self.assertIs((s * 0).__class__, str)
self.assertEqual(s * 0, "")
self.assertIs((s * 1).__class__, str)
self.assertEqual(s * 1, base)
self.assertIs((s * 2).__class__, str)
self.assertEqual(s * 2, base + base)
self.assertIs(s[:].__class__, str)
self.assertEqual(s[:], base)
self.assertIs(s[0:0].__class__, str)
self.assertEqual(s[0:0], "")
self.assertIs(s.strip().__class__, str)
self.assertEqual(s.strip(), base)
self.assertIs(s.lstrip().__class__, str)
self.assertEqual(s.lstrip(), base)
self.assertIs(s.rstrip().__class__, str)
self.assertEqual(s.rstrip(), base)
identitytab = {}
self.assertIs(s.translate(identitytab).__class__, str)
self.assertEqual(s.translate(identitytab), base)
self.assertIs(s.replace("x", "x").__class__, str)
self.assertEqual(s.replace("x", "x"), base)
self.assertIs(s.ljust(len(s)).__class__, str)
self.assertEqual(s.ljust(len(s)), base)
self.assertIs(s.rjust(len(s)).__class__, str)
self.assertEqual(s.rjust(len(s)), base)
self.assertIs(s.center(len(s)).__class__, str)
self.assertEqual(s.center(len(s)), base)
self.assertIs(s.lower().__class__, str)
self.assertEqual(s.lower(), base)
class madunicode(str):
_rev = None
def rev(self):
if self._rev is not None:
return self._rev
L = list(self)
L.reverse()
self._rev = self.__class__("".join(L))
return self._rev
u = madunicode("ABCDEF")
self.assertEqual(u, "ABCDEF")
self.assertEqual(u.rev(), madunicode("FEDCBA"))
self.assertEqual(u.rev().rev(), madunicode("ABCDEF"))
base = "12345"
u = madunicode(base)
self.assertEqual(str(u), base)
self.assertIs(str(u).__class__, str)
self.assertEqual(hash(u), hash(base))
self.assertEqual({u: 1}[base], 1)
self.assertEqual({base: 1}[u], 1)
self.assertIs(u.strip().__class__, str)
self.assertEqual(u.strip(), base)
self.assertIs(u.lstrip().__class__, str)
self.assertEqual(u.lstrip(), base)
self.assertIs(u.rstrip().__class__, str)
self.assertEqual(u.rstrip(), base)
self.assertIs(u.replace("x", "x").__class__, str)
self.assertEqual(u.replace("x", "x"), base)
self.assertIs(u.replace("xy", "xy").__class__, str)
self.assertEqual(u.replace("xy", "xy"), base)
self.assertIs(u.center(len(u)).__class__, str)
self.assertEqual(u.center(len(u)), base)
self.assertIs(u.ljust(len(u)).__class__, str)
self.assertEqual(u.ljust(len(u)), base)
self.assertIs(u.rjust(len(u)).__class__, str)
self.assertEqual(u.rjust(len(u)), base)
self.assertIs(u.lower().__class__, str)
self.assertEqual(u.lower(), base)
self.assertIs(u.upper().__class__, str)
self.assertEqual(u.upper(), base)
self.assertIs(u.capitalize().__class__, str)
self.assertEqual(u.capitalize(), base)
self.assertIs(u.title().__class__, str)
self.assertEqual(u.title(), base)
self.assertIs((u + "").__class__, str)
self.assertEqual(u + "", base)
self.assertIs(("" + u).__class__, str)
self.assertEqual("" + u, base)
self.assertIs((u * 0).__class__, str)
self.assertEqual(u * 0, "")
self.assertIs((u * 1).__class__, str)
self.assertEqual(u * 1, base)
self.assertIs((u * 2).__class__, str)
self.assertEqual(u * 2, base + base)
self.assertIs(u[:].__class__, str)
self.assertEqual(u[:], base)
self.assertIs(u[0:0].__class__, str)
self.assertEqual(u[0:0], "")
class sublist(list):
pass
a = sublist(range(5))
self.assertEqual(a, list(range(5)))
a.append("hello")
self.assertEqual(a, list(range(5)) + ["hello"])
a[5] = 5
self.assertEqual(a, list(range(6)))
a.extend(range(6, 20))
self.assertEqual(a, list(range(20)))
a[-5:] = []
self.assertEqual(a, list(range(15)))
del a[10:15]
self.assertEqual(len(a), 10)
self.assertEqual(a, list(range(10)))
self.assertEqual(list(a), list(range(10)))
self.assertEqual(a[0], 0)
self.assertEqual(a[9], 9)
self.assertEqual(a[-10], 0)
self.assertEqual(a[-1], 9)
self.assertEqual(a[:5], list(range(5)))
## class CountedInput(file):
## """Counts lines read by self.readline().
##
## self.lineno is the 0-based ordinal of the last line read, up to
## a maximum of one greater than the number of lines in the file.
##
## self.ateof is true if and only if the final "" line has been read,
## at which point self.lineno stops incrementing, and further calls
## to readline() continue to return "".
## """
##
## lineno = 0
## ateof = 0
## def readline(self):
## if self.ateof:
## return ""
## s = file.readline(self)
## # Next line works too.
## # s = super(CountedInput, self).readline()
## self.lineno += 1
## if s == "":
## self.ateof = 1
## return s
##
## f = file(name=support.TESTFN, mode='w')
## lines = ['a\n', 'b\n', 'c\n']
## try:
## f.writelines(lines)
## f.close()
## f = CountedInput(support.TESTFN)
## for (i, expected) in zip(range(1, 5) + [4], lines + 2 * [""]):
## got = f.readline()
## self.assertEqual(expected, got)
## self.assertEqual(f.lineno, i)
## self.assertEqual(f.ateof, (i > len(lines)))
## f.close()
## finally:
## try:
## f.close()
## except:
## pass
## support.unlink(support.TESTFN)
def test_keywords(self):
# Testing keyword args to basic type constructors ...
self.assertEqual(int(x=1), 1)
self.assertEqual(float(x=2), 2.0)
self.assertEqual(int(x=3), 3)
self.assertEqual(complex(imag=42, real=666), complex(666, 42))
self.assertEqual(str(object=500), '500')
self.assertEqual(str(object=b'abc', errors='strict'), 'abc')
self.assertEqual(tuple(sequence=range(3)), (0, 1, 2))
self.assertEqual(list(sequence=(0, 1, 2)), list(range(3)))
# note: as of Python 2.3, dict() no longer has an "items" keyword arg
for constructor in (int, float, complex, str, tuple, list):
try:
constructor(bogus_keyword_arg=1)
except TypeError:
pass
else:
self.fail("expected TypeError from bogus keyword argument to %r"
% constructor)
def test_str_subclass_as_dict_key(self):
# Testing a str subclass used as dict key ..
class cistr(str):
"""Sublcass of str that computes __eq__ case-insensitively.
Also computes a hash code of the string in canonical form.
"""
def __init__(self, value):
self.canonical = value.lower()
self.hashcode = hash(self.canonical)
def __eq__(self, other):
if not isinstance(other, cistr):
other = cistr(other)
return self.canonical == other.canonical
def __hash__(self):
return self.hashcode
self.assertEqual(cistr('ABC'), 'abc')
self.assertEqual('aBc', cistr('ABC'))
self.assertEqual(str(cistr('ABC')), 'ABC')
d = {cistr('one'): 1, cistr('two'): 2, cistr('tHree'): 3}
self.assertEqual(d[cistr('one')], 1)
self.assertEqual(d[cistr('tWo')], 2)
self.assertEqual(d[cistr('THrEE')], 3)
self.assertIn(cistr('ONe'), d)
self.assertEqual(d.get(cistr('thrEE')), 3)
def test_classic_comparisons(self):
# Testing classic comparisons...
class classic:
pass
for base in (classic, int, object):
class C(base):
def __init__(self, value):
self.value = int(value)
def __eq__(self, other):
if isinstance(other, C):
return self.value == other.value
if isinstance(other, int):
return self.value == other
return NotImplemented
def __ne__(self, other):
if isinstance(other, C):
return self.value != other.value
if isinstance(other, int):
return self.value != other
return NotImplemented
def __lt__(self, other):
if isinstance(other, C):
return self.value < other.value
if isinstance(other, int):
return self.value < other
return NotImplemented
def __le__(self, other):
if isinstance(other, C):
return self.value <= other.value
if isinstance(other, int):
return self.value <= other
return NotImplemented
def __gt__(self, other):
if isinstance(other, C):
return self.value > other.value
if isinstance(other, int):
return self.value > other
return NotImplemented
def __ge__(self, other):
if isinstance(other, C):
return self.value >= other.value
if isinstance(other, int):
return self.value >= other
return NotImplemented
c1 = C(1)
c2 = C(2)
c3 = C(3)
self.assertEqual(c1, 1)
c = {1: c1, 2: c2, 3: c3}
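# Exercise every comparison operator on all pairs, mixing C instances with
# plain ints on either side; results must match comparisons on the ints.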
for x in 1, 2, 3:
for y in 1, 2, 3:
for op in "<", "<=", "==", "!=", ">", ">=":
self.assertEqual(eval("c[x] %s c[y]" % op),
eval("x %s y" % op),
"x=%d, y=%d" % (x, y))
self.assertEqual(eval("c[x] %s y" % op),
eval("x %s y" % op),
"x=%d, y=%d" % (x, y))
self.assertEqual(eval("x %s c[y]" % op),
eval("x %s y" % op),
"x=%d, y=%d" % (x, y))
def test_rich_comparisons(self):
# Testing rich comparisons...
class Z(complex):
pass
z = Z(1)
self.assertEqual(z, 1+0j)
self.assertEqual(1+0j, z)
class ZZ(complex):
def __eq__(self, other):
try:
return abs(self - other) <= 1e-6
except:
return NotImplemented
zz = ZZ(1.0000003)
self.assertEqual(zz, 1+0j)
self.assertEqual(1+0j, zz)
class classic:
pass
for base in (classic, int, object, list):
class C(base):
def __init__(self, value):
self.value = int(value)
def __cmp__(self_, other):
self.fail("shouldn't call __cmp__")
def __eq__(self, other):
if isinstance(other, C):
return self.value == other.value
if isinstance(other, int):
return self.value == other
return NotImplemented
def __ne__(self, other):
if isinstance(other, C):
return self.value != other.value
if isinstance(other, int):
return self.value != other
return NotImplemented
def __lt__(self, other):
if isinstance(other, C):
return self.value < other.value
if isinstance(other, int):
return self.value < other
return NotImplemented
def __le__(self, other):
if isinstance(other, C):
return self.value <= other.value
if isinstance(other, int):
return self.value <= other
return NotImplemented
def __gt__(self, other):
if isinstance(other, C):
return self.value > other.value
if isinstance(other, int):
return self.value > other
return NotImplemented
def __ge__(self, other):
if isinstance(other, C):
return self.value >= other.value
if isinstance(other, int):
return self.value >= other
return NotImplemented
c1 = C(1)
c2 = C(2)
c3 = C(3)
self.assertEqual(c1, 1)
c = {1: c1, 2: c2, 3: c3}
for x in 1, 2, 3:
for y in 1, 2, 3:
for op in "<", "<=", "==", "!=", ">", ">=":
self.assertEqual(eval("c[x] %s c[y]" % op),
eval("x %s y" % op),
"x=%d, y=%d" % (x, y))
self.assertEqual(eval("c[x] %s y" % op),
eval("x %s y" % op),
"x=%d, y=%d" % (x, y))
self.assertEqual(eval("x %s c[y]" % op),
eval("x %s y" % op),
"x=%d, y=%d" % (x, y))
def test_descrdoc(self):
# Testing descriptor doc strings...
from _io import FileIO
def check(descr, what):
self.assertEqual(descr.__doc__, what)
check(FileIO.closed, "True if the file is closed") # getset descriptor
check(complex.real, "the real part of a complex number") # member descriptor
def test_doc_descriptor(self):
# Testing __doc__ descriptor...
# SF bug 542984
class DocDescr(object):
def __get__(self, object, otype):
if object:
object = object.__class__.__name__ + ' instance'
if otype:
otype = otype.__name__
return 'object=%s; type=%s' % (object, otype)
class OldClass:
__doc__ = DocDescr()
class NewClass(object):
__doc__ = DocDescr()
self.assertEqual(OldClass.__doc__, 'object=None; type=OldClass')
self.assertEqual(OldClass().__doc__, 'object=OldClass instance; type=OldClass')
self.assertEqual(NewClass.__doc__, 'object=None; type=NewClass')
self.assertEqual(NewClass().__doc__, 'object=NewClass instance; type=NewClass')
def test_set_class(self):
# Testing __class__ assignment...
class C(object): pass
class D(object): pass
class E(object): pass
class F(D, E): pass
for cls in C, D, E, F:
for cls2 in C, D, E, F:
x = cls()
x.__class__ = cls2
self.assertIs(x.__class__, cls2)
x.__class__ = cls
self.assertIs(x.__class__, cls)
def cant(x, C):
try:
x.__class__ = C
except TypeError:
pass
else:
self.fail("shouldn't allow %r.__class__ = %r" % (x, C))
try:
delattr(x, "__class__")
except (TypeError, AttributeError):
pass
else:
self.fail("shouldn't allow del %r.__class__" % x)
cant(C(), list)
cant(list(), C)
cant(C(), 1)
cant(C(), object)
cant(object(), list)
cant(list(), object)
class Int(int): __slots__ = []
cant(2, Int)
cant(Int(), int)
cant(True, int)
cant(2, bool)
o = object()
cant(o, type(1))
cant(o, type(None))
del o
class G(object):
__slots__ = ["a", "b"]
class H(object):
__slots__ = ["b", "a"]
class I(object):
__slots__ = ["a", "b"]
class J(object):
__slots__ = ["c", "b"]
class K(object):
__slots__ = ["a", "b", "d"]
class L(H):
__slots__ = ["e"]
class M(I):
__slots__ = ["e"]
class N(J):
__slots__ = ["__weakref__"]
class P(J):
__slots__ = ["__dict__"]
class Q(J):
pass
class R(J):
__slots__ = ["__dict__", "__weakref__"]
for cls, cls2 in ((G, H), (G, I), (I, H), (Q, R), (R, Q)):
x = cls()
x.a = 1
x.__class__ = cls2
self.assertIs(x.__class__, cls2,
"assigning %r as __class__ for %r silently failed" % (cls2, x))
self.assertEqual(x.a, 1)
x.__class__ = cls
self.assertIs(x.__class__, cls,
"assigning %r as __class__ for %r silently failed" % (cls, x))
self.assertEqual(x.a, 1)
for cls in G, J, K, L, M, N, P, R, list, Int:
for cls2 in G, J, K, L, M, N, P, R, list, Int:
if cls is cls2:
continue
cant(cls(), cls2)
# Issue5283: when __class__ changes in __del__, the wrong
# type gets DECREF'd.
class O(object):
pass
class A(object):
def __del__(self):
self.__class__ = O
l = [A() for x in range(100)]
del l
def test_set_dict(self):
# Testing __dict__ assignment...
class C(object): pass
a = C()
a.__dict__ = {'b': 1}
self.assertEqual(a.b, 1)
def cant(x, dict):
try:
x.__dict__ = dict
except (AttributeError, TypeError):
pass
else:
self.fail("shouldn't allow %r.__dict__ = %r" % (x, dict))
cant(a, None)
cant(a, [])
cant(a, 1)
del a.__dict__ # Deleting __dict__ is allowed
class Base(object):
pass
def verify_dict_readonly(x):
"""
x has to be an instance of a class inheriting from Base.
"""
cant(x, {})
try:
del x.__dict__
except (AttributeError, TypeError):
pass
else:
self.fail("shouldn't allow del %r.__dict__" % x)
dict_descr = Base.__dict__["__dict__"]
try:
dict_descr.__set__(x, {})
except (AttributeError, TypeError):
pass
else:
self.fail("dict_descr allowed access to %r's dict" % x)
# Classes don't allow __dict__ assignment and have readonly dicts
class Meta1(type, Base):
pass
class Meta2(Base, type):
pass
class D(object, metaclass=Meta1):
pass
class E(object, metaclass=Meta2):
pass
for cls in C, D, E:
verify_dict_readonly(cls)
class_dict = cls.__dict__
try:
class_dict["spam"] = "eggs"
except TypeError:
pass
else:
self.fail("%r's __dict__ can be modified" % cls)
# Modules also disallow __dict__ assignment
class Module1(types.ModuleType, Base):
pass
class Module2(Base, types.ModuleType):
pass
for ModuleType in Module1, Module2:
mod = ModuleType("spam")
verify_dict_readonly(mod)
mod.__dict__["spam"] = "eggs"
# Exception's __dict__ can be replaced, but not deleted
# (at least no more than a regular exception's __dict__ can be
# deleted; on CPython it cannot be, whereas on PyPy it can, just
# like any other new-style instance's __dict__.)
def can_delete_dict(e):
try:
del e.__dict__
except (TypeError, AttributeError):
return False
else:
return True
class Exception1(Exception, Base):
pass
class Exception2(Base, Exception):
pass
for ExceptionType in Exception, Exception1, Exception2:
e = ExceptionType()
e.__dict__ = {"a": 1}
self.assertEqual(e.a, 1)
self.assertEqual(can_delete_dict(e), can_delete_dict(ValueError()))
def test_binary_operator_override(self):
# Testing overrides of binary operations...
class I(int):
def __repr__(self):
return "I(%r)" % int(self)
def __add__(self, other):
return I(int(self) + int(other))
__radd__ = __add__
def __pow__(self, other, mod=None):
if mod is None:
return I(pow(int(self), int(other)))
else:
return I(pow(int(self), int(other), int(mod)))
def __rpow__(self, other, mod=None):
if mod is None:
return I(pow(int(other), int(self), mod))
else:
return I(pow(int(other), int(self), int(mod)))
self.assertEqual(repr(I(1) + I(2)), "I(3)")
self.assertEqual(repr(I(1) + 2), "I(3)")
self.assertEqual(repr(1 + I(2)), "I(3)")
self.assertEqual(repr(I(2) ** I(3)), "I(8)")
self.assertEqual(repr(2 ** I(3)), "I(8)")
self.assertEqual(repr(I(2) ** 3), "I(8)")
self.assertEqual(repr(pow(I(2), I(3), I(5))), "I(3)")
class S(str):
def __eq__(self, other):
return self.lower() == other.lower()
def test_subclass_propagation(self):
# Testing propagation of slot functions to subclasses...
class A(object):
pass
class B(A):
pass
class C(A):
pass
class D(B, C):
pass
d = D()
orig_hash = hash(d) # related to id(d) in platform-dependent ways
A.__hash__ = lambda self: 42
self.assertEqual(hash(d), 42)
C.__hash__ = lambda self: 314
self.assertEqual(hash(d), 314)
B.__hash__ = lambda self: 144
self.assertEqual(hash(d), 144)
D.__hash__ = lambda self: 100
self.assertEqual(hash(d), 100)
D.__hash__ = None
self.assertRaises(TypeError, hash, d)
del D.__hash__
self.assertEqual(hash(d), 144)
B.__hash__ = None
self.assertRaises(TypeError, hash, d)
del B.__hash__
self.assertEqual(hash(d), 314)
C.__hash__ = None
self.assertRaises(TypeError, hash, d)
del C.__hash__
self.assertEqual(hash(d), 42)
A.__hash__ = None
self.assertRaises(TypeError, hash, d)
del A.__hash__
self.assertEqual(hash(d), orig_hash)
d.foo = 42
d.bar = 42
self.assertEqual(d.foo, 42)
self.assertEqual(d.bar, 42)
def __getattribute__(self, name):
if name == "foo":
return 24
return object.__getattribute__(self, name)
A.__getattribute__ = __getattribute__
self.assertEqual(d.foo, 24)
self.assertEqual(d.bar, 42)
def __getattr__(self, name):
if name in ("spam", "foo", "bar"):
return "hello"
raise AttributeError(name)
B.__getattr__ = __getattr__
self.assertEqual(d.spam, "hello")
self.assertEqual(d.foo, 24)
self.assertEqual(d.bar, 42)
del A.__getattribute__
self.assertEqual(d.foo, 42)
del d.foo
self.assertEqual(d.foo, "hello")
self.assertEqual(d.bar, 42)
del B.__getattr__
try:
d.foo
except AttributeError:
pass
else:
self.fail("d.foo should be undefined now")
# Test a nasty bug in recurse_down_subclasses()
class A(object):
pass
class B(A):
pass
del B
support.gc_collect()
A.__setitem__ = lambda *a: None # crash
def test_buffer_inheritance(self):
# Testing that buffer interface is inherited ...
import binascii
# SF bug [#470040] ParseTuple t# vs subclasses.
class MyBytes(bytes):
pass
base = b'abc'
m = MyBytes(base)
# b2a_hex uses the buffer interface to get its argument's value, via
# PyArg_ParseTuple 't#' code.
self.assertEqual(binascii.b2a_hex(m), binascii.b2a_hex(base))
class MyInt(int):
pass
m = MyInt(42)
try:
binascii.b2a_hex(m)
self.fail('subclass of int should not have a buffer interface')
except TypeError:
pass
def test_str_of_str_subclass(self):
# Testing __str__ defined in subclass of str ...
import binascii
import io
class octetstring(str):
def __str__(self):
return binascii.b2a_hex(self.encode('ascii')).decode("ascii")
def __repr__(self):
return self + " repr"
o = octetstring('A')
self.assertEqual(type(o), octetstring)
self.assertEqual(type(str(o)), str)
self.assertEqual(type(repr(o)), str)
self.assertEqual(ord(o), 0x41)
self.assertEqual(str(o), '41')
self.assertEqual(repr(o), 'A repr')
self.assertEqual(o.__str__(), '41')
self.assertEqual(o.__repr__(), 'A repr')
capture = io.StringIO()
# Calling str() or not exercises different internal paths.
print(o, file=capture)
print(str(o), file=capture)
self.assertEqual(capture.getvalue(), '41\n41\n')
capture.close()
def test_keyword_arguments(self):
# Testing keyword arguments to __init__, __call__...
def f(a): return a
self.assertEqual(f.__call__(a=42), 42)
a = []
list.__init__(a, sequence=[0, 1, 2])
self.assertEqual(a, [0, 1, 2])
def test_recursive_call(self):
# Testing recursive __call__() by setting to instance of class...
class A(object):
pass
A.__call__ = A()
try:
A()()
except RuntimeError:
pass
else:
self.fail("Recursion limit should have been reached for __call__()")
def test_delete_hook(self):
# Testing __del__ hook...
log = []
class C(object):
def __del__(self):
log.append(1)
c = C()
self.assertEqual(log, [])
del c
support.gc_collect()
self.assertEqual(log, [1])
class D(object): pass
d = D()
try: del d[0]
except TypeError: pass
else: self.fail("invalid del() didn't raise TypeError")
def test_hash_inheritance(self):
# Testing hash of mutable subclasses...
class mydict(dict):
pass
d = mydict()
try:
hash(d)
except TypeError:
pass
else:
self.fail("hash() of dict subclass should fail")
class mylist(list):
pass
d = mylist()
try:
hash(d)
except TypeError:
pass
else:
self.fail("hash() of list subclass should fail")
def test_str_operations(self):
try: 'a' + 5
except TypeError: pass
else: self.fail("'' + 5 doesn't raise TypeError")
try: ''.split('')
except ValueError: pass
else: self.fail("''.split('') doesn't raise ValueError")
try: ''.join([0])
except TypeError: pass
else: self.fail("''.join([0]) doesn't raise TypeError")
try: ''.rindex('5')
except ValueError: pass
else: self.fail("''.rindex('5') doesn't raise ValueError")
try: '%(n)s' % None
except TypeError: pass
else: self.fail("'%(n)s' % None doesn't raise TypeError")
try: '%(n' % {}
except ValueError: pass
else: self.fail("'%(n' % {} '' doesn't raise ValueError")
try: '%*s' % ('abc')
except TypeError: pass
else: self.fail("'%*s' % ('abc') doesn't raise TypeError")
try: '%*.*s' % ('abc', 5)
except TypeError: pass
else: self.fail("'%*.*s' % ('abc', 5) doesn't raise TypeError")
try: '%s' % (1, 2)
except TypeError: pass
else: self.fail("'%s' % (1, 2) doesn't raise TypeError")
try: '%' % None
except ValueError: pass
else: self.fail("'%' % None doesn't raise ValueError")
self.assertEqual('534253'.isdigit(), 1)
self.assertEqual('534253x'.isdigit(), 0)
self.assertEqual('%c' % 5, '\x05')
self.assertEqual('%c' % '5', '5')
def test_deepcopy_recursive(self):
# Testing deepcopy of recursive objects...
class Node:
pass
a = Node()
b = Node()
a.b = b
b.a = a
z = deepcopy(a) # This blew up before
def test_uninitialized_modules(self):
# Testing uninitialized module objects...
from types import ModuleType as M
m = M.__new__(M)
str(m)
self.assertNotHasAttr(m, "__name__")
self.assertNotHasAttr(m, "__file__")
self.assertNotHasAttr(m, "foo")
self.assertFalse(m.__dict__) # None or {} are both reasonable answers
m.foo = 1
self.assertEqual(m.__dict__, {"foo": 1})
def test_funny_new(self):
# Testing __new__ returning something unexpected...
class C(object):
def __new__(cls, arg):
if isinstance(arg, str): return [1, 2, 3]
elif isinstance(arg, int): return object.__new__(D)
else: return object.__new__(cls)
class D(C):
def __init__(self, arg):
self.foo = arg
self.assertEqual(C("1"), [1, 2, 3])
self.assertEqual(D("1"), [1, 2, 3])
d = D(None)
self.assertEqual(d.foo, None)
d = C(1)
self.assertIsInstance(d, D)
self.assertEqual(d.foo, 1)
d = D(1)
self.assertIsInstance(d, D)
self.assertEqual(d.foo, 1)
def test_imul_bug(self):
# Testing for __imul__ problems...
# SF bug 544647
class C(object):
def __imul__(self, other):
return (self, other)
x = C()
y = x
y *= 1.0
self.assertEqual(y, (x, 1.0))
y = x
y *= 2
self.assertEqual(y, (x, 2))
y = x
y *= 3
self.assertEqual(y, (x, 3))
y = x
y *= 1<<100
self.assertEqual(y, (x, 1<<100))
y = x
y *= None
self.assertEqual(y, (x, None))
y = x
y *= "foo"
self.assertEqual(y, (x, "foo"))
def test_copy_setstate(self):
# Testing that copy.*copy() correctly uses __setstate__...
import copy
class C(object):
def __init__(self, foo=None):
self.foo = foo
self.__foo = foo
def setfoo(self, foo=None):
self.foo = foo
def getfoo(self):
return self.__foo
def __getstate__(self):
return [self.foo]
def __setstate__(self_, lst):
self.assertEqual(len(lst), 1)
self_.__foo = self_.foo = lst[0]
a = C(42)
a.setfoo(24)
self.assertEqual(a.foo, 24)
self.assertEqual(a.getfoo(), 42)
b = copy.copy(a)
self.assertEqual(b.foo, 24)
self.assertEqual(b.getfoo(), 24)
b = copy.deepcopy(a)
self.assertEqual(b.foo, 24)
self.assertEqual(b.getfoo(), 24)
def test_slices(self):
# Testing cases with slices and overridden __getitem__ ...
# Strings
self.assertEqual("hello"[:4], "hell")
self.assertEqual("hello"[slice(4)], "hell")
self.assertEqual(str.__getitem__("hello", slice(4)), "hell")
class S(str):
def __getitem__(self, x):
return str.__getitem__(self, x)
self.assertEqual(S("hello")[:4], "hell")
self.assertEqual(S("hello")[slice(4)], "hell")
self.assertEqual(S("hello").__getitem__(slice(4)), "hell")
# Tuples
self.assertEqual((1,2,3)[:2], (1,2))
self.assertEqual((1,2,3)[slice(2)], (1,2))
self.assertEqual(tuple.__getitem__((1,2,3), slice(2)), (1,2))
class T(tuple):
def __getitem__(self, x):
return tuple.__getitem__(self, x)
self.assertEqual(T((1,2,3))[:2], (1,2))
self.assertEqual(T((1,2,3))[slice(2)], (1,2))
self.assertEqual(T((1,2,3)).__getitem__(slice(2)), (1,2))
# Lists
self.assertEqual([1,2,3][:2], [1,2])
self.assertEqual([1,2,3][slice(2)], [1,2])
self.assertEqual(list.__getitem__([1,2,3], slice(2)), [1,2])
class L(list):
def __getitem__(self, x):
return list.__getitem__(self, x)
self.assertEqual(L([1,2,3])[:2], [1,2])
self.assertEqual(L([1,2,3])[slice(2)], [1,2])
self.assertEqual(L([1,2,3]).__getitem__(slice(2)), [1,2])
# Now do lists and __setitem__
a = L([1,2,3])
a[slice(1, 3)] = [3,2]
self.assertEqual(a, [1,3,2])
a[slice(0, 2, 1)] = [3,1]
self.assertEqual(a, [3,1,2])
a.__setitem__(slice(1, 3), [2,1])
self.assertEqual(a, [3,2,1])
a.__setitem__(slice(0, 2, 1), [2,3])
self.assertEqual(a, [2,3,1])
def test_subtype_resurrection(self):
# Testing resurrection of new-style instance...
class C(object):
container = []
def __del__(self):
# resurrect the instance
C.container.append(self)
c = C()
c.attr = 42
# The most interesting thing here is whether this blows up, due to
# flawed GC tracking logic in typeobject.c's call_finalizer() (a 2.2.1
# bug).
del c
support.gc_collect()
self.assertEqual(len(C.container), 1)
# Make c mortal again, so that the test framework with -l doesn't report
# it as a leak.
del C.__del__
def test_slots_trash(self):
# Testing slot trash...
# Deallocating deeply nested slotted trash caused stack overflows
class trash(object):
__slots__ = ['x']
def __init__(self, x):
self.x = x
o = None
for i in range(50000):
o = trash(o)
del o
def test_slots_multiple_inheritance(self):
# SF bug 575229, multiple inheritance w/ slots dumps core
class A(object):
__slots__=()
class B(object):
pass
class C(A,B) :
__slots__=()
if support.check_impl_detail():
self.assertEqual(C.__basicsize__, B.__basicsize__)
self.assertHasAttr(C, '__dict__')
self.assertHasAttr(C, '__weakref__')
C().x = 2
def test_rmul(self):
# Testing correct invocation of __rmul__...
# SF patch 592646
class C(object):
def __mul__(self, other):
return "mul"
def __rmul__(self, other):
return "rmul"
a = C()
self.assertEqual(a*2, "mul")
self.assertEqual(a*2.2, "mul")
self.assertEqual(2*a, "rmul")
self.assertEqual(2.2*a, "rmul")
def test_ipow(self):
# Testing correct invocation of __ipow__...
# [SF bug 620179]
class C(object):
def __ipow__(self, other):
pass
a = C()
a **= 2
def test_mutable_bases(self):
# Testing mutable bases...
# stuff that should work:
class C(object):
pass
class C2(object):
def __getattribute__(self, attr):
if attr == 'a':
return 2
else:
return super(C2, self).__getattribute__(attr)
def meth(self):
return 1
class D(C):
pass
class E(D):
pass
d = D()
e = E()
D.__bases__ = (C,)
D.__bases__ = (C2,)
self.assertEqual(d.meth(), 1)
self.assertEqual(e.meth(), 1)
self.assertEqual(d.a, 2)
self.assertEqual(e.a, 2)
self.assertEqual(C2.__subclasses__(), [D])
try:
del D.__bases__
except (TypeError, AttributeError):
pass
else:
self.fail("shouldn't be able to delete .__bases__")
try:
D.__bases__ = ()
except TypeError as msg:
if str(msg) == "a new-style class can't have only classic bases":
self.fail("wrong error message for .__bases__ = ()")
else:
self.fail("shouldn't be able to set .__bases__ to ()")
try:
D.__bases__ = (D,)
except TypeError:
pass
else:
# actually, we'll have crashed by here...
self.fail("shouldn't be able to create inheritance cycles")
try:
D.__bases__ = (C, C)
except TypeError:
pass
else:
self.fail("didn't detect repeated base classes")
try:
D.__bases__ = (E,)
except TypeError:
pass
else:
self.fail("shouldn't be able to create inheritance cycles")
def test_builtin_bases(self):
# Make sure all the builtin types can have their base queried without
# segfaulting. See issue #5787.
builtin_types = [tp for tp in builtins.__dict__.values()
if isinstance(tp, type)]
for tp in builtin_types:
object.__getattribute__(tp, "__bases__")
if tp is not object:
self.assertEqual(len(tp.__bases__), 1, tp)
class L(list):
pass
class C(object):
pass
class D(C):
pass
try:
L.__bases__ = (dict,)
except TypeError:
pass
else:
self.fail("shouldn't turn list subclass into dict subclass")
try:
list.__bases__ = (dict,)
except TypeError:
pass
else:
self.fail("shouldn't be able to assign to list.__bases__")
try:
D.__bases__ = (C, list)
except TypeError:
pass
else:
assert 0, "best_base calculation found wanting"
def test_mutable_bases_with_failing_mro(self):
# Testing mutable bases with failing mro...
class WorkOnce(type):
def __new__(self, name, bases, ns):
self.flag = 0
return super(WorkOnce, self).__new__(WorkOnce, name, bases, ns)
def mro(self):
if self.flag > 0:
raise RuntimeError("bozo")
else:
self.flag += 1
return type.mro(self)
class WorkAlways(type):
def mro(self):
# this is here to make sure that .mro()s aren't called
# with an exception set (which was possible at one point).
# An error message will be printed in a debug build.
# What's a good way to test for this?
return type.mro(self)
class C(object):
pass
class C2(object):
pass
class D(C):
pass
class E(D):
pass
class F(D, metaclass=WorkOnce):
pass
class G(D, metaclass=WorkAlways):
pass
# Immediate subclasses have their mro's adjusted in alphabetical
# order, so E's will get adjusted before adjusting F's fails. We
# check here that E's gets restored.
E_mro_before = E.__mro__
D_mro_before = D.__mro__
try:
D.__bases__ = (C2,)
except RuntimeError:
self.assertEqual(E.__mro__, E_mro_before)
self.assertEqual(D.__mro__, D_mro_before)
else:
self.fail("exception not propagated")
def test_mutable_bases_catch_mro_conflict(self):
# Testing mutable bases catch mro conflict...
class A(object):
pass
class B(object):
pass
class C(A, B):
pass
class D(A, B):
pass
class E(C, D):
pass
try:
C.__bases__ = (B, A)
except TypeError:
pass
else:
self.fail("didn't catch MRO conflict")
def test_mutable_names(self):
# Testing mutable names...
class C(object):
pass
# C.__module__ could be 'test_descr' or '__main__'
mod = C.__module__
C.__name__ = 'D'
self.assertEqual((C.__module__, C.__name__), (mod, 'D'))
C.__name__ = 'D.E'
self.assertEqual((C.__module__, C.__name__), (mod, 'D.E'))
def test_evil_type_name(self):
# A badly placed Py_DECREF in type_set_name led to arbitrary code
# execution while the type structure was not in a sane state, and a
# possible segmentation fault as a result. See bug #16447.
class Nasty(str):
def __del__(self):
C.__name__ = "other"
class C:
pass
C.__name__ = Nasty("abc")
C.__name__ = "normal"
def test_subclass_right_op(self):
# Testing correct dispatch of subclass overloading __r<op>__...
# This code tests various cases where right-dispatch of a subclass
# should be preferred over left-dispatch of a base class.
# Case 1: subclass of int; this tests code in abstract.c::binary_op1()
class B(int):
def __floordiv__(self, other):
return "B.__floordiv__"
def __rfloordiv__(self, other):
return "B.__rfloordiv__"
self.assertEqual(B(1) // 1, "B.__floordiv__")
self.assertEqual(1 // B(1), "B.__rfloordiv__")
# Case 2: subclass of object; this is just the baseline for case 3
class C(object):
def __floordiv__(self, other):
return "C.__floordiv__"
def __rfloordiv__(self, other):
return "C.__rfloordiv__"
self.assertEqual(C() // 1, "C.__floordiv__")
self.assertEqual(1 // C(), "C.__rfloordiv__")
# Case 3: subclass of new-style class; here it gets interesting
class D(C):
def __floordiv__(self, other):
return "D.__floordiv__"
def __rfloordiv__(self, other):
return "D.__rfloordiv__"
self.assertEqual(D() // C(), "D.__floordiv__")
self.assertEqual(C() // D(), "D.__rfloordiv__")
# Case 4: this didn't work right in 2.2.2 and 2.3a1
class E(C):
pass
self.assertEqual(E.__rfloordiv__, C.__rfloordiv__)
self.assertEqual(E() // 1, "C.__floordiv__")
self.assertEqual(1 // E(), "C.__rfloordiv__")
self.assertEqual(E() // C(), "C.__floordiv__")
self.assertEqual(C() // E(), "C.__floordiv__") # This one would fail
@support.impl_detail("testing an internal kind of method object")
def test_meth_class_get(self):
# Testing __get__ method of METH_CLASS C methods...
# Full coverage of descrobject.c::classmethod_get()
# Baseline
arg = [1, 2, 3]
res = {1: None, 2: None, 3: None}
self.assertEqual(dict.fromkeys(arg), res)
self.assertEqual({}.fromkeys(arg), res)
# Now get the descriptor
descr = dict.__dict__["fromkeys"]
# More baseline using the descriptor directly
self.assertEqual(descr.__get__(None, dict)(arg), res)
self.assertEqual(descr.__get__({})(arg), res)
# Now check various error cases
try:
descr.__get__(None, None)
except TypeError:
pass
else:
self.fail("shouldn't have allowed descr.__get__(None, None)")
try:
descr.__get__(42)
except TypeError:
pass
else:
self.fail("shouldn't have allowed descr.__get__(42)")
try:
descr.__get__(None, 42)
except TypeError:
pass
else:
self.fail("shouldn't have allowed descr.__get__(None, 42)")
try:
descr.__get__(None, int)
except TypeError:
pass
else:
self.fail("shouldn't have allowed descr.__get__(None, int)")
def test_isinst_isclass(self):
# Testing proxy isinstance() and isclass()...
class Proxy(object):
def __init__(self, obj):
self.__obj = obj
def __getattribute__(self, name):
if name.startswith("_Proxy__"):
return object.__getattribute__(self, name)
else:
return getattr(self.__obj, name)
# Test with a classic class
class C:
pass
a = C()
pa = Proxy(a)
self.assertIsInstance(a, C) # Baseline
self.assertIsInstance(pa, C) # Test
# Test with a classic subclass
class D(C):
pass
a = D()
pa = Proxy(a)
self.assertIsInstance(a, C) # Baseline
self.assertIsInstance(pa, C) # Test
# Test with a new-style class
class C(object):
pass
a = C()
pa = Proxy(a)
self.assertIsInstance(a, C) # Baseline
self.assertIsInstance(pa, C) # Test
# Test with a new-style subclass
class D(C):
pass
a = D()
pa = Proxy(a)
self.assertIsInstance(a, C) # Baseline
self.assertIsInstance(pa, C) # Test
def test_proxy_super(self):
# Testing super() for a proxy object...
class Proxy(object):
def __init__(self, obj):
self.__obj = obj
def __getattribute__(self, name):
if name.startswith("_Proxy__"):
return object.__getattribute__(self, name)
else:
return getattr(self.__obj, name)
class B(object):
def f(self):
return "B.f"
class C(B):
def f(self):
return super(C, self).f() + "->C.f"
obj = C()
p = Proxy(obj)
self.assertEqual(C.__dict__["f"](p), "B.f->C.f")
def test_carloverre(self):
# Testing prohibition of Carlo Verre's hack...
try:
object.__setattr__(str, "foo", 42)
except TypeError:
pass
else:
self.fail("Carlo Verre __setattr__ succeeded!")
try:
object.__delattr__(str, "lower")
except TypeError:
pass
else:
self.fail("Carlo Verre __delattr__ succeeded!")
def test_weakref_segfault(self):
# Testing weakref segfault...
# SF 742911
import weakref
class Provoker:
def __init__(self, referent):
self.ref = weakref.ref(referent)
def __del__(self):
x = self.ref()
class Oops(object):
pass
o = Oops()
o.whatever = Provoker(o)
del o
def test_wrapper_segfault(self):
# SF 927248: deeply nested wrappers could cause stack overflow
f = lambda:None
for i in range(1000000):
f = f.__call__
f = None
def test_file_fault(self):
# Testing sys.stdout is changed in getattr...
test_stdout = sys.stdout
class StdoutGuard:
def __getattr__(self, attr):
sys.stdout = sys.__stdout__
raise RuntimeError("Premature access to sys.stdout.%s" % attr)
sys.stdout = StdoutGuard()
try:
print("Oops!")
except RuntimeError:
pass
finally:
sys.stdout = test_stdout
def test_vicious_descriptor_nonsense(self):
# Testing vicious_descriptor_nonsense...
# A potential segfault spotted by Thomas Wouters in mail to
# python-dev 2003-04-17, turned into an example & fixed by Michael
# Hudson just less than four months later...
class Evil(object):
def __hash__(self):
return hash('attr')
def __eq__(self, other):
del C.attr
return 0
class Descr(object):
def __get__(self, ob, type=None):
return 1
class C(object):
attr = Descr()
c = C()
c.__dict__[Evil()] = 0
self.assertEqual(c.attr, 1)
# this makes a crash more likely:
support.gc_collect()
self.assertNotHasAttr(c, 'attr')
def test_init(self):
# SF 1155938
class Foo(object):
def __init__(self):
return 10
try:
Foo()
except TypeError:
pass
else:
self.fail("did not test __init__() for None return")
def test_method_wrapper(self):
# Testing method-wrapper objects...
# <type 'method-wrapper'> did not support any reflection before 2.5
# XXX should methods really support __eq__?
l = []
self.assertEqual(l.__add__, l.__add__)
self.assertEqual(l.__add__, [].__add__)
self.assertNotEqual(l.__add__, [5].__add__)
self.assertNotEqual(l.__add__, l.__mul__)
self.assertEqual(l.__add__.__name__, '__add__')
if hasattr(l.__add__, '__self__'):
# CPython
self.assertIs(l.__add__.__self__, l)
self.assertIs(l.__add__.__objclass__, list)
else:
# Python implementations where [].__add__ is a normal bound method
self.assertIs(l.__add__.im_self, l)
self.assertIs(l.__add__.im_class, list)
self.assertEqual(l.__add__.__doc__, list.__add__.__doc__)
try:
hash(l.__add__)
except TypeError:
pass
else:
self.fail("no TypeError from hash([].__add__)")
t = ()
t += (7,)
self.assertEqual(t.__add__, (7,).__add__)
self.assertEqual(hash(t.__add__), hash((7,).__add__))
def test_not_implemented(self):
# Testing NotImplemented...
# all binary methods should be able to return a NotImplemented
import operator
def specialmethod(self, other):
return NotImplemented
def check(expr, x, y):
try:
exec(expr, {'x': x, 'y': y, 'operator': operator})
except TypeError:
pass
else:
self.fail("no TypeError from %r" % (expr,))
N1 = sys.maxsize + 1 # might trigger OverflowErrors instead of
# TypeErrors
N2 = sys.maxsize # if sizeof(int) < sizeof(long), might trigger
# ValueErrors instead of TypeErrors
for name, expr, iexpr in [
('__add__', 'x + y', 'x += y'),
('__sub__', 'x - y', 'x -= y'),
('__mul__', 'x * y', 'x *= y'),
('__truediv__', 'operator.truediv(x, y)', None),
('__floordiv__', 'operator.floordiv(x, y)', None),
('__div__', 'x / y', 'x /= y'),
('__mod__', 'x % y', 'x %= y'),
('__divmod__', 'divmod(x, y)', None),
('__pow__', 'x ** y', 'x **= y'),
('__lshift__', 'x << y', 'x <<= y'),
('__rshift__', 'x >> y', 'x >>= y'),
('__and__', 'x & y', 'x &= y'),
('__or__', 'x | y', 'x |= y'),
('__xor__', 'x ^ y', 'x ^= y')]:
rname = '__r' + name[2:]
A = type('A', (), {name: specialmethod})
a = A()
check(expr, a, a)
check(expr, a, N1)
check(expr, a, N2)
if iexpr:
check(iexpr, a, a)
check(iexpr, a, N1)
check(iexpr, a, N2)
iname = '__i' + name[2:]
C = type('C', (), {iname: specialmethod})
c = C()
check(iexpr, c, a)
check(iexpr, c, N1)
check(iexpr, c, N2)
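# Rough sketch of the behaviour checked above (hypothetical class): when the
# special methods of both operands return NotImplemented, the interpreter
# falls back to raising TypeError, e.g.
#   class N(object):
#       def __add__(self, other): return NotImplemented
#   N() + N()   # -> TypeError: unsupported operand type(s) for +: 'N' and 'N'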
def test_assign_slice(self):
# ceval.c's assign_slice used to check for
# tp->tp_as_sequence->sq_slice instead of
# tp->tp_as_sequence->sq_ass_slice
class C(object):
def __setitem__(self, idx, value):
self.value = value
c = C()
c[1:2] = 3
self.assertEqual(c.value, 3)
def test_set_and_no_get(self):
# See
# http://mail.python.org/pipermail/python-dev/2010-January/095637.html
class Descr(object):
def __init__(self, name):
self.name = name
def __set__(self, obj, value):
obj.__dict__[self.name] = value
descr = Descr("a")
class X(object):
a = descr
x = X()
self.assertIs(x.a, descr)
x.a = 42
self.assertEqual(x.a, 42)
# Also check type_getattro for correctness.
class Meta(type):
pass
class X(object):
__metaclass__ = Meta
X.a = 42
Meta.a = Descr("a")
self.assertEqual(X.a, 42)
def test_getattr_hooks(self):
# issue 4230
class Descriptor(object):
counter = 0
def __get__(self, obj, objtype=None):
def getter(name):
self.counter += 1
raise AttributeError(name)
return getter
descr = Descriptor()
class A(object):
__getattribute__ = descr
class B(object):
__getattr__ = descr
class C(object):
__getattribute__ = descr
__getattr__ = descr
self.assertRaises(AttributeError, getattr, A(), "attr")
self.assertEqual(descr.counter, 1)
self.assertRaises(AttributeError, getattr, B(), "attr")
self.assertEqual(descr.counter, 2)
self.assertRaises(AttributeError, getattr, C(), "attr")
self.assertEqual(descr.counter, 4)
class EvilGetattribute(object):
# This used to segfault
def __getattr__(self, name):
raise AttributeError(name)
def __getattribute__(self, name):
del EvilGetattribute.__getattr__
for i in range(5):
gc.collect()
raise AttributeError(name)
self.assertRaises(AttributeError, getattr, EvilGetattribute(), "attr")
def test_type___getattribute__(self):
self.assertRaises(TypeError, type.__getattribute__, list, type)
def test_abstractmethods(self):
# type pretends not to have __abstractmethods__.
self.assertRaises(AttributeError, getattr, type, "__abstractmethods__")
class meta(type):
pass
self.assertRaises(AttributeError, getattr, meta, "__abstractmethods__")
class X(object):
pass
with self.assertRaises(AttributeError):
del X.__abstractmethods__
def test_proxy_call(self):
class FakeStr:
__class__ = str
fake_str = FakeStr()
# isinstance() reads __class__
self.assertIsInstance(fake_str, str)
# call a method descriptor
with self.assertRaises(TypeError):
str.split(fake_str)
# call a slot wrapper descriptor
with self.assertRaises(TypeError):
str.__add__(fake_str, "abc")
def test_repr_as_str(self):
# Issue #11603: crash or infinite loop when rebinding __str__ as
# __repr__.
class Foo:
pass
Foo.__repr__ = Foo.__str__
foo = Foo()
self.assertRaises(RuntimeError, str, foo)
self.assertRaises(RuntimeError, repr, foo)
def test_mixing_slot_wrappers(self):
class X(dict):
__setattr__ = dict.__setitem__
x = X()
x.y = 42
self.assertEqual(x["y"], 42)
def test_slot_shadows_class_variable(self):
with self.assertRaises(ValueError) as cm:
class X:
__slots__ = ["foo"]
foo = None
m = str(cm.exception)
self.assertEqual("'foo' in __slots__ conflicts with class variable", m)
def test_set_doc(self):
class X:
"elephant"
X.__doc__ = "banana"
self.assertEqual(X.__doc__, "banana")
with self.assertRaises(TypeError) as cm:
type(list).__dict__["__doc__"].__set__(list, "blah")
self.assertIn("can't set list.__doc__", str(cm.exception))
with self.assertRaises(TypeError) as cm:
type(X).__dict__["__doc__"].__delete__(X)
self.assertIn("can't delete X.__doc__", str(cm.exception))
self.assertEqual(X.__doc__, "banana")
def test_qualname(self):
descriptors = [str.lower, complex.real, float.real, int.__add__]
types = ['method', 'member', 'getset', 'wrapper']
# make sure we have an example of each type of descriptor
for d, n in zip(descriptors, types):
self.assertEqual(type(d).__name__, n + '_descriptor')
for d in descriptors:
qualname = d.__objclass__.__qualname__ + '.' + d.__name__
self.assertEqual(d.__qualname__, qualname)
self.assertEqual(str.lower.__qualname__, 'str.lower')
self.assertEqual(complex.real.__qualname__, 'complex.real')
self.assertEqual(float.real.__qualname__, 'float.real')
self.assertEqual(int.__add__.__qualname__, 'int.__add__')
class X:
pass
with self.assertRaises(TypeError):
del X.__qualname__
self.assertRaises(TypeError, type.__dict__['__qualname__'].__set__,
str, 'Oink')
global Y
class Y:
class Inside:
pass
self.assertEqual(Y.__qualname__, 'Y')
self.assertEqual(Y.Inside.__qualname__, 'Y.Inside')
def test_qualname_dict(self):
ns = {'__qualname__': 'some.name'}
tp = type('Foo', (), ns)
self.assertEqual(tp.__qualname__, 'some.name')
self.assertNotIn('__qualname__', tp.__dict__)
self.assertEqual(ns, {'__qualname__': 'some.name'})
ns = {'__qualname__': 1}
self.assertRaises(TypeError, type, 'Foo', (), ns)
def test_cycle_through_dict(self):
# See bug #1469629
class X(dict):
def __init__(self):
dict.__init__(self)
self.__dict__ = self
x = X()
x.attr = 42
wr = weakref.ref(x)
del x
support.gc_collect()
self.assertIsNone(wr())
for o in gc.get_objects():
self.assertIsNot(type(o), X)
def test_object_new_and_init_with_parameters(self):
# See issue #1683368
class OverrideNeither:
pass
self.assertRaises(TypeError, OverrideNeither, 1)
self.assertRaises(TypeError, OverrideNeither, kw=1)
class OverrideNew:
def __new__(cls, foo, kw=0, *args, **kwds):
return object.__new__(cls, *args, **kwds)
class OverrideInit:
def __init__(self, foo, kw=0, *args, **kwargs):
return object.__init__(self, *args, **kwargs)
class OverrideBoth(OverrideNew, OverrideInit):
pass
for case in OverrideNew, OverrideInit, OverrideBoth:
case(1)
case(1, kw=2)
self.assertRaises(TypeError, case, 1, 2, 3)
self.assertRaises(TypeError, case, 1, 2, foo=3)
def test_subclassing_does_not_duplicate_dict_descriptors(self):
class Base:
pass
class Sub(Base):
pass
self.assertIn("__dict__", Base.__dict__)
self.assertNotIn("__dict__", Sub.__dict__)
class DictProxyTests(unittest.TestCase):
def setUp(self):
class C(object):
def meth(self):
pass
self.C = C
@unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(),
'trace function introduces __local__')
def test_iter_keys(self):
# Testing dict-proxy keys...
it = self.C.__dict__.keys()
self.assertNotIsInstance(it, list)
keys = list(it)
keys.sort()
self.assertEqual(keys, ['__dict__', '__doc__', '__module__',
'__weakref__', 'meth'])
@unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(),
'trace function introduces __local__')
def test_iter_values(self):
# Testing dict-proxy values...
it = self.C.__dict__.values()
self.assertNotIsInstance(it, list)
values = list(it)
self.assertEqual(len(values), 5)
@unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(),
'trace function introduces __local__')
def test_iter_items(self):
# Testing dict-proxy iteritems...
it = self.C.__dict__.items()
self.assertNotIsInstance(it, list)
keys = [item[0] for item in it]
keys.sort()
self.assertEqual(keys, ['__dict__', '__doc__', '__module__',
'__weakref__', 'meth'])
def test_dict_type_with_metaclass(self):
# Testing type of __dict__ when metaclass set...
class B(object):
pass
class M(type):
pass
class C(metaclass=M):
# In 2.3a1, C.__dict__ was a real dict rather than a dict proxy
pass
self.assertEqual(type(C.__dict__), type(B.__dict__))
def test_repr(self):
# Testing mappingproxy.__repr__.
# We can't blindly compare with the repr of another dict as ordering
# of keys and values is arbitrary and may differ.
r = repr(self.C.__dict__)
self.assertTrue(r.startswith('mappingproxy('), r)
self.assertTrue(r.endswith(')'), r)
for k, v in self.C.__dict__.items():
self.assertIn('{!r}: {!r}'.format(k, v), r)
class PTypesLongInitTest(unittest.TestCase):
# This is in its own TestCase so that it can be run before any other tests.
def test_pytype_long_ready(self):
# Testing SF bug 551412 ...
# This dumps core when SF bug 551412 isn't fixed --
# but only when test_descr.py is run separately.
# (That can't be helped -- as soon as PyType_Ready()
# is called for PyLong_Type, the bug is gone.)
class UserLong(object):
def __pow__(self, *args):
pass
try:
pow(0, UserLong(), 0)
except:
pass
# Another segfault only when run early
# (before PyType_Ready(tuple) is called)
type.mro(tuple)
class MiscTests(unittest.TestCase):
def test_type_lookup_mro_reference(self):
# Issue #14199: _PyType_Lookup() has to keep a strong reference to
# the type MRO because it may be modified during the lookup, if
# __bases__ is set during the lookup for example.
class MyKey(object):
def __hash__(self):
return hash('mykey')
def __eq__(self, other):
X.__bases__ = (Base2,)
class Base(object):
mykey = 'from Base'
mykey2 = 'from Base'
class Base2(object):
mykey = 'from Base2'
mykey2 = 'from Base2'
X = type('X', (Base,), {MyKey(): 5})
# mykey is read from Base
self.assertEqual(X.mykey, 'from Base')
# mykey2 is read from Base2 because MyKey.__eq__ has set __bases__
self.assertEqual(X.mykey2, 'from Base2')
class PicklingTests(unittest.TestCase):
def _check_reduce(self, proto, obj, args=(), kwargs={}, state=None,
listitems=None, dictitems=None):
if proto >= 4:
reduce_value = obj.__reduce_ex__(proto)
self.assertEqual(reduce_value[:3],
(copyreg.__newobj_ex__,
(type(obj), args, kwargs),
state))
if listitems is not None:
self.assertListEqual(list(reduce_value[3]), listitems)
else:
self.assertIsNone(reduce_value[3])
if dictitems is not None:
self.assertDictEqual(dict(reduce_value[4]), dictitems)
else:
self.assertIsNone(reduce_value[4])
elif proto >= 2:
reduce_value = obj.__reduce_ex__(proto)
self.assertEqual(reduce_value[:3],
(copyreg.__newobj__,
(type(obj),) + args,
state))
if listitems is not None:
self.assertListEqual(list(reduce_value[3]), listitems)
else:
self.assertIsNone(reduce_value[3])
if dictitems is not None:
self.assertDictEqual(dict(reduce_value[4]), dictitems)
else:
self.assertIsNone(reduce_value[4])
else:
base_type = type(obj).__base__
reduce_value = (copyreg._reconstructor,
(type(obj),
base_type,
None if base_type is object else base_type(obj)))
if state is not None:
reduce_value += (state,)
self.assertEqual(obj.__reduce_ex__(proto), reduce_value)
self.assertEqual(obj.__reduce__(), reduce_value)
def test_reduce(self):
protocols = range(pickle.HIGHEST_PROTOCOL + 1)
args = (-101, "spam")
kwargs = {'bacon': -201, 'fish': -301}
state = {'cheese': -401}
class C1:
def __getnewargs__(self):
return args
obj = C1()
for proto in protocols:
self._check_reduce(proto, obj, args)
for name, value in state.items():
setattr(obj, name, value)
for proto in protocols:
self._check_reduce(proto, obj, args, state=state)
class C2:
def __getnewargs__(self):
return "bad args"
obj = C2()
for proto in protocols:
if proto >= 2:
with self.assertRaises(TypeError):
obj.__reduce_ex__(proto)
class C3:
def __getnewargs_ex__(self):
return (args, kwargs)
obj = C3()
for proto in protocols:
if proto >= 4:
self._check_reduce(proto, obj, args, kwargs)
elif proto >= 2:
with self.assertRaises(ValueError):
obj.__reduce_ex__(proto)
class C4:
def __getnewargs_ex__(self):
return (args, "bad dict")
class C5:
def __getnewargs_ex__(self):
return ("bad tuple", kwargs)
class C6:
def __getnewargs_ex__(self):
return ()
class C7:
def __getnewargs_ex__(self):
return "bad args"
for proto in protocols:
for cls in C4, C5, C6, C7:
obj = cls()
if proto >= 2:
with self.assertRaises((TypeError, ValueError)):
obj.__reduce_ex__(proto)
class C8:
def __getnewargs_ex__(self):
return (args, kwargs)
obj = C8()
for proto in protocols:
if 2 <= proto < 4:
with self.assertRaises(ValueError):
obj.__reduce_ex__(proto)
class C9:
def __getnewargs_ex__(self):
return (args, {})
obj = C9()
for proto in protocols:
self._check_reduce(proto, obj, args)
class C10:
def __getnewargs_ex__(self):
raise IndexError
obj = C10()
for proto in protocols:
if proto >= 2:
with self.assertRaises(IndexError):
obj.__reduce_ex__(proto)
class C11:
def __getstate__(self):
return state
obj = C11()
for proto in protocols:
self._check_reduce(proto, obj, state=state)
class C12:
def __getstate__(self):
return "not dict"
obj = C12()
for proto in protocols:
self._check_reduce(proto, obj, state="not dict")
class C13:
def __getstate__(self):
raise IndexError
obj = C13()
for proto in protocols:
with self.assertRaises(IndexError):
obj.__reduce_ex__(proto)
if proto < 2:
with self.assertRaises(IndexError):
obj.__reduce__()
class C14:
__slots__ = tuple(state)
def __init__(self):
for name, value in state.items():
setattr(self, name, value)
obj = C14()
for proto in protocols:
if proto >= 2:
self._check_reduce(proto, obj, state=(None, state))
else:
with self.assertRaises(TypeError):
obj.__reduce_ex__(proto)
with self.assertRaises(TypeError):
obj.__reduce__()
class C15(dict):
pass
obj = C15({"quebec": -601})
for proto in protocols:
self._check_reduce(proto, obj, dictitems=dict(obj))
class C16(list):
pass
obj = C16(["yukon"])
for proto in protocols:
self._check_reduce(proto, obj, listitems=list(obj))
def test_special_method_lookup(self):
protocols = range(pickle.HIGHEST_PROTOCOL + 1)
class Picky:
def __getstate__(self):
return {}
def __getattr__(self, attr):
if attr in ("__getnewargs__", "__getnewargs_ex__"):
raise AssertionError(attr)
return None
for protocol in protocols:
state = {} if protocol >= 2 else None
self._check_reduce(protocol, Picky(), state=state)
def _assert_is_copy(self, obj, objcopy, msg=None):
"""Utility method to verify if two objects are copies of each others.
"""
if msg is None:
msg = "{!r} is not a copy of {!r}".format(obj, objcopy)
if type(obj).__repr__ is object.__repr__:
# We have this limitation for now because we use the object's repr
# to help us verify that the two objects are copies. This allows
# us to delegate the non-generic verification logic to the objects
# themselves.
raise ValueError("object passed to _assert_is_copy must " +
"override the __repr__ method.")
self.assertIsNot(obj, objcopy, msg=msg)
self.assertIs(type(obj), type(objcopy), msg=msg)
if hasattr(obj, '__dict__'):
self.assertDictEqual(obj.__dict__, objcopy.__dict__, msg=msg)
self.assertIsNot(obj.__dict__, objcopy.__dict__, msg=msg)
if hasattr(obj, '__slots__'):
self.assertListEqual(obj.__slots__, objcopy.__slots__, msg=msg)
for slot in obj.__slots__:
self.assertEqual(
hasattr(obj, slot), hasattr(objcopy, slot), msg=msg)
self.assertEqual(getattr(obj, slot, None),
getattr(objcopy, slot, None), msg=msg)
self.assertEqual(repr(obj), repr(objcopy), msg=msg)
@staticmethod
def _generate_pickle_copiers():
"""Utility method to generate the many possible pickle configurations.
"""
class PickleCopier:
"This class copies object using pickle."
def __init__(self, proto, dumps, loads):
self.proto = proto
self.dumps = dumps
self.loads = loads
def copy(self, obj):
return self.loads(self.dumps(obj, self.proto))
def __repr__(self):
# We try to be as descriptive as possible here since this is
# the string which will allow us to tell which pickle
# configuration we are using during debugging.
return ("PickleCopier(proto={}, dumps={}.{}, loads={}.{})"
.format(self.proto,
self.dumps.__module__, self.dumps.__qualname__,
self.loads.__module__, self.loads.__qualname__))
return (PickleCopier(*args) for args in
itertools.product(range(pickle.HIGHEST_PROTOCOL + 1),
{pickle.dumps, pickle._dumps},
{pickle.loads, pickle._loads}))
def test_pickle_slots(self):
# Tests pickling of classes with __slots__.
# Pickling of classes with __slots__ but without __getstate__ should
# fail (if using protocol 0 or 1)
global C
class C:
__slots__ = ['a']
with self.assertRaises(TypeError):
pickle.dumps(C(), 0)
global D
class D(C):
pass
with self.assertRaises(TypeError):
pickle.dumps(D(), 0)
class C:
"A class with __getstate__ and __setstate__ implemented."
__slots__ = ['a']
def __getstate__(self):
state = getattr(self, '__dict__', {}).copy()
for cls in type(self).__mro__:
for slot in cls.__dict__.get('__slots__', ()):
try:
state[slot] = getattr(self, slot)
except AttributeError:
pass
return state
def __setstate__(self, state):
for k, v in state.items():
setattr(self, k, v)
def __repr__(self):
return "%s()<%r>" % (type(self).__name__, self.__getstate__())
class D(C):
"A subclass of a class with slots."
pass
global E
class E(C):
"A subclass with an extra slot."
__slots__ = ['b']
# Now it should work
for pickle_copier in self._generate_pickle_copiers():
with self.subTest(pickle_copier=pickle_copier):
x = C()
y = pickle_copier.copy(x)
self._assert_is_copy(x, y)
x.a = 42
y = pickle_copier.copy(x)
self._assert_is_copy(x, y)
x = D()
x.a = 42
x.b = 100
y = pickle_copier.copy(x)
self._assert_is_copy(x, y)
x = E()
x.a = 42
x.b = "foo"
y = pickle_copier.copy(x)
self._assert_is_copy(x, y)
def test_reduce_copying(self):
# Tests pickling and copying new-style classes and objects.
global C1
class C1:
"The state of this class is copyable via its instance dict."
ARGS = (1, 2)
NEED_DICT_COPYING = True
def __init__(self, a, b):
super().__init__()
self.a = a
self.b = b
def __repr__(self):
return "C1(%r, %r)" % (self.a, self.b)
global C2
class C2(list):
"A list subclass copyable via __getnewargs__."
ARGS = (1, 2)
NEED_DICT_COPYING = False
def __new__(cls, a, b):
self = super().__new__(cls)
self.a = a
self.b = b
return self
def __init__(self, *args):
super().__init__()
# This helps testing that __init__ is not called during the
# unpickling process, which would cause extra appends.
self.append("cheese")
@classmethod
def __getnewargs__(cls):
return cls.ARGS
def __repr__(self):
return "C2(%r, %r)<%r>" % (self.a, self.b, list(self))
global C3
class C3(list):
"A list subclass copyable via __getstate__."
ARGS = (1, 2)
NEED_DICT_COPYING = False
def __init__(self, a, b):
self.a = a
self.b = b
# This helps testing that __init__ is not called during the
# unpickling process, which would cause extra appends.
self.append("cheese")
@classmethod
def __getstate__(cls):
return cls.ARGS
def __setstate__(self, state):
a, b = state
self.a = a
self.b = b
def __repr__(self):
return "C3(%r, %r)<%r>" % (self.a, self.b, list(self))
global C4
class C4(int):
"An int subclass copyable via __getnewargs__."
ARGS = ("hello", "world", 1)
NEED_DICT_COPYING = False
def __new__(cls, a, b, value):
self = super().__new__(cls, value)
self.a = a
self.b = b
return self
@classmethod
def __getnewargs__(cls):
return cls.ARGS
def __repr__(self):
return "C4(%r, %r)<%r>" % (self.a, self.b, int(self))
global C5
class C5(int):
"An int subclass copyable via __getnewargs_ex__."
ARGS = (1, 2)
KWARGS = {'value': 3}
NEED_DICT_COPYING = False
def __new__(cls, a, b, *, value=0):
self = super().__new__(cls, value)
self.a = a
self.b = b
return self
@classmethod
def __getnewargs_ex__(cls):
return (cls.ARGS, cls.KWARGS)
def __repr__(self):
return "C5(%r, %r)<%r>" % (self.a, self.b, int(self))
test_classes = (C1, C2, C3, C4, C5)
# Testing copying through pickle
pickle_copiers = self._generate_pickle_copiers()
for cls, pickle_copier in itertools.product(test_classes, pickle_copiers):
with self.subTest(cls=cls, pickle_copier=pickle_copier):
kwargs = getattr(cls, 'KWARGS', {})
obj = cls(*cls.ARGS, **kwargs)
proto = pickle_copier.proto
if 2 <= proto < 4 and hasattr(cls, '__getnewargs_ex__'):
with self.assertRaises(ValueError):
pickle_copier.dumps(obj, proto)
continue
objcopy = pickle_copier.copy(obj)
self._assert_is_copy(obj, objcopy)
# For test classes that support this, make sure we didn't go
# around the reduce protocol by simply copying the attribute
# dictionary. We clear attributes using the previous copy to
# not mutate the original argument.
if proto >= 2 and not cls.NEED_DICT_COPYING:
objcopy.__dict__.clear()
objcopy2 = pickle_copier.copy(objcopy)
self._assert_is_copy(obj, objcopy2)
# Testing copying through copy.deepcopy()
for cls in test_classes:
with self.subTest(cls=cls):
kwargs = getattr(cls, 'KWARGS', {})
obj = cls(*cls.ARGS, **kwargs)
# XXX: We need to modify the copy module to support PEP 3154's
# reduce protocol 4.
if hasattr(cls, '__getnewargs_ex__'):
continue
objcopy = deepcopy(obj)
self._assert_is_copy(obj, objcopy)
# For test classes that support this, make sure we didn't go
# around the reduce protocol by simply copying the attribute
# dictionary. We clear attributes using the previous copy to
# not mutate the original argument.
if not cls.NEED_DICT_COPYING:
objcopy.__dict__.clear()
objcopy2 = deepcopy(objcopy)
self._assert_is_copy(obj, objcopy2)
class SharedKeyTests(unittest.TestCase):
@support.cpython_only
def test_subclasses(self):
# Verify that subclasses can share keys (per PEP 412)
class A:
pass
class B(A):
pass
a, b = A(), B()
self.assertEqual(sys.getsizeof(vars(a)), sys.getsizeof(vars(b)))
self.assertLess(sys.getsizeof(vars(a)), sys.getsizeof({}))
a.x, a.y, a.z, a.w = range(4)
self.assertNotEqual(sys.getsizeof(vars(a)), sys.getsizeof(vars(b)))
a2 = A()
self.assertEqual(sys.getsizeof(vars(a)), sys.getsizeof(vars(a2)))
self.assertLess(sys.getsizeof(vars(a)), sys.getsizeof({}))
b.u, b.v, b.w, b.t = range(4)
self.assertLess(sys.getsizeof(vars(b)), sys.getsizeof({}))
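# Rough illustration of the PEP 412 key-sharing behaviour checked above
# (CPython-specific; the class name is made up):
#   class A(object): pass
#   a1, a2 = A(), A()
#   a1.x = 1; a2.x = 2
#   # both instance dicts share one keys table, so they have the same size
#   sys.getsizeof(vars(a1)) == sys.getsizeof(vars(a2))   # -> True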
def test_main():
# Run all local test cases, with PTypesLongInitTest first.
support.run_unittest(PTypesLongInitTest, OperatorsTest,
ClassPropertiesAndMethods, DictProxyTests,
MiscTests, PicklingTests, SharedKeyTests)
if __name__ == "__main__":
test_main()
| lgpl-3.0 |
Jai-Chaudhary/termite-data-server | web2py/gluon/shell.py | 14 | 15855 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Developed by Massimo Di Pierro <mdipierro@cs.depaul.edu>,
limodou <limodou@gmail.com> and srackham <srackham@gmail.com>.
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
"""
import os
import sys
import code
import logging
import types
import re
import optparse
import glob
import traceback
import gluon.fileutils as fileutils
from gluon.settings import global_settings
from gluon.utils import web2py_uuid
from gluon.compileapp import build_environment, read_pyc, run_models_in
from gluon.restricted import RestrictedError
from gluon.globals import Request, Response, Session
from gluon.storage import Storage, List
from gluon.admin import w2p_unpack
from gluon.dal import BaseAdapter
logger = logging.getLogger("web2py")
def enable_autocomplete_and_history(adir, env):
try:
import rlcompleter
import atexit
import readline
except ImportError:
pass
else:
readline.parse_and_bind("bind ^I rl_complete"
if sys.platform == 'darwin'
else "tab: complete")
history_file = os.path.join(adir,'.pythonhistory')
try:
readline.read_history_file(history_file)
except IOError:
open(history_file, 'a').close()
atexit.register(readline.write_history_file, history_file)
readline.set_completer(rlcompleter.Completer(env).complete)
def exec_environment(
pyfile='',
request=None,
response=None,
session=None,
):
"""
.. function:: gluon.shell.exec_environment([pyfile=''[, request=Request()
[, response=Response()[, session=Session()]]]])
Environment builder and module loader.
Builds a web2py environment and optionally executes a Python
file into the environment.
A Storage dictionary containing the resulting environment is returned.
The working directory must be web2py root -- this is the web2py default.
"""
if request is None:
request = Request({})
if response is None:
response = Response()
if session is None:
session = Session()
if request.folder is None:
mo = re.match(r'(|.*/)applications/(?P<appname>[^/]+)', pyfile)
if mo:
appname = mo.group('appname')
request.folder = os.path.join('applications', appname)
else:
request.folder = ''
env = build_environment(request, response, session, store_current=False)
if pyfile:
pycfile = pyfile + 'c'
if os.path.isfile(pycfile):
exec read_pyc(pycfile) in env
else:
execfile(pyfile, env)
return Storage(env)
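# A minimal usage sketch, assuming the standard 'welcome' application and a
# working directory of the web2py root (the model path below is only an
# example):
#   from gluon.shell import exec_environment
#   myenv = exec_environment('applications/welcome/models/db.py')
#   rows = myenv.db(myenv.db.auth_user).select()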
def env(
a,
import_models=False,
c=None,
f=None,
dir='',
extra_request={},
):
"""
Return web2py execution environment for application (a), controller (c),
function (f).
If import_models is True, exec all application models into the
environment.
extra_request allows you to pass along any extra
variables to the request object before your models
get executed. This was mainly done to support
web2py_utils.test_runner, however you can use it
with any wrapper scripts that need access to the
web2py environment.
"""
request = Request({})
response = Response()
session = Session()
request.application = a
# Populate the dummy environment with sensible defaults.
if not dir:
request.folder = os.path.join('applications', a)
else:
request.folder = dir
request.controller = c or 'default'
request.function = f or 'index'
response.view = '%s/%s.html' % (request.controller,
request.function)
if global_settings.cmd_options:
ip = global_settings.cmd_options.ip
port = global_settings.cmd_options.port
else:
ip, port = '127.0.0.1', '8000'
request.env.http_host = '%s:%s' % (ip,port)
request.env.remote_addr = '127.0.0.1'
request.env.web2py_runtime_gae = global_settings.web2py_runtime_gae
for k, v in extra_request.items():
request[k] = v
path_info = '/%s/%s/%s' % (a, c, f)
if request.args:
path_info = '%s/%s' % (path_info, '/'.join(request.args))
if request.vars:
vars = ['%s=%s' % (k,v) if v else '%s' % k
for (k,v) in request.vars.iteritems()]
path_info = '%s?%s' % (path_info, '&'.join(vars))
request.env.path_info = path_info
# Monkey patch so credentials checks pass.
def check_credentials(request, other_application='admin'):
return True
fileutils.check_credentials = check_credentials
environment = build_environment(request, response, session)
if import_models:
try:
run_models_in(environment)
except RestrictedError, e:
sys.stderr.write(e.traceback + '\n')
sys.exit(1)
environment['__name__'] = '__main__'
return environment
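# A minimal sketch of using env() from a script started in the web2py root
# (the application name 'welcome' is only an example):
#   from gluon.shell import env
#   globals().update(env('welcome', import_models=True))
#   print db.tables  # the models of 'welcome' are now available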
def exec_pythonrc():
pythonrc = os.environ.get('PYTHONSTARTUP')
if pythonrc and os.path.isfile(pythonrc):
def execfile_getlocals(file):
execfile(file)
return locals()
try:
return execfile_getlocals(pythonrc)
except NameError:
pass
return dict()
def run(
appname,
plain=False,
import_models=False,
startfile=None,
bpython=False,
python_code=False,
cronjob=False):
"""
Start interactive shell or run Python script (startfile) in web2py
controller environment. appname is formatted like:
a web2py application name
a/c exec the controller c into the application environment
"""
(a, c, f, args, vars) = parse_path_info(appname, av=True)
errmsg = 'invalid application name: %s' % appname
if not a:
die(errmsg)
adir = os.path.join('applications', a)
if not os.path.exists(adir):
if sys.stdin and not sys.stdin.name == '/dev/null':
confirm = raw_input(
'application %s does not exist, create (y/n)?' % a)
else:
logging.warn('application does not exist and will not be created')
return
if confirm.lower() in ['y', 'yes']:
os.mkdir(adir)
w2p_unpack('welcome.w2p', adir)
for subfolder in ['models', 'views', 'controllers', 'databases',
'modules', 'cron', 'errors', 'sessions',
'languages', 'static', 'private', 'uploads']:
subpath = os.path.join(adir, subfolder)
if not os.path.exists(subpath):
os.mkdir(subpath)
db = os.path.join(adir, 'models/db.py')
if os.path.exists(db):
data = fileutils.read_file(db)
data = data.replace(
'<your secret key>', 'sha512:' + web2py_uuid())
fileutils.write_file(db, data)
if c:
import_models = True
extra_request = {}
if args:
extra_request['args'] = args
if vars:
extra_request['vars'] = vars
_env = env(a, c=c, f=f, import_models=import_models, extra_request=extra_request)
if c:
pyfile = os.path.join('applications', a, 'controllers', c + '.py')
pycfile = os.path.join('applications', a, 'compiled',
"controllers_%s_%s.pyc" % (c, f))
if ((cronjob and os.path.isfile(pycfile))
or not os.path.isfile(pyfile)):
exec read_pyc(pycfile) in _env
elif os.path.isfile(pyfile):
execfile(pyfile, _env)
else:
die(errmsg)
if f:
exec ('print %s()' % f, _env)
return
_env.update(exec_pythonrc())
if startfile:
try:
ccode = None
if startfile.endswith('.pyc'):
ccode = read_pyc(startfile)
exec ccode in _env
else:
execfile(startfile, _env)
if import_models:
BaseAdapter.close_all_instances('commit')
except Exception, e:
print traceback.format_exc()
if import_models:
BaseAdapter.close_all_instances('rollback')
elif python_code:
try:
exec(python_code, _env)
if import_models:
BaseAdapter.close_all_instances('commit')
except Exception, e:
print traceback.format_exc()
if import_models:
BaseAdapter.close_all_instances('rollback')
else:
if not plain:
if bpython:
try:
import bpython
bpython.embed(locals_=_env)
return
except:
logger.warning(
'import bpython error; trying ipython...')
else:
try:
import IPython
if IPython.__version__ > '1.0.0':
IPython.start_ipython(user_ns=_env)
return
elif IPython.__version__ == '1.0.0':
from IPython.terminal.embed import InteractiveShellEmbed
shell = InteractiveShellEmbed(user_ns=_env)
shell()
return
elif IPython.__version__ >= '0.11':
from IPython.frontend.terminal.embed import InteractiveShellEmbed
shell = InteractiveShellEmbed(user_ns=_env)
shell()
return
else:
# following 2 lines fix a problem with
# IPython; thanks Michael Toomim
if '__builtins__' in _env:
del _env['__builtins__']
shell = IPython.Shell.IPShell(argv=[], user_ns=_env)
shell.mainloop()
return
except:
logger.warning(
'import IPython error; use default python shell')
enable_autocomplete_and_history(adir, _env)
code.interact(local=_env)
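# Sketch of the appname formats accepted by run() (all names are examples):
#   run('welcome')                 # interactive shell in the 'welcome' app
#   run('welcome/default')         # exec controllers/default.py, then a shell
#   run('welcome/default/index')   # also calls index() and prints its result
#   run('welcome', startfile='myscript.py', import_models=True)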
def parse_path_info(path_info, av=False):
"""
Parse path info formatted like a/c/f where c and f are optional
and a leading / is accepted.
Return tuple (a, c, f). If invalid path_info a is set to None.
If c or f are omitted they are set to None.
If av=True, parse args and vars
"""
if av:
vars = None
if '?' in path_info:
path_info, query = path_info.split('?', 2)
vars = Storage()
for var in query.split('&'):
(var, val) = var.split('=', 2) if '=' in var else (var, None)
vars[var] = val
items = List(path_info.split('/'))
args = List(items[3:]) if len(items) > 3 else None
return (items(0), items(1), items(2), args, vars)
mo = re.match(r'^/?(?P<a>\w+)(/(?P<c>\w+)(/(?P<f>\w+))?)?$',
path_info)
if mo:
return (mo.group('a'), mo.group('c'), mo.group('f'))
else:
return (None, None, None)
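# Illustrative results for parse_path_info() (a sketch, not doctests):
#
#     parse_path_info('admin/default/index')   # -> ('admin', 'default', 'index')
#     parse_path_info('/admin')                 # -> ('admin', None, None)
#     parse_path_info('a/c/f/x?y=1', av=True)   # -> roughly
#                                               #    ('a', 'c', 'f', ['x'], {'y': '1'})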
def die(msg):
print >> sys.stderr, msg
sys.exit(1)
def test(testpath, import_models=True, verbose=False):
"""
Run doctests in web2py environment. testpath is formatted like:
a tests all controllers in application a
a/c tests controller c in application a
a/c/f test function f in controller c, application a
Where a, c and f are application, controller and function names
respectively. If the testpath is a file name, the file is tested.
If a controller is specified, models are executed by default.
"""
import doctest
if os.path.isfile(testpath):
mo = re.match(r'(|.*/)applications/(?P<a>[^/]+)', testpath)
if not mo:
die('test file is not in application directory: %s'
% testpath)
a = mo.group('a')
c = f = None
files = [testpath]
else:
(a, c, f) = parse_path_info(testpath)
errmsg = 'invalid test path: %s' % testpath
if not a:
die(errmsg)
cdir = os.path.join('applications', a, 'controllers')
if not os.path.isdir(cdir):
die(errmsg)
if c:
cfile = os.path.join(cdir, c + '.py')
if not os.path.isfile(cfile):
die(errmsg)
files = [cfile]
else:
files = glob.glob(os.path.join(cdir, '*.py'))
for testfile in files:
globs = env(a, import_models)
ignores = globs.keys()
execfile(testfile, globs)
def doctest_object(name, obj):
"""doctest obj and enclosed methods and classes."""
if type(obj) in (types.FunctionType, types.TypeType,
types.ClassType, types.MethodType,
types.UnboundMethodType):
# Reload environment before each test.
globs = env(a, c=c, f=f, import_models=import_models)
execfile(testfile, globs)
doctest.run_docstring_examples(
obj, globs=globs,
name='%s: %s' % (os.path.basename(testfile),
name), verbose=verbose)
if type(obj) in (types.TypeType, types.ClassType):
for attr_name in dir(obj):
# Execute . operator so decorators are executed.
o = eval('%s.%s' % (name, attr_name), globs)
doctest_object(attr_name, o)
for (name, obj) in globs.items():
if name not in ignores and (f is None or f == name):
doctest_object(name, obj)
def get_usage():
usage = """
%prog [options] pythonfile
"""
return usage
def execute_from_command_line(argv=None):
if argv is None:
argv = sys.argv
parser = optparse.OptionParser(usage=get_usage())
parser.add_option('-S', '--shell', dest='shell', metavar='APPNAME',
help='run web2py in interactive shell ' +
'or IPython (if installed) with specified appname')
msg = 'run web2py in interactive shell or bpython (if installed) with'
msg += ' specified appname (if app does not exist it will be created).'
msg += '\n Use combined with --shell'
parser.add_option(
'-B',
'--bpython',
action='store_true',
default=False,
dest='bpython',
help=msg,
)
parser.add_option(
'-P',
'--plain',
action='store_true',
default=False,
dest='plain',
help='only use plain python shell, should be used with --shell option',
)
parser.add_option(
'-M',
'--import_models',
action='store_true',
default=False,
dest='import_models',
help='auto import model files, default is False, ' +
' should be used with --shell option',
)
parser.add_option(
'-R',
'--run',
dest='run',
metavar='PYTHON_FILE',
default='',
help='run PYTHON_FILE in web2py environment, ' +
'should be used with --shell option',
)
(options, args) = parser.parse_args(argv[1:])
if len(argv) == 1:
parser.print_help()
sys.exit(0)
if len(args) > 0:
startfile = args[0]
else:
startfile = ''
run(options.shell, options.plain, startfile=startfile,
bpython=options.bpython)
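# Example command lines (a sketch; the concrete entry point depends on how
# this module is exposed by web2py):
#
#     -S welcome -M              interactive shell for app "welcome", models imported
#     -S welcome -M -R job.py    run job.py inside the "welcome" environment
#     -S welcome -B              prefer a bpython shell if it is installed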
if __name__ == '__main__':
execute_from_command_line()
| bsd-3-clause |
xujun10110/pyspider | tests/test_message_queue.py | 55 | 7864 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<i@binux.me>
# http://binux.me
# Created on 2014-10-07 10:33:38
import os
import six
import time
import unittest2 as unittest
from pyspider.libs import utils
class TestMessageQueue(object):
@classmethod
def setUpClass(self):
raise NotImplementedError
def test_10_put(self):
self.assertEqual(self.q1.qsize(), 0)
self.assertEqual(self.q2.qsize(), 0)
self.q1.put('TEST_DATA1', timeout=3)
self.q1.put('TEST_DATA2_中文', timeout=3)
time.sleep(0.01)
self.assertEqual(self.q1.qsize(), 2)
self.assertEqual(self.q2.qsize(), 2)
def test_20_get(self):
self.assertEqual(self.q1.get(timeout=0.01), 'TEST_DATA1')
self.assertEqual(self.q2.get_nowait(), 'TEST_DATA2_中文')
with self.assertRaises(self.q1.Empty):
self.q2.get(timeout=0.01)
with self.assertRaises(self.q1.Empty):
self.q2.get_nowait()
def test_30_full(self):
self.assertEqual(self.q1.qsize(), 0)
self.assertEqual(self.q2.qsize(), 0)
for i in range(2):
self.q1.put_nowait('TEST_DATA%d' % i)
for i in range(3):
self.q2.put('TEST_DATA%d' % i)
with self.assertRaises(self.q1.Full):
self.q1.put('TEST_DATA6', timeout=0.01)
with self.assertRaises(self.q1.Full):
self.q1.put_nowait('TEST_DATA6')
def test_40_multiple_threading_error(self):
def put(q):
for i in range(100):
q.put("DATA_%d" % i)
def get(q):
for i in range(100):
q.get()
utils.run_in_thread(put, self.q3)
get(self.q3)
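# A minimal usage sketch of the queue API exercised by the tests above
# (the connect_message_queue helper and URL scheme are taken from the
# subclasses below; the maxsize value is an arbitrary example):
#
#     from pyspider.message_queue import connect_message_queue
#     q = connect_message_queue('demo_queue', 'redis://localhost:6379/', maxsize=5)
#     q.put('TASK', timeout=3)
#     assert q.get(timeout=1) == 'TASK'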
@unittest.skipIf(six.PY3, 'pika does not support python 3')
@unittest.skipIf(os.environ.get('IGNORE_RABBITMQ'), 'no rabbitmq server for test.')
class TestPikaRabbitMQ(TestMessageQueue, unittest.TestCase):
@classmethod
def setUpClass(self):
from pyspider.message_queue import rabbitmq
with utils.timeout(3):
self.q1 = rabbitmq.PikaQueue('test_queue', maxsize=5)
self.q2 = rabbitmq.PikaQueue('test_queue', amqp_url='amqp://localhost:5672/%2F', maxsize=5)
self.q3 = rabbitmq.PikaQueue('test_queue_for_threading_test', amqp_url='amqp://guest:guest@localhost:5672/')
self.q2.delete()
self.q2.reconnect()
self.q3.delete()
self.q3.reconnect()
@classmethod
def tearDownClass(self):
self.q2.delete()
self.q3.delete()
del self.q1
del self.q2
del self.q3
@unittest.skipIf(os.environ.get('IGNORE_RABBITMQ'), 'no rabbitmq server for test.')
class TestAmqpRabbitMQ(TestMessageQueue, unittest.TestCase):
@classmethod
def setUpClass(self):
from pyspider.message_queue import connect_message_queue
with utils.timeout(3):
self.q1 = connect_message_queue('test_queue', 'amqp://localhost:5672/',
maxsize=5)
self.q2 = connect_message_queue('test_queue', 'amqp://localhost:5672/%2F',
maxsize=5)
self.q3 = connect_message_queue('test_queue_for_threading_test',
'amqp://guest:guest@localhost:5672/')
self.q2.delete()
self.q2.reconnect()
self.q3.delete()
self.q3.reconnect()
@classmethod
def tearDownClass(self):
self.q2.delete()
self.q3.delete()
del self.q1
del self.q2
del self.q3
#@unittest.skipIf(True, "beanstalk queue can't pass the test currently")
@unittest.skipIf(six.PY3, 'beanstalkc does not support python 3')
@unittest.skipIf(os.environ.get('IGNORE_BEANSTALK'), 'no beanstalk server for test.')
class TestBeansTalkQueue(TestMessageQueue, unittest.TestCase):
@classmethod
def setUpClass(self):
from pyspider.message_queue import connect_message_queue
with utils.timeout(3):
self.q1 = connect_message_queue('test_queue', 'beanstalk://localhost:11300',
maxsize=5)
self.q2 = connect_message_queue('test_queue', 'beanstalk://localhost:11300',
maxsize=5)
self.q3 = connect_message_queue('test_queue_for_threading_test',
'beanstalk://localhost:11300')
while not self.q1.empty():
self.q1.get()
while not self.q2.empty():
self.q2.get()
while not self.q3.empty():
self.q3.get()
@classmethod
def tearDownClass(self):
while not self.q1.empty():
self.q1.get()
while not self.q2.empty():
self.q2.get()
while not self.q3.empty():
self.q3.get()
@unittest.skipIf(os.environ.get('IGNORE_REDIS'), 'no redis server for test.')
class TestRedisQueue(TestMessageQueue, unittest.TestCase):
@classmethod
def setUpClass(self):
from pyspider.message_queue import connect_message_queue
from pyspider.message_queue import redis_queue
with utils.timeout(3):
self.q1 = redis_queue.RedisQueue('test_queue', maxsize=5, lazy_limit=False)
self.q2 = redis_queue.RedisQueue('test_queue', maxsize=5, lazy_limit=False)
self.q3 = connect_message_queue('test_queue_for_threading_test',
'redis://localhost:6379/')
while not self.q1.empty():
self.q1.get()
while not self.q2.empty():
self.q2.get()
while not self.q3.empty():
self.q3.get()
@classmethod
def tearDownClass(self):
while not self.q1.empty():
self.q1.get()
while not self.q2.empty():
self.q2.get()
while not self.q3.empty():
self.q3.get()
class TestKombuQueue(TestMessageQueue, unittest.TestCase):
kombu_url = 'kombu+memory://'
@classmethod
def setUpClass(self):
from pyspider.message_queue import connect_message_queue
with utils.timeout(3):
self.q1 = connect_message_queue('test_queue', self.kombu_url, maxsize=5)
self.q2 = connect_message_queue('test_queue', self.kombu_url, maxsize=5)
self.q3 = connect_message_queue('test_queue_for_threading_test', self.kombu_url)
while not self.q1.empty():
self.q1.get()
while not self.q2.empty():
self.q2.get()
while not self.q3.empty():
self.q3.get()
@classmethod
def tearDownClass(self):
while not self.q1.empty():
self.q1.get()
self.q1.delete()
while not self.q2.empty():
self.q2.get()
self.q2.delete()
while not self.q3.empty():
self.q3.get()
self.q3.delete()
@unittest.skip('test cannot pass, get is buffered')
@unittest.skipIf(os.environ.get('IGNORE_RABBITMQ'), 'no rabbitmq server for test.')
class TestKombuAmpqQueue(TestKombuQueue):
kombu_url = 'kombu+amqp://'
@unittest.skip('test cannot pass, put is buffered')
@unittest.skipIf(os.environ.get('IGNORE_REDIS'), 'no redis server for test.')
class TestKombuRedisQueue(TestKombuQueue):
kombu_url = 'kombu+redis://'
@unittest.skip('test cannot pass, get is buffered')
@unittest.skipIf(os.environ.get('IGNORE_BEANSTALK'), 'no beanstalk server for test.')
class TestKombuBeanstalkQueue(TestKombuQueue):
kombu_url = 'kombu+beanstalk://'
@unittest.skipIf(os.environ.get('IGNORE_MONGODB'), 'no mongodb server for test.')
class TestKombuMongoDBQueue(TestKombuQueue):
kombu_url = 'kombu+mongodb://'
| apache-2.0 |
crr0004/taiga-back | taiga/base/api/parsers.py | 18 | 7957 | # Copyright (C) 2015 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2015 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2015 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This code is partially taken from django-rest-framework:
# Copyright (c) 2011-2014, Tom Christie
"""
Parsers are used to parse the content of incoming HTTP requests.
They give us a generic way of being able to handle various media types
on the request, such as form content or json encoded data.
"""
from django.conf import settings
from django.core.files.uploadhandler import StopFutureHandlers
from django.http import QueryDict
from django.http.multipartparser import MultiPartParser as DjangoMultiPartParser
from django.http.multipartparser import MultiPartParserError, parse_header, ChunkIter
from django.utils import six
from taiga.base.exceptions import ParseError
from taiga.base.api import renderers
import json
import datetime
import decimal
class DataAndFiles(object):
def __init__(self, data, files):
self.data = data
self.files = files
class BaseParser(object):
"""
All parsers should extend `BaseParser`, specifying a `media_type`
attribute, and overriding the `.parse()` method.
"""
media_type = None
def parse(self, stream, media_type=None, parser_context=None):
"""
Given a stream to read from, return the parsed representation.
Should return parsed data, or a `DataAndFiles` object consisting of the
parsed data and files.
"""
raise NotImplementedError(".parse() must be overridden.")
class JSONParser(BaseParser):
"""
Parses JSON-serialized data.
"""
media_type = "application/json"
renderer_class = renderers.UnicodeJSONRenderer
def parse(self, stream, media_type=None, parser_context=None):
"""
Parses the incoming bytestream as JSON and returns the resulting data.
"""
parser_context = parser_context or {}
encoding = parser_context.get("encoding", settings.DEFAULT_CHARSET)
try:
data = stream.read().decode(encoding)
return json.loads(data)
except ValueError as exc:
raise ParseError("JSON parse error - %s" % six.text_type(exc))
class FormParser(BaseParser):
"""
Parser for form data.
"""
media_type = "application/x-www-form-urlencoded"
def parse(self, stream, media_type=None, parser_context=None):
"""
Parses the incoming bytestream as a URL encoded form,
and returns the resulting QueryDict.
"""
parser_context = parser_context or {}
encoding = parser_context.get("encoding", settings.DEFAULT_CHARSET)
data = QueryDict(stream.read(), encoding=encoding)
return data
class MultiPartParser(BaseParser):
"""
Parser for multipart form data, which may include file data.
"""
media_type = "multipart/form-data"
def parse(self, stream, media_type=None, parser_context=None):
"""
Parses the incoming bytestream as a multipart encoded form,
and returns a DataAndFiles object.
`.data` will be a `QueryDict` containing all the form parameters.
`.files` will be a `QueryDict` containing all the form files.
"""
parser_context = parser_context or {}
request = parser_context["request"]
encoding = parser_context.get("encoding", settings.DEFAULT_CHARSET)
meta = request.META.copy()
meta["CONTENT_TYPE"] = media_type
upload_handlers = request.upload_handlers
try:
parser = DjangoMultiPartParser(meta, stream, upload_handlers, encoding)
data, files = parser.parse()
return DataAndFiles(data, files)
except MultiPartParserError as exc:
raise ParseError("Multipart form parse error - %s" % str(exc))
class FileUploadParser(BaseParser):
"""
Parser for file upload data.
"""
media_type = "*/*"
def parse(self, stream, media_type=None, parser_context=None):
"""
Treats the incoming bytestream as a raw file upload and returns
a `DataAndFiles` object.
`.data` will be None (we expect request body to be a file content).
`.files` will be a `QueryDict` containing one "file" element.
"""
parser_context = parser_context or {}
request = parser_context["request"]
encoding = parser_context.get("encoding", settings.DEFAULT_CHARSET)
meta = request.META
upload_handlers = request.upload_handlers
filename = self.get_filename(stream, media_type, parser_context)
# Note that this code is extracted from Django's handling of
# file uploads in MultiPartParser.
content_type = meta.get("HTTP_CONTENT_TYPE",
meta.get("CONTENT_TYPE", ""))
try:
content_length = int(meta.get("HTTP_CONTENT_LENGTH",
meta.get("CONTENT_LENGTH", 0)))
except (ValueError, TypeError):
content_length = None
# See if the handler will want to take care of the parsing.
for handler in upload_handlers:
result = handler.handle_raw_input(None,
meta,
content_length,
None,
encoding)
if result is not None:
return DataAndFiles(None, {"file": result[1]})
# This is the standard case.
possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
chunk_size = min([2 ** 31 - 4] + possible_sizes)
chunks = ChunkIter(stream, chunk_size)
counters = [0] * len(upload_handlers)
for handler in upload_handlers:
try:
handler.new_file(None, filename, content_type,
content_length, encoding)
except StopFutureHandlers:
break
for chunk in chunks:
for i, handler in enumerate(upload_handlers):
chunk_length = len(chunk)
chunk = handler.receive_data_chunk(chunk, counters[i])
counters[i] += chunk_length
if chunk is None:
break
for i, handler in enumerate(upload_handlers):
file_obj = handler.file_complete(counters[i])
if file_obj:
return DataAndFiles(None, {"file": file_obj})
raise ParseError("FileUpload parse error - "
"none of upload handlers can handle the stream")
def get_filename(self, stream, media_type, parser_context):
"""
Detects the uploaded file name. First searches a "filename" url kwarg.
Then tries to parse Content-Disposition header.
"""
try:
return parser_context["kwargs"]["filename"]
except KeyError:
pass
try:
meta = parser_context["request"].META
disposition = parse_header(meta["HTTP_CONTENT_DISPOSITION"])
return disposition[1]["filename"]
except (AttributeError, KeyError):
pass
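# For reference, the header parsed above typically looks like (RFC 6266):
#     Content-Disposition: attachment; filename="report.pdf"
# parse_header() returns the main value plus a dict of parameters, and the
# "filename" parameter is what get_filename() ultimately returns.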
| agpl-3.0 |
dgwakeman/mne-python | mne/viz/circle.py | 13 | 15446 | """Functions to plot on circle as for connectivity
"""
from __future__ import print_function
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
#
# License: Simplified BSD
from itertools import cycle
from functools import partial
import numpy as np
from ..externals.six import string_types
from ..fixes import tril_indices, normalize_colors
def circular_layout(node_names, node_order, start_pos=90, start_between=True,
group_boundaries=None, group_sep=10):
"""Create layout arranging nodes on a circle.
Parameters
----------
node_names : list of str
Node names.
node_order : list of str
List with node names defining the order in which the nodes are
arranged. Must have the elements as node_names but the order can be
different. The nodes are arranged clockwise starting at "start_pos"
degrees.
start_pos : float
Angle in degrees that defines where the first node is plotted.
start_between : bool
If True, the layout starts with the position between the nodes. This is
the same as adding "180. / len(node_names)" to start_pos.
group_boundaries : None | array-like
List of boundaries between groups at which point a "group_sep" will
be inserted. E.g. "[0, len(node_names) / 2]" will create two groups.
group_sep : float
Group separation angle in degrees. See "group_boundaries".
Returns
-------
node_angles : array, shape=(len(node_names,))
Node angles in degrees.
"""
n_nodes = len(node_names)
if len(node_order) != n_nodes:
raise ValueError('node_order has to be the same length as node_names')
if group_boundaries is not None:
boundaries = np.array(group_boundaries, dtype=np.int)
if np.any(boundaries >= n_nodes) or np.any(boundaries < 0):
raise ValueError('"group_boundaries" has to be between 0 and '
'n_nodes - 1.')
if len(boundaries) > 1 and np.any(np.diff(boundaries) <= 0):
raise ValueError('"group_boundaries" must have non-decreasing '
'values.')
n_group_sep = len(group_boundaries)
else:
n_group_sep = 0
boundaries = None
# convert it to a list with indices
node_order = [node_order.index(name) for name in node_names]
node_order = np.array(node_order)
if len(np.unique(node_order)) != n_nodes:
raise ValueError('node_order has repeated entries')
node_sep = (360. - n_group_sep * group_sep) / n_nodes
if start_between:
start_pos += node_sep / 2
if boundaries is not None and boundaries[0] == 0:
# special case when a group separator is at the start
start_pos += group_sep / 2
boundaries = boundaries[1:] if n_group_sep > 1 else None
node_angles = np.ones(n_nodes, dtype=np.float) * node_sep
node_angles[0] = start_pos
if boundaries is not None:
node_angles[boundaries] += group_sep
node_angles = np.cumsum(node_angles)[node_order]
return node_angles
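# A minimal usage sketch for circular_layout() (label names are invented):
#
#     names = ['lh-A', 'lh-B', 'rh-B', 'rh-A']
#     order = ['lh-A', 'lh-B', 'rh-B', 'rh-A']
#     angles = circular_layout(names, order, start_pos=90,
#                              group_boundaries=[0, 2], group_sep=10.)
#     # angles[i] is the angular position (in degrees) of names[i], with a
#     # group_sep gap inserted at the two boundaries.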
def _plot_connectivity_circle_onpick(event, fig=None, axes=None, indices=None,
n_nodes=0, node_angles=None,
ylim=[9, 10]):
"""Isolates connections around a single node when user left clicks a node.
On right click, resets all connections."""
if event.inaxes != axes:
return
if event.button == 1: # left click
# click must be near node radius
if not ylim[0] <= event.ydata <= ylim[1]:
return
# all angles in range [0, 2*pi]
node_angles = node_angles % (np.pi * 2)
node = np.argmin(np.abs(event.xdata - node_angles))
patches = event.inaxes.patches
for ii, (x, y) in enumerate(zip(indices[0], indices[1])):
patches[ii].set_visible(node in [x, y])
fig.canvas.draw()
elif event.button == 3: # right click
patches = event.inaxes.patches
for ii in range(np.size(indices, axis=1)):
patches[ii].set_visible(True)
fig.canvas.draw()
def plot_connectivity_circle(con, node_names, indices=None, n_lines=None,
node_angles=None, node_width=None,
node_colors=None, facecolor='black',
textcolor='white', node_edgecolor='black',
linewidth=1.5, colormap='hot', vmin=None,
vmax=None, colorbar=True, title=None,
colorbar_size=0.2, colorbar_pos=(-0.3, 0.1),
fontsize_title=12, fontsize_names=8,
fontsize_colorbar=8, padding=6.,
fig=None, subplot=111, interactive=True,
node_linewidth=2., show=True):
"""Visualize connectivity as a circular graph.
Note: This code is based on the circle graph example by Nicolas P. Rougier
http://www.labri.fr/perso/nrougier/coding/.
Parameters
----------
con : array
Connectivity scores. Can be a square matrix, or a 1D array. If a 1D
array is provided, "indices" has to be used to define the connection
indices.
node_names : list of str
Node names. The order corresponds to the order in con.
indices : tuple of arrays | None
Two arrays with indices of connections for which the connections
strengths are defined in con. Only needed if con is a 1D array.
n_lines : int | None
If not None, only the n_lines strongest connections (strength=abs(con))
are drawn.
node_angles : array, shape=(len(node_names,)) | None
Array with node positions in degrees. If None, the nodes are equally
spaced on the circle. See mne.viz.circular_layout.
node_width : float | None
Width of each node in degrees. If None, the minimum angle between any
two nodes is used as the width.
node_colors : list of tuples | list of str
List with the color to use for each node. If fewer colors than nodes
are provided, the colors will be repeated. Any color supported by
matplotlib can be used, e.g., RGBA tuples, named colors.
facecolor : str
Color to use for background. See matplotlib.colors.
textcolor : str
Color to use for text. See matplotlib.colors.
node_edgecolor : str
Color to use for lines around nodes. See matplotlib.colors.
linewidth : float
Line width to use for connections.
colormap : str
Colormap to use for coloring the connections.
vmin : float | None
Minimum value for colormap. If None, it is determined automatically.
vmax : float | None
Maximum value for colormap. If None, it is determined automatically.
colorbar : bool
Display a colorbar or not.
title : str
The figure title.
colorbar_size : float
Size of the colorbar.
colorbar_pos : 2-tuple
Position of the colorbar.
fontsize_title : int
Font size to use for title.
fontsize_names : int
Font size to use for node names.
fontsize_colorbar : int
Font size to use for colorbar.
padding : float
Space to add around figure to accommodate long labels.
fig : None | instance of matplotlib.pyplot.Figure
The figure to use. If None, a new figure with the specified background
color will be created.
subplot : int | 3-tuple
Location of the subplot when creating figures with multiple plots. E.g.
121 or (1, 2, 1) for 1 row, 2 columns, plot 1. See
matplotlib.pyplot.subplot.
interactive : bool
When enabled, left-click on a node to show only connections to that
node. Right-click shows all connections.
node_linewidth : float
Line width for nodes.
show : bool
Show figure if True.
Returns
-------
fig : instance of matplotlib.pyplot.Figure
The figure handle.
axes : instance of matplotlib.axes.PolarAxesSubplot
The subplot handle.
"""
import matplotlib.pyplot as plt
import matplotlib.path as m_path
import matplotlib.patches as m_patches
n_nodes = len(node_names)
if node_angles is not None:
if len(node_angles) != n_nodes:
raise ValueError('node_angles has to be the same length '
'as node_names')
# convert it to radians
node_angles = node_angles * np.pi / 180
else:
# uniform layout on unit circle
node_angles = np.linspace(0, 2 * np.pi, n_nodes, endpoint=False)
if node_width is None:
# widths correspond to the minimum angle between two nodes
dist_mat = node_angles[None, :] - node_angles[:, None]
dist_mat[np.diag_indices(n_nodes)] = 1e9
node_width = np.min(np.abs(dist_mat))
else:
node_width = node_width * np.pi / 180
if node_colors is not None:
if len(node_colors) < n_nodes:
node_colors = cycle(node_colors)
else:
# assign colors using colormap
node_colors = [plt.cm.spectral(i / float(n_nodes))
for i in range(n_nodes)]
# handle 1D and 2D connectivity information
if con.ndim == 1:
if indices is None:
raise ValueError('indices has to be provided if con.ndim == 1')
elif con.ndim == 2:
if con.shape[0] != n_nodes or con.shape[1] != n_nodes:
raise ValueError('con has to be 1D or a square matrix')
# we use the lower-triangular part
indices = tril_indices(n_nodes, -1)
con = con[indices]
else:
raise ValueError('con has to be 1D or a square matrix')
# get the colormap
if isinstance(colormap, string_types):
colormap = plt.get_cmap(colormap)
# Make figure background the same colors as axes
if fig is None:
fig = plt.figure(figsize=(8, 8), facecolor=facecolor)
# Use a polar axes
if not isinstance(subplot, tuple):
subplot = (subplot,)
axes = plt.subplot(*subplot, polar=True, axisbg=facecolor)
# No ticks, we'll put our own
plt.xticks([])
plt.yticks([])
# Set y axes limit, add additional space if requested
plt.ylim(0, 10 + padding)
# Remove the black axes border which may obscure the labels
axes.spines['polar'].set_visible(False)
# Draw lines between connected nodes, only draw the strongest connections
if n_lines is not None and len(con) > n_lines:
con_thresh = np.sort(np.abs(con).ravel())[-n_lines]
else:
con_thresh = 0.
# get the connections which we are drawing and sort by connection strength
# this will allow us to draw the strongest connections first
con_abs = np.abs(con)
con_draw_idx = np.where(con_abs >= con_thresh)[0]
con = con[con_draw_idx]
con_abs = con_abs[con_draw_idx]
indices = [ind[con_draw_idx] for ind in indices]
# now sort them
sort_idx = np.argsort(con_abs)
con_abs = con_abs[sort_idx]
con = con[sort_idx]
indices = [ind[sort_idx] for ind in indices]
# Get vmin vmax for color scaling
if vmin is None:
vmin = np.min(con[np.abs(con) >= con_thresh])
if vmax is None:
vmax = np.max(con)
vrange = vmax - vmin
# We want to add some "noise" to the start and end position of the
# edges: We modulate the noise with the number of connections of the
# node and the connection strength, such that the strongest connections
# are closer to the node center
nodes_n_con = np.zeros((n_nodes), dtype=np.int)
for i, j in zip(indices[0], indices[1]):
nodes_n_con[i] += 1
nodes_n_con[j] += 1
# initialize random number generator so plot is reproducible
rng = np.random.mtrand.RandomState(seed=0)
n_con = len(indices[0])
noise_max = 0.25 * node_width
start_noise = rng.uniform(-noise_max, noise_max, n_con)
end_noise = rng.uniform(-noise_max, noise_max, n_con)
nodes_n_con_seen = np.zeros_like(nodes_n_con)
for i, (start, end) in enumerate(zip(indices[0], indices[1])):
nodes_n_con_seen[start] += 1
nodes_n_con_seen[end] += 1
start_noise[i] *= ((nodes_n_con[start] - nodes_n_con_seen[start]) /
float(nodes_n_con[start]))
end_noise[i] *= ((nodes_n_con[end] - nodes_n_con_seen[end]) /
float(nodes_n_con[end]))
# scale connectivity for colormap (vmin<=>0, vmax<=>1)
con_val_scaled = (con - vmin) / vrange
# Finally, we draw the connections
for pos, (i, j) in enumerate(zip(indices[0], indices[1])):
# Start point
t0, r0 = node_angles[i], 10
# End point
t1, r1 = node_angles[j], 10
# Some noise in start and end point
t0 += start_noise[pos]
t1 += end_noise[pos]
verts = [(t0, r0), (t0, 5), (t1, 5), (t1, r1)]
codes = [m_path.Path.MOVETO, m_path.Path.CURVE4, m_path.Path.CURVE4,
m_path.Path.LINETO]
path = m_path.Path(verts, codes)
color = colormap(con_val_scaled[pos])
# Actual line
patch = m_patches.PathPatch(path, fill=False, edgecolor=color,
linewidth=linewidth, alpha=1.)
axes.add_patch(patch)
# Draw ring with colored nodes
height = np.ones(n_nodes) * 1.0
bars = axes.bar(node_angles, height, width=node_width, bottom=9,
edgecolor=node_edgecolor, lw=node_linewidth,
facecolor='.9', align='center')
for bar, color in zip(bars, node_colors):
bar.set_facecolor(color)
# Draw node labels
angles_deg = 180 * node_angles / np.pi
for name, angle_rad, angle_deg in zip(node_names, node_angles, angles_deg):
if angle_deg >= 270:
ha = 'left'
else:
# Flip the label, so text is always upright
angle_deg += 180
ha = 'right'
axes.text(angle_rad, 10.4, name, size=fontsize_names,
rotation=angle_deg, rotation_mode='anchor',
horizontalalignment=ha, verticalalignment='center',
color=textcolor)
if title is not None:
plt.title(title, color=textcolor, fontsize=fontsize_title,
axes=axes)
if colorbar:
norm = normalize_colors(vmin=vmin, vmax=vmax)
sm = plt.cm.ScalarMappable(cmap=colormap, norm=norm)
sm.set_array(np.linspace(vmin, vmax))
cb = plt.colorbar(sm, ax=axes, use_gridspec=False,
shrink=colorbar_size,
anchor=colorbar_pos)
cb_yticks = plt.getp(cb.ax.axes, 'yticklabels')
cb.ax.tick_params(labelsize=fontsize_colorbar)
plt.setp(cb_yticks, color=textcolor)
# Add callback for interaction
if interactive:
callback = partial(_plot_connectivity_circle_onpick, fig=fig,
axes=axes, indices=indices, n_nodes=n_nodes,
node_angles=node_angles)
fig.canvas.mpl_connect('button_press_event', callback)
if show:
plt.show()
return fig, axes
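# A minimal end-to-end sketch (random data, made-up labels):
#
#     import numpy as np
#     names = ['A', 'B', 'C', 'D']
#     con = np.random.RandomState(0).rand(4, 4)   # square connectivity matrix
#     angles = circular_layout(names, names)
#     fig, axes = plot_connectivity_circle(con, names, node_angles=angles,
#                                          n_lines=3, title='demo', show=False)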
| bsd-3-clause |
eeshangarg/oh-mainline | vendor/packages/python-social-auth/social/backends/openstreetmap.py | 83 | 1920 | """
OpenStreetMap OAuth support.
This adds support for OpenStreetMap OAuth service. An application must be
registered first on OpenStreetMap and the settings
SOCIAL_AUTH_OPENSTREETMAP_KEY and SOCIAL_AUTH_OPENSTREETMAP_SECRET
must be defined with the corresponding values.
More info: http://wiki.openstreetmap.org/wiki/OAuth
"""
from xml.dom import minidom
from social.backends.oauth import BaseOAuth1
class OpenStreetMapOAuth(BaseOAuth1):
"""OpenStreetMap OAuth authentication backend"""
name = 'openstreetmap'
AUTHORIZATION_URL = 'http://www.openstreetmap.org/oauth/authorize'
REQUEST_TOKEN_URL = 'http://www.openstreetmap.org/oauth/request_token'
ACCESS_TOKEN_URL = 'http://www.openstreetmap.org/oauth/access_token'
EXTRA_DATA = [
('id', 'id'),
('avatar', 'avatar'),
('account_created', 'account_created')
]
def get_user_details(self, response):
"""Return user details from OpenStreetMap account"""
return {
'username': response['username'],
'email': '',
'fullname': '',
'first_name': '',
'last_name': ''
}
def user_data(self, access_token, *args, **kwargs):
"""Return user data provided"""
response = self.oauth_request(
access_token, 'http://api.openstreetmap.org/api/0.6/user/details'
)
try:
dom = minidom.parseString(response.content)
except ValueError:
return None
user = dom.getElementsByTagName('user')[0]
try:
avatar = dom.getElementsByTagName('img')[0].getAttribute('href')
except IndexError:
avatar = None
return {
'id': user.getAttribute('id'),
'username': user.getAttribute('display_name'),
'account_created': user.getAttribute('account_created'),
'avatar': avatar
}
| agpl-3.0 |
csrocha/OpenUpgrade | addons/l10n_nl/__init__.py | 424 | 1413 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2009 Veritos - Jan Verlaan - www.veritos.nl
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company like Veritos.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
amit0701/rally | rally/plugins/openstack/scenarios/nova/servers.py | 1 | 37235 | # Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import jsonschema
from rally.common import logging
from rally import consts
from rally import exceptions as rally_exceptions
from rally.plugins.openstack import scenario
from rally.plugins.openstack.scenarios.cinder import utils as cinder_utils
from rally.plugins.openstack.scenarios.nova import utils
from rally.plugins.openstack.wrappers import network as network_wrapper
from rally.task import types
from rally.task import utils as task_utils
from rally.task import validation
LOG = logging.getLogger(__name__)
class NovaServers(utils.NovaScenario,
cinder_utils.CinderScenario):
"""Benchmark scenarios for Nova servers."""
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["nova"]})
def boot_and_list_server(self, image, flavor,
detailed=True, **kwargs):
"""Boot a server from an image and then list all servers.
Measure the "nova list" command performance.
If you have only 1 user in your context, you will
add 1 server on every iteration. So you will have more
and more servers and will be able to measure the
performance of the "nova list" command depending on
the number of servers owned by users.
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param detailed: True if the server listing should contain
detailed information about all of them
:param kwargs: Optional additional arguments for server creation
"""
self._boot_server(image, flavor, **kwargs)
self._list_servers(detailed)
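# An illustrative rally task entry for the scenario above (flavor and image
# names are placeholders; the runner/context values are arbitrary):
#
#     NovaServers.boot_and_list_server:
#     - args: {flavor: {name: "m1.tiny"}, image: {name: "cirros"}, detailed: true}
#       runner: {type: "constant", times: 10, concurrency: 2}
#       context: {users: {tenants: 1, users_per_tenant: 1}}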
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["nova"]})
def list_servers(self, detailed=True):
"""List all servers.
This simple scenario tests the nova list command by listing
all the servers.
:param detailed: True if detailed information about servers
should be listed
"""
self._list_servers(detailed)
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["nova"]})
def boot_and_delete_server(self, image, flavor,
min_sleep=0, max_sleep=0,
force_delete=False, **kwargs):
"""Boot and delete a server.
Optional 'min_sleep' and 'max_sleep' parameters allow the scenario
to simulate a pause between server creation and deletion
(of random duration from [min_sleep, max_sleep]).
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param min_sleep: Minimum sleep time in seconds (non-negative)
:param max_sleep: Maximum sleep time in seconds (non-negative)
:param force_delete: True if force_delete should be used
:param kwargs: Optional additional arguments for server creation
"""
server = self._boot_server(image, flavor, **kwargs)
self.sleep_between(min_sleep, max_sleep)
self._delete_server(server, force=force_delete)
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(admin=True, users=True)
@scenario.configure(context={"cleanup": ["nova"]})
def boot_and_delete_multiple_servers(self, image, flavor, count=2,
min_sleep=0, max_sleep=0,
force_delete=False, **kwargs):
"""Boot multiple servers in a single request and delete them.
Deletion is done in parallel with one request per server, not
with a single request for all servers.
:param image: The image to boot from
:param flavor: Flavor used to boot instance
:param count: Number of instances to boot
:param min_sleep: Minimum sleep time in seconds (non-negative)
:param max_sleep: Maximum sleep time in seconds (non-negative)
:param force_delete: True if force_delete should be used
:param kwargs: Optional additional arguments for instance creation
"""
servers = self._boot_servers(image, flavor, 1, instances_amount=count,
**kwargs)
self.sleep_between(min_sleep, max_sleep)
self._delete_servers(servers, force=force_delete)
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA, consts.Service.CINDER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["nova", "cinder"]})
def boot_server_from_volume_and_delete(self, image, flavor,
volume_size,
min_sleep=0, max_sleep=0,
force_delete=False, **kwargs):
"""Boot a server from volume and then delete it.
The scenario first creates a volume and then a server.
Optional 'min_sleep' and 'max_sleep' parameters allow the scenario
to simulate a pause between booting the server and deleting it
(of random duration from [min_sleep, max_sleep]).
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param volume_size: volume size (in GB)
:param min_sleep: Minimum sleep time in seconds (non-negative)
:param max_sleep: Maximum sleep time in seconds (non-negative)
:param force_delete: True if force_delete should be used
:param kwargs: Optional additional arguments for server creation
"""
volume = self._create_volume(volume_size, imageRef=image)
block_device_mapping = {"vda": "%s:::1" % volume.id}
server = self._boot_server(image, flavor,
block_device_mapping=block_device_mapping,
**kwargs)
self.sleep_between(min_sleep, max_sleep)
self._delete_server(server, force=force_delete)
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["nova"]})
def boot_and_bounce_server(self, image, flavor,
force_delete=False, actions=None, **kwargs):
"""Boot a server and run specified actions against it.
Actions should be passed into the actions parameter. Available actions
are 'hard_reboot', 'soft_reboot', 'stop_start' and 'rescue_unrescue'.
Delete server after all actions were completed.
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param force_delete: True if force_delete should be used
:param actions: list of action dictionaries, where each action
dictionary specifies an action to be performed
in the following format:
{"action_name": <no_of_iterations>}
:param kwargs: Optional additional arguments for server creation
"""
action_builder = self._bind_actions()
actions = actions or []
try:
action_builder.validate(actions)
except jsonschema.exceptions.ValidationError as error:
raise rally_exceptions.InvalidConfigException(
"Invalid server actions configuration \'%(actions)s\' due to: "
"%(error)s" % {"actions": str(actions), "error": str(error)})
server = self._boot_server(image, flavor, **kwargs)
for action in action_builder.build_actions(actions, server):
action()
self._delete_server(server, force=force_delete)
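# Example "actions" argument for boot_and_bounce_server, following the
# format described in its docstring and validated by _bind_actions() below:
#
#     actions = [{"hard_reboot": 1}, {"stop_start": 2}, {"rescue_unrescue": 1}]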
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["nova"]})
def boot_lock_unlock_and_delete(self, image, flavor,
min_sleep=0, max_sleep=0,
force_delete=False,
**kwargs):
"""Boot a server, lock it, then unlock and delete it.
Optional 'min_sleep' and 'max_sleep' parameters allow the
scenario to simulate a pause between locking and unlocking the
server (of random duration from min_sleep to max_sleep).
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param min_sleep: Minimum sleep time between locking and unlocking
in seconds
:param max_sleep: Maximum sleep time between locking and unlocking
in seconds
:param force_delete: True if force_delete should be used
:param kwargs: Optional additional arguments for server creation
"""
server = self._boot_server(image, flavor, **kwargs)
self._lock_server(server)
self.sleep_between(min_sleep, max_sleep)
self._unlock_server(server)
self._delete_server(server, force=force_delete)
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA, consts.Service.GLANCE)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["nova", "glance"]})
def snapshot_server(self, image, flavor,
force_delete=False, **kwargs):
"""Boot a server, make its snapshot and delete both.
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param force_delete: True if force_delete should be used
:param kwargs: Optional additional arguments for server creation
"""
server = self._boot_server(image, flavor, **kwargs)
image = self._create_image(server)
self._delete_server(server, force=force_delete)
server = self._boot_server(image.id, flavor, **kwargs)
self._delete_server(server, force=force_delete)
self._delete_image(image)
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["nova"]})
def boot_server(self, image, flavor, auto_assign_nic=False, **kwargs):
"""Boot a server.
Assumes that cleanup is done elsewhere.
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param auto_assign_nic: True if NICs should be assigned
:param kwargs: Optional additional arguments for server creation
"""
self._boot_server(image, flavor,
auto_assign_nic=auto_assign_nic, **kwargs)
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA, consts.Service.CINDER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["nova", "cinder"]})
def boot_server_from_volume(self, image, flavor, volume_size,
auto_assign_nic=False, **kwargs):
"""Boot a server from volume.
The scenario first creates a volume and then a server.
Assumes that cleanup is done elsewhere.
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param volume_size: volume size (in GB)
:param auto_assign_nic: True if NICs should be assigned
:param kwargs: Optional additional arguments for server creation
"""
volume = self._create_volume(volume_size, imageRef=image)
block_device_mapping = {"vda": "%s:::1" % volume.id}
self._boot_server(image, flavor, auto_assign_nic=auto_assign_nic,
block_device_mapping=block_device_mapping,
**kwargs)
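# Note on the mapping built above: the legacy Nova block_device_mapping value
# is (roughly) "<id>:<source type>:<size in GB>:<delete-on-terminate>", so
# "%s:::1" boots from the created volume and deletes it with the server
# (treat the exact field meanings as an assumption, not nova documentation).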
def _bind_actions(self):
actions = ["hard_reboot", "soft_reboot", "stop_start",
"rescue_unrescue"]
action_builder = task_utils.ActionBuilder(actions)
action_builder.bind_action("hard_reboot", self._reboot_server)
action_builder.bind_action("soft_reboot", self._soft_reboot_server)
action_builder.bind_action("stop_start",
self._stop_and_start_server)
action_builder.bind_action("rescue_unrescue",
self._rescue_and_unrescue_server)
return action_builder
def _stop_and_start_server(self, server):
"""Stop and then start the given server.
A stop will be issued on the given server upon which time
this method will wait for the server to become 'SHUTOFF'.
Once the server is SHUTOFF a start will be issued and this
method will wait for the server to become 'ACTIVE' again.
:param server: The server to stop and then start.
"""
self._stop_server(server)
self._start_server(server)
def _rescue_and_unrescue_server(self, server):
"""Rescue and then unrescue the given server.
A rescue will be issued on the given server upon which time
this method will wait for the server to become 'RESCUE'.
Once the server is RESCUE an unrescue will be issued and
this method will wait for the server to become 'ACTIVE'
again.
:param server: The server to rescue and then unrescue.
"""
self._rescue_server(server)
self._unrescue_server(server)
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType,
to_flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["nova"]})
def resize_server(self, image, flavor, to_flavor,
force_delete=False, **kwargs):
"""Boot a server, then resize and delete it.
This test will confirm the resize by default,
or revert the resize if confirm is set to false.
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param to_flavor: flavor to be used to resize the booted instance
:param force_delete: True if force_delete should be used
:param kwargs: Optional additional arguments for server creation
"""
server = self._boot_server(image, flavor, **kwargs)
self._resize(server, to_flavor)
# by default we confirm
confirm = kwargs.get("confirm", True)
if confirm:
self._resize_confirm(server)
else:
self._resize_revert(server)
self._delete_server(server, force=force_delete)
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType,
to_flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA, consts.Service.CINDER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder", "nova"]})
def boot_server_attach_created_volume_and_resize(
self, image, flavor, to_flavor, volume_size, min_sleep=0,
max_sleep=0, force_delete=False, confirm=True, do_delete=True,
boot_server_kwargs=None, create_volume_kwargs=None):
"""Create a VM from image, attach a volume to it and resize.
Simple test to create a VM and attach a volume, then resize the VM,
detach the volume then delete volume and VM.
Optional 'min_sleep' and 'max_sleep' parameters allow the scenario
to simulate a pause between attaching a volume and running resize
(of random duration from range [min_sleep, max_sleep]).
:param image: Glance image name to use for the VM
:param flavor: VM flavor name
:param to_flavor: flavor to be used to resize the booted instance
:param volume_size: volume size (in GB)
:param min_sleep: Minimum sleep time in seconds (non-negative)
:param max_sleep: Maximum sleep time in seconds (non-negative)
:param force_delete: True if force_delete should be used
:param confirm: True if need to confirm resize else revert resize
:param do_delete: True if resources need to be deleted explicitly
else use rally cleanup to remove resources
:param boot_server_kwargs: optional arguments for VM creation
:param create_volume_kwargs: optional arguments for volume creation
"""
boot_server_kwargs = boot_server_kwargs or {}
create_volume_kwargs = create_volume_kwargs or {}
server = self._boot_server(image, flavor, **boot_server_kwargs)
volume = self._create_volume(volume_size, **create_volume_kwargs)
self._attach_volume(server, volume)
self.sleep_between(min_sleep, max_sleep)
self._resize(server, to_flavor)
if confirm:
self._resize_confirm(server)
else:
self._resize_revert(server)
if do_delete:
self._detach_volume(server, volume)
self._delete_volume(volume)
self._delete_server(server, force=force_delete)
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType,
to_flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA, consts.Service.CINDER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["nova", "cinder"]})
def boot_server_from_volume_and_resize(
self, image, flavor, to_flavor, volume_size, min_sleep=0,
max_sleep=0, force_delete=False, confirm=True, do_delete=True,
boot_server_kwargs=None, create_volume_kwargs=None):
"""Boot a server from volume, then resize and delete it.
The scenario first creates a volume and then a server.
Optional 'min_sleep' and 'max_sleep' parameters allow the scenario
to simulate a pause between booting the server and resizing it
(of random duration from [min_sleep, max_sleep]).
This test will confirm the resize by default,
or revert the resize if confirm is set to false.
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param to_flavor: flavor to be used to resize the booted instance
:param volume_size: volume size (in GB)
:param min_sleep: Minimum sleep time in seconds (non-negative)
:param max_sleep: Maximum sleep time in seconds (non-negative)
:param force_delete: True if force_delete should be used
:param confirm: True if need to confirm resize else revert resize
:param do_delete: True if resources need to be deleted explicitly
else use rally cleanup to remove resources
:param boot_server_kwargs: optional arguments for VM creation
:param create_volume_kwargs: optional arguments for volume creation
"""
boot_server_kwargs = boot_server_kwargs or {}
create_volume_kwargs = create_volume_kwargs or {}
if boot_server_kwargs.get("block_device_mapping"):
LOG.warning("Using already existing volume is not permitted.")
volume = self._create_volume(volume_size, imageRef=image,
**create_volume_kwargs)
boot_server_kwargs["block_device_mapping"] = {
"vda": "%s:::1" % volume.id}
server = self._boot_server(image, flavor, **boot_server_kwargs)
self.sleep_between(min_sleep, max_sleep)
self._resize(server, to_flavor)
if confirm:
self._resize_confirm(server)
else:
self._resize_revert(server)
if do_delete:
self._delete_server(server, force=force_delete)
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["nova"]})
def suspend_and_resume_server(self, image, flavor,
force_delete=False, **kwargs):
"""Create a server, suspend, resume and then delete it
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param force_delete: True if force_delete should be used
:param kwargs: Optional additional arguments for server creation
"""
server = self._boot_server(image, flavor, **kwargs)
self._suspend_server(server)
self._resume_server(server)
self._delete_server(server, force=force_delete)
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["nova"]})
def pause_and_unpause_server(self, image, flavor,
force_delete=False, **kwargs):
"""Create a server, pause, unpause and then delete it
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param force_delete: True if force_delete should be used
:param kwargs: Optional additional arguments for server creation
"""
server = self._boot_server(image, flavor, **kwargs)
self._pause_server(server)
self._unpause_server(server)
self._delete_server(server, force=force_delete)
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["nova"]})
def shelve_and_unshelve_server(self, image, flavor,
force_delete=False, **kwargs):
"""Create a server, shelve, unshelve and then delete it
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param force_delete: True if force_delete should be used
:param kwargs: Optional additional arguments for server creation
"""
server = self._boot_server(image, flavor, **kwargs)
self._shelve_server(server)
self._unshelve_server(server)
self._delete_server(server, force=force_delete)
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(admin=True, users=True)
@scenario.configure(context={"cleanup": ["nova"]})
def boot_and_live_migrate_server(self, image,
flavor, block_migration=False,
disk_over_commit=False, min_sleep=0,
max_sleep=0, **kwargs):
"""Live Migrate a server.
This scenario launches a VM on a compute node available in
the availability zone and then migrates the VM to another
compute node on the same availability zone.
Optional 'min_sleep' and 'max_sleep' parameters allow the scenario
to simulate a pause between VM booting and running live migration
(of random duration from range [min_sleep, max_sleep]).
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param block_migration: Specifies the migration type
:param disk_over_commit: Specifies whether to allow overcommit
on migrated instance or not
:param min_sleep: Minimum sleep time in seconds (non-negative)
:param max_sleep: Maximum sleep time in seconds (non-negative)
:param kwargs: Optional additional arguments for server creation
"""
server = self._boot_server(image, flavor, **kwargs)
self.sleep_between(min_sleep, max_sleep)
new_host = self._find_host_to_migrate(server)
self._live_migrate(server, new_host,
block_migration, disk_over_commit)
self._delete_server(server)
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA, consts.Service.CINDER)
@validation.required_openstack(admin=True, users=True)
@scenario.configure(context={"cleanup": ["nova", "cinder"]})
def boot_server_from_volume_and_live_migrate(self, image, flavor,
volume_size,
block_migration=False,
disk_over_commit=False,
force_delete=False,
min_sleep=0, max_sleep=0,
**kwargs):
"""Boot a server from volume and then migrate it.
The scenario first creates a volume and a server booted from
the volume on a compute node available in the availability zone and
then migrates the VM to another compute node on the same availability
zone.
Optional 'min_sleep' and 'max_sleep' parameters allow the scenario
to simulate a pause between VM booting and running live migration
(of random duration from range [min_sleep, max_sleep]).
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param volume_size: volume size (in GB)
:param block_migration: Specifies the migration type
:param disk_over_commit: Specifies whether to allow overcommit
on migrated instance or not
:param force_delete: True if force_delete should be used
:param min_sleep: Minimum sleep time in seconds (non-negative)
:param max_sleep: Maximum sleep time in seconds (non-negative)
:param kwargs: Optional additional arguments for server creation
"""
volume = self._create_volume(volume_size, imageRef=image)
block_device_mapping = {"vda": "%s:::1" % volume.id}
server = self._boot_server(image, flavor,
block_device_mapping=block_device_mapping,
**kwargs)
self.sleep_between(min_sleep, max_sleep)
new_host = self._find_host_to_migrate(server)
self._live_migrate(server, new_host,
block_migration, disk_over_commit)
self._delete_server(server, force=force_delete)
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA, consts.Service.CINDER)
@validation.required_openstack(admin=True, users=True)
@scenario.configure(context={"cleanup": ["cinder", "nova"]})
def boot_server_attach_created_volume_and_live_migrate(
self,
image,
flavor,
size,
block_migration=False,
disk_over_commit=False,
boot_server_kwargs=None,
create_volume_kwargs=None,
min_sleep=0,
max_sleep=0):
"""Create a VM, attach a volume to it and live migrate.
Simple test to create a VM and attach a volume, then migrate the VM,
detach the volume and delete volume/VM.
Optional 'min_sleep' and 'max_sleep' parameters allow the scenario
to simulate a pause between attaching a volume and running live
migration (of random duration from range [min_sleep, max_sleep]).
:param image: Glance image name to use for the VM
:param flavor: VM flavor name
:param size: volume size (in GB)
:param block_migration: Specifies the migration type
:param disk_over_commit: Specifies whether to allow overcommit
on migrated instance or not
:param boot_server_kwargs: optional arguments for VM creation
:param create_volume_kwargs: optional arguments for volume creation
:param min_sleep: Minimum sleep time in seconds (non-negative)
:param max_sleep: Maximum sleep time in seconds (non-negative)
"""
if boot_server_kwargs is None:
boot_server_kwargs = {}
if create_volume_kwargs is None:
create_volume_kwargs = {}
server = self._boot_server(image, flavor, **boot_server_kwargs)
volume = self._create_volume(size, **create_volume_kwargs)
self._attach_volume(server, volume)
self.sleep_between(min_sleep, max_sleep)
new_host = self._find_host_to_migrate(server)
self._live_migrate(server, new_host,
block_migration, disk_over_commit)
self._detach_volume(server, volume)
self._delete_volume(volume)
self._delete_server(server)
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(admin=True, users=True)
@scenario.configure(context={"cleanup": ["nova"]})
def boot_and_migrate_server(self, image, flavor, **kwargs):
"""Migrate a server.
This scenario launches a VM on a compute node available in
the availability zone and stops the VM, and then migrates the VM
to another compute node on the same availability zone.
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param kwargs: Optional additional arguments for server creation
"""
server = self._boot_server(image, flavor, **kwargs)
self._stop_server(server)
self._migrate(server)
# NOTE(wtakase): This is required because cold migration and resize
# share same code path.
confirm = kwargs.get("confirm", True)
if confirm:
self._resize_confirm(server, status="SHUTOFF")
else:
self._resize_revert(server, status="SHUTOFF")
self._delete_server(server)
@types.set(from_image=types.ImageResourceType,
to_image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "from_image")
@validation.image_valid_on_flavor("flavor", "to_image")
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(admin=True, users=True)
@scenario.configure(context={"cleanup": ["nova"]})
def boot_and_rebuild_server(self, from_image, to_image, flavor, **kwargs):
"""Rebuild a server.
This scenario launches a VM, then rebuilds that VM with a
different image.
:param from_image: image to be used to boot an instance
:param to_image: image to be used to rebuild the instance
:param flavor: flavor to be used to boot an instance
:param kwargs: Optional additional arguments for server creation
"""
server = self._boot_server(from_image, flavor, **kwargs)
self._rebuild_server(server, to_image)
self._delete_server(server)
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(users=True)
@validation.required_contexts("network")
@scenario.configure(context={"cleanup": ["nova"]})
def boot_and_associate_floating_ip(self, image, flavor, **kwargs):
"""Boot a server and associate a floating IP to it.
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param kwargs: Optional additional arguments for server creation
"""
server = self._boot_server(image, flavor, **kwargs)
address = network_wrapper.wrap(self.clients, self).create_floating_ip(
tenant_id=server.tenant_id)
self._associate_floating_ip(server, address["ip"])
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["nova"]})
def boot_and_show_server(self, image, flavor, **kwargs):
"""Show server details.
This simple scenario tests the nova show command by retrieving
the server details.
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param kwargs: Optional additional arguments for server creation
:returns: Server details
"""
server = self._boot_server(image, flavor, **kwargs)
self._show_server(server)
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["nova"]})
def boot_and_get_console_output(self, image, flavor,
length=None, **kwargs):
"""Get text console output from server.
This simple scenario tests the nova console-log command by retrieving
the text console log output.
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param length: The number of tail log lines you would like to retrieve.
None (default value) or -1 means unlimited length.
:param kwargs: Optional additional arguments for server creation
:returns: Text console log output for server
"""
server = self._boot_server(image, flavor, **kwargs)
self._get_server_console_output(server, length)
| apache-2.0 |
pacoqueen/bbinn | PyChart-1.39/pychart/afm/Helvetica_Narrow.py | 12 | 1488 | # AFM font Helvetica-Narrow (path: /usr/share/fonts/afms/adobe/phvr8an.afm).
# Derived from Ghostscript distribution.
# Go to www.cs.wisc.edu/~ghost to get the Ghostscript source code.
import dir
dir.afm["Helvetica-Narrow"] = (500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 228, 228, 291, 456, 456, 729, 547, 182, 273, 273, 319, 479, 228, 273, 228, 228, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 228, 228, 479, 479, 479, 456, 832, 547, 547, 592, 592, 547, 501, 638, 592, 228, 410, 547, 456, 683, 592, 638, 547, 638, 592, 547, 501, 592, 547, 774, 547, 547, 501, 228, 228, 228, 385, 456, 182, 456, 456, 410, 456, 456, 228, 456, 456, 182, 182, 410, 182, 683, 456, 456, 456, 456, 273, 410, 228, 456, 410, 592, 410, 410, 410, 274, 213, 274, 479, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 273, 456, 456, 137, 456, 456, 456, 456, 157, 273, 456, 273, 273, 410, 410, 500, 456, 456, 456, 228, 500, 440, 287, 182, 273, 273, 456, 820, 820, 500, 501, 500, 273, 273, 273, 273, 273, 273, 273, 273, 500, 273, 273, 500, 273, 273, 273, 820, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 820, 500, 303, 500, 500, 500, 500, 456, 638, 820, 299, 500, 500, 500, 500, 500, 729, 500, 500, 500, 228, 500, 500, 182, 501, 774, 501, )
| gpl-2.0 |
alex/boto | tests/unit/cloudsearch/test_connection.py | 114 | 8513 | #!/usr/bin/env python
from tests.unit import AWSMockServiceTestCase
from boto.cloudsearch.domain import Domain
from boto.cloudsearch.layer1 import Layer1
class TestCloudSearchCreateDomain(AWSMockServiceTestCase):
connection_class = Layer1
def default_body(self):
return b"""
<CreateDomainResponse xmlns="http://cloudsearch.amazonaws.com/doc/2011-02-01">
<CreateDomainResult>
<DomainStatus>
<SearchPartitionCount>0</SearchPartitionCount>
<SearchService>
<Arn>arn:aws:cs:us-east-1:1234567890:search/demo</Arn>
<Endpoint>search-demo-userdomain.us-east-1.cloudsearch.amazonaws.com</Endpoint>
</SearchService>
<NumSearchableDocs>0</NumSearchableDocs>
<Created>true</Created>
<DomainId>1234567890/demo</DomainId>
<Processing>false</Processing>
<SearchInstanceCount>0</SearchInstanceCount>
<DomainName>demo</DomainName>
<RequiresIndexDocuments>false</RequiresIndexDocuments>
<Deleted>false</Deleted>
<DocService>
<Arn>arn:aws:cs:us-east-1:1234567890:doc/demo</Arn>
<Endpoint>doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com</Endpoint>
</DocService>
</DomainStatus>
</CreateDomainResult>
<ResponseMetadata>
<RequestId>00000000-0000-0000-0000-000000000000</RequestId>
</ResponseMetadata>
</CreateDomainResponse>
"""
def test_create_domain(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.create_domain('demo')
self.assert_request_parameters({
'Action': 'CreateDomain',
'DomainName': 'demo',
'Version': '2011-02-01',
})
def test_cloudsearch_connect_result_endpoints(self):
"""Check that endpoints & ARNs are correctly returned from AWS"""
self.set_http_response(status_code=200)
api_response = self.service_connection.create_domain('demo')
domain = Domain(self, api_response)
self.assertEqual(domain.doc_service_arn,
"arn:aws:cs:us-east-1:1234567890:doc/demo")
self.assertEqual(
domain.doc_service_endpoint,
"doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com")
self.assertEqual(domain.search_service_arn,
"arn:aws:cs:us-east-1:1234567890:search/demo")
self.assertEqual(
domain.search_service_endpoint,
"search-demo-userdomain.us-east-1.cloudsearch.amazonaws.com")
def test_cloudsearch_connect_result_statuses(self):
"""Check that domain statuses are correctly returned from AWS"""
self.set_http_response(status_code=200)
api_response = self.service_connection.create_domain('demo')
domain = Domain(self, api_response)
self.assertEqual(domain.created, True)
self.assertEqual(domain.processing, False)
self.assertEqual(domain.requires_index_documents, False)
self.assertEqual(domain.deleted, False)
def test_cloudsearch_connect_result_details(self):
"""Check that the domain information is correctly returned from AWS"""
self.set_http_response(status_code=200)
api_response = self.service_connection.create_domain('demo')
domain = Domain(self, api_response)
self.assertEqual(domain.id, "1234567890/demo")
self.assertEqual(domain.name, "demo")
def test_cloudsearch_documentservice_creation(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.create_domain('demo')
domain = Domain(self, api_response)
document = domain.get_document_service()
self.assertEqual(
document.endpoint,
"doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com")
def test_cloudsearch_searchservice_creation(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.create_domain('demo')
domain = Domain(self, api_response)
search = domain.get_search_service()
self.assertEqual(
search.endpoint,
"search-demo-userdomain.us-east-1.cloudsearch.amazonaws.com")
class CloudSearchConnectionDeletionTest(AWSMockServiceTestCase):
connection_class = Layer1
def default_body(self):
return b"""
<DeleteDomainResponse xmlns="http://cloudsearch.amazonaws.com/doc/2011-02-01">
<DeleteDomainResult>
<DomainStatus>
<SearchPartitionCount>0</SearchPartitionCount>
<SearchService>
<Arn>arn:aws:cs:us-east-1:1234567890:search/demo</Arn>
<Endpoint>search-demo-userdomain.us-east-1.cloudsearch.amazonaws.com</Endpoint>
</SearchService>
<NumSearchableDocs>0</NumSearchableDocs>
<Created>true</Created>
<DomainId>1234567890/demo</DomainId>
<Processing>false</Processing>
<SearchInstanceCount>0</SearchInstanceCount>
<DomainName>demo</DomainName>
<RequiresIndexDocuments>false</RequiresIndexDocuments>
<Deleted>false</Deleted>
<DocService>
<Arn>arn:aws:cs:us-east-1:1234567890:doc/demo</Arn>
<Endpoint>doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com</Endpoint>
</DocService>
</DomainStatus>
</DeleteDomainResult>
<ResponseMetadata>
<RequestId>00000000-0000-0000-0000-000000000000</RequestId>
</ResponseMetadata>
</DeleteDomainResponse>
"""
def test_cloudsearch_deletion(self):
"""
        Check that the correct arguments are sent to AWS when deleting a
        cloudsearch domain.
"""
self.set_http_response(status_code=200)
api_response = self.service_connection.delete_domain('demo')
self.assert_request_parameters({
'Action': 'DeleteDomain',
'DomainName': 'demo',
'Version': '2011-02-01',
})
class CloudSearchConnectionIndexDocumentTest(AWSMockServiceTestCase):
connection_class = Layer1
def default_body(self):
return b"""
<IndexDocumentsResponse xmlns="http://cloudsearch.amazonaws.com/doc/2011-02-01">
<IndexDocumentsResult>
<FieldNames>
<member>average_score</member>
<member>brand_id</member>
<member>colors</member>
<member>context</member>
<member>context_owner</member>
<member>created_at</member>
<member>creator_id</member>
<member>description</member>
<member>file_size</member>
<member>format</member>
<member>has_logo</member>
<member>has_messaging</member>
<member>height</member>
<member>image_id</member>
<member>ingested_from</member>
<member>is_advertising</member>
<member>is_photo</member>
<member>is_reviewed</member>
<member>modified_at</member>
<member>subject_date</member>
<member>tags</member>
<member>title</member>
<member>width</member>
</FieldNames>
</IndexDocumentsResult>
<ResponseMetadata>
<RequestId>eb2b2390-6bbd-11e2-ab66-93f3a90dcf2a</RequestId>
</ResponseMetadata>
</IndexDocumentsResponse>
"""
def test_cloudsearch_index_documents(self):
"""
Check that the correct arguments are sent to AWS when indexing a
domain.
"""
self.set_http_response(status_code=200)
api_response = self.service_connection.index_documents('demo')
self.assert_request_parameters({
'Action': 'IndexDocuments',
'DomainName': 'demo',
'Version': '2011-02-01',
})
def test_cloudsearch_index_documents_resp(self):
"""
Check that the AWS response is being parsed correctly when indexing a
domain.
"""
self.set_http_response(status_code=200)
api_response = self.service_connection.index_documents('demo')
self.assertEqual(api_response, ['average_score', 'brand_id', 'colors',
'context', 'context_owner',
'created_at', 'creator_id',
'description', 'file_size', 'format',
'has_logo', 'has_messaging', 'height',
'image_id', 'ingested_from',
'is_advertising', 'is_photo',
'is_reviewed', 'modified_at',
'subject_date', 'tags', 'title',
'width'])
| mit |
jeffzheng1/tensorflow | tensorflow/examples/learn/iris_custom_decay_dnn.py | 56 | 1959 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import datasets
from sklearn import metrics
from sklearn.cross_validation import train_test_split
import tensorflow as tf
def optimizer_exp_decay():
global_step = tf.contrib.framework.get_or_create_global_step()
learning_rate = tf.train.exponential_decay(
learning_rate=0.1, global_step=global_step,
decay_steps=100, decay_rate=0.001)
return tf.train.AdagradOptimizer(learning_rate=learning_rate)
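# For reference (standard documented behaviour of tf.train.exponential_decay,
# not spelled out in this example): with the settings above the effective rate
# is roughly
#   decayed_lr = 0.1 * 0.001 ** (global_step / 100)
# so the learning rate shrinks smoothly as training steps accumulate.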
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
x_train)
classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
hidden_units=[10, 20, 10],
n_classes=3,
optimizer=optimizer_exp_decay)
classifier.fit(x_train, y_train, steps=800)
predictions = list(classifier.predict(x_test, as_iterable=True))
score = metrics.accuracy_score(y_test, predictions)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
Emergya/icm-openedx-educamadrid-platform-basic | common/test/acceptance/tests/studio/test_studio_home.py | 23 | 5862 | """
Acceptance tests for Home Page (My Courses / My Libraries).
"""
from bok_choy.web_app_test import WebAppTest
from opaque_keys.edx.locator import LibraryLocator
from ...fixtures import PROGRAMS_STUB_URL
from ...fixtures.config import ConfigModelFixture
from ...fixtures.programs import ProgramsFixture
from ...pages.studio.auto_auth import AutoAuthPage
from ...pages.studio.library import LibraryEditPage
from ...pages.studio.index import DashboardPage, DashboardPageWithPrograms
class CreateLibraryTest(WebAppTest):
"""
Test that we can create a new content library on the studio home page.
"""
def setUp(self):
"""
Load the helper for the home page (dashboard page)
"""
super(CreateLibraryTest, self).setUp()
self.auth_page = AutoAuthPage(self.browser, staff=True)
self.dashboard_page = DashboardPage(self.browser)
def test_create_library(self):
"""
From the home page:
Click "New Library"
Fill out the form
Submit the form
We should be redirected to the edit view for the library
Return to the home page
The newly created library should now appear in the list of libraries
"""
name = "New Library Name"
org = "TestOrgX"
number = "TESTLIB"
self.auth_page.visit()
self.dashboard_page.visit()
self.assertFalse(self.dashboard_page.has_library(name=name, org=org, number=number))
self.assertTrue(self.dashboard_page.has_new_library_button())
self.dashboard_page.click_new_library()
self.assertTrue(self.dashboard_page.is_new_library_form_visible())
self.dashboard_page.fill_new_library_form(name, org, number)
self.assertTrue(self.dashboard_page.is_new_library_form_valid())
self.dashboard_page.submit_new_library_form()
# The next page is the library edit view; make sure it loads:
lib_page = LibraryEditPage(self.browser, LibraryLocator(org, number))
lib_page.wait_for_page()
# Then go back to the home page and make sure the new library is listed there:
self.dashboard_page.visit()
self.assertTrue(self.dashboard_page.has_library(name=name, org=org, number=number))
class DashboardProgramsTabTest(WebAppTest):
"""
Test the programs tab on the studio home page.
"""
def setUp(self):
super(DashboardProgramsTabTest, self).setUp()
ProgramsFixture().install_programs([])
self.auth_page = AutoAuthPage(self.browser, staff=True)
self.dashboard_page = DashboardPageWithPrograms(self.browser)
self.auth_page.visit()
def set_programs_api_configuration(self, is_enabled=False, api_version=1, api_url=PROGRAMS_STUB_URL,
js_path='/js', css_path='/css'):
"""
Dynamically adjusts the programs API config model during tests.
"""
ConfigModelFixture('/config/programs', {
'enabled': is_enabled,
'enable_studio_tab': is_enabled,
'enable_student_dashboard': is_enabled,
'api_version_number': api_version,
'internal_service_url': api_url,
'public_service_url': api_url,
'authoring_app_js_path': js_path,
'authoring_app_css_path': css_path,
'cache_ttl': 0
}).install()
def test_tab_is_disabled(self):
"""
The programs tab and "new program" button should not appear at all
unless enabled via the config model.
"""
self.set_programs_api_configuration()
self.dashboard_page.visit()
self.assertFalse(self.dashboard_page.is_programs_tab_present())
self.assertFalse(self.dashboard_page.is_new_program_button_present())
def test_tab_is_enabled_with_empty_list(self):
"""
The programs tab and "new program" button should appear when enabled
via config. When the programs list is empty, a button should appear
that allows creating a new program.
"""
self.set_programs_api_configuration(True)
self.dashboard_page.visit()
self.assertTrue(self.dashboard_page.is_programs_tab_present())
self.assertTrue(self.dashboard_page.is_new_program_button_present())
results = self.dashboard_page.get_program_list()
self.assertEqual(results, [])
self.assertTrue(self.dashboard_page.is_empty_list_create_button_present())
def test_tab_is_enabled_with_nonempty_list(self):
"""
The programs tab and "new program" button should appear when enabled
via config, and the results of the program list should display when
the list is nonempty.
"""
test_program_values = [('first program', 'org1'), ('second program', 'org2')]
ProgramsFixture().install_programs(test_program_values)
self.set_programs_api_configuration(True)
self.dashboard_page.visit()
self.assertTrue(self.dashboard_page.is_programs_tab_present())
self.assertTrue(self.dashboard_page.is_new_program_button_present())
results = self.dashboard_page.get_program_list()
self.assertEqual(results, test_program_values)
self.assertFalse(self.dashboard_page.is_empty_list_create_button_present())
def test_tab_requires_staff(self):
"""
The programs tab and "new program" button will not be available, even
when enabled via config, if the user is not global staff.
"""
self.set_programs_api_configuration(True)
AutoAuthPage(self.browser, staff=False).visit()
self.dashboard_page.visit()
self.assertFalse(self.dashboard_page.is_programs_tab_present())
self.assertFalse(self.dashboard_page.is_new_program_button_present())
| agpl-3.0 |
vladmm/intellij-community | python/lib/Lib/site-packages/django/utils/cache.py | 71 | 9060 | """
This module contains helper functions for controlling caching. It does so by
managing the "Vary" header of responses. It includes functions to patch the
header of response objects directly and decorators that change functions to do
that header-patching themselves.
For information on the Vary header, see:
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.44
Essentially, the "Vary" HTTP header defines which headers a cache should take
into account when building its cache key. Requests with the same path but
different header content for headers named in "Vary" need to get different
cache keys to prevent delivery of wrong content.
An example: i18n middleware would need to distinguish caches by the
"Accept-language" header.
"""
import re
import time
from django.conf import settings
from django.core.cache import get_cache
from django.utils.encoding import smart_str, iri_to_uri
from django.utils.http import http_date
from django.utils.hashcompat import md5_constructor
from django.utils.translation import get_language
from django.http import HttpRequest
cc_delim_re = re.compile(r'\s*,\s*')
def patch_cache_control(response, **kwargs):
"""
This function patches the Cache-Control header by adding all
keyword arguments to it. The transformation is as follows:
* All keyword parameter names are turned to lowercase, and underscores
are converted to hyphens.
* If the value of a parameter is True (exactly True, not just a
true value), only the parameter name is added to the header.
* All other parameters are added with their value, after applying
str() to it.
"""
def dictitem(s):
t = s.split('=', 1)
if len(t) > 1:
return (t[0].lower(), t[1])
else:
return (t[0].lower(), True)
def dictvalue(t):
if t[1] is True:
return t[0]
else:
return t[0] + '=' + smart_str(t[1])
if response.has_header('Cache-Control'):
cc = cc_delim_re.split(response['Cache-Control'])
cc = dict([dictitem(el) for el in cc])
else:
cc = {}
# If there's already a max-age header but we're being asked to set a new
# max-age, use the minimum of the two ages. In practice this happens when
# a decorator and a piece of middleware both operate on a given view.
if 'max-age' in cc and 'max_age' in kwargs:
kwargs['max_age'] = min(cc['max-age'], kwargs['max_age'])
for (k, v) in kwargs.items():
cc[k.replace('_', '-')] = v
cc = ', '.join([dictvalue(el) for el in cc.items()])
response['Cache-Control'] = cc
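# A quick illustration of the rules described in the docstring above (not part
# of the original module); ``response`` is assumed to be an HttpResponse and
# item order within the header may vary:
#
#   patch_cache_control(response, max_age=3600, no_transform=True)
#   # response['Cache-Control'] == 'max-age=3600, no-transform'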
def get_max_age(response):
"""
Returns the max-age from the response Cache-Control header as an integer
    (or ``None`` if it wasn't found or wasn't an integer).
"""
if not response.has_header('Cache-Control'):
return
cc = dict([_to_tuple(el) for el in
cc_delim_re.split(response['Cache-Control'])])
if 'max-age' in cc:
try:
return int(cc['max-age'])
except (ValueError, TypeError):
pass
def patch_response_headers(response, cache_timeout=None):
"""
Adds some useful headers to the given HttpResponse object:
ETag, Last-Modified, Expires and Cache-Control
Each header is only added if it isn't already set.
cache_timeout is in seconds. The CACHE_MIDDLEWARE_SECONDS setting is used
by default.
"""
if cache_timeout is None:
cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
if cache_timeout < 0:
cache_timeout = 0 # Can't have max-age negative
if settings.USE_ETAGS and not response.has_header('ETag'):
response['ETag'] = '"%s"' % md5_constructor(response.content).hexdigest()
if not response.has_header('Last-Modified'):
response['Last-Modified'] = http_date()
if not response.has_header('Expires'):
response['Expires'] = http_date(time.time() + cache_timeout)
patch_cache_control(response, max_age=cache_timeout)
def add_never_cache_headers(response):
"""
Adds headers to a response to indicate that a page should never be cached.
"""
patch_response_headers(response, cache_timeout=-1)
def patch_vary_headers(response, newheaders):
"""
Adds (or updates) the "Vary" header in the given HttpResponse object.
newheaders is a list of header names that should be in "Vary". Existing
headers in "Vary" aren't removed.
"""
# Note that we need to keep the original order intact, because cache
# implementations may rely on the order of the Vary contents in, say,
# computing an MD5 hash.
if response.has_header('Vary'):
vary_headers = cc_delim_re.split(response['Vary'])
else:
vary_headers = []
# Use .lower() here so we treat headers as case-insensitive.
existing_headers = set([header.lower() for header in vary_headers])
additional_headers = [newheader for newheader in newheaders
if newheader.lower() not in existing_headers]
response['Vary'] = ', '.join(vary_headers + additional_headers)
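# Sketch of the resulting behaviour (illustrative, not in the original source):
# given a response that already sends ``Vary: Cookie``,
#
#   patch_vary_headers(response, ('Accept-Language',))
#   # response['Vary'] == 'Cookie, Accept-Language'
#
# existing entries keep their position and new, case-insensitively distinct
# headers are appended.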
def _i18n_cache_key_suffix(request, cache_key):
"""If enabled, returns the cache key ending with a locale."""
if settings.USE_I18N:
# first check if LocaleMiddleware or another middleware added
# LANGUAGE_CODE to request, then fall back to the active language
# which in turn can also fall back to settings.LANGUAGE_CODE
cache_key += '.%s' % getattr(request, 'LANGUAGE_CODE', get_language())
return cache_key
def _generate_cache_key(request, method, headerlist, key_prefix):
"""Returns a cache key from the headers given in the header list."""
ctx = md5_constructor()
for header in headerlist:
value = request.META.get(header, None)
if value is not None:
ctx.update(value)
path = md5_constructor(iri_to_uri(request.path))
cache_key = 'views.decorators.cache.cache_page.%s.%s.%s.%s' % (
key_prefix, request.method, path.hexdigest(), ctx.hexdigest())
return _i18n_cache_key_suffix(request, cache_key)
def _generate_cache_header_key(key_prefix, request):
"""Returns a cache key for the header cache."""
path = md5_constructor(iri_to_uri(request.path))
cache_key = 'views.decorators.cache.cache_header.%s.%s' % (
key_prefix, path.hexdigest())
return _i18n_cache_key_suffix(request, cache_key)
def get_cache_key(request, key_prefix=None, method='GET', cache=None):
"""
Returns a cache key based on the request path. It can be used in the
request phase because it pulls the list of headers to take into account
from the global path registry and uses those to build a cache key to check
against.
If there is no headerlist stored, the page needs to be rebuilt, so this
function returns None.
"""
if key_prefix is None:
key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
cache_key = _generate_cache_header_key(key_prefix, request)
if cache is None:
cache = get_cache(settings.CACHE_MIDDLEWARE_ALIAS)
headerlist = cache.get(cache_key, None)
if headerlist is not None:
return _generate_cache_key(request, method, headerlist, key_prefix)
else:
return None
def learn_cache_key(request, response, cache_timeout=None, key_prefix=None, cache=None):
"""
Learns what headers to take into account for some request path from the
response object. It stores those headers in a global path registry so that
later access to that path will know what headers to take into account
without building the response object itself. The headers are named in the
Vary header of the response, but we want to prevent response generation.
The list of headers to use for cache key generation is stored in the same
cache as the pages themselves. If the cache ages some data out of the
cache, this just means that we have to build the response once to get at
the Vary header and so at the list of headers to use for the cache key.
"""
if key_prefix is None:
key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
if cache_timeout is None:
cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
cache_key = _generate_cache_header_key(key_prefix, request)
if cache is None:
cache = get_cache(settings.CACHE_MIDDLEWARE_ALIAS)
if response.has_header('Vary'):
headerlist = ['HTTP_'+header.upper().replace('-', '_')
for header in cc_delim_re.split(response['Vary'])]
cache.set(cache_key, headerlist, cache_timeout)
return _generate_cache_key(request, request.method, headerlist, key_prefix)
else:
# if there is no Vary header, we still need a cache key
# for the request.path
cache.set(cache_key, [], cache_timeout)
return _generate_cache_key(request, request.method, [], key_prefix)
def _to_tuple(s):
t = s.split('=',1)
if len(t) == 2:
return t[0].lower(), t[1]
return t[0].lower(), True
| apache-2.0 |
yukoba/sympy | bin/mailmap_update.py | 47 | 3368 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A tool to help keep .mailmap and AUTHORS up-to-date.
"""
# TODO:
# - Check doc/src/aboutus.rst
# - Make it easier to update .mailmap or AUTHORS with the correct entries.
from __future__ import unicode_literals
from __future__ import print_function
import os
import sys
from fabric.api import local, env
from fabric.colors import yellow, blue, green, red
from fabric.utils import error
mailmap_update_path = os.path.abspath(__file__)
mailmap_update_dir = os.path.dirname(mailmap_update_path)
sympy_top = os.path.split(mailmap_update_dir)[0]
sympy_dir = os.path.join(sympy_top, 'sympy')
if os.path.isdir(sympy_dir):
sys.path.insert(0, sympy_top)
from sympy.utilities.misc import filldedent
try:
# Only works in newer versions of fabric
env.colorize_errors = True
except AttributeError:
pass
git_command = 'git log --format="%aN <%aE>" | sort -u'
git_people = unicode(local(git_command, capture=True), 'utf-8').strip().split("\n")
from distutils.version import LooseVersion
git_ver = local('git --version', capture=True)[12:]
if LooseVersion(git_ver) < LooseVersion('1.8.4.2'):
print(yellow("Please use a newer git version >= 1.8.4.2"))
with open(os.path.realpath(os.path.join(__file__, os.path.pardir,
os.path.pardir, "AUTHORS"))) as fd:
AUTHORS = unicode(fd.read(), 'utf-8')
firstauthor = "Ondřej Čertík"
authors = AUTHORS[AUTHORS.find(firstauthor):].strip().split('\n')
# People who don't want to be listed in AUTHORS
authors_skip = ["Kirill Smelkov <kirr@landau.phys.spbu.ru>", "Sergey B Kirpichev <skirpichev@gmail.com>"]
predate_git = 0
exit1 = False
print(blue(filldedent("""Read the text at the top of AUTHORS and the text at
the top of .mailmap for information on how to fix the below errors. If
someone is missing from AUTHORS, add them where they would have been if they
were added after their first pull request was merged (checkout the merge
commit from the first pull request and see who is at the end of the AUTHORS
file at that commit.)""")))
print()
print(yellow("People who are in AUTHORS but not in git:"))
print()
for name in sorted(set(authors) - set(git_people)):
if name.startswith("*"):
# People who are in AUTHORS but predate git
predate_git += 1
continue
exit1 = True
print(name)
print()
print(yellow("People who are in git but not in AUTHORS:"))
print()
for name in sorted(set(git_people) - set(authors) - set(authors_skip)):
exit1 = True
print(name)
# + 1 because the last newline is stripped by strip()
authors_count = AUTHORS[AUTHORS.find(firstauthor):].strip().count("\n") + 1
adjusted_authors_count = (
authors_count
- predate_git
+ len(authors_skip)
)
git_count = len(git_people)
print()
print(yellow("There are {git_count} people in git, and {adjusted_authors_count} "
"(adjusted) people from AUTHORS".format(git_count=git_count,
adjusted_authors_count=adjusted_authors_count)))
if git_count != adjusted_authors_count:
error("These two numbers are not the same!")
else:
print()
print(green(filldedent("""Congratulations. The AUTHORS and .mailmap files
appear to be up to date. You should now verify that doc/src/aboutus has %s
people.""" % authors_count)))
if exit1:
print()
print(red("There were errors. Please fix them."))
sys.exit(1)
| bsd-3-clause |
Vvucinic/Wander | venv_2_7/lib/python2.7/site-packages/IPython/utils/shimmodule.py | 17 | 2809 | """A shim module for deprecated imports
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import sys
import types
from .importstring import import_item
class ShimWarning(Warning):
"""A warning to show when a module has moved, and a shim is in its place."""
class ShimImporter(object):
"""Import hook for a shim.
This ensures that submodule imports return the real target module,
not a clone that will confuse `is` and `isinstance` checks.
"""
def __init__(self, src, mirror):
self.src = src
self.mirror = mirror
def _mirror_name(self, fullname):
"""get the name of the mirrored module"""
return self.mirror + fullname[len(self.src):]
def find_module(self, fullname, path=None):
"""Return self if we should be used to import the module."""
if fullname.startswith(self.src + '.'):
mirror_name = self._mirror_name(fullname)
try:
mod = import_item(mirror_name)
except ImportError:
return
else:
if not isinstance(mod, types.ModuleType):
# not a module
return None
return self
def load_module(self, fullname):
"""Import the mirrored module, and insert it into sys.modules"""
mirror_name = self._mirror_name(fullname)
mod = import_item(mirror_name)
sys.modules[fullname] = mod
return mod
class ShimModule(types.ModuleType):
def __init__(self, *args, **kwargs):
self._mirror = kwargs.pop("mirror")
src = kwargs.pop("src", None)
if src:
kwargs['name'] = src.rsplit('.', 1)[-1]
super(ShimModule, self).__init__(*args, **kwargs)
# add import hook for descendent modules
if src:
sys.meta_path.append(
ShimImporter(src=src, mirror=self._mirror)
)
@property
def __path__(self):
return []
@property
def __spec__(self):
"""Don't produce __spec__ until requested"""
return __import__(self._mirror).__spec__
def __dir__(self):
return dir(__import__(self._mirror))
@property
def __all__(self):
"""Ensure __all__ is always defined"""
mod = __import__(self._mirror)
try:
return mod.__all__
except AttributeError:
return [name for name in dir(mod) if not name.startswith('_')]
def __getattr__(self, key):
# Use the equivalent of import_item(name), see below
name = "%s.%s" % (self._mirror, key)
try:
return import_item(name)
except ImportError:
raise AttributeError(key)
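# Typical wiring (an assumption based on how IPython registers these shims,
# not shown in this file): the deprecated name is replaced in sys.modules so
# that importing it transparently resolves to the new package, e.g.
#
#   sys.modules['IPython.html'] = ShimModule(
#       src='IPython.html', mirror='notebook')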
| artistic-2.0 |
coreyoconnor/nixops | nixops/resources/azure_queue.py | 6 | 4667 | # -*- coding: utf-8 -*-
# Automatic provisioning of Azure Queues.
import os
import azure
from nixops.util import attr_property
from nixops.azure_common import StorageResourceDefinition, StorageResourceState
from nixops.resources.azure_resource_group import AzureResourceGroupState
from nixops.resources.azure_storage import AzureStorageState
class AzureQueueDefinition(StorageResourceDefinition):
"""Definition of an Azure Queue"""
@classmethod
def get_type(cls):
return "azure-queue"
@classmethod
def get_resource_type(cls):
return "azureQueues"
def __init__(self, xml):
StorageResourceDefinition.__init__(self, xml)
self.queue_name = self.get_option_value(xml, 'name', str)
self.copy_option(xml, 'storage', 'resource')
self.copy_metadata(xml)
self.copy_signed_identifiers(xml.find("attrs/attr[@name='acl']"))
def show_type(self):
return "{0}".format(self.get_type())
class AzureQueueState(StorageResourceState):
"""State of an Azure Queue"""
queue_name = attr_property("azure.name", None)
storage = attr_property("azure.storage", None)
signed_identifiers = attr_property("azure.signedIdentifiers", {}, 'json')
metadata = attr_property("azure.metadata", {}, 'json')
@classmethod
def get_type(cls):
return "azure-queue"
def show_type(self):
s = super(AzureQueueState, self).show_type()
if self.state == self.UP: s = "{0}".format(s)
return s
@property
def resource_id(self):
return self.queue_name
@property
def full_name(self):
return "Azure queue '{0}'".format(self.resource_id)
def get_storage_name(self):
return self.storage
def get_key(self):
storage = self.get_resource_state(AzureStorageState, self.storage)
access_key = self.access_key or (storage and storage.access_key)
if not access_key:
raise Exception("Can't obtain the access key needed to manage {0}"
.format(self.full_name))
return access_key
def is_settled(self, resource):
return True
def get_resource_allow_exceptions(self):
return self.qs().get_queue_metadata(self.resource_id)
def destroy_resource(self):
self.qs().delete_queue(self.resource_id, fail_not_exist = True)
defn_properties = [ 'metadata' ]
def create(self, defn, check, allow_reboot, allow_recreate):
self.no_property_change(defn, 'storage')
self.queue_name = defn.queue_name
self.access_key = defn.access_key
self.storage = defn.storage
if check:
queue = self.get_settled_resource()
if not queue:
self.warn_missing_resource()
elif self.state == self.UP:
self.handle_changed_metadata(queue)
self.handle_changed_signed_identifiers(
self.qs().get_queue_acl(self.queue_name))
else:
self.warn_not_supposed_to_exist()
self.confirm_destroy()
if self.state != self.UP:
if self.get_settled_resource():
raise Exception("tried creating a queue that already exists; "
"please run 'deploy --check' to fix this")
self.log("creating {0} in {1}...".format(self.full_name, defn.storage))
self.qs().create_queue(defn.queue_name,
x_ms_meta_name_values = defn.metadata,
fail_on_exist = True)
self.state = self.UP
self.copy_properties(defn)
if self.properties_changed(defn):
self.log("updating properties of {0}...".format(self.full_name))
self.get_settled_resource_assert_exists()
self.qs().set_queue_metadata(self.queue_name, x_ms_meta_name_values = defn.metadata)
self.metadata = defn.metadata
if self.signed_identifiers != defn.signed_identifiers:
self.log("updating the ACL of {0}..."
.format(self.full_name))
self.get_settled_resource_assert_exists()
signed_identifiers = self._dict_to_signed_identifiers(defn.signed_identifiers)
self.qs().set_queue_acl(self.queue_name,
signed_identifiers = signed_identifiers)
self.signed_identifiers = defn.signed_identifiers
def create_after(self, resources, defn):
return {r for r in resources
if isinstance(r, AzureResourceGroupState) or isinstance(r, AzureStorageState)}
| lgpl-3.0 |
bakerlover/project4 | lib/flask/module.py | 850 | 1363 | # -*- coding: utf-8 -*-
"""
flask.module
~~~~~~~~~~~~
Implements a class that represents module blueprints.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
from .blueprints import Blueprint
def blueprint_is_module(bp):
"""Used to figure out if something is actually a module"""
return isinstance(bp, Module)
class Module(Blueprint):
"""Deprecated module support. Until Flask 0.6 modules were a different
name of the concept now available as blueprints in Flask. They are
essentially doing the same but have some bad semantics for templates and
static files that were fixed with blueprints.
.. versionchanged:: 0.7
Modules were deprecated in favor for blueprints.
"""
def __init__(self, import_name, name=None, url_prefix=None,
static_path=None, subdomain=None):
if name is None:
assert '.' in import_name, 'name required if package name ' \
'does not point to a submodule'
name = import_name.rsplit('.', 1)[1]
Blueprint.__init__(self, name, import_name, url_prefix=url_prefix,
subdomain=subdomain, template_folder='templates')
if os.path.isdir(os.path.join(self.root_path, 'static')):
self._static_folder = 'static'
| apache-2.0 |
jdmcbr/blaze | blaze/expr/reductions.py | 10 | 8915 | from __future__ import absolute_import, division, print_function
import datashape
from datashape import Record, DataShape, dshape, TimeDelta
from datashape import coretypes as ct
from datashape.predicates import iscollection, isboolean, isnumeric, isdatelike
from numpy import inf
from odo.utils import copydoc
import toolz
from .core import common_subexpression
from .expressions import Expr, ndim
from .strings import isstring
from .expressions import dshape_method_list, method_properties
class Reduction(Expr):
""" A column-wise reduction
Blaze supports the same class of reductions as NumPy and Pandas.
sum, min, max, any, all, mean, var, std, count, nunique
Examples
--------
>>> from blaze import symbol
>>> t = symbol('t', 'var * {name: string, amount: int, id: int}')
>>> e = t['amount'].sum()
>>> data = [['Alice', 100, 1],
... ['Bob', 200, 2],
... ['Alice', 50, 3]]
>>> from blaze.compute.python import compute
>>> compute(e, data)
350
"""
__slots__ = '_hash', '_child', 'axis', 'keepdims'
def __init__(self, _child, axis=None, keepdims=False):
self._child = _child
if axis is None:
axis = tuple(range(_child.ndim))
if isinstance(axis, (set, list)):
axis = tuple(axis)
if not isinstance(axis, tuple):
axis = (axis,)
axis = tuple(sorted(axis))
self.axis = axis
self.keepdims = keepdims
@property
def dshape(self):
axis = self.axis
if self.keepdims:
shape = tuple(1 if i in axis else d
for i, d in enumerate(self._child.shape))
else:
shape = tuple(d
for i, d in enumerate(self._child.shape)
if i not in axis)
return DataShape(*(shape + (self.schema,)))
@property
def schema(self):
schema = self._child.schema[0]
if isinstance(schema, Record) and len(schema.types) == 1:
result = toolz.first(schema.types)
else:
result = schema
return DataShape(result)
@property
def symbol(self):
return type(self).__name__
@property
def _name(self):
child_name = self._child._name
if child_name is None or child_name == '_':
return type(self).__name__
else:
return '%s_%s' % (child_name, type(self).__name__)
def __str__(self):
kwargs = list()
if self.keepdims:
kwargs.append('keepdims=True')
if self.axis != tuple(range(self._child.ndim)):
kwargs.append('axis=' + str(self.axis))
other = sorted(
set(self.__slots__[1:]) - set(['_child', 'axis', 'keepdims']))
for slot in other:
kwargs.append('%s=%s' % (slot, getattr(self, slot)))
name = type(self).__name__
if kwargs:
return '%s(%s, %s)' % (name, self._child, ', '.join(kwargs))
else:
return '%s(%s)' % (name, self._child)
class any(Reduction):
schema = dshape(ct.bool_)
class all(Reduction):
schema = dshape(ct.bool_)
class sum(Reduction):
@property
def schema(self):
return DataShape(datashape.maxtype(super(sum, self).schema))
class max(Reduction):
pass
class min(Reduction):
pass
class mean(Reduction):
schema = dshape(ct.real)
class var(Reduction):
"""Variance
Parameters
----------
child : Expr
An expression
unbiased : bool, optional
Compute an unbiased estimate of the population variance if this is
``True``. In NumPy and pandas, this parameter is called ``ddof`` (delta
degrees of freedom) and is equal to 1 for unbiased and 0 for biased.
"""
__slots__ = '_hash', '_child', 'unbiased', 'axis', 'keepdims'
schema = dshape(ct.real)
def __init__(self, child, unbiased=False, *args, **kwargs):
self.unbiased = unbiased
super(var, self).__init__(child, *args, **kwargs)
class std(Reduction):
"""Standard Deviation
Parameters
----------
child : Expr
An expression
unbiased : bool, optional
Compute the square root of an unbiased estimate of the population
variance if this is ``True``.
.. warning::
This does *not* return an unbiased estimate of the population
standard deviation.
See Also
--------
var
"""
__slots__ = '_hash', '_child', 'unbiased', 'axis', 'keepdims'
schema = dshape(ct.real)
def __init__(self, child, unbiased=False, *args, **kwargs):
self.unbiased = unbiased
super(std, self).__init__(child, *args, **kwargs)
class count(Reduction):
""" The number of non-null elements """
schema = dshape(ct.int32)
class nunique(Reduction):
schema = dshape(ct.int32)
class nelements(Reduction):
"""Compute the number of elements in a collection, including missing values.
See Also
    --------
blaze.expr.reductions.count: compute the number of non-null elements
Examples
--------
>>> from blaze import symbol
>>> t = symbol('t', 'var * {name: string, amount: float64}')
>>> t[t.amount < 1].nelements()
nelements(t[t.amount < 1])
"""
schema = dshape(ct.int32)
def nrows(expr):
return nelements(expr, axis=(0,))
class Summary(Expr):
""" A collection of named reductions
Examples
--------
>>> from blaze import symbol
>>> t = symbol('t', 'var * {name: string, amount: int, id: int}')
>>> expr = summary(number=t.id.nunique(), sum=t.amount.sum())
>>> data = [['Alice', 100, 1],
... ['Bob', 200, 2],
... ['Alice', 50, 1]]
>>> from blaze import compute
>>> compute(expr, data)
(2, 350)
"""
__slots__ = '_hash', '_child', 'names', 'values', 'axis', 'keepdims'
def __init__(self, _child, names, values, axis=None, keepdims=False):
self._child = _child
self.names = names
self.values = values
self.keepdims = keepdims
self.axis = axis
@property
def dshape(self):
axis = self.axis
if self.keepdims:
shape = tuple(1 if i in axis else d
for i, d in enumerate(self._child.shape))
else:
shape = tuple(d
for i, d in enumerate(self._child.shape)
if i not in axis)
measure = Record(list(zip(self.names,
[v.schema for v in self.values])))
return DataShape(*(shape + (measure,)))
def __str__(self):
s = 'summary('
s += ', '.join('%s=%s' % (name, str(val))
for name, val in zip(self.fields, self.values))
if self.keepdims:
s += ', keepdims=True'
s += ')'
return s
@copydoc(Summary)
def summary(keepdims=False, axis=None, **kwargs):
items = sorted(kwargs.items(), key=toolz.first)
names = tuple(map(toolz.first, items))
values = tuple(map(toolz.second, items))
child = common_subexpression(*values)
if len(kwargs) == 1 and not iscollection(child.dshape):
while not iscollection(child.dshape):
children = [i for i in child._inputs if isinstance(i, Expr)]
if len(children) == 1:
child = children[0]
else:
child = common_subexpression(*children)
if axis is None:
axis = tuple(range(ndim(child)))
if isinstance(axis, (set, list)):
axis = tuple(axis)
if not isinstance(axis, tuple):
axis = (axis,)
return Summary(child, names, values, keepdims=keepdims, axis=axis)
def vnorm(expr, ord=None, axis=None, keepdims=False):
""" Vector norm
See np.linalg.norm
"""
if ord is None or ord == 'fro':
ord = 2
if ord == inf:
return max(abs(expr), axis=axis, keepdims=keepdims)
elif ord == -inf:
return min(abs(expr), axis=axis, keepdims=keepdims)
elif ord == 1:
return sum(abs(expr), axis=axis, keepdims=keepdims)
elif ord % 2 == 0:
return sum(expr ** ord, axis=axis, keepdims=keepdims) ** (1.0 / ord)
return sum(abs(expr) ** ord, axis=axis, keepdims=keepdims) ** (1.0 / ord)
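# Small usage sketch (not from the original module); ``t`` is assumed to be a
# symbol with a numeric field:
#
#   t = symbol('t', 'var * {amount: float64}')
#   vnorm(t.amount)           # 2-norm: sum(amount ** 2) ** 0.5
#   vnorm(t.amount, ord=1)    # sum of absolute values
#   vnorm(t.amount, ord=inf)  # maximum absolute value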
dshape_method_list.extend([
(iscollection, set([count, nelements])),
(lambda ds: (iscollection(ds) and
(isstring(ds) or isnumeric(ds) or isboolean(ds) or
isdatelike(ds) or isinstance(ds, TimeDelta))),
set([min, max])),
(lambda ds: len(ds.shape) == 1,
set([nrows, nunique])),
(lambda ds: iscollection(ds) and isboolean(ds),
set([any, all])),
(lambda ds: iscollection(ds) and (isnumeric(ds) or isboolean(ds)),
set([mean, sum, std, var, vnorm])),
])
method_properties.update([nrows])
| bsd-3-clause |
jmorganc/laptimes | bin/scraper.py | 1 | 6154 | #!/usr/bin/python
import urllib2
from bs4 import BeautifulSoup
import time
import datetime
import MySQLdb
import sys, os
sys.path.append(os.path.abspath('{0}/../../'.format(os.path.abspath(__file__))))
from laptimes import config
def main():
while True:
racer_id = thread_control()
if racer_id:
print 'processing racer {0}'.format(str(racer_id))
races = get_races(str(racer_id))
for race in races:
laptimes = get_laptimes(race['racer_id'], race['race_id'])
race['laptimes'] = laptimes[0]
race_datetime = time.strptime(laptimes[1], '%m/%d/%Y %I:%M %p')
race['race_date'] = race_datetime
mysql_save(race)
else:
print 'no racers to process'
time.sleep(60)
def get_races(racer_id):
url = "http://clubspeedtiming.com/dkcdallas/RacerHistory.aspx?CustID="+racer_id
page = urllib2.urlopen(url).read()
soup = BeautifulSoup(page)
links = soup.find_all('a')
races = []
for link in links:
race = {}
racer_name = soup.find('span', attrs={'id':'lblRacerName'})
heat_type_string = link.get_text()[:23]
kart_id = link.get_text()[31:]
race_id = str(link.get('href'))[24:]
race['race_id'] = race_id
race['kart_id'] = kart_id
racer_name_str = unicode(racer_name.get_text().strip().replace('\n', '').replace('\t', '').replace('\r', ''))
race['racer_name'] = racer_name_str.encode('utf-8')
race['racer_id'] = racer_id
if heat_type_string == '10 min Adult Super Heat':
races.append(race)
return races
def get_laptimes(racer_id, race_id):
url = "http://clubspeedtiming.com/dkcdallas/HeatDetails.aspx?HeatNo="+race_id
page = urllib2.urlopen(url).read()
soup = BeautifulSoup(page)
racers = []
laptimes = []
race_date_span = soup.find('span', attrs={'id':'lblDate'})
race_date = race_date_span.get_text()
links = soup.find_all('a')
for link in links:
racers.append(int((link.get('href')[25:])))
racer_index = (sorted(racers).index(int(racer_id))+3)
tables = soup.find_all('table')
racer_table = tables[(racer_index)]
tds = racer_table.find_all('td')[1::2]
for td in tds:
laptime = td.get_text().split()
if laptime:
laptimes.append(laptime[0])
return laptimes, race_date
def mysql_save(race):
connection = mysql_connect()
cursor = connection.cursor()
date_time = race['race_date']
date_time = datetime.datetime(date_time.tm_year, date_time.tm_mon, date_time.tm_mday, date_time.tm_hour, date_time.tm_min)
create_racer(race['racer_id'], race['racer_name'], cursor)
create_kart(race['kart_id'], cursor)
create_race(race['race_id'], race['race_date'], cursor)
for laptime in race['laptimes']:
laptime_index = race['laptimes'].index(laptime)
time_shift = 0
i = 0
if laptime_index == 0:
time_shift = round(float(laptime))
else:
while (i < laptime_index):
time_shift += round(float(race['laptimes'][i]))
i += 1
date_time = date_time + datetime.timedelta(seconds=time_shift)
create_laptime(race['racer_id'], race['kart_id'], race['race_id'], (laptime_index+1), laptime, date_time, cursor)
update_queue(race['racer_id'], cursor)
connection.commit()
cursor.close()
connection.close()
def create_racer(racer_id, racer_name, cursor):
cursor.execute('LOCK TABLE racers write')
cursor.execute('SELECT id FROM racers WHERE id = %s', (racer_id,))
result = cursor.fetchone()
if not result:
cursor.execute('INSERT INTO racers(id, racer_name) VALUES(%s, %s)', (racer_id, racer_name,))
print 'Racer {0} created.'.format(racer_name)
cursor.execute('UNLOCK TABLES')
def create_kart(kart_id, cursor):
cursor.execute('LOCK TABLE karts write')
cursor.execute('SELECT id FROM karts WHERE id = %s', (kart_id,))
result = cursor.fetchone()
if not result:
cursor.execute('INSERT INTO karts(id) VALUES(%s)', (kart_id,))
print 'Kart {0} created.'.format(kart_id,)
cursor.execute('UNLOCK TABLES')
def create_race(race_id, date_time, cursor):
cursor.execute('LOCK TABLE races write')
cursor.execute('SELECT id FROM races WHERE id = %s', (race_id,))
result = cursor.fetchone()
date_time = datetime.datetime(date_time.tm_year, date_time.tm_mon, date_time.tm_mday, date_time.tm_hour, date_time.tm_min)
if not result:
cursor.execute('INSERT INTO races(id, datetime) VALUES(%s, %s)', (race_id, date_time,))
print 'Race {0} created.'.format(race_id,)
cursor.execute('UNLOCK TABLES')
def create_laptime(racer_id, kart_id, race_id, lap_number, laptime, date_time, cursor):
date_time_str = date_time.strftime('%Y-%m-%d %H:%M:%S')
cursor.execute('LOCK TABLE laptimes write')
cursor.execute('SELECT racer_id, kart_id, race_id, lap_number, laptime \
FROM laptimes \
WHERE racer_id = %s \
AND kart_id = %s \
AND race_id = %s \
AND lap_number = %s', (racer_id, kart_id, race_id, lap_number,))
result = cursor.fetchone()
if not result:
cursor.execute('INSERT INTO laptimes(racer_id, kart_id, race_id, lap_number, laptime, datetime) \
VALUES(%s, %s, %s, %s, %s, %s)', (racer_id, kart_id, race_id, lap_number, laptime, date_time,))
print 'Laptime created: ({0}, {1}, {2})'.format(racer_id, laptime, date_time)
cursor.execute('UNLOCK TABLES')
def update_queue(racer_id, cursor):
cursor.execute('LOCK TABLE threading WRITE')
cursor.execute('UPDATE threading \
SET processing = -1, processing_finished = NOW() \
WHERE id = %s', (racer_id),)
cursor.execute('UNLOCK TABLES')
def thread_control():
connection = mysql_connect()
cursor = connection.cursor()
cursor.execute('LOCK TABLE threading WRITE')
cursor.execute('SELECT id FROM threading \
WHERE processing = 0 \
LIMIT 1')
result = cursor.fetchone()
if result:
racer_id = result[0]
cursor.execute('UPDATE threading \
SET processing = 1, processing_started = NOW() \
WHERE id = %s', (racer_id),)
connection.commit()
cursor.execute('UNLOCK TABLES')
cursor.close()
connection.close()
return racer_id
else:
cursor.execute('UNLOCK TABLES')
cursor.close()
connection.close()
def mysql_connect():
return MySQLdb.connect(config.opts['mysql']['host'], config.opts['mysql']['username'], config.opts['mysql']['password'], config.opts['mysql']['database'])
main()
| mit |
sander76/home-assistant | homeassistant/components/fortios/device_tracker.py | 24 | 2933 | """
Support to use FortiOS device like FortiGate as device tracker.
This component is part of the device_tracker platform.
"""
import logging
from fortiosapi import FortiOSAPI
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.const import CONF_HOST, CONF_TOKEN, CONF_VERIFY_SSL
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_VERIFY_SSL = False
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_TOKEN): cv.string,
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
}
)
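# The schema above corresponds to a configuration.yaml entry roughly like the
# following (host and token values are placeholders, not taken from this file):
#
#   device_tracker:
#     - platform: fortios
#       host: 192.168.1.1
#       token: !secret fortios_api_token
#       verify_ssl: false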
def get_scanner(hass, config):
"""Validate the configuration and return a FortiOSDeviceScanner."""
host = config[DOMAIN][CONF_HOST]
verify_ssl = config[DOMAIN][CONF_VERIFY_SSL]
token = config[DOMAIN][CONF_TOKEN]
fgt = FortiOSAPI()
try:
fgt.tokenlogin(host, token, verify_ssl)
except ConnectionError as ex:
_LOGGER.error("ConnectionError to FortiOS API: %s", ex)
return None
except Exception as ex: # pylint: disable=broad-except
_LOGGER.error("Failed to login to FortiOS API: %s", ex)
return None
return FortiOSDeviceScanner(fgt)
class FortiOSDeviceScanner(DeviceScanner):
"""This class queries a FortiOS unit for connected devices."""
def __init__(self, fgt) -> None:
"""Initialize the scanner."""
self._clients = {}
self._clients_json = {}
self._fgt = fgt
def update(self):
"""Update clients from the device."""
clients_json = self._fgt.monitor("user/device/select", "")
self._clients_json = clients_json
self._clients = []
if clients_json:
for client in clients_json["results"]:
if client["last_seen"] < 180:
self._clients.append(client["mac"].upper())
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self.update()
return self._clients
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
_LOGGER.debug("Getting name of device %s", device)
device = device.lower()
data = self._clients_json
if data == 0:
_LOGGER.error("No json results to get device names")
return None
for client in data["results"]:
if client["mac"] == device:
try:
name = client["host"]["name"]
_LOGGER.debug("Getting device name=%s", name)
return name
except KeyError as kex:
_LOGGER.error("Name not found in client data: %s", kex)
return None
return None
| apache-2.0 |
BT-fgarbely/sale-workflow | __unported__/stock_picking_reorder_lines/__openerp__.py | 12 | 1348 | # -*- coding: utf-8 -*-
#
#
# Author: Alexandre Fayolle
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
{
'name': 'Stock picking lines with sequence number',
'version': '0.1',
'category': 'Warehouse Management',
'description': '''
Provide a new field on stock moves, allowing you to manage the order of moves
within a picking.
''',
'author': "Camptocamp,Odoo Community Association (OCA)",
'website': 'http://www.camptocamp.com',
'depends': ['stock', 'sale', 'sale_stock'],
'data': ['stock_view.xml'],
'demo': [],
'test': [],
'installable': False,
'auto_install': False,
'application': False,
'license': "AGPL-3",
}
| agpl-3.0 |
zofuthan/airmozilla | airmozilla/manage/views/loggedsearches.py | 15 | 2361 | import datetime
from django.shortcuts import render
from django.utils import timezone
from django.db.models import Count
from airmozilla.base.utils import paginate
from airmozilla.search.models import LoggedSearch
from .decorators import superuser_required
@superuser_required
def loggedsearches(request):
searches = (
LoggedSearch.objects
.select_related('event_clicked')
.order_by('-date')
)
paged = paginate(searches, request.GET.get('page'), 20)
context = {
'paginate': paged,
'hash_user_id': lambda x: str(hash(str(x)))[-4:],
}
return render(request, 'manage/loggedsearches.html', context)
@superuser_required
def loggedsearches_stats(request):
context = {}
now = timezone.now()
today = now.replace(hour=0, minute=0, second=0, microsecond=0)
this_week = today - datetime.timedelta(days=today.weekday())
this_month = today.replace(day=1)
this_year = this_month.replace(month=1)
groups = (
('All searches', {}),
('Successful searches', {'results__gt': 0}),
('Failed searches', {'results': 0}),
)
context['groups'] = []
qs_base = LoggedSearch.objects.all()
for group_name, filters in groups:
qs = qs_base.filter(**filters)
counts = {}
counts['today'] = qs.filter(date__gte=today).count()
counts['this_week'] = qs.filter(date__gte=this_week).count()
counts['this_month'] = qs.filter(date__gte=this_month).count()
counts['this_year'] = qs.filter(date__gte=this_year).count()
counts['ever'] = qs.count()
context['groups'].append((group_name, counts, False))
qs = (
qs_base.extra(
select={'term_lower': 'LOWER(term)'}
)
.values('term_lower')
.annotate(count=Count('term'))
.order_by('-count')
)
terms = {}
terms['today'] = qs.filter(date__gte=today)[:5]
terms['this_week'] = qs.filter(date__gte=this_week)[:5]
terms['this_month'] = qs.filter(date__gte=this_month)[:5]
terms['this_year'] = qs.filter(date__gte=this_year)[:5]
terms['ever'] = qs[:5]
context['groups'].append(
(
'Most common terms (case insensitive, top 5)',
terms,
True
)
)
return render(request, 'manage/loggedsearches_stats.html', context)
| bsd-3-clause |
wanjohikibui/LIMS | testapp/back_models.py | 2 | 19623 | # This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table
# Feel free to rename the models, but don't rename db_table values or field names.
#
# Also note: You'll have to insert the output of 'django-admin sqlcustom [app_label]'
# into your database.
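# A module like this is typically produced by introspecting an existing database,
# e.g. (commands shown for illustration only):
#
#   python manage.py inspectdb > testapp/back_models.py
#   django-admin sqlcustom testapp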
from __future__ import unicode_literals
from django.contrib.gis.db import models
import datetime
from django.utils import timezone
from django.db.models import signals
from django.contrib.auth.models import User,Group
from django.core.validators import MaxLengthValidator,MinLengthValidator
#from django_hstore import hstore
def upload_application(instance, filename):
# return "title_images/%s" % (filename)
return '/'.join(['application_docs', str(instance.id_number), filename])
def upload_report(instance, filename):
return "report_images/%s" % (filename)
def upload_docs(instance, filename):
return "documents/%s" % (filename)
restriction_types = (
    ('Morgage', 'Mortgage'),
('Courtorder', 'Court Order'),
('Caveat', 'Caveat'),
    ('Landuse','Land Use Restriction')
)
landuse_zoning_types = (
('Agricultural', 'Agricultural'),
('Residential', 'Residential'),
('Commercial', 'Commercial'),
('Industry', 'Industry'),
('Public', 'Public'),
('Reserve', 'Reserve'),
)
landcover_types = (
('1', 'Agricultural'),
('2', 'Residential'),
('3', 'Commercial'),
('4', 'Industry'),
('5', 'Public'),
('6', 'Reserve'),
('7', 'Industry'),
('8', 'Public'),
('9', 'Reserve'),
)
transaction_types = (
('Transfer', 'Transfer'),
('Subdivision', 'Subdivision'),
('Change of user', 'Change of user'),
('Development application', 'Development application'),
('Registration', 'Registration'),
('Valuation', 'Valuation'),
)
status_types = (
('Approved', 'Approved'),
('Rejected', 'Rejected'),
('Registered', 'Registered'),
('Deleted', 'Deleted'),
('Pending', 'Pending'),
('Withdrawn', 'Withdrawn'),
    ('Superceeded', 'Superseded'),
)
application_status = (
('Unverified','Unverified'),
('Verified','Verified'),
('Approved','Approved'),
('Completed','Completed'),
('Closed','Closed')
)
party_types = (
('Individual', 'Individual'),
('Company', 'Company'),
('Community', 'Community'),
('Trust', 'Trust'),
('Family', 'Family'),
('Government', 'Government'),
)
agent_types=(
('Owner', 'Owner'),
('Buyer', 'Buyer'),
('Other','Other'),
)
actual_landuses=(
('Agricultural', 'Agricultural'),
('Residential', 'Residential'),
('Commercial', 'Commercial'),
('Industry', 'Industry'),
('Public', 'Public'),
('Reserve', 'Reserve'),
)
transaction_status_types = (
('Lodge', 'Lodge'),
('Validate', 'Validate'),
('Start', 'Start'),
('Assign', 'Assign'),
('Un-Assign', 'Un-Assign'),
('Dispatch', 'Dispatch'),
)
status_type=(
('Current','Current'),
('Historic','Historic'),
('Pending','Pending'),
('Previous','Previous'),
)
change_actions = (
#to edit types
('Update', 'Update'),
('Delete', 'Delete'),
('Insert', 'Insert'),
)
service_status_types=(
('Lodged', 'Lodged'),
('Validated', 'Validated'),
('Started', 'Started'),
('Assigned', 'Assigned'),
('Un-Assigned', 'Un-Assigned'),
('Dispatched', 'Dispatched'),
('Completed','Completed'),
('Archived','Archived'),
)
lims_badminunit_type=(
('Free-hold title', 'Free-hold title'),
('Lease-hold title', 'Lease-hold title'),
('Mining contract', 'Mining contract'),
('Conservation', 'Conservation'),
)
application_status_types = (
('Lodged', 'Lodged'),
('Validated', 'Validated'),
('Started', 'Started'),
('Assigned', 'Assigned'),
('Un-Assigned', 'Un-Assigned'),
('Dispatched', 'Dispatched'),
('Completed','Completed'),
('Archived','Archived'),
)
application_types=(
('Registration','Registration'),
('Official Search','Official Search'),
('Change of User','Change of User'),
)
landuse_restrictions_types = (
('Permitted Use', 'Permitted Use'),
('Non-permitted Use', 'Non-permitted Use'),
('Consented Use', 'Consented Use'),
('Actual Use', 'Actual Use'),
)
id_types=(
('National ID','National ID'),
('Passport','Passport'),
('Company Registration Number','Company Registration Number'),
)
######## LADM Objects#########
class UserProfile(models.Model):
user = models.OneToOneField(User)
activation_key = models.CharField(max_length=40, blank=True)
    key_expires = models.DateTimeField(default=timezone.now)
def __str__(self):
return self.user.username
class Meta:
verbose_name_plural='Userprofiles'
class Customer(User):
class Meta:
proxy = True
app_label = 'auth'
verbose_name = 'Customer account'
verbose_name_plural = 'Customer accounts'
class Staff(User):
class Meta:
proxy = True
app_label = 'auth'
verbose_name = 'Staff account'
verbose_name_plural = 'Staff accounts'
class ladm_badminunit(models.Model):
id=models.IntegerField(primary_key=True)
type_code=models.CharField(choices=lims_badminunit_type, max_length=50)
parcel_id=models.ForeignKey('las_parcel')
party_id=models.ForeignKey('las_party')
admindocumenturi=models.ForeignKey('Documents')
creation_date=models.DateField()
expiration_date=models.DateField()
objects=models.Manager()
def __unicode__(self):
return "%s %s" %(self.id, self.type_code)
class Meta:
verbose_name_plural = "ADmin Units"
managed = True
class AdministrationArea(models.Model):
unitid = models.IntegerField(primary_key=True)
name= models.CharField(max_length=50)
leng = models.FloatField()
area = models.FloatField()
geom = models.PolygonField(srid=21037)
objects = models.GeoManager()
class Meta:
verbose_name_plural = "AdministrationArea"
managed = True
class RegistrationSection(models.Model):
sectionid = models.IntegerField(primary_key=True)
adminarea = models.ForeignKey(AdministrationArea)
name= models.CharField(max_length=50)
leng = models.FloatField()
area = models.FloatField()
geom = models.PolygonField(srid=21037)
objects = models.GeoManager()
class Meta:
verbose_name_plural = "RegistrationSection"
managed = True
class RegistrationBlock (models.Model):
blockid = models.IntegerField(primary_key=True) # Field name made lowercase.
sectionid = models.ForeignKey(RegistrationSection) # Field name made lowercase.
name= models.CharField(max_length=50)
leng = models.FloatField()
area = models.FloatField()
geom = models.PolygonField(srid=21037)
objects = models.GeoManager()
class Meta:
verbose_name_plural = "Registration Blocks"
managed = True
class las_parcel(models.Model):
id=models.IntegerField(primary_key=True)
blockid=models.IntegerField(null=True,blank=True)
areacode=models.IntegerField(null=True,blank=True)
blockname=models.CharField(max_length=50,null=True,blank=True)
parcel_no=models.CharField(max_length=50,null=True,blank=True)
sectcode=models.IntegerField(null=True,blank=True)
land_use=models.ForeignKey('landuse_zoning',null=True,blank=True)
surveyornumber=models.CharField(max_length=50,null=True,blank=True)
surveydocid=models.ForeignKey('Documents',null=True,blank=True)
approval_datetime=models.DateTimeField(null=True,blank=True)
historic_datetime=models.DateTimeField(null=True,blank=True)
parent=models.CharField(max_length=15, null=True,default="null",blank=True)
area=models.FloatField()
length=models.FloatField()
geom=models.MultiPolygonField(srid=21037)
objects = models.GeoManager()
def __unicode__(self):
return "%s %s" %(self.id, self.blockid)
class Meta:
verbose_name_plural = "las_parcel"
managed = True
class Restrictions(models.Model):
    restriction_id = models.AutoField(primary_key=True)
adminunit = models.ForeignKey(ladm_badminunit)
restriction_type = models.CharField(max_length=50, choices= restriction_types)
restriction_Description = models.CharField(max_length=50)
restriction_holder = models.CharField(max_length=50)
amount = models.IntegerField()
objects=models.Manager()
class Meta:
verbose_name_plural = "Restrictions"
managed = True
class Landuse_Restriction(models.Model):
id = models.IntegerField(primary_key=True)
adminunit = models.ForeignKey(ladm_badminunit)
landuserestriction_type = models.CharField(max_length=50, choices= landuse_restrictions_types)
landuserestriction_desc = models.TextField(max_length=100)
registration_date = models.DateTimeField()
expiry_date = models.DateTimeField()
objects=models.Manager()
class Meta:
verbose_name_plural = "Landuse Restriction"
managed = True
class unverifiedManager(models.Manager):
def get_queryset(self):
return super(unverifiedManager, self).get_queryset().filter(status='Unverified')
class verifiedManager(models.Manager):
def get_queryset(self):
return super(verifiedManager, self).get_queryset().filter(status='Verified')
class completedManager(models.Manager):
def get_queryset(self):
return super(completedManager, self).get_queryset().filter(status='Completed')
class approvedManager(models.Manager):
def get_queryset(self):
return super(approvedManager, self).get_queryset().filter(status='Approved')
class rejectedManager(models.Manager):
def get_queryset(self):
return super(rejectedManager, self).get_queryset().filter(status='Rejected')
class las_application(models.Model):
app_id=models.AutoField(primary_key=True)
first_name=models.CharField(max_length=50)
last_name=models.CharField(max_length=50)
id_type=models.CharField(max_length=20, null=True, choices=id_types, default=id_types[0][0])
id_number=models.CharField(max_length=15,null=True)
email = models.EmailField(max_length=50, default='user@user.com')
telephone = models.IntegerField(help_text="Enter phone number")
applicant_type=models.CharField(choices=agent_types,max_length=50)
date_applied = models.DateTimeField(auto_now_add=True)
date_completed = models.DateTimeField(null=True,blank=True)
date_approved = models.DateTimeField(null=True,blank=True)
application_type = models.CharField(max_length=50, choices=application_types)
title = models.FileField(upload_to= upload_application, null=True, help_text="Upload copy of Title")
search = models.FileField(upload_to= upload_application, null=True, help_text="Upload copy of Search document")
comment = models.FileField(upload_to= upload_application, null=True, help_text="Upload copy of comment form")
add_comment = models.FileField(upload_to= upload_application, null=True, help_text="Upload other comment(if available)")
scheme = models.FileField(upload_to=upload_application, null=True, help_text="Upload copy of Physical Scheme Plan")
ppa = models.FileField(upload_to= upload_application, null=True, help_text="Upload copy of PPA2")
receipt = models.FileField(upload_to= upload_application, null=True, help_text="Upload copy of Payment Receipt")
planning = models.FileField(upload_to= upload_application, null=True, help_text="Upload copy of Planning document")
status = models.CharField(max_length=15, null=True, choices=application_status, default=application_status[0][0])
registry_comments = models.TextField(max_length = 256, help_text="Registry section comments Here", null=True)
dc_comments = models.TextField(max_length = 256, help_text="Development control comments Here", null=True)
upload_dcreport = models.FileField(upload_to= upload_report, null=True)
final_comments = models.TextField(max_length = 256, help_text="Final comments Here", null=True)
user = models.ForeignKey(User)
#objects=unverifiedManager()
def __unicode__(self):
return u'%s' % (self.first_name)
    class Meta:
        verbose_name_plural = "las_applications"
        managed = True
        permissions = (
            ('view_application', 'Can verify applications'),
        )
class development(las_application):
approved = verifiedManager()
class Meta:
proxy = True
verbose_name_plural = "Development_apps"
class Approved_apps(las_application):
approved = approvedManager()
class Meta:
proxy = True
verbose_name_plural = "Approved Apps"
class rejected(las_application):
approved = rejectedManager()
class Meta:
proxy = True
verbose_name_plural = "Rejected_apps"
class completed(las_application):
approved = completedManager()
class Meta:
proxy = True
verbose_name_plural = "Completed_Apps"
class dev_controlunit(models.Model):
las_application=models.OneToOneField(las_application)
User=models.ForeignKey(User)
dc_verified = models.NullBooleanField(blank=True, null=True, default=None)
dc_comments = models.TextField(max_length = 256, help_text="Development control comments Here", null=True)
date_checked = models.DateTimeField(auto_now_add=True)
objects=models.Manager()
class Meta:
verbose_name_plural = "Development Section"
managed = True
class las_party (models.Model):
GENDER_CHOICES = (
('Male', 'Male'),
('Female', 'Female'),
('Other', 'N/A'),
)
preffered_comm_methd=(
('Email','Email'),
('Mobile','Mobile'),
('Tel','Telephone')
)
id=models.IntegerField(primary_key=True)
ext_id=models.CharField(max_length=50)
partytype=models.CharField(choices=party_types,max_length=50)
name=models.CharField(max_length=50)
last_name=models.CharField(max_length=50, null=True, blank=True)
gender_code=models.CharField(choices=GENDER_CHOICES,max_length=50)
id_type_code=models.CharField(choices=id_types,max_length=50)
id_number=models.CharField(max_length=50)
# contacts=hstore.DictionaryField()
address_id=models.CharField(max_length=50)
email=models.EmailField()
mobile=models.CharField(max_length=50)
phone=models.CharField(max_length=50)
preffered_communication=models.CharField(choices=preffered_comm_methd,max_length=50)
objects=models.Manager()
#objects = hstore.HStoreManager()
def __unicode__(self):
return "%s %s" %(self.id, self.name)
class Meta:
verbose_name_plural = "las_party"
managed = True
class transaction(models.Model):
id=models.IntegerField(primary_key=True)
from_application_id=models.ForeignKey('las_application')
assigned=models.BooleanField()
assignee_id=models.ForeignKey('UserProfile')
assigned_date=models.DateTimeField()
status=models.CharField(choices=transaction_status_types,max_length=50)
approval_datatime=models.DateTimeField()
service_fee=models.IntegerField()
fee_paid=models.BooleanField()
change_action=models.CharField(choices=change_actions,max_length=50)
change_user=models.CharField(max_length=50)
change_time=models.DateTimeField()
notes=models.TextField()
objects=models.Manager()
def __unicode__(self):
return "%s" %(self.id)
class Meta:
verbose_name_plural = "transaction"
managed = True
#### KLADM Extensions#####
class landuse_zoning(models.Model):
id = models.AutoField(primary_key=True)
zone_code = models.IntegerField()
zone_type = models.CharField(max_length=50, null=True, choices= landuse_zoning_types)
zone_Description = models.CharField(max_length=50)
area=models.FloatField()
length=models.FloatField()
geom = models.MultiPolygonField(srid=21037)
objects = models.GeoManager()
def __unicode__(self): # __unicode__ on Python 2
return 'Type: %s' % self.zone_type
class Meta:
verbose_name_plural = "Landuse Zoning"
managed = True
class landcover(models.Model):
gid = models.AutoField(primary_key=True)
landcover_code=models.IntegerField(null=True)
landcover_type = models.CharField(max_length=50,null=True,blank=True)
landcover_desc= models.CharField(max_length=50, blank=True, null=True, choices= landcover_types)
area= models.FloatField()
geom = models.MultiPolygonField(srid=21037)
objects=models.GeoManager()
class Meta:
verbose_name_plural = "landcover"
managed = True
class valuation(models.Model):
valuation_id = models.IntegerField(primary_key=True)
badminunit = models.ForeignKey('ladm_badminunit')
value_amount = models.IntegerField()
valuationstartdate = models.DateTimeField()
valuationenddate = models.DateTimeField()
objects=models.Manager()
class Meta:
verbose_name_plural = "Valuation"
managed = True
class Documents (models.Model):
id=models.IntegerField(primary_key=True)
of_type=models.CharField(max_length=10)
document_name=models.CharField(max_length=50, blank=False, null=False)
document_image=models.FileField(upload_to= upload_docs, null=True)
datetime_uploaded=models.DateTimeField()
objects=models.GeoManager()
class Meta:
verbose_name_plural = "Uploaded Documents"
managed = True
class Rivers(models.Model):
id=models.IntegerField(primary_key=True)
name= models.CharField(max_length=255)
reserve=models.IntegerField( null=True, blank= True)
geom= models.MultiLineStringField(srid=21037)
objects=models.Manager()
def __unicode__(self):
return self.name
class Meta:
verbose_name_plural = "Rivers"
managed = True
class Riperian(models.Model):
id=models.IntegerField(primary_key=True)
name= models.CharField(max_length=255)
reserve=models.IntegerField( null=True, blank= True)
geom= models.MultiPolygonField(srid=21037)
objects=models.Manager()
def __unicode__(self):
return self.name
class Meta:
verbose_name_plural = "Riperian"
managed = True
class Roads(models.Model):
id=models.IntegerField(primary_key=True)
name= models.CharField(max_length=255)
road_type=models.CharField(max_length= 50)
road_class=models.CharField(max_length=5)
reserve=models.IntegerField( null=True, blank= True)
geom= models.MultiLineStringField(srid=21037)
objects=models.GeoManager()
def __unicode__(self):
return self.name
class Meta:
verbose_name_plural = "Roads"
managed = True
| gpl-2.0 |
syci/OCB | openerp/addons/base/ir/ir_filters.py | 44 | 8085 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp import exceptions
from openerp.osv import osv, fields
from openerp.tools.translate import _
class ir_filters(osv.osv):
_name = 'ir.filters'
_description = 'Filters'
def _list_all_models(self, cr, uid, context=None):
cr.execute("SELECT model, name FROM ir_model ORDER BY name")
return cr.fetchall()
def copy(self, cr, uid, id, default=None, context=None):
name = self.read(cr, uid, [id], ['name'])[0]['name']
        default = dict(default or {})
        default.update({'name': _('%s (copy)') % name})
return super(ir_filters, self).copy(cr, uid, id, default, context)
def _get_action_domain(self, cr, uid, action_id=None):
"""Return a domain component for matching filters that are visible in the
same context (menu/view) as the given action."""
if action_id:
# filters specific to this menu + global ones
return [('action_id', 'in' , [action_id, False])]
# only global ones
return [('action_id', '=', False)]
def get_filters(self, cr, uid, model, action_id=None, context=None):
"""Obtain the list of filters available for the user on the given model.
:param action_id: optional ID of action to restrict filters to this action
plus global filters. If missing only global filters are returned.
The action does not have to correspond to the model, it may only be
a contextual action.
:return: list of :meth:`~osv.read`-like dicts containing the
``name``, ``is_default``, ``domain``, ``user_id`` (m2o tuple),
``action_id`` (m2o tuple) and ``context`` of the matching ``ir.filters``.
"""
# available filters: private filters (user_id=uid) and public filters (uid=NULL),
# and filters for the action (action_id=action_id) or global (action_id=NULL)
context = self.pool['res.users'].context_get(cr, uid)
action_domain = self._get_action_domain(cr, uid, action_id)
filter_ids = self.search(cr, uid, action_domain +
[('model_id','=',model),('user_id','in',[uid, False])])
my_filters = self.read(cr, uid, filter_ids,
['name', 'is_default', 'domain', 'context', 'user_id', 'sort'], context=context)
return my_filters
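    # Illustrative call from other server-side code (ids and names made up):
    #
    #   filters = self.pool['ir.filters'].get_filters(cr, uid, 'res.partner', action_id=42)
    #   # -> [{'id': 7, 'name': 'My partners', 'is_default': False, 'domain': '[]',
    #   #      'context': '{}', 'sort': '[]', 'user_id': (1, 'Administrator')}]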
def _check_global_default(self, cr, uid, vals, matching_filters, context=None):
""" _check_global_default(cursor, UID, dict, list(dict), dict) -> None
Checks if there is a global default for the model_id requested.
If there is, and the default is different than the record being written
(-> we're not updating the current global default), raise an error
to avoid users unknowingly overwriting existing global defaults (they
have to explicitly remove the current default before setting a new one)
This method should only be called if ``vals`` is trying to set
``is_default``
:raises openerp.exceptions.Warning: if there is an existing default and
we're not updating it
"""
action_domain = self._get_action_domain(cr, uid, vals.get('action_id'))
existing_default = self.search(cr, uid, action_domain + [
('model_id', '=', vals['model_id']),
('user_id', '=', False),
('is_default', '=', True)], context=context)
if not existing_default: return
if matching_filters and \
(matching_filters[0]['id'] == existing_default[0]):
return
raise exceptions.Warning(
_("There is already a shared filter set as default for %(model)s, delete or change it before setting a new default") % {
'model': vals['model_id']
})
def create_or_replace(self, cr, uid, vals, context=None):
lower_name = vals['name'].lower()
action_id = vals.get('action_id')
current_filters = self.get_filters(cr, uid, vals['model_id'], action_id)
matching_filters = [f for f in current_filters
if f['name'].lower() == lower_name
# next line looks for matching user_ids (specific or global), i.e.
# f.user_id is False and vals.user_id is False or missing,
# or f.user_id.id == vals.user_id
if (f['user_id'] and f['user_id'][0]) == vals.get('user_id', False)]
if vals.get('is_default'):
if vals.get('user_id'):
# Setting new default: any other default that belongs to the user
# should be turned off
action_domain = self._get_action_domain(cr, uid, action_id)
act_ids = self.search(cr, uid, action_domain + [
('model_id', '=', vals['model_id']),
('user_id', '=', vals['user_id']),
('is_default', '=', True),
], context=context)
if act_ids:
self.write(cr, uid, act_ids, {'is_default': False}, context=context)
else:
self._check_global_default(
cr, uid, vals, matching_filters, context=None)
# When a filter exists for the same (name, model, user) triple, we simply
# replace its definition (considering action_id irrelevant here)
if matching_filters:
self.write(cr, uid, matching_filters[0]['id'], vals, context)
return matching_filters[0]['id']
return self.create(cr, uid, vals, context)
_sql_constraints = [
# Partial constraint, complemented by unique index (see below)
# Still useful to keep because it provides a proper error message when a violation
# occurs, as it shares the same prefix as the unique index.
('name_model_uid_unique', 'unique (name, model_id, user_id, action_id)', 'Filter names must be unique'),
]
def _auto_init(self, cr, context=None):
result = super(ir_filters, self)._auto_init(cr, context)
# Use unique index to implement unique constraint on the lowercase name (not possible using a constraint)
cr.execute("DROP INDEX IF EXISTS ir_filters_name_model_uid_unique_index") # drop old index w/o action
cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = 'ir_filters_name_model_uid_unique_action_index'")
if not cr.fetchone():
cr.execute("""CREATE UNIQUE INDEX "ir_filters_name_model_uid_unique_action_index" ON ir_filters
(lower(name), model_id, COALESCE(user_id,-1), COALESCE(action_id,-1))""")
return result
_columns = {
'name': fields.char('Filter Name', translate=True, required=True),
'user_id': fields.many2one('res.users', 'User', ondelete='cascade',
help="The user this filter is private to. When left empty the filter is public "
"and available to all users."),
'domain': fields.text('Domain', required=True),
'context': fields.text('Context', required=True),
'sort': fields.text('Sort', required=True),
'model_id': fields.selection(_list_all_models, 'Model', required=True),
'is_default': fields.boolean('Default filter'),
'action_id': fields.many2one('ir.actions.actions', 'Action', ondelete='cascade',
help="The menu action this filter applies to. "
"When left empty the filter applies to all menus "
"for this model."),
'active': fields.boolean('Active')
}
_defaults = {
'domain': '[]',
'context':'{}',
'sort': '[]',
'user_id': lambda self,cr,uid,context=None: uid,
'is_default': False,
'active': True
}
_order = 'model_id, name, id desc'
| agpl-3.0 |
deandunbar/html2bwml | venv/lib/python2.7/site-packages/pyasn1/type/constraint.py | 382 | 7279 | #
# ASN.1 subtype constraints classes.
#
# Constraints are relatively rare, but every ASN1 object
# is doing checks all the time for whether they have any
# constraints and whether they are applicable to the object.
#
# What we're going to do is define objects/functions that
# can be called unconditionally if they are present, and that
# are simply not present if there are no constraints.
#
# Original concept and code by Mike C. Fletcher.
#
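# Illustrative usage sketch (values made up; the classes are defined below):
#
#   from pyasn1.type import constraint, error
#   small = constraint.ValueRangeConstraint(0, 10)
#   either = constraint.ConstraintsUnion(constraint.SingleValueConstraint(42), small)
#   either(5)    # passes silently
#   either(99)   # raises error.ValueConstraintError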
import sys
from pyasn1.type import error
class AbstractConstraint:
"""Abstract base-class for constraint objects
Constraints should be stored in a simple sequence in the
namespace of their client Asn1Item sub-classes.
"""
def __init__(self, *values):
self._valueMap = {}
self._setValues(values)
self.__hashedValues = None
def __call__(self, value, idx=None):
try:
self._testValue(value, idx)
except error.ValueConstraintError:
raise error.ValueConstraintError(
'%s failed at: \"%s\"' % (self, sys.exc_info()[1])
)
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
', '.join([repr(x) for x in self._values])
)
def __eq__(self, other):
return self is other and True or self._values == other
def __ne__(self, other): return self._values != other
def __lt__(self, other): return self._values < other
def __le__(self, other): return self._values <= other
def __gt__(self, other): return self._values > other
def __ge__(self, other): return self._values >= other
if sys.version_info[0] <= 2:
def __nonzero__(self): return bool(self._values)
else:
def __bool__(self): return bool(self._values)
def __hash__(self):
if self.__hashedValues is None:
self.__hashedValues = hash((self.__class__.__name__, self._values))
return self.__hashedValues
def _setValues(self, values): self._values = values
def _testValue(self, value, idx):
raise error.ValueConstraintError(value)
# Constraints derivation logic
def getValueMap(self): return self._valueMap
def isSuperTypeOf(self, otherConstraint):
return self in otherConstraint.getValueMap() or \
otherConstraint is self or otherConstraint == self
def isSubTypeOf(self, otherConstraint):
return otherConstraint in self._valueMap or \
otherConstraint is self or otherConstraint == self
class SingleValueConstraint(AbstractConstraint):
"""Value must be part of defined values constraint"""
def _testValue(self, value, idx):
# XXX index vals for performance?
if value not in self._values:
raise error.ValueConstraintError(value)
class ContainedSubtypeConstraint(AbstractConstraint):
"""Value must satisfy all of defined set of constraints"""
def _testValue(self, value, idx):
for c in self._values:
c(value, idx)
class ValueRangeConstraint(AbstractConstraint):
"""Value must be within start and stop values (inclusive)"""
def _testValue(self, value, idx):
if value < self.start or value > self.stop:
raise error.ValueConstraintError(value)
def _setValues(self, values):
if len(values) != 2:
raise error.PyAsn1Error(
'%s: bad constraint values' % (self.__class__.__name__,)
)
self.start, self.stop = values
if self.start > self.stop:
raise error.PyAsn1Error(
'%s: screwed constraint values (start > stop): %s > %s' % (
self.__class__.__name__,
self.start, self.stop
)
)
AbstractConstraint._setValues(self, values)
class ValueSizeConstraint(ValueRangeConstraint):
"""len(value) must be within start and stop values (inclusive)"""
def _testValue(self, value, idx):
l = len(value)
if l < self.start or l > self.stop:
raise error.ValueConstraintError(value)
class PermittedAlphabetConstraint(SingleValueConstraint):
def _setValues(self, values):
self._values = ()
for v in values:
self._values = self._values + tuple(v)
def _testValue(self, value, idx):
for v in value:
if v not in self._values:
raise error.ValueConstraintError(value)
# This is a bit kludgy, meaning two op modes within a single constraint
class InnerTypeConstraint(AbstractConstraint):
"""Value must satisfy type and presense constraints"""
def _testValue(self, value, idx):
if self.__singleTypeConstraint:
self.__singleTypeConstraint(value)
elif self.__multipleTypeConstraint:
if idx not in self.__multipleTypeConstraint:
raise error.ValueConstraintError(value)
constraint, status = self.__multipleTypeConstraint[idx]
            if status == 'ABSENT': # XXX presence is not checked!
raise error.ValueConstraintError(value)
constraint(value)
def _setValues(self, values):
self.__multipleTypeConstraint = {}
self.__singleTypeConstraint = None
for v in values:
if isinstance(v, tuple):
self.__multipleTypeConstraint[v[0]] = v[1], v[2]
else:
self.__singleTypeConstraint = v
AbstractConstraint._setValues(self, values)
# Boolean ops on constraints
class ConstraintsExclusion(AbstractConstraint):
"""Value must not fit the single constraint"""
def _testValue(self, value, idx):
try:
self._values[0](value, idx)
except error.ValueConstraintError:
return
else:
raise error.ValueConstraintError(value)
def _setValues(self, values):
if len(values) != 1:
raise error.PyAsn1Error('Single constraint expected')
AbstractConstraint._setValues(self, values)
class AbstractConstraintSet(AbstractConstraint):
"""Value must not satisfy the single constraint"""
def __getitem__(self, idx): return self._values[idx]
def __add__(self, value): return self.__class__(self, value)
def __radd__(self, value): return self.__class__(self, value)
def __len__(self): return len(self._values)
# Constraints inclusion in sets
def _setValues(self, values):
self._values = values
for v in values:
self._valueMap[v] = 1
self._valueMap.update(v.getValueMap())
class ConstraintsIntersection(AbstractConstraintSet):
"""Value must satisfy all constraints"""
def _testValue(self, value, idx):
for v in self._values:
v(value, idx)
class ConstraintsUnion(AbstractConstraintSet):
"""Value must satisfy at least one constraint"""
def _testValue(self, value, idx):
for v in self._values:
try:
v(value, idx)
except error.ValueConstraintError:
pass
else:
return
raise error.ValueConstraintError(
'all of %s failed for \"%s\"' % (self._values, value)
)
# XXX
# add tests for type check
| mit |
mtnman38/Aggregate | Executables/Aggregate 0.8.7 for Macintosh.app/Contents/Resources/lib/python2.7/email/mime/application.py | 414 | 1256 | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Keith Dart
# Contact: email-sig@python.org
"""Class representing application/* type MIME documents."""
__all__ = ["MIMEApplication"]
from email import encoders
from email.mime.nonmultipart import MIMENonMultipart
class MIMEApplication(MIMENonMultipart):
"""Class for generating application/* MIME documents."""
def __init__(self, _data, _subtype='octet-stream',
_encoder=encoders.encode_base64, **_params):
"""Create an application/* type MIME document.
_data is a string containing the raw application data.
_subtype is the MIME content type subtype, defaulting to
'octet-stream'.
_encoder is a function which will perform the actual encoding for
transport of the application data, defaulting to base64 encoding.
Any additional keyword arguments are passed to the base class
constructor, which turns them into parameters on the Content-Type
header.
"""
if _subtype is None:
raise TypeError('Invalid application MIME subtype')
MIMENonMultipart.__init__(self, 'application', _subtype, **_params)
self.set_payload(_data)
_encoder(self)
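# Illustrative usage (payload bytes are made up):
#
#   from email.mime.application import MIMEApplication
#   part = MIMEApplication(b'%PDF-1.4 ...', _subtype='pdf')
#   part.add_header('Content-Disposition', 'attachment', filename='report.pdf')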
| gpl-2.0 |
Mesitis/community | sample-code/Python/25 Cashflows/get_cashflow_earnings.py | 1 | 2695 | '''
- log in and get a token
- process 2FA if 2FA is set up for this account
- get cashflow earnings for a given user
'''
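# The session endpoints used below are expected to return JSON containing at
# least the fields this script reads: 'token', 'login_flow', and (after OTP
# validation) 'role' and 'id'; sketch only, values made up:
#   {"token": "abc123", "login_flow": "2fa_verification", "role": "Partneradmin", "id": 42}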
import requests
import json
get_token_url = "https://api.canopy.cloud:443/api/v1/sessions/"
validate_otp_url = "https://api.canopy.cloud:443/api/v1/sessions/otp/validate.json" #calling the production server for OTP authentication
get_partner_users_url = "https://api.canopy.cloud:443/api/v1/admin/users.json"
get_cashflow_earnings_url = "https://api.canopy.cloud:443/api/v1/cashflows.json"
#please replace below with your username and password over here
username = 'userxxx'
password = 'passxxx'
#please enter the OTP token in case it is enabled
otp_code = '123456'
#first call for a fresh token
payload = "user%5Busername%5D=" + username + "&user%5Bpassword%5D=" + password
headers = {
'accept': "application/json",
'content-type':"application/x-www-form-urlencoded"
}
response = requests.request("POST", get_token_url, data=payload, headers=headers)
print json.dumps(response.json(), indent=4, sort_keys = True)
token = response.json()['token']
login_flow = response.json()['login_flow']
#in case 2FA is enabled use the OTP code to get the second level of authentication
if login_flow == '2fa_verification':
headers['Authorization'] = token
payload = 'otp_code=' + otp_code
response = requests.request("POST", validate_otp_url, data=payload, headers=headers)
print json.dumps(response.json(), indent=4, sort_keys = True) #print response.text
token = response.json()['token']
login_role = response.json()['role']
switch_user_id = response.json()['id']
if login_role == 'Partneradmin':
#print "============== partner's users ==========="
headers = {
'authorization': token,
'content-type': "application/x-www-form-urlencoded; charset=UTF-8"
}
partner_users = []
response = requests.request("GET", get_partner_users_url, headers=headers)
for parent_user in response.json()['users']:
partner_users.append(parent_user['id'])
#print partner_users
#take the first users in the list as the switch_user_id
switch_user_id = partner_users[0]
#in case the user is a partner_admin then switch_user_id is any one of the users it has access to (here we take the first one from the list)
#set per page value
per_page = "20"
querystring = {"page":"1","per_page":per_page}
headers = {
'authorization': token,
'content-type': "application/x-www-form-urlencoded; charset=UTF-8",
'x-app-switch-user': str(switch_user_id)
}
response = requests.request("GET", get_cashflow_earnings_url, headers=headers, params=querystring)
print json.dumps(response.json(), indent=4, sort_keys = True)
| mit |
MSeifert04/astropy | astropy/coordinates/tests/test_frames_with_velocity.py | 3 | 13882 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from astropy import units as u
from astropy.coordinates.builtin_frames import ICRS, Galactic, Galactocentric
from astropy.coordinates import builtin_frames as bf
from astropy.coordinates import galactocentric_frame_defaults
from astropy.units import allclose as quantity_allclose
from astropy.coordinates.errors import ConvertError
from astropy.coordinates import representation as r
def test_api():
# transform observed Barycentric velocities to full-space Galactocentric
with galactocentric_frame_defaults.set('latest'):
gc_frame = Galactocentric()
icrs = ICRS(ra=151.*u.deg, dec=-16*u.deg, distance=101*u.pc,
pm_ra_cosdec=21*u.mas/u.yr, pm_dec=-71*u.mas/u.yr,
radial_velocity=71*u.km/u.s)
icrs.transform_to(gc_frame)
# transform a set of ICRS proper motions to Galactic
icrs = ICRS(ra=151.*u.deg, dec=-16*u.deg,
pm_ra_cosdec=21*u.mas/u.yr, pm_dec=-71*u.mas/u.yr)
icrs.transform_to(Galactic)
# transform a Barycentric RV to a GSR RV
icrs = ICRS(ra=151.*u.deg, dec=-16*u.deg, distance=1.*u.pc,
pm_ra_cosdec=0*u.mas/u.yr, pm_dec=0*u.mas/u.yr,
radial_velocity=71*u.km/u.s)
icrs.transform_to(Galactocentric)
all_kwargs = [
dict(ra=37.4*u.deg, dec=-55.8*u.deg),
dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc),
dict(ra=37.4*u.deg, dec=-55.8*u.deg,
pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr),
dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc,
pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr),
dict(ra=37.4*u.deg, dec=-55.8*u.deg,
radial_velocity=105.7*u.km/u.s),
dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc,
radial_velocity=105.7*u.km/u.s),
dict(ra=37.4*u.deg, dec=-55.8*u.deg,
radial_velocity=105.7*u.km/u.s,
pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr),
dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc,
pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr,
radial_velocity=105.7*u.km/u.s),
# Now test other representation/differential types:
dict(x=100.*u.pc, y=200*u.pc, z=300*u.pc,
representation_type='cartesian'),
dict(x=100.*u.pc, y=200*u.pc, z=300*u.pc,
representation_type=r.CartesianRepresentation),
dict(x=100.*u.pc, y=200*u.pc, z=300*u.pc,
v_x=100.*u.km/u.s, v_y=200*u.km/u.s, v_z=300*u.km/u.s,
representation_type=r.CartesianRepresentation,
differential_type=r.CartesianDifferential),
dict(x=100.*u.pc, y=200*u.pc, z=300*u.pc,
v_x=100.*u.km/u.s, v_y=200*u.km/u.s, v_z=300*u.km/u.s,
representation_type=r.CartesianRepresentation,
differential_type='cartesian'),
]
@pytest.mark.parametrize('kwargs', all_kwargs)
def test_all_arg_options(kwargs):
# Above is a list of all possible valid combinations of arguments.
# Here we do a simple thing and just verify that passing them in, we have
# access to the relevant attributes from the resulting object
icrs = ICRS(**kwargs)
gal = icrs.transform_to(Galactic)
repr_gal = repr(gal)
for k in kwargs:
if k == 'differential_type':
continue
getattr(icrs, k)
if 'pm_ra_cosdec' in kwargs: # should have both
assert 'pm_l_cosb' in repr_gal
assert 'pm_b' in repr_gal
assert 'mas / yr' in repr_gal
if 'radial_velocity' not in kwargs:
assert 'radial_velocity' not in repr_gal
if 'radial_velocity' in kwargs:
assert 'radial_velocity' in repr_gal
assert 'km / s' in repr_gal
if 'pm_ra_cosdec' not in kwargs:
assert 'pm_l_cosb' not in repr_gal
assert 'pm_b' not in repr_gal
@pytest.mark.parametrize('cls,lon,lat', [
[bf.ICRS, 'ra', 'dec'], [bf.FK4, 'ra', 'dec'], [bf.FK4NoETerms, 'ra', 'dec'],
[bf.FK5, 'ra', 'dec'], [bf.GCRS, 'ra', 'dec'], [bf.HCRS, 'ra', 'dec'],
[bf.LSR, 'ra', 'dec'], [bf.CIRS, 'ra', 'dec'], [bf.Galactic, 'l', 'b'],
[bf.AltAz, 'az', 'alt'], [bf.Supergalactic, 'sgl', 'sgb'],
[bf.GalacticLSR, 'l', 'b'], [bf.HeliocentricMeanEcliptic, 'lon', 'lat'],
[bf.GeocentricMeanEcliptic, 'lon', 'lat'],
[bf.BarycentricMeanEcliptic, 'lon', 'lat'],
[bf.PrecessedGeocentric, 'ra', 'dec']
])
def test_expected_arg_names(cls, lon, lat):
kwargs = {lon: 37.4*u.deg, lat: -55.8*u.deg, 'distance': 150*u.pc,
f'pm_{lon}_cos{lat}': -21.2*u.mas/u.yr,
f'pm_{lat}': 17.1*u.mas/u.yr,
'radial_velocity': 105.7*u.km/u.s}
frame = cls(**kwargs)
# these data are extracted from the vizier copy of XHIP:
# http://vizier.u-strasbg.fr/viz-bin/VizieR-3?-source=+V/137A/XHIP
_xhip_head = """
------ ------------ ------------ -------- -------- ------------ ------------ ------- -------- -------- ------- ------ ------ ------
R D pmRA pmDE Di pmGLon pmGLat RV U V W
HIP AJ2000 (deg) EJ2000 (deg) (mas/yr) (mas/yr) GLon (deg) GLat (deg) st (pc) (mas/yr) (mas/yr) (km/s) (km/s) (km/s) (km/s)
------ ------------ ------------ -------- -------- ------------ ------------ ------- -------- -------- ------- ------ ------ ------
"""[1:-1]
_xhip_data = """
19 000.05331690 +38.30408633 -3.17 -15.37 112.00026470 -23.47789171 247.12 -6.40 -14.33 6.30 7.3 2.0 -17.9
20 000.06295067 +23.52928427 36.11 -22.48 108.02779304 -37.85659811 95.90 29.35 -30.78 37.80 -19.3 16.1 -34.2
21 000.06623581 +08.00723430 61.48 -0.23 101.69697120 -52.74179515 183.68 58.06 -20.23 -11.72 -45.2 -30.9 -1.3
24917 080.09698238 -33.39874984 -4.30 13.40 236.92324669 -32.58047131 107.38 -14.03 -1.15 36.10 -22.4 -21.3 -19.9
59207 182.13915108 +65.34963517 18.17 5.49 130.04157185 51.18258601 56.00 -18.98 -0.49 5.70 1.5 6.1 4.4
87992 269.60730667 +36.87462906 -89.58 72.46 62.98053142 25.90148234 129.60 45.64 105.79 -4.00 -39.5 -15.8 56.7
115110 349.72322473 -28.74087144 48.86 -9.25 23.00447250 -69.52799804 116.87 -8.37 -49.02 15.00 -16.8 -12.2 -23.6
"""[1:-1]
# in principle we could parse the above as a table, but doing it "manually"
# makes this test less tied to Table working correctly
@pytest.mark.parametrize('hip,ra,dec,pmra,pmdec,glon,glat,dist,pmglon,pmglat,rv,U,V,W',
[[float(val) for val in row.split()] for row in _xhip_data.split('\n')])
def test_xhip_galactic(hip, ra, dec, pmra, pmdec, glon, glat, dist, pmglon, pmglat, rv, U, V, W):
i = ICRS(ra*u.deg, dec*u.deg, dist*u.pc,
pm_ra_cosdec=pmra*u.marcsec/u.yr, pm_dec=pmdec*u.marcsec/u.yr,
radial_velocity=rv*u.km/u.s)
g = i.transform_to(Galactic)
    # precision is limited by 2-decimal digit string representation of pms
assert quantity_allclose(g.pm_l_cosb, pmglon*u.marcsec/u.yr, atol=.01*u.marcsec/u.yr)
assert quantity_allclose(g.pm_b, pmglat*u.marcsec/u.yr, atol=.01*u.marcsec/u.yr)
# make sure UVW also makes sense
uvwg = g.cartesian.differentials['s']
# precision is limited by 1-decimal digit string representation of vels
assert quantity_allclose(uvwg.d_x, U*u.km/u.s, atol=.1*u.km/u.s)
assert quantity_allclose(uvwg.d_y, V*u.km/u.s, atol=.1*u.km/u.s)
assert quantity_allclose(uvwg.d_z, W*u.km/u.s, atol=.1*u.km/u.s)
@pytest.mark.parametrize('kwargs,expect_success', [
[dict(ra=37.4*u.deg, dec=-55.8*u.deg), False],
[dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc), True],
[dict(ra=37.4*u.deg, dec=-55.8*u.deg,
pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr), False],
[dict(ra=37.4*u.deg, dec=-55.8*u.deg, radial_velocity=105.7*u.km/u.s), False],
[dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc,
radial_velocity=105.7*u.km/u.s), False],
[dict(ra=37.4*u.deg, dec=-55.8*u.deg,
radial_velocity=105.7*u.km/u.s,
pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr), False],
[dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc,
pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr,
radial_velocity=105.7*u.km/u.s), True]
])
def test_frame_affinetransform(kwargs, expect_success):
"""There are already tests in test_transformations.py that check that
an AffineTransform fails without full-space data, but this just checks that
things work as expected at the frame level as well.
"""
with galactocentric_frame_defaults.set('latest'):
icrs = ICRS(**kwargs)
if expect_success:
_ = icrs.transform_to(Galactocentric)
else:
with pytest.raises(ConvertError):
icrs.transform_to(Galactocentric)
def test_differential_type_arg():
"""
Test passing in an explicit differential class to the initializer or
changing the differential class via set_representation_cls
"""
from astropy.coordinates.builtin_frames import ICRS
icrs = ICRS(ra=1*u.deg, dec=60*u.deg,
pm_ra=10*u.mas/u.yr, pm_dec=-11*u.mas/u.yr,
differential_type=r.UnitSphericalDifferential)
assert icrs.pm_ra == 10*u.mas/u.yr
icrs = ICRS(ra=1*u.deg, dec=60*u.deg,
pm_ra=10*u.mas/u.yr, pm_dec=-11*u.mas/u.yr,
differential_type={'s': r.UnitSphericalDifferential})
assert icrs.pm_ra == 10*u.mas/u.yr
icrs = ICRS(ra=1*u.deg, dec=60*u.deg,
pm_ra_cosdec=10*u.mas/u.yr, pm_dec=-11*u.mas/u.yr)
icrs.set_representation_cls(s=r.UnitSphericalDifferential)
assert quantity_allclose(icrs.pm_ra, 20*u.mas/u.yr)
# incompatible representation and differential
with pytest.raises(TypeError):
ICRS(ra=1*u.deg, dec=60*u.deg,
v_x=1*u.km/u.s, v_y=-2*u.km/u.s, v_z=-2*u.km/u.s,
differential_type=r.CartesianDifferential)
# specify both
icrs = ICRS(x=1*u.pc, y=2*u.pc, z=3*u.pc,
v_x=1*u.km/u.s, v_y=2*u.km/u.s, v_z=3*u.km/u.s,
representation_type=r.CartesianRepresentation,
differential_type=r.CartesianDifferential)
assert icrs.x == 1*u.pc
assert icrs.y == 2*u.pc
assert icrs.z == 3*u.pc
assert icrs.v_x == 1*u.km/u.s
assert icrs.v_y == 2*u.km/u.s
assert icrs.v_z == 3*u.km/u.s
def test_slicing_preserves_differential():
icrs = ICRS(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc,
pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr,
radial_velocity=105.7*u.km/u.s)
icrs2 = icrs.reshape(1,1)[:1,0]
for name in icrs.representation_component_names.keys():
assert getattr(icrs, name) == getattr(icrs2, name)[0]
for name in icrs.get_representation_component_names('s').keys():
assert getattr(icrs, name) == getattr(icrs2, name)[0]
def test_shorthand_attributes():
# Check that attribute access works
# for array data:
n = 4
icrs1 = ICRS(ra=np.random.uniform(0, 360, n)*u.deg,
dec=np.random.uniform(-90, 90, n)*u.deg,
distance=100*u.pc,
pm_ra_cosdec=np.random.normal(0, 100, n)*u.mas/u.yr,
pm_dec=np.random.normal(0, 100, n)*u.mas/u.yr,
radial_velocity=np.random.normal(0, 100, n)*u.km/u.s)
v = icrs1.velocity
pm = icrs1.proper_motion
assert quantity_allclose(pm[0], icrs1.pm_ra_cosdec)
assert quantity_allclose(pm[1], icrs1.pm_dec)
# for scalar data:
icrs2 = ICRS(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc,
pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr,
radial_velocity=105.7*u.km/u.s)
v = icrs2.velocity
pm = icrs2.proper_motion
assert quantity_allclose(pm[0], icrs2.pm_ra_cosdec)
assert quantity_allclose(pm[1], icrs2.pm_dec)
# check that it fails where we expect:
# no distance
rv = 105.7*u.km/u.s
icrs3 = ICRS(ra=37.4*u.deg, dec=-55.8*u.deg,
pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr,
radial_velocity=rv)
with pytest.raises(ValueError):
icrs3.velocity
icrs3.set_representation_cls('cartesian')
assert hasattr(icrs3, 'radial_velocity')
assert quantity_allclose(icrs3.radial_velocity, rv)
icrs4 = ICRS(x=30*u.pc, y=20*u.pc, z=11*u.pc,
v_x=10*u.km/u.s, v_y=10*u.km/u.s, v_z=10*u.km/u.s,
representation_type=r.CartesianRepresentation,
differential_type=r.CartesianDifferential)
icrs4.radial_velocity
def test_negative_distance():
""" Regression test: #7408
Make sure that negative parallaxes turned into distances are handled right
"""
RA = 150 * u.deg
DEC = -11*u.deg
c = ICRS(ra=RA, dec=DEC,
distance=(-10*u.mas).to(u.pc, u.parallax()),
pm_ra_cosdec=10*u.mas/u.yr,
pm_dec=10*u.mas/u.yr)
assert quantity_allclose(c.ra, RA)
assert quantity_allclose(c.dec, DEC)
c = ICRS(ra=RA, dec=DEC,
distance=(-10*u.mas).to(u.pc, u.parallax()))
assert quantity_allclose(c.ra, RA)
assert quantity_allclose(c.dec, DEC)
def test_velocity_units():
"""Check that the differential data given has compatible units
with the time-derivative of representation data"""
with pytest.raises(ValueError) as excinfo:
c = ICRS(
x=1, y=2, z=3,
v_x=1, v_y=2, v_z=3,
representation_type=r.CartesianRepresentation,
differential_type=r.CartesianDifferential)
assert "data units are not compatible with" in str(excinfo.value)
| bsd-3-clause |
the01/python-paps | paps/person.py | 1 | 4994 | # -*- coding: UTF-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# from __future__ import unicode_literals
__author__ = "d01"
__email__ = "jungflor@gmail.com"
__copyright__ = "Copyright (C) 2015-16, Florian JUNG"
__license__ = "MIT"
__version__ = "1.0.0"
__date__ = "2016-03-31"
# Created: 2015-04-04 14:21
""" Member of audience """
class Person(object):
""" Class representing a person in the audience participation system """
BITS_PER_PERSON = 1
""" How many bits are necessary to encode one person """
def __init__(self, id=None, sitting=False):
"""
Initialize object
:param id: Unique id of person
:type id: None | str | unicode | int
:param sitting: Is this person sitting
:type sitting: bool
:rtype: None
"""
super(Person, self).__init__()
self.sitting = sitting
""" Is this person sitting
:type sitting: bool """
self.id = id
""" Id of person
:type id: None | str | unicode | int """
def to_dict(self):
"""
Convert this person to a dict
:return: Dictionary representing this person
:rtype: dict
"""
return {
'sitting': self.sitting,
'id': self.id
}
def from_dict(self, d):
"""
Set this person from dict
:param d: Dictionary representing a person ('sitting'[, 'id'])
:type d: dict
:rtype: Person
:raises KeyError: 'sitting' not set
"""
self.sitting = d['sitting']
self.id = d.get('id', None)
return self
def to_tuple(self):
"""
Convert this person to a tuple
:return: Tuple representing this person
:rtype: (None | str, bool)
"""
return self.id, self.sitting,
def from_tuple(self, t):
"""
Set this person from tuple
        :param t: Tuple representing a person: ``(sitting,)`` or ``(id, sitting)``
        :type t: (bool,) | (None | str | unicode | int, bool)
:rtype: Person
"""
if len(t) > 1:
self.id = t[0]
self.sitting = t[1]
else:
self.sitting = t[0]
self.id = None
return self
def to_bits(self):
"""
Convert this person to bits (ignores the id)
:return: Bits representing this person
:rtype: bytearray
"""
# TODO include id
return bytearray([
int(self.sitting)
])
def from_bits(self, bits):
"""
Set this person from bits (ignores the id)
:param bits: Bits representing a person
:type bits: bytearray
:rtype: Person
:raises ValueError: Bits has an unexpected length
"""
# TODO include id
if len(bits) != Person.BITS_PER_PERSON:
raise ValueError(u"Person requires exactly {} bits".format(
Person.BITS_PER_PERSON
))
self.sitting = bool(bits[0])
return self
@staticmethod
def from_person(person):
"""
Copy person
:param person: Person to copy into new instance
:type person: Person
:rtype: Person
"""
p = Person()
p.id = person.id
p.sitting = person.sitting
return p
def __cmp__(self, other):
"""
Compare two people with each other
If a field is set (not none/empty) that is bigger than an unset one.
Only compares two fields, if they are set in both articles
compare hierarchy (if present and should be compared):
- id
- sitting
:param other: Other person to compare to
:type other: Person
:return: -1: self < other
0: self == other (=False)
+1: self > other
:rtype: int
"""
if self.id < other.id:
return -1
elif self.id > other.id:
return 1
if self.sitting < other.sitting:
return -1
elif self.sitting > other.sitting:
return 1
return 0
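    # Illustrative ordering: Person(id=1, sitting=True) sorts before
    # Person(id=2, sitting=False), because ids are compared before the sitting flag.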
def __lt__(self, other):
"""
This smaller than other
:param other: Other person to compare to
:type other: Person
:return: Am I smaller
:rtype: bool
"""
return self.__cmp__(other) < 0
def __eq__(self, other):
"""
This equal to other
:param other: Other person to compare to
:type other: Person
:return: Am I equal
:rtype: bool
"""
return self.__cmp__(other) == 0
def __str__(self):
return "<{}>({}; Sitting:{})".format(
type(self).__name__, self.id, self.sitting
)
def __unicode__(self):
return u"<{}>({}; Sitting:{})".format(
type(self).__name__, self.id, self.sitting
)
| mit |
vipul-sharma20/oh-mainline | vendor/packages/docutils/test/test_parsers/test_rst/test_literal_blocks.py | 19 | 6929 | #! /usr/bin/env python
# $Id: test_literal_blocks.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Tests for states.py.
"""
from __init__ import DocutilsTestSupport
def suite():
s = DocutilsTestSupport.ParserTestSuite()
s.generateTests(totest)
return s
totest = {}
totest['indented_literal_blocks'] = [
["""\
A paragraph::
A literal block.
""",
"""\
<document source="test data">
<paragraph>
A paragraph:
<literal_block xml:space="preserve">
A literal block.
"""],
["""\
A paragraph with a space after the colons:: \n\
A literal block.
""",
"""\
<document source="test data">
<paragraph>
A paragraph with a space after the colons:
<literal_block xml:space="preserve">
A literal block.
"""],
["""\
A paragraph::
A literal block.
Another paragraph::
Another literal block.
With two blank lines following.
A final paragraph.
""",
"""\
<document source="test data">
<paragraph>
A paragraph:
<literal_block xml:space="preserve">
A literal block.
<paragraph>
Another paragraph:
<literal_block xml:space="preserve">
Another literal block.
With two blank lines following.
<paragraph>
A final paragraph.
"""],
["""\
A paragraph
on more than
one line::
A literal block.
""",
"""\
<document source="test data">
<paragraph>
A paragraph
on more than
one line:
<literal_block xml:space="preserve">
A literal block.
"""],
["""\
A paragraph
on more than
one line::
A literal block
with no blank line above.
""",
"""\
<document source="test data">
<paragraph>
A paragraph
on more than
one line:
<system_message level="3" line="4" source="test data" type="ERROR">
<paragraph>
Unexpected indentation.
<literal_block xml:space="preserve">
A literal block
with no blank line above.
"""],
["""\
A paragraph::
A literal block.
no blank line
""",
"""\
<document source="test data">
<paragraph>
A paragraph:
<literal_block xml:space="preserve">
A literal block.
<system_message level="2" line="4" source="test data" type="WARNING">
<paragraph>
Literal block ends without a blank line; unexpected unindent.
<paragraph>
no blank line
"""],
[r"""
A paragraph\\::
A literal block.
A paragraph\::
Not a literal block.
""",
r"""<document source="test data">
<paragraph>
A paragraph\:
<literal_block xml:space="preserve">
A literal block.
<paragraph>
A paragraph::
<block_quote>
<paragraph>
Not a literal block.
"""],
[r"""
\\::
A literal block.
\::
Not a literal block.
""",
r"""<document source="test data">
<paragraph>
\:
<literal_block xml:space="preserve">
A literal block.
<paragraph>
::
<block_quote>
<paragraph>
Not a literal block.
"""],
["""\
A paragraph: ::
A literal block.
""",
"""\
<document source="test data">
<paragraph>
A paragraph:
<literal_block xml:space="preserve">
A literal block.
"""],
["""\
A paragraph:
::
A literal block.
""",
"""\
<document source="test data">
<paragraph>
A paragraph:
<literal_block xml:space="preserve">
A literal block.
"""],
["""\
A paragraph:
::
A literal block.
""",
"""\
<document source="test data">
<system_message level="1" line="2" source="test data" type="INFO">
<paragraph>
Possible title underline, too short for the title.
Treating it as ordinary text because it's so short.
<paragraph>
A paragraph:
<literal_block xml:space="preserve">
A literal block.
"""],
["""\
A paragraph:
::
A literal block.
""",
"""\
<document source="test data">
<paragraph>
A paragraph:
<literal_block xml:space="preserve">
A literal block.
"""],
["""\
A paragraph::
Not a literal block.
""",
"""\
<document source="test data">
<paragraph>
A paragraph:
<system_message level="2" line="3" source="test data" type="WARNING">
<paragraph>
Literal block expected; none found.
<paragraph>
Not a literal block.
"""],
["""\
A paragraph::
A wonky literal block.
Literal line 2.
Literal line 3.
""",
"""\
<document source="test data">
<paragraph>
A paragraph:
<literal_block xml:space="preserve">
A wonky literal block.
Literal line 2.
\n\
Literal line 3.
"""],
["""\
EOF, even though a literal block is indicated::
""",
"""\
<document source="test data">
<paragraph>
EOF, even though a literal block is indicated:
<system_message level="2" line="2" source="test data" type="WARNING">
<paragraph>
Literal block expected; none found.
"""],
]
totest['quoted_literal_blocks'] = [
["""\
A paragraph::
> A literal block.
""",
"""\
<document source="test data">
<paragraph>
A paragraph:
<literal_block xml:space="preserve">
> A literal block.
"""],
["""\
A paragraph::
> A literal block.
""",
"""\
<document source="test data">
<paragraph>
A paragraph:
<literal_block xml:space="preserve">
> A literal block.
"""],
["""\
A paragraph::
> A literal block.
> Line 2.
""",
"""\
<document source="test data">
<paragraph>
A paragraph:
<literal_block xml:space="preserve">
> A literal block.
> Line 2.
"""],
["""\
A paragraph::
> A literal block.
Indented line.
""",
"""\
<document source="test data">
<paragraph>
A paragraph:
<literal_block xml:space="preserve">
> A literal block.
<system_message level="3" line="4" source="test data" type="ERROR">
<paragraph>
Unexpected indentation.
<block_quote>
<paragraph>
Indented line.
"""],
["""\
A paragraph::
> A literal block.
Text.
""",
"""\
<document source="test data">
<paragraph>
A paragraph:
<literal_block xml:space="preserve">
> A literal block.
<system_message level="3" line="4" source="test data" type="ERROR">
<paragraph>
Inconsistent literal block quoting.
<paragraph>
Text.
"""],
["""\
A paragraph::
> A literal block.
$ Inconsistent line.
""",
"""\
<document source="test data">
<paragraph>
A paragraph:
<literal_block xml:space="preserve">
> A literal block.
<system_message level="3" line="4" source="test data" type="ERROR">
<paragraph>
Inconsistent literal block quoting.
<paragraph>
$ Inconsistent line.
"""],
]
if __name__ == '__main__':
import unittest
unittest.main(defaultTest='suite')
| agpl-3.0 |
VRToxin-AOSP/android_kernel_lge_bullhead | tools/perf/scripts/python/failed-syscalls-by-pid.py | 11180 | 2058 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
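# Example invocation (illustrative; the exact record step may vary by setup):
#   perf record -e raw_syscalls:sys_exit -a -- sleep 10
#   perf script -s failed-syscalls-by-pid.py          # all processes
#   perf script -s failed-syscalls-by-pid.py firefox  # only the 'firefox' comm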
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s failed-syscalls-by-pid.py [comm|pid]\n"
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
| gpl-2.0 |
brain-research/LeaveNoTrace | envs/cliff_envs.py | 1 | 4534 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gym.envs.mujoco.half_cheetah import HalfCheetahEnv
from gym.envs.mujoco.walker2d import Walker2dEnv
from gym.envs.mujoco import mujoco_env
import numpy as np
import os
def tolerance(x, bounds, margin):
'''Returns 0 when x is within the bounds, and rises sigmoidally towards 1
as x moves beyond the bounds, reaching 0.95 at a distance of one margin.
We've copied the function from [1] to reduce dependencies.
[1] Tassa, Yuval, et al. "DeepMind Control Suite." arXiv preprint
arXiv:1801.00690 (2018).
'''
(lower, upper) = bounds
if lower <= x <= upper:
return 0
elif x < lower:
dist_from_margin = lower - x
else:
assert x > upper
dist_from_margin = x - upper
loss_at_margin = 0.95
w = np.arctanh(np.sqrt(loss_at_margin)) / margin
s = np.tanh(w * dist_from_margin)
return s*s
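# Illustrative values (not part of the original source): with bounds=(9, 11)
# and margin=7 the penalty is 0.0 anywhere inside the bounds and reaches
# loss_at_margin exactly one margin outside, e.g. tolerance(18, (9, 11), 7)
# is approximately 0.95.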
def huber(x, p):
return np.sqrt(x*x + p*p) - p
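# Note (added for clarity): huber() above is the pseudo-Huber loss; it behaves
# like x**2 / (2 * p) for |x| much smaller than p and like |x| - p for |x| much
# larger than p, giving a smooth, outlier-tolerant penalty for the velocity
# terms used in the reward functions below.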
class CliffCheetahEnv(HalfCheetahEnv):
def __init__(self):
envs_folder = os.path.dirname(os.path.abspath(__file__))
xml_filename = os.path.join(envs_folder,
'assets/cliff_cheetah.xml')
mujoco_env.MujocoEnv.__init__(self, xml_filename, 5)
def step(self, a):
(s, _, done, info) = super(CliffCheetahEnv, self).step(a)
r = self._get_rewards(s, a)[0]
return (s, r, done, info)
def _get_obs(self):
'''Modified to include the x coordinate.'''
return np.concatenate([
self.model.data.qpos.flat,
self.model.data.qvel.flat,
])
def _get_rewards(self, s, a):
(x, z, theta) = s[:3]
xvel = s[9]
# Reward the forward agent for running 9 - 11 m/s.
forward_reward = (1.0 - tolerance(xvel, (9, 11), 7))
theta_reward = 1.0 - tolerance(theta,
bounds=(-0.05, 0.05),
margin=0.1)
# Reward the reset agent for being at the origin, plus
# reward shaping to be near the origin and upright.
reset_reward = 0.8 * (np.abs(x) < 0.5) + 0.1 * (1 - 0.2 * np.abs(x)) + 0.1 * theta_reward
return (forward_reward, reset_reward)
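# Worked example (illustrative): with the cheetah upright at the origin
# (x = 0, theta = 0) and xvel = 10 m/s, both tolerance() terms are 0, so
# _get_rewards returns forward_reward = 1.0 and
# reset_reward = 0.8 + 0.1 + 0.1 = 1.0, the maxima of both shaped rewards.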
class CliffWalkerEnv(Walker2dEnv):
def __init__(self):
envs_folder = os.path.dirname(os.path.abspath(__file__))
xml_filename = os.path.join(envs_folder,
'assets/cliff_walker.xml')
mujoco_env.MujocoEnv.__init__(self, xml_filename, 5)
def step(self, a):
(s, _, done, info) = super(CliffWalkerEnv, self).step(a)
r = self._get_rewards(s, a)[0]
return (s, r, done, info)
def _get_obs(self):
'''Modified to include the x coordinate.'''
qpos = self.model.data.qpos
qvel = self.model.data.qvel
return np.concatenate([qpos[:], np.clip(qvel, -10, 10)]).ravel()
def _get_rewards(self, s, a):
x = s[0]
running_vel = s[9] - 2.0
torso_height = s[1]
is_standing = float(torso_height > 1.2)
is_falling = float(torso_height < 0.7)
run_reward = np.clip(1 - 0.2 * huber(running_vel, p=0.1), 0, 1)
stand_reward = np.clip(0.25 * torso_height +
0.25 * is_standing +
0.5 * (1 - is_falling), 0, 1)
control_reward = np.clip(1 - 0.05 * np.dot(a, a), 0, 1)
reset_location_reward = 0.8 * (np.abs(x) < 0.5) + 0.2 * (1 - 0.2 * np.abs(x))
forward_reward = 0.5 * run_reward + 0.25 * stand_reward + 0.25 * control_reward
reset_reward = 0.5 * reset_location_reward + 0.25 * stand_reward + 0.25 * control_reward
return (forward_reward, reset_reward)
if __name__ == '__main__':
import time
# env = CliffCheetahEnv()
env = CliffWalkerEnv()
env.reset()
for _ in range(10000):
action = env.action_space.sample()
env.step(action)
env.render()
time.sleep(0.01)
| apache-2.0 |
pongem/python-bot-project | appengine/standard/botapp/env/lib/python2.7/site-packages/django/contrib/sessions/backends/base.py | 47 | 12304 | from __future__ import unicode_literals
import base64
import logging
import string
from datetime import datetime, timedelta
from django.conf import settings
from django.contrib.sessions.exceptions import SuspiciousSession
from django.core.exceptions import SuspiciousOperation
from django.utils import timezone
from django.utils.crypto import (
constant_time_compare, get_random_string, salted_hmac,
)
from django.utils.encoding import force_bytes, force_text
from django.utils.module_loading import import_string
# session_key should not be case sensitive because some backends can store it
# on case insensitive file systems.
VALID_KEY_CHARS = string.ascii_lowercase + string.digits
class CreateError(Exception):
"""
Used internally as a consistent exception type to catch from save (see the
docstring for SessionBase.save() for details).
"""
pass
class UpdateError(Exception):
"""
Occurs if Django tries to update a session that was deleted.
"""
pass
class SessionBase(object):
"""
Base class for all Session classes.
"""
TEST_COOKIE_NAME = 'testcookie'
TEST_COOKIE_VALUE = 'worked'
__not_given = object()
def __init__(self, session_key=None):
self._session_key = session_key
self.accessed = False
self.modified = False
self.serializer = import_string(settings.SESSION_SERIALIZER)
def __contains__(self, key):
return key in self._session
def __getitem__(self, key):
return self._session[key]
def __setitem__(self, key, value):
self._session[key] = value
self.modified = True
def __delitem__(self, key):
del self._session[key]
self.modified = True
def get(self, key, default=None):
return self._session.get(key, default)
def pop(self, key, default=__not_given):
self.modified = self.modified or key in self._session
args = () if default is self.__not_given else (default,)
return self._session.pop(key, *args)
def setdefault(self, key, value):
if key in self._session:
return self._session[key]
else:
self.modified = True
self._session[key] = value
return value
def set_test_cookie(self):
self[self.TEST_COOKIE_NAME] = self.TEST_COOKIE_VALUE
def test_cookie_worked(self):
return self.get(self.TEST_COOKIE_NAME) == self.TEST_COOKIE_VALUE
def delete_test_cookie(self):
del self[self.TEST_COOKIE_NAME]
def _hash(self, value):
key_salt = "django.contrib.sessions" + self.__class__.__name__
return salted_hmac(key_salt, value).hexdigest()
def encode(self, session_dict):
"Returns the given session dictionary serialized and encoded as a string."
serialized = self.serializer().dumps(session_dict)
hash = self._hash(serialized)
return base64.b64encode(hash.encode() + b":" + serialized).decode('ascii')
def decode(self, session_data):
encoded_data = base64.b64decode(force_bytes(session_data))
try:
# could produce ValueError if there is no ':'
hash, serialized = encoded_data.split(b':', 1)
expected_hash = self._hash(serialized)
if not constant_time_compare(hash.decode(), expected_hash):
raise SuspiciousSession("Session data corrupted")
else:
return self.serializer().loads(serialized)
except Exception as e:
# ValueError, SuspiciousOperation, unpickling exceptions. If any of
# these happen, just return an empty dictionary (an empty session).
if isinstance(e, SuspiciousOperation):
logger = logging.getLogger('django.security.%s' % e.__class__.__name__)
logger.warning(force_text(e))
return {}
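# Illustrative round trip (hypothetical values, not part of the original class):
# encode({'cart': [1, 2]}) yields base64("<hmac hex digest>:<serialized dict>"),
# and decode() of that string returns {'cart': [1, 2]} as long as the embedded
# hash still matches; a tampered payload falls through to the empty-dict branch.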
def update(self, dict_):
self._session.update(dict_)
self.modified = True
def has_key(self, key):
return key in self._session
def keys(self):
return self._session.keys()
def values(self):
return self._session.values()
def items(self):
return self._session.items()
def iterkeys(self):
return self._session.iterkeys()
def itervalues(self):
return self._session.itervalues()
def iteritems(self):
return self._session.iteritems()
def clear(self):
# To avoid unnecessary persistent storage accesses, we set up the
# internals directly (loading data wastes time, since we are going to
# set it to an empty dict anyway).
self._session_cache = {}
self.accessed = True
self.modified = True
def is_empty(self):
"Returns True when there is no session_key and the session is empty"
try:
return not bool(self._session_key) and not self._session_cache
except AttributeError:
return True
def _get_new_session_key(self):
"Returns session key that isn't being used."
while True:
session_key = get_random_string(32, VALID_KEY_CHARS)
if not self.exists(session_key):
break
return session_key
def _get_or_create_session_key(self):
if self._session_key is None:
self._session_key = self._get_new_session_key()
return self._session_key
def _validate_session_key(self, key):
"""
Key must be truthy and at least 8 characters long. 8 characters is an
arbitrary lower bound for some minimal key security.
"""
return key and len(key) >= 8
def _get_session_key(self):
return self.__session_key
def _set_session_key(self, value):
"""
Validate session key on assignment. Invalid values set the key to None.
"""
if self._validate_session_key(value):
self.__session_key = value
else:
self.__session_key = None
session_key = property(_get_session_key)
_session_key = property(_get_session_key, _set_session_key)
def _get_session(self, no_load=False):
"""
Lazily loads session from storage (unless "no_load" is True, when only
an empty dict is stored) and stores it in the current instance.
"""
self.accessed = True
try:
return self._session_cache
except AttributeError:
if self.session_key is None or no_load:
self._session_cache = {}
else:
self._session_cache = self.load()
return self._session_cache
_session = property(_get_session)
def get_expiry_age(self, **kwargs):
"""Get the number of seconds until the session expires.
Optionally, this function accepts `modification` and `expiry` keyword
arguments specifying the modification and expiry of the session.
"""
try:
modification = kwargs['modification']
except KeyError:
modification = timezone.now()
# Make the difference between "expiry=None passed in kwargs" and
# "expiry not passed in kwargs", in order to guarantee not to trigger
# self.load() when expiry is provided.
try:
expiry = kwargs['expiry']
except KeyError:
expiry = self.get('_session_expiry')
if not expiry: # Checks both None and 0 cases
return settings.SESSION_COOKIE_AGE
if not isinstance(expiry, datetime):
return expiry
delta = expiry - modification
return delta.days * 86400 + delta.seconds
def get_expiry_date(self, **kwargs):
"""Get session the expiry date (as a datetime object).
Optionally, this function accepts `modification` and `expiry` keyword
arguments specifying the modification and expiry of the session.
"""
try:
modification = kwargs['modification']
except KeyError:
modification = timezone.now()
# Same comment as in get_expiry_age
try:
expiry = kwargs['expiry']
except KeyError:
expiry = self.get('_session_expiry')
if isinstance(expiry, datetime):
return expiry
if not expiry: # Checks both None and 0 cases
expiry = settings.SESSION_COOKIE_AGE
return modification + timedelta(seconds=expiry)
def set_expiry(self, value):
"""
Sets a custom expiration for the session. ``value`` can be an integer,
a Python ``datetime`` or ``timedelta`` object or ``None``.
If ``value`` is an integer, the session will expire after that many
seconds of inactivity. If set to ``0`` then the session will expire on
browser close.
If ``value`` is a ``datetime`` or ``timedelta`` object, the session
will expire at that specific future time.
If ``value`` is ``None``, the session uses the global session expiry
policy.
"""
if value is None:
# Remove any custom expiration for this session.
try:
del self['_session_expiry']
except KeyError:
pass
return
if isinstance(value, timedelta):
value = timezone.now() + value
self['_session_expiry'] = value
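# Illustrative usage (not part of the original class):
#   request.session.set_expiry(300)                # 5 minutes of inactivity
#   request.session.set_expiry(timedelta(days=7))  # fixed expiry one week out
#   request.session.set_expiry(0)                  # expire on browser close
#   request.session.set_expiry(None)               # fall back to the global policy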
def get_expire_at_browser_close(self):
"""
Returns ``True`` if the session is set to expire when the browser
closes, and ``False`` if there's an expiry date. Use
``get_expiry_date()`` or ``get_expiry_age()`` to find the actual expiry
date/age, if there is one.
"""
if self.get('_session_expiry') is None:
return settings.SESSION_EXPIRE_AT_BROWSER_CLOSE
return self.get('_session_expiry') == 0
def flush(self):
"""
Removes the current session data from the database and regenerates the
key.
"""
self.clear()
self.delete()
self._session_key = None
def cycle_key(self):
"""
Creates a new session key, while retaining the current session data.
"""
data = self._session_cache
key = self.session_key
self.create()
self._session_cache = data
if key:
self.delete(key)
# Methods that child classes must implement.
def exists(self, session_key):
"""
Returns True if the given session_key already exists.
"""
raise NotImplementedError('subclasses of SessionBase must provide an exists() method')
def create(self):
"""
Creates a new session instance. Guaranteed to create a new object with
a unique key and will have saved the result once (with empty data)
before the method returns.
"""
raise NotImplementedError('subclasses of SessionBase must provide a create() method')
def save(self, must_create=False):
"""
Saves the session data. If 'must_create' is True, a new session object
is created (otherwise a CreateError exception is raised). Otherwise,
save() only updates an existing object and does not create one
(an UpdateError is raised).
"""
raise NotImplementedError('subclasses of SessionBase must provide a save() method')
def delete(self, session_key=None):
"""
Deletes the session data under this key. If the key is None, the
current session key value is used.
"""
raise NotImplementedError('subclasses of SessionBase must provide a delete() method')
def load(self):
"""
Loads the session data and returns a dictionary.
"""
raise NotImplementedError('subclasses of SessionBase must provide a load() method')
@classmethod
def clear_expired(cls):
"""
Remove expired sessions from the session store.
If this operation isn't possible on a given backend, it should raise
NotImplementedError. If it isn't necessary, because the backend has
a built-in expiration mechanism, it should be a no-op.
"""
raise NotImplementedError('This backend does not support clear_expired().')
| apache-2.0 |
junfenggood/goflow | goflow/apptools/models.py | 9 | 3509 | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
from django.db import models
from goflow.workflow.models import Transition
from goflow.workflow.decorators import allow_tags
from django.conf import settings
class DefaultAppModel(models.Model):
"""Default implementation object class for process instances.
When a process instance starts, the instance has to carry an
implementation object that contains the application data. The
specifications for the implementation class is:
(nothing: now managed by generic relation)
This model is used in process simulations: you don't have to define an
application in activities for this; the DefaultAppModel is used
to keep workflow history for displaying to users.
"""
history = models.TextField(editable=False, null=True, blank=True)
comment = models.TextField(null=True, blank=True)
def __unicode__(self):
return 'simulation model %s' % str(self.id)
class Admin:
list_display = ('__unicode__',)
class Meta:
verbose_name='Simulation object'
class Image(models.Model):
'''
An image stored in the database
'''
category = models.CharField(max_length=20, null=True, blank=True)
file = models.ImageField(upload_to='images')
def url(self):
return "%s%s" % (settings.MEDIA_URL, self.file)
@allow_tags
def graphic(self):
'''
generates an *img* html tag for html rendering
'''
return '<img name="image%d" src="%s">' % (self.pk, self.url())
@allow_tags
def graphic_input(self):
'''
generates an *input* html tag with type=image for html rendering
'''
return '<input type=image name=icon src="%s">' % self.url()
def __unicode__(self):
return str(self.file)
class Icon(models.Model):
'''
An image accessible by an url.
Tip: all of the "Image" objects can be imported as "Icon" from the
admin panel.
'''
category = models.CharField(max_length=20, null=True, blank=True)
url = models.URLField(verify_exists=False)
@allow_tags
def graphic(self):
'''
generates an *img* html tag for html rendering
'''
return '<img name="image%d" src="%s">' % (self.pk, self.url)
@allow_tags
def graphic_input(self):
'''
generates an *input* html tag with type=image for html rendering
'''
return '<input type=image name=icon src="%s">' % self.url
def __unicode__(self):
return self.url
class ImageButton(models.Model):
'''
Mapping object between an "action" and an "Icon".
ImageButton objects have also a textual field: label.
'''
action = models.SlugField(primary_key=True)
label = models.CharField(max_length=100)
icon = models.ForeignKey(Icon)
@allow_tags
def graphic(self):
'''
generates an *img* html tag for html rendering
'''
return '<img name="image-%s" src="%s">' % (self.pk, self.icon.url)
@allow_tags
def graphic_input(self):
'''
generates an *input* html tag with type=image for html rendering
'''
return '<input type=image name=image src="%s" value="%s" title="%s">' % (self.icon.url, self.pk, self.label)
def __unicode__(self):
return self.label
| bsd-3-clause |
RiccardoPecora/MP | Lib/site-packages/scipy/ndimage/fourier.py | 55 | 9657 | # Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import types
import numpy
import _ni_support
import _nd_image
def _get_output_fourier(output, input):
if output is None:
if input.dtype.type in [numpy.complex64, numpy.complex128,
numpy.float32]:
output = numpy.zeros(input.shape, dtype = input.dtype)
else:
output = numpy.zeros(input.shape, dtype = numpy.float64)
return_value = output
elif type(output) is types.TypeType:
if output not in [numpy.complex64, numpy.complex128,
numpy.float32, numpy.float64]:
raise RuntimeError("output type not supported")
output = numpy.zeros(input.shape, dtype = output)
return_value = output
else:
if output.shape != input.shape:
raise RuntimeError("output shape not correct")
return_value = None
return output, return_value
def _get_output_fourier_complex(output, input):
if output is None:
if input.dtype.type in [numpy.complex64, numpy.complex128]:
output = numpy.zeros(input.shape, dtype = input.dtype)
else:
output = numpy.zeros(input.shape, dtype = numpy.complex128)
return_value = output
elif type(output) is types.TypeType:
if output not in [numpy.complex64, numpy.complex128]:
raise RuntimeError("output type not supported")
output = numpy.zeros(input.shape, dtype = output)
return_value = output
else:
if output.shape != input.shape:
raise RuntimeError("output shape not correct")
return_value = None
return output, return_value
def fourier_gaussian(input, sigma, n = -1, axis = -1, output = None):
"""
Multi-dimensional Gaussian fourier filter.
The array is multiplied with the fourier transform of a Gaussian
kernel.
Parameters
----------
input : array_like
The input array.
sigma : float or sequence
The sigma of the Gaussian kernel. If a float, `sigma` is the same for
all axes. If a sequence, `sigma` has to contain one value for each
axis.
n : int, optional
If `n` is negative (default), then the input is assumed to be the
result of a complex fft.
If `n` is larger than or equal to zero, the input is assumed to be the
result of a real fft, and `n` gives the length of the array before
transformation along the real transform direction.
axis : int, optional
The axis of the real transform.
output : ndarray, optional
If given, the result of filtering the input is placed in this array.
None is returned in this case.
Returns
-------
return_value : ndarray or None
The filtered input. If `output` is given as a parameter, None is
returned.
"""
input = numpy.asarray(input)
output, return_value = _get_output_fourier(output, input)
axis = _ni_support._check_axis(axis, input.ndim)
sigmas = _ni_support._normalize_sequence(sigma, input.ndim)
sigmas = numpy.asarray(sigmas, dtype = numpy.float64)
if not sigmas.flags.contiguous:
sigmas = sigmas.copy()
_nd_image.fourier_filter(input, sigmas, n, axis, output, 0)
return return_value
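# Illustrative usage (assumes a 2-D image and numpy.fft; not part of this module):
#   freq = numpy.fft.fft2(image)
#   blurred = numpy.fft.ifft2(fourier_gaussian(freq, sigma=4)).real
# i.e. the Gaussian is applied by multiplication in the frequency domain and the
# result is transformed back to obtain the smoothed image.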
def fourier_uniform(input, size, n = -1, axis = -1, output = None):
"""
Multi-dimensional uniform fourier filter.
The array is multiplied with the fourier transform of a box of given
size.
Parameters
----------
input : array_like
The input array.
size : float or sequence
The size of the box used for filtering.
If a float, `size` is the same for all axes. If a sequence, `size` has
to contain one value for each axis.
n : int, optional
If `n` is negative (default), then the input is assumed to be the
result of a complex fft.
If `n` is larger than or equal to zero, the input is assumed to be the
result of a real fft, and `n` gives the length of the array before
transformation along the real transform direction.
axis : int, optional
The axis of the real transform.
output : ndarray, optional
If given, the result of filtering the input is placed in this array.
None is returned in this case.
Returns
-------
return_value : ndarray or None
The filtered input. If `output` is given as a parameter, None is
returned.
"""
input = numpy.asarray(input)
output, return_value = _get_output_fourier(output, input)
axis = _ni_support._check_axis(axis, input.ndim)
sizes = _ni_support._normalize_sequence(size, input.ndim)
sizes = numpy.asarray(sizes, dtype = numpy.float64)
if not sizes.flags.contiguous:
sizes = sizes.copy()
_nd_image.fourier_filter(input, sizes, n, axis, output, 1)
return return_value
def fourier_ellipsoid(input, size, n = -1, axis = -1, output = None):
"""
Multi-dimensional ellipsoid fourier filter.
The array is multiplied with the fourier transform of a ellipsoid of
given sizes.
Parameters
----------
input : array_like
The input array.
size : float or sequence
The size of the box used for filtering.
If a float, `size` is the same for all axes. If a sequence, `size` has
to contain one value for each axis.
n : int, optional
If `n` is negative (default), then the input is assumed to be the
result of a complex fft.
If `n` is larger than or equal to zero, the input is assumed to be the
result of a real fft, and `n` gives the length of the array before
transformation along the real transform direction.
axis : int, optional
The axis of the real transform.
output : ndarray, optional
If given, the result of filtering the input is placed in this array.
None is returned in this case.
Returns
-------
return_value : ndarray or None
The filtered input. If `output` is given as a parameter, None is
returned.
Notes
-----
This function is implemented for arrays of rank 1, 2, or 3.
"""
input = numpy.asarray(input)
output, return_value = _get_output_fourier(output, input)
axis = _ni_support._check_axis(axis, input.ndim)
sizes = _ni_support._normalize_sequence(size, input.ndim)
sizes = numpy.asarray(sizes, dtype = numpy.float64)
if not sizes.flags.contiguous:
sizes = sizes.copy()
_nd_image.fourier_filter(input, sizes, n, axis, output, 2)
return return_value
def fourier_shift(input, shift, n = -1, axis = -1, output = None):
"""
Multi-dimensional fourier shift filter.
The array is multiplied with the fourier transform of a shift operation.
Parameters
----------
input : array_like
The input array.
shift : float or sequence
The shift to apply along each axis, in array elements.
If a float, `shift` is the same for all axes. If a sequence, `shift`
has to contain one value for each axis.
n : int, optional
If `n` is negative (default), then the input is assumed to be the
result of a complex fft.
If `n` is larger than or equal to zero, the input is assumed to be the
result of a real fft, and `n` gives the length of the array before
transformation along the real transform direction.
axis : int, optional
The axis of the real transform.
output : ndarray, optional
If given, the result of shifting the input is placed in this array.
None is returned in this case.
Returns
-------
return_value : ndarray or None
The shifted input. If `output` is given as a parameter, None is
returned.
"""
input = numpy.asarray(input)
output, return_value = _get_output_fourier_complex(output, input)
axis = _ni_support._check_axis(axis, input.ndim)
shifts = _ni_support._normalize_sequence(shift, input.ndim)
shifts = numpy.asarray(shifts, dtype = numpy.float64)
if not shifts.flags.contiguous:
shifts = shifts.copy()
_nd_image.fourier_shift(input, shifts, n, axis, output)
return return_value
| gpl-3.0 |
tallypokemap/Monocle | monocle/spawns.py | 7 | 5784 | import sys
from collections import deque, OrderedDict
from time import time
from itertools import chain
from hashlib import sha256
from . import bounds, db, sanitized as conf
from .shared import get_logger
from .utils import dump_pickle, load_pickle, get_current_hour, time_until_time
class BaseSpawns:
"""Manage spawn points and times"""
def __init__(self):
## Spawns with known times
# {(lat, lon): (spawn_id, spawn_seconds)}
self.known = OrderedDict()
# {spawn_id: despawn_seconds}
self.despawn_times = {}
## Spawns with unknown times
# {(lat, lon)}
self.unknown = set()
self.class_version = 3
self.db_hash = sha256(conf.DB_ENGINE.encode()).digest()
self.log = get_logger('spawns')
def __len__(self):
return len(self.despawn_times)
def __bool__(self):
return len(self.despawn_times) > 0
def update(self):
bound = bool(bounds)
last_migration = conf.LAST_MIGRATION
with db.session_scope() as session:
query = session.query(db.Spawnpoint)
if bound or conf.STAY_WITHIN_MAP:
query = query.filter(db.Spawnpoint.lat >= bounds.south,
db.Spawnpoint.lat <= bounds.north,
db.Spawnpoint.lon >= bounds.west,
db.Spawnpoint.lon <= bounds.east)
known = {}
for spawn in query:
point = spawn.lat, spawn.lon
# skip if point is not within boundaries (if applicable)
if bound and point not in bounds:
continue
if not spawn.updated or spawn.updated <= last_migration:
self.unknown.add(point)
continue
if spawn.duration == 60:
spawn_time = spawn.despawn_time
else:
spawn_time = (spawn.despawn_time + 1800) % 3600
self.despawn_times[spawn.spawn_id] = spawn.despawn_time
known[point] = spawn.spawn_id, spawn_time
self.known = OrderedDict(sorted(known.items(), key=lambda k: k[1][1]))
def after_last(self):
try:
k = next(reversed(self.known))
seconds = self.known[k][1]
return time() % 3600 > seconds
except (StopIteration, KeyError, TypeError):
return False
def get_despawn_time(self, spawn_id, seen):
hour = get_current_hour(now=seen)
try:
despawn_time = self.despawn_times[spawn_id] + hour
if seen > despawn_time:
despawn_time += 3600
return despawn_time
except KeyError:
return None
def unpickle(self):
try:
state = load_pickle('spawns', raise_exception=True)
if all((state['class_version'] == self.class_version,
state['db_hash'] == self.db_hash,
state['bounds_hash'] == hash(bounds),
state['last_migration'] == conf.LAST_MIGRATION)):
self.__dict__.update(state)
return True
else:
self.log.warning('Configuration changed, reloading spawns from DB.')
except FileNotFoundError:
self.log.warning('No spawns pickle found, will create one.')
except (TypeError, KeyError):
self.log.warning('Obsolete or invalid spawns pickle type, reloading from DB.')
return False
def pickle(self):
state = self.__dict__.copy()
del state['log']
state.pop('cells_count', None)
state['bounds_hash'] = hash(bounds)
state['last_migration'] = conf.LAST_MIGRATION
dump_pickle('spawns', state)
@property
def total_length(self):
return len(self.despawn_times) + len(self.unknown) + self.cells_count
class Spawns(BaseSpawns):
def __init__(self):
super().__init__()
self.cells_count = 0
def items(self):
return self.known.items()
def add_known(self, spawn_id, despawn_time, point):
self.despawn_times[spawn_id] = despawn_time
self.unknown.discard(point)
def add_unknown(self, point):
self.unknown.add(point)
def unpickle(self):
result = super().unpickle()
try:
del self.cell_points
except AttributeError:
pass
return result
def mystery_gen(self):
for mystery in self.unknown.copy():
yield mystery
class MoreSpawns(BaseSpawns):
def __init__(self):
super().__init__()
## Coordinates mentioned as "spawn_points" in GetMapObjects response
## May or may not be actual spawn points, more research is needed.
# {(lat, lon)}
self.cell_points = set()
def items(self):
# return a copy since it may be modified
return self.known.copy().items()
def add_known(self, spawn_id, despawn_time, point):
self.despawn_times[spawn_id] = despawn_time
# add so that have_point() will be up to date
self.known[point] = None
self.unknown.discard(point)
self.cell_points.discard(point)
def add_unknown(self, point):
self.unknown.add(point)
self.cell_points.discard(point)
def have_point(self, point):
return point in chain(self.cell_points, self.known, self.unknown)
def mystery_gen(self):
for mystery in chain(self.unknown.copy(), self.cell_points.copy()):
yield mystery
@property
def cells_count(self):
return len(self.cell_points)
sys.modules[__name__] = MoreSpawns() if conf.MORE_POINTS else Spawns()
| mit |
swdream/neutron | neutron/tests/functional/agent/l3/test_namespace_manager.py | 25 | 3626 | # Copyright (c) 2015 Rackspace
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils import uuidutils
from neutron.agent.l3 import dvr_snat_ns
from neutron.agent.l3 import namespace_manager
from neutron.agent.l3 import namespaces
from neutron.agent.linux import ip_lib
from neutron.tests.functional import base
_uuid = uuidutils.generate_uuid
class NamespaceManagerTestFramework(base.BaseSudoTestCase):
def setUp(self):
super(NamespaceManagerTestFramework, self).setUp()
self.agent_conf = mock.MagicMock()
self.agent_conf.router_delete_namespaces = True
self.metadata_driver_mock = mock.Mock()
self.namespace_manager = namespace_manager.NamespaceManager(
self.agent_conf, driver=None, clean_stale=True,
metadata_driver=self.metadata_driver_mock)
def _create_namespace(self, router_id, ns_class):
namespace = ns_class(router_id, self.agent_conf, driver=None,
use_ipv6=False)
namespace.create()
self.addCleanup(self._delete_namespace, namespace)
return namespace.name
def _delete_namespace(self, namespace):
try:
namespace.delete()
except RuntimeError as e:
# If the namespace didn't exist when delete was attempted, mission
# accomplished. Otherwise, re-raise the exception
if 'No such file or directory' not in str(e):
raise e
def _namespace_exists(self, namespace):
ip = ip_lib.IPWrapper(namespace=namespace)
return ip.netns.exists(namespace)
class NamespaceManagerTestCase(NamespaceManagerTestFramework):
def test_namespace_manager(self):
router_id = _uuid()
router_id_to_delete = _uuid()
to_keep = set()
to_delete = set()
to_retrieve = set()
to_keep.add(self._create_namespace(router_id,
namespaces.RouterNamespace))
to_keep.add(self._create_namespace(router_id,
dvr_snat_ns.SnatNamespace))
to_delete.add(self._create_namespace(router_id_to_delete,
dvr_snat_ns.SnatNamespace))
to_retrieve = to_keep | to_delete
with mock.patch.object(namespace_manager.NamespaceManager, 'list_all',
return_value=to_retrieve):
with self.namespace_manager as ns_manager:
for ns_name in to_keep:
id_to_keep = ns_manager.get_prefix_and_id(ns_name)[1]
ns_manager.keep_router(id_to_keep)
for ns_name in to_keep:
self.assertTrue(self._namespace_exists(ns_name))
for ns_name in to_delete:
(self.metadata_driver_mock.destroy_monitored_metadata_proxy.
assert_called_once_with(mock.ANY,
router_id_to_delete,
self.agent_conf))
self.assertFalse(self._namespace_exists(ns_name))
| apache-2.0 |
doganov/edx-platform | openedx/core/djangoapps/safe_sessions/middleware.py | 4 | 17853 | """
This module defines SafeSessionMiddleware that makes use of a
SafeCookieData that cryptographically binds the user to the session id
in the cookie.
The implementation is inspired by the proposal in the following paper:
http://www.cse.msu.edu/~alexliu/publications/Cookie/cookie.pdf
Note: The proposed protocol protects against replay attacks by
incorporating the session key used in the SSL connection. However,
this does not suit our needs since we want the ability to reuse the
same cookie over multiple SSL connections. So instead, we mitigate
replay attacks by enforcing session cookie expiration
(via TimestampSigner) and assuming SESSION_COOKIE_SECURE (see below).
We use django's built-in Signer class, which makes use of a built-in
salted_hmac function that derives a usage-specific key from the
server's SECRET_KEY, as proposed in the paper.
Note: The paper proposes deriving a usage-specific key from the
session's expiration time in order to protect against volume attacks.
However, since django does not always use an expiration time, we
instead use a random key salt to prevent volume attacks.
In fact, we actually use a specialized subclass of Signer called
TimestampSigner. This signer binds a timestamp along with the signed
data and verifies that the signature has not expired. We do this
since django's session stores do not actually verify the expiration
of the session cookies. Django instead relies on the browser to honor
session cookie expiration.
The resulting safe cookie data that gets stored as the value in the
session cookie is a tuple of:
(
version,
session_id,
key_salt,
signature
)
where signature is:
signed_data : base64(HMAC_SHA1(signed_data, usage_key))
where signed_data is:
H(version | session_id | user_id) : timestamp
where usage_key is:
SHA1(key_salt + 'signer' + settings.SECRET_KEY)
Note: We assume that the SESSION_COOKIE_SECURE setting is set to
TRUE to prevent inadvertent leakage of the session cookie to a
person-in-the-middle. The SESSION_COOKIE_SECURE flag indicates
to the browser that the cookie should be sent only over an
SSL-protected channel. Otherwise, a session hijacker could copy
the entire cookie and use it to impersonate the victim.
"""
from django.conf import settings
from django.contrib.auth import SESSION_KEY
from django.contrib.auth.views import redirect_to_login
from django.contrib.sessions.middleware import SessionMiddleware
from django.core import signing
from django.http import HttpResponse
from django.utils.crypto import get_random_string
from hashlib import sha256
from logging import getLogger
from openedx.core.lib.mobile_utils import is_request_from_mobile_app
log = getLogger(__name__)
class SafeCookieError(Exception):
"""
An exception class for safe cookie related errors.
"""
def __init__(self, error_message):
super(SafeCookieError, self).__init__(error_message)
log.error(error_message)
class SafeCookieData(object):
"""
Cookie data that cryptographically binds and timestamps the user
to the session id. It verifies the freshness of the cookie by
checking its creation date using settings.SESSION_COOKIE_AGE.
"""
CURRENT_VERSION = '1'
SEPARATOR = u"|"
def __init__(self, version, session_id, key_salt, signature):
"""
Arguments:
version (string): The data model version of the safe cookie
data that is checked for forward and backward
compatibility.
session_id (string): Unique and unguessable session
identifier to which this safe cookie data is bound.
key_salt (string): A securely generated random string that
is used to derive a usage-specific secret key for
signing the safe cookie data to protect against volume
attacks.
signature (string): Cryptographically created signature
for the safe cookie data that binds the session_id
and its corresponding user as described at the top of
this file.
"""
self.version = version
self.session_id = session_id
self.key_salt = key_salt
self.signature = signature
@classmethod
def create(cls, session_id, user_id):
"""
Factory method for creating the cryptographically bound
safe cookie data for the session and the user.
Raises SafeCookieError if session_id is None.
"""
cls._validate_cookie_params(session_id, user_id)
safe_cookie_data = SafeCookieData(
cls.CURRENT_VERSION,
session_id,
key_salt=get_random_string(),
signature=None,
)
safe_cookie_data.sign(user_id)
return safe_cookie_data
@classmethod
def parse(cls, safe_cookie_string):
"""
Factory method that parses the serialized safe cookie data,
verifies the version, and returns the safe cookie object.
Raises SafeCookieError if there are any issues parsing the
safe_cookie_string.
"""
try:
raw_cookie_components = safe_cookie_string.split(cls.SEPARATOR)
safe_cookie_data = SafeCookieData(*raw_cookie_components)
except TypeError:
raise SafeCookieError(
"SafeCookieData BWC parse error: {0!r}.".format(safe_cookie_string)
)
else:
if safe_cookie_data.version != cls.CURRENT_VERSION:
raise SafeCookieError(
"SafeCookieData version {0!r} is not supported. Current version is {1}.".format(
safe_cookie_data.version,
cls.CURRENT_VERSION,
))
return safe_cookie_data
def __unicode__(self):
"""
Returns a string serialization of the safe cookie data.
"""
return self.SEPARATOR.join([self.version, self.session_id, self.key_salt, self.signature])
def sign(self, user_id):
"""
Computes the signature of this safe cookie data.
A signed value of hash(version | session_id | user_id):timestamp
with a usage-specific key derived from key_salt.
"""
data_to_sign = self._compute_digest(user_id)
self.signature = signing.dumps(data_to_sign, salt=self.key_salt)
def verify(self, user_id):
"""
Verifies the signature of this safe cookie data.
Successful verification implies this cookie data is fresh
(not expired) and bound to the given user.
"""
try:
unsigned_data = signing.loads(self.signature, salt=self.key_salt, max_age=settings.SESSION_COOKIE_AGE)
if unsigned_data == self._compute_digest(user_id):
return True
log.error("SafeCookieData '%r' is not bound to user '%s'.", unicode(self), user_id)
except signing.BadSignature as sig_error:
log.error(
"SafeCookieData signature error for cookie data {0!r}: {1}".format( # pylint: disable=logging-format-interpolation
unicode(self),
sig_error.message,
)
)
return False
def _compute_digest(self, user_id):
"""
Returns hash(version | session_id | user_id |)
"""
hash_func = sha256()
for data_item in [self.version, self.session_id, user_id]:
hash_func.update(unicode(data_item))
hash_func.update('|')
return hash_func.hexdigest()
@staticmethod
def _validate_cookie_params(session_id, user_id):
"""
Validates the given parameters for cookie creation.
Raises SafeCookieError if session_id is None.
"""
# Compare against unicode(None) as well since the 'value'
# property of a cookie automatically serializes None to a
# string.
if not session_id or session_id == unicode(None):
# The session ID should always be valid in the cookie.
raise SafeCookieError(
"SafeCookieData not created due to invalid value for session_id '{}' for user_id '{}'.".format(
session_id,
user_id,
))
if not user_id:
# The user ID is sometimes not set for
# 3rd party Auth and external Auth transactions
# as some of the session requests are made as
# Anonymous users.
log.warning(
"SafeCookieData received empty user_id '%s' for session_id '%s'.",
user_id,
session_id,
)
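# Illustrative round trip for the cookie format described in the module
# docstring (hypothetical values, assumes Django settings are configured;
# not part of the original module):
#   data = SafeCookieData.create(session_id='0123abcd', user_id=42)
#   cookie_value = unicode(data)  # '1|0123abcd|<key_salt>|<timestamped signature>'
#   SafeCookieData.parse(cookie_value).verify(42)  # True while the cookie is fresh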
class SafeSessionMiddleware(SessionMiddleware):
"""
A safer middleware implementation that uses SafeCookieData instead
of just the session id to lookup and verify a user's session.
"""
def process_request(self, request):
"""
Processing the request is a multi-step process, as follows:
Step 1. The safe_cookie_data is parsed and verified from the
session cookie.
Step 2. The session_id is retrieved from the safe_cookie_data
and stored in place of the session cookie value, to be used by
Django's Session middleware.
Step 3. Call Django's Session Middleware to find the session
corresponding to the session_id and to set the session in the
request.
Step 4. Once the session is retrieved, verify that the user
bound in the safe_cookie_data matches the user attached to the
server's session information.
Step 5. If all is successful, the now verified user_id is stored
separately in the request object so it is available for another
final verification before sending the response (in
process_response).
"""
cookie_data_string = request.COOKIES.get(settings.SESSION_COOKIE_NAME)
if cookie_data_string:
try:
safe_cookie_data = SafeCookieData.parse(cookie_data_string) # Step 1
except SafeCookieError:
# For security reasons, we don't support requests with
# older or invalid session cookie models.
return self._on_user_authentication_failed(request)
else:
request.COOKIES[settings.SESSION_COOKIE_NAME] = safe_cookie_data.session_id # Step 2
process_request_response = super(SafeSessionMiddleware, self).process_request(request) # Step 3
if process_request_response:
# The process_request pipeline has been short circuited so
# return the response.
return process_request_response
if cookie_data_string and request.session.get(SESSION_KEY):
user_id = self.get_user_id_from_session(request)
if safe_cookie_data.verify(user_id): # Step 4
request.safe_cookie_verified_user_id = user_id # Step 5
else:
return self._on_user_authentication_failed(request)
def process_response(self, request, response):
"""
When creating a cookie for the response, a safe_cookie_data
is created and put in place of the session_id in the session
cookie.
Also, the session cookie is deleted if prior verification failed
or the designated user in the request has changed since the
original request.
Processing the response is a multi-step process, as follows:
Step 1. Call the parent's method to generate the basic cookie.
Step 2. Verify that the user marked at the time of
process_request matches the user at this time when processing
the response. If not, log the error.
Step 3. If a cookie is being sent with the response, update
the cookie by replacing its session_id with a safe_cookie_data
that binds the session and its corresponding user.
Step 4. Delete the cookie, if it's marked for deletion.
"""
response = super(SafeSessionMiddleware, self).process_response(request, response) # Step 1
if not _is_cookie_marked_for_deletion(request) and _is_cookie_present(response):
try:
user_id_in_session = self.get_user_id_from_session(request)
self._verify_user(request, user_id_in_session) # Step 2
# Use the user_id marked in the session instead of the
# one in the request in case the user is not set in the
# request, for example during Anonymous API access.
self.update_with_safe_session_cookie(response.cookies, user_id_in_session) # Step 3
except SafeCookieError:
_mark_cookie_for_deletion(request)
if _is_cookie_marked_for_deletion(request):
_delete_cookie(response) # Step 4
return response
@staticmethod
def _on_user_authentication_failed(request):
"""
To be called when user authentication fails when processing
requests in the middleware. Sets a flag to delete the user's
cookie and redirects the user to the login page.
"""
_mark_cookie_for_deletion(request)
# Mobile apps have custom handling of authentication failures. They
# should *not* be redirected to the website's login page.
if is_request_from_mobile_app(request):
return HttpResponse(status=401)
return redirect_to_login(request.path)
@staticmethod
def _verify_user(request, userid_in_session):
"""
Logs an error if the user marked at the time of process_request
does not match either the current user in the request or the
given userid_in_session.
"""
if hasattr(request, 'safe_cookie_verified_user_id'):
if request.safe_cookie_verified_user_id != request.user.id:
log.warning(
"SafeCookieData user at request '{0}' does not match user at response: '{1}'".format( # pylint: disable=logging-format-interpolation
request.safe_cookie_verified_user_id,
request.user.id,
),
)
if request.safe_cookie_verified_user_id != userid_in_session:
log.error(
"SafeCookieData user at request '{0}' does not match user in session: '{1}'".format( # pylint: disable=logging-format-interpolation
request.safe_cookie_verified_user_id,
userid_in_session,
),
)
@staticmethod
def get_user_id_from_session(request):
"""
Return the user_id stored in the session of the request.
"""
# Starting in django 1.8, the user_id is now serialized
# as a string in the session. Before, it was stored
# directly as an integer. If back-porting to prior to
# django 1.8, replace the implementation of this method
# with:
# return request.session[SESSION_KEY]
from django.contrib.auth import _get_user_session_key
try:
return _get_user_session_key(request)
except KeyError:
return None
@staticmethod
def set_user_id_in_session(request, user):
"""
Stores the user_id in the session of the request.
Used by unit tests.
"""
# Starting in django 1.8, the user_id is now serialized
# as a string in the session. Before, it was stored
# directly as an integer. If back-porting to prior to
# django 1.8, replace the implementation of this method
# with:
# request.session[SESSION_KEY] = user.id
request.session[SESSION_KEY] = user._meta.pk.value_to_string(user) # pylint: disable=protected-access
@staticmethod
def update_with_safe_session_cookie(cookies, user_id):
"""
Replaces the session_id in the session cookie with a freshly
computed safe_cookie_data.
"""
# Create safe cookie data that binds the user with the session
# in place of just storing the session_key in the cookie.
safe_cookie_data = SafeCookieData.create(
cookies[settings.SESSION_COOKIE_NAME].value,
user_id,
)
# Update the cookie's value with the safe_cookie_data.
cookies[settings.SESSION_COOKIE_NAME] = unicode(safe_cookie_data)
def _mark_cookie_for_deletion(request):
"""
Updates the given request object to designate that the session
cookie should be deleted.
"""
request.need_to_delete_cookie = True
def _is_cookie_marked_for_deletion(request):
"""
Returns whether the session cookie has been designated for deletion
in the given request object.
"""
return getattr(request, 'need_to_delete_cookie', False)
def _is_cookie_present(response):
"""
Returns whether the session cookie is present in the response.
"""
return (
response.cookies.get(settings.SESSION_COOKIE_NAME) and # cookie in response
response.cookies[settings.SESSION_COOKIE_NAME].value # cookie is not empty
)
def _delete_cookie(response):
"""
Delete the cookie by setting the expiration to a date in the past,
while maintaining the domain, secure, and httponly settings.
"""
response.set_cookie(
settings.SESSION_COOKIE_NAME,
max_age=0,
expires='Thu, 01-Jan-1970 00:00:00 GMT',
domain=settings.SESSION_COOKIE_DOMAIN,
secure=settings.SESSION_COOKIE_SECURE or None,
httponly=settings.SESSION_COOKIE_HTTPONLY or None,
)
| agpl-3.0 |
edx/lettuce | tests/integration/lib/Django-1.2.5/django/contrib/gis/geos/tests/test_io.py | 321 | 4159 | import binascii, ctypes, unittest
from django.contrib.gis.geos import GEOSGeometry, WKTReader, WKTWriter, WKBReader, WKBWriter, geos_version_info
class GEOSIOTest(unittest.TestCase):
def test01_wktreader(self):
# Creating a WKTReader instance
wkt_r = WKTReader()
wkt = 'POINT (5 23)'
# read() should return a GEOSGeometry
ref = GEOSGeometry(wkt)
g1 = wkt_r.read(wkt)
g2 = wkt_r.read(unicode(wkt))
for geom in (g1, g2):
self.assertEqual(ref, geom)
# Should only accept basestring objects.
self.assertRaises(TypeError, wkt_r.read, 1)
self.assertRaises(TypeError, wkt_r.read, buffer('foo'))
def test02_wktwriter(self):
# Creating a WKTWriter instance, testing its ptr property.
wkt_w = WKTWriter()
self.assertRaises(TypeError, wkt_w._set_ptr, WKTReader.ptr_type())
ref = GEOSGeometry('POINT (5 23)')
ref_wkt = 'POINT (5.0000000000000000 23.0000000000000000)'
self.assertEqual(ref_wkt, wkt_w.write(ref))
def test03_wkbreader(self):
# Creating a WKBReader instance
wkb_r = WKBReader()
hex = '000000000140140000000000004037000000000000'
wkb = buffer(binascii.a2b_hex(hex))
ref = GEOSGeometry(hex)
# read() should return a GEOSGeometry on either a hex string or
# a WKB buffer.
g1 = wkb_r.read(wkb)
g2 = wkb_r.read(hex)
for geom in (g1, g2):
self.assertEqual(ref, geom)
bad_input = (1, 5.23, None, False)
for bad_wkb in bad_input:
self.assertRaises(TypeError, wkb_r.read, bad_wkb)
def test04_wkbwriter(self):
wkb_w = WKBWriter()
# Representations of 'POINT (5 23)' in hex -- one normal and
# the other with the byte order changed.
g = GEOSGeometry('POINT (5 23)')
hex1 = '010100000000000000000014400000000000003740'
wkb1 = buffer(binascii.a2b_hex(hex1))
hex2 = '000000000140140000000000004037000000000000'
wkb2 = buffer(binascii.a2b_hex(hex2))
self.assertEqual(hex1, wkb_w.write_hex(g))
self.assertEqual(wkb1, wkb_w.write(g))
# Ensuring bad byteorders are not accepted.
for bad_byteorder in (-1, 2, 523, 'foo', None):
# Equivalent of `wkb_w.byteorder = bad_byteorder`
self.assertRaises(ValueError, wkb_w._set_byteorder, bad_byteorder)
# Setting the byteorder to 0 (for Big Endian)
wkb_w.byteorder = 0
self.assertEqual(hex2, wkb_w.write_hex(g))
self.assertEqual(wkb2, wkb_w.write(g))
# Back to Little Endian
wkb_w.byteorder = 1
# Now, trying out the 3D and SRID flags.
g = GEOSGeometry('POINT (5 23 17)')
g.srid = 4326
hex3d = '0101000080000000000000144000000000000037400000000000003140'
wkb3d = buffer(binascii.a2b_hex(hex3d))
hex3d_srid = '01010000A0E6100000000000000000144000000000000037400000000000003140'
wkb3d_srid = buffer(binascii.a2b_hex(hex3d_srid))
# Ensuring bad output dimensions are not accepted
for bad_outdim in (-1, 0, 1, 4, 423, 'foo', None):
# Equivalent of `wkb_w.outdim = bad_outdim`
self.assertRaises(ValueError, wkb_w._set_outdim, bad_outdim)
# These tests will fail on 3.0.0 because of a bug that was fixed in 3.1:
# http://trac.osgeo.org/geos/ticket/216
if not geos_version_info()['version'].startswith('3.0.'):
# Now setting the output dimensions to be 3
wkb_w.outdim = 3
self.assertEqual(hex3d, wkb_w.write_hex(g))
self.assertEqual(wkb3d, wkb_w.write(g))
# Telling the WKBWriter to include the srid in the representation.
wkb_w.srid = True
self.assertEqual(hex3d_srid, wkb_w.write_hex(g))
self.assertEqual(wkb3d_srid, wkb_w.write(g))
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(GEOSIOTest))
return s
def run(verbosity=2):
unittest.TextTestRunner(verbosity=verbosity).run(suite())
| gpl-3.0 |
cloudera/whirr | contrib/python/src/py/hadoop/cloud/providers/dummy.py | 16 | 2282 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from hadoop.cloud.cluster import Cluster
from hadoop.cloud.cluster import Instance
logger = logging.getLogger(__name__)
class DummyCluster(Cluster):
@staticmethod
def get_clusters_with_role(role, state="running"):
logger.info("get_clusters_with_role(%s, %s)", role, state)
return ["dummy-cluster"]
def __init__(self, name, config_dir):
super(DummyCluster, self).__init__(name, config_dir)
logger.info("__init__(%s, %s)", name, config_dir)
def get_provider_code(self):
return "dummy"
def authorize_role(self, role, from_port, to_port, cidr_ip):
logger.info("authorize_role(%s, %s, %s, %s)", role, from_port, to_port,
cidr_ip)
def get_instances_in_role(self, role, state_filter=None):
logger.info("get_instances_in_role(%s, %s)", role, state_filter)
return [Instance(1, '127.0.0.1', '127.0.0.1')]
def print_status(self, roles, state_filter="running"):
logger.info("print_status(%s, %s)", roles, state_filter)
def launch_instances(self, role, number, image_id, size_id,
instance_user_data, **kwargs):
logger.info("launch_instances(%s, %s, %s, %s, %s, %s)", role, number,
image_id, size_id, instance_user_data, str(kwargs))
return [1]
def wait_for_instances(self, instance_ids, timeout=600):
logger.info("wait_for_instances(%s, %s)", instance_ids, timeout)
def terminate(self):
logger.info("terminate")
def delete(self):
logger.info("delete")
| apache-2.0 |
kirienko/gourmet | tests/test_interactive_importer.py | 1 | 1569 | import unittest
from gourmet.importers import interactive_importer
class TestConvenientImporter (unittest.TestCase):
def setUp (self):
self.ci = interactive_importer.ConvenientImporter()
def testImport (self):
self.ci.start_rec()
self.ci.add_attribute('title','Test')
self.ci.add_attribute('category','foo')
self.ci.add_attribute('category','bar')
self.ci.add_ings_from_text(
'''6 garlic cloves, peeled
1/2 pound linguine
1/4 cup plus 1 tablespoon olive oil
2 to 2 1/2 pounds small fresh squid (about 10), cleaned and cut into 3/4-inch thick rings, tentacles cut in half*
1 1/2 teaspoons Baby Bam or Emeril's Original Essence, to taste
1/4 cup chopped green onions
1 teaspoon crushed red pepper, or to taste
1/4 teaspoon salt
1/4 cup fish stock, shrimp stock, or water
2 tablespoons fresh lemon juice
1 tablespoon unsalted butter
1/4 cup chopped fresh parsley leaves
1/2 cup freshly grated Parmesan'''
)
self.ci.commit_rec()
rec = self.ci.added_recs[-1]
self.assertEqual(rec.title,'Test')
cats = self.ci.rd.get_cats(rec)
cats.sort()
self.assertEqual(len(cats),2)
self.assertEqual(cats[0],'bar')
self.assertEqual(cats[1],'foo')
ings = self.ci.rd.get_ings(rec)
self.assertEqual(len(ings),13)
self.assertEqual(ings[1].amount,0.5)
self.assertEqual(ings[1].unit,'pound')
self.assertEqual(ings[1].item,'linguine')
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
CforED/Machine-Learning | sklearn/cluster/tests/test_k_means.py | 41 | 27789 | """Testing for K-means"""
import sys
import numpy as np
from scipy import sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.utils.testing import if_not_mac_os
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.extmath import row_norms
from sklearn.metrics.cluster import v_measure_score
from sklearn.cluster import KMeans, k_means
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster.k_means_ import _labels_inertia
from sklearn.cluster.k_means_ import _mini_batch_step
from sklearn.datasets.samples_generator import make_blobs
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.exceptions import DataConversionWarning
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 100
n_clusters, n_features = centers.shape
X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
X_csr = sp.csr_matrix(X)
def test_kmeans_dtype():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
X = (X * 10).astype(np.uint8)
km = KMeans(n_init=1).fit(X)
pred_x = assert_warns(DataConversionWarning, km.predict, X)
assert_array_equal(km.labels_, pred_x)
def test_labels_assignment_and_inertia():
# pure numpy implementation as easily auditable reference gold
# implementation
rng = np.random.RandomState(42)
noisy_centers = centers + rng.normal(size=centers.shape)
labels_gold = - np.ones(n_samples, dtype=np.int)
mindist = np.empty(n_samples)
mindist.fill(np.infty)
for center_id in range(n_clusters):
dist = np.sum((X - noisy_centers[center_id]) ** 2, axis=1)
labels_gold[dist < mindist] = center_id
mindist = np.minimum(dist, mindist)
inertia_gold = mindist.sum()
assert_true((mindist >= 0.0).all())
assert_true((labels_gold != -1).all())
# perform label assignment using the dense array input
x_squared_norms = (X ** 2).sum(axis=1)
labels_array, inertia_array = _labels_inertia(
X, x_squared_norms, noisy_centers)
assert_array_almost_equal(inertia_array, inertia_gold)
assert_array_equal(labels_array, labels_gold)
# perform label assignment using the sparse CSR input
x_squared_norms_from_csr = row_norms(X_csr, squared=True)
labels_csr, inertia_csr = _labels_inertia(
X_csr, x_squared_norms_from_csr, noisy_centers)
assert_array_almost_equal(inertia_csr, inertia_gold)
assert_array_equal(labels_csr, labels_gold)
def test_minibatch_update_consistency():
# Check that dense and sparse minibatch update give the same results
rng = np.random.RandomState(42)
old_centers = centers + rng.normal(size=centers.shape)
new_centers = old_centers.copy()
new_centers_csr = old_centers.copy()
counts = np.zeros(new_centers.shape[0], dtype=np.int32)
counts_csr = np.zeros(new_centers.shape[0], dtype=np.int32)
x_squared_norms = (X ** 2).sum(axis=1)
x_squared_norms_csr = row_norms(X_csr, squared=True)
buffer = np.zeros(centers.shape[1], dtype=np.double)
buffer_csr = np.zeros(centers.shape[1], dtype=np.double)
# extract a small minibatch
X_mb = X[:10]
X_mb_csr = X_csr[:10]
x_mb_squared_norms = x_squared_norms[:10]
x_mb_squared_norms_csr = x_squared_norms_csr[:10]
# step 1: compute the dense minibatch update
old_inertia, incremental_diff = _mini_batch_step(
X_mb, x_mb_squared_norms, new_centers, counts,
buffer, 1, None, random_reassign=False)
assert_greater(old_inertia, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels, new_inertia = _labels_inertia(
X_mb, x_mb_squared_norms, new_centers)
assert_greater(new_inertia, 0.0)
assert_less(new_inertia, old_inertia)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers - old_centers) ** 2)
assert_almost_equal(incremental_diff, effective_diff)
# step 2: compute the sparse minibatch update
old_inertia_csr, incremental_diff_csr = _mini_batch_step(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr, counts_csr,
buffer_csr, 1, None, random_reassign=False)
assert_greater(old_inertia_csr, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels_csr, new_inertia_csr = _labels_inertia(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr)
assert_greater(new_inertia_csr, 0.0)
assert_less(new_inertia_csr, old_inertia_csr)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers_csr - old_centers) ** 2)
assert_almost_equal(incremental_diff_csr, effective_diff)
# step 3: check that sparse and dense updates lead to the same results
assert_array_equal(labels, labels_csr)
assert_array_almost_equal(new_centers, new_centers_csr)
assert_almost_equal(incremental_diff, incremental_diff_csr)
assert_almost_equal(old_inertia, old_inertia_csr)
assert_almost_equal(new_inertia, new_inertia_csr)
def _check_fitted_model(km):
# check that the number of cluster centers and distinct labels match
# the expectation
centers = km.cluster_centers_
assert_equal(centers.shape, (n_clusters, n_features))
labels = km.labels_
assert_equal(np.unique(labels).shape[0], n_clusters)
# check that the label assignment is perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(km.inertia_, 0.0)
# check error on dataset being too small
assert_raises(ValueError, km.fit, [[0., 1.]])
def test_k_means_plus_plus_init():
km = KMeans(init="k-means++", n_clusters=n_clusters,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_new_centers():
# Explore the part of the code where a new center is reassigned
X = np.array([[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 0, 0]])
labels = [0, 1, 2, 1, 1, 2]
bad_centers = np.array([[+0, 1, 0, 0],
[.2, 0, .2, .2],
[+0, 0, 0, 0]])
km = KMeans(n_clusters=3, init=bad_centers, n_init=1, max_iter=10,
random_state=1)
for this_X in (X, sp.coo_matrix(X)):
km.fit(this_X)
this_labels = km.labels_
# Reorder the labels so that the first instance is in cluster 0,
# the second in cluster 1, ...
this_labels = np.unique(this_labels, return_index=True)[1][this_labels]
np.testing.assert_array_equal(this_labels, labels)
@if_safe_multiprocessing_with_blas
def test_k_means_plus_plus_init_2_jobs():
if sys.version_info[:2] < (3, 4):
raise SkipTest(
"Possible multi-process bug with some BLAS under Python < 3.4")
km = KMeans(init="k-means++", n_clusters=n_clusters, n_jobs=2,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_precompute_distances_flag():
# check that an error is raised if the precompute_distances flag is
# invalid
km = KMeans(precompute_distances="wrong")
assert_raises(ValueError, km.fit, X)
def test_k_means_plus_plus_init_sparse():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_random_init():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X)
_check_fitted_model(km)
def test_k_means_random_init_sparse():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_plus_plus_init_not_precomputed():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_random_init_not_precomputed():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_perfect_init():
km = KMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42,
n_init=1)
km.fit(X)
_check_fitted_model(km)
def test_k_means_n_init():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
# two regression tests on bad n_init argument
# previous bug: n_init <= 0 threw non-informative TypeError (#3858)
assert_raises_regex(ValueError, "n_init", KMeans(n_init=0).fit, X)
assert_raises_regex(ValueError, "n_init", KMeans(n_init=-1).fit, X)
def test_k_means_explicit_init_shape():
# test for sensible errors when giving explicit init
# with wrong number of features or clusters
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 3))
for Class in [KMeans, MiniBatchKMeans]:
# mismatch of number of features
km = Class(n_init=1, init=X[:, :2], n_clusters=len(X))
msg = "does not match the number of features of the data"
assert_raises_regex(ValueError, msg, km.fit, X)
# for callable init
km = Class(n_init=1, init=lambda X_, k, random_state: X_[:, :2], n_clusters=len(X))
assert_raises_regex(ValueError, msg, km.fit, X)
# mismatch of number of clusters
msg = "does not match the number of clusters"
km = Class(n_init=1, init=X[:2, :], n_clusters=3)
assert_raises_regex(ValueError, msg, km.fit, X)
# for callable init
km = Class(n_init=1, init=lambda X_, k, random_state: X_[:2, :], n_clusters=3)
assert_raises_regex(ValueError, msg, km.fit, X)
def test_k_means_fortran_aligned_data():
# Check that KMeans works well even if X is Fortran-aligned data.
X = np.asfortranarray([[0, 0], [0, 1], [0, 1]])
centers = np.array([[0, 0], [0, 1]])
labels = np.array([0, 1, 1])
km = KMeans(n_init=1, init=centers, precompute_distances=False,
random_state=42, n_clusters=2)
km.fit(X)
assert_array_equal(km.cluster_centers_, centers)
assert_array_equal(km.labels_, labels)
def test_mb_k_means_plus_plus_init_dense_array():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X)
_check_fitted_model(mb_k_means)
def test_mb_kmeans_verbose():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
mb_k_means.fit(X)
finally:
sys.stdout = old_stdout
def test_mb_k_means_plus_plus_init_sparse_matrix():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_init_with_large_k():
mb_k_means = MiniBatchKMeans(init='k-means++', init_size=10, n_clusters=20)
# Check that a warning is raised, as the number of clusters is larger
# than the init_size
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_random_init_dense_array():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_random_init_sparse_csr():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_perfect_init_dense_array():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_init_multiple_runs_with_explicit_centers():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=10)
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_perfect_init_sparse_csr():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_sensible_reassign_fit():
# check if identical initial clusters are reassigned
# also a regression test for when there are more desired reassignments than
# samples.
zeroed_X, true_labels = make_blobs(n_samples=100, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=10, random_state=42,
init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
# do the same with batch-size > X.shape[0] (regression test)
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=201,
random_state=42, init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_sensible_reassign_partial_fit():
zeroed_X, true_labels = make_blobs(n_samples=n_samples, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, random_state=42, init="random")
for i in range(100):
mb_k_means.partial_fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_reassign():
# Give a perfect initialization, but a large reassignment_ratio,
# as a result all the centers should be reassigned and the model
# should no longer be good
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
random_state=42)
mb_k_means.fit(this_X)
score_before = mb_k_means.score(this_X)
try:
old_stdout = sys.stdout
sys.stdout = StringIO()
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1, verbose=True)
finally:
sys.stdout = old_stdout
assert_greater(score_before, mb_k_means.score(this_X))
# Give a perfect initialization, with a small reassignment_ratio,
# no center should be reassigned
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
init=centers.copy(),
random_state=42, n_init=1)
mb_k_means.fit(this_X)
clusters_before = mb_k_means.cluster_centers_
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1e-15)
assert_array_almost_equal(clusters_before, mb_k_means.cluster_centers_)
def test_minibatch_with_many_reassignments():
# Test for the case that the number of clusters to reassign is bigger
# than the batch_size
n_samples = 550
rnd = np.random.RandomState(42)
X = rnd.uniform(size=(n_samples, 10))
# Check that the fit works if n_clusters is bigger than the batch_size.
# Run the test with 550 clusters and 550 samples, because it turned out
# that these values ensure that the number of clusters to reassign
# is always bigger than the batch_size
n_clusters = 550
MiniBatchKMeans(n_clusters=n_clusters,
batch_size=100,
init_size=n_samples,
random_state=42).fit(X)
def test_sparse_mb_k_means_callable_init():
def test_init(X, k, random_state):
return centers
# Small test to check that giving the wrong number of centers
# raises a meaningful error
msg = "does not match the number of clusters"
assert_raises_regex(ValueError, msg, MiniBatchKMeans(init=test_init,
random_state=42).fit,
X_csr)
# Now check that the fit actually works
mb_k_means = MiniBatchKMeans(n_clusters=3, init=test_init,
random_state=42).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_mini_batch_k_means_random_init_partial_fit():
km = MiniBatchKMeans(n_clusters=n_clusters, init="random", random_state=42)
# use the partial_fit API for online learning
for X_minibatch in np.array_split(X, 10):
km.partial_fit(X_minibatch)
# compute the labeling on the complete dataset
labels = km.predict(X)
assert_equal(v_measure_score(true_labels, labels), 1.0)
def test_minibatch_default_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
batch_size=10, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size_, 3 * mb_k_means.batch_size)
_check_fitted_model(mb_k_means)
def test_minibatch_tol():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=10,
random_state=42, tol=.01).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_set_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
init_size=666, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size, 666)
assert_equal(mb_k_means.init_size_, n_samples)
_check_fitted_model(mb_k_means)
def test_k_means_invalid_init():
km = KMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_mini_match_k_means_invalid_init():
km = MiniBatchKMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_k_means_copyx():
# Check if copy_x=False returns nearly equal X after de-centering.
my_X = X.copy()
km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)
km.fit(my_X)
_check_fitted_model(km)
# check if my_X is centered
assert_array_almost_equal(my_X, X)
def test_k_means_non_collapsed():
# Check that k_means with a bad initialization does not yield a singleton
# Starting with bad centers that are quickly ignored should not
# result in a repositioning of the centers to the center of mass that
# would lead to collapsed centers, which in turn makes the clustering
# dependent on numerical instabilities.
my_X = np.array([[1.1, 1.1], [0.9, 1.1], [1.1, 0.9], [0.9, 1.1]])
array_init = np.array([[1.0, 1.0], [5.0, 5.0], [-5.0, -5.0]])
km = KMeans(init=array_init, n_clusters=3, random_state=42, n_init=1)
km.fit(my_X)
# centers must not have collapsed
assert_equal(len(np.unique(km.labels_)), 3)
centers = km.cluster_centers_
assert_true(np.linalg.norm(centers[0] - centers[1]) >= 0.1)
assert_true(np.linalg.norm(centers[0] - centers[2]) >= 0.1)
assert_true(np.linalg.norm(centers[1] - centers[2]) >= 0.1)
def test_predict():
km = KMeans(n_clusters=n_clusters, random_state=42)
km.fit(X)
# sanity check: predict centroid labels
pred = km.predict(km.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = km.predict(X)
assert_array_equal(pred, km.labels_)
# re-predict labels for training set using fit_predict
pred = km.fit_predict(X)
assert_array_equal(pred, km.labels_)
def test_score():
km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42, n_init=1)
s1 = km1.fit(X).score(X)
km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42, n_init=1)
s2 = km2.fit(X).score(X)
assert_greater(s2, s1)
def test_predict_minibatch_dense_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, random_state=40).fit(X)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = mb_k_means.predict(X)
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_kmeanspp_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='k-means++',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# check that models trained on sparse input also work for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_random_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='random',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# check that models trained on sparse input also work for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_input_dtypes():
X_list = [[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]]
X_int = np.array(X_list, dtype=np.int32)
X_int_csr = sp.csr_matrix(X_int)
init_int = X_int[:2]
fitted_models = [
KMeans(n_clusters=2).fit(X_list),
KMeans(n_clusters=2).fit(X_int),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_list),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_int),
# mini batch kmeans is very unstable on such a small dataset hence
# we use many inits
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_list),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int_csr),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_list),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int_csr),
]
expected_labels = [0, 1, 1, 0, 0, 1]
scores = np.array([v_measure_score(expected_labels, km.labels_)
for km in fitted_models])
assert_array_equal(scores, np.ones(scores.shape[0]))
def test_transform():
km = KMeans(n_clusters=n_clusters)
km.fit(X)
X_new = km.transform(km.cluster_centers_)
for c in range(n_clusters):
assert_equal(X_new[c, c], 0)
for c2 in range(n_clusters):
if c != c2:
assert_greater(X_new[c, c2], 0)
def test_fit_transform():
X1 = KMeans(n_clusters=3, random_state=51).fit(X).transform(X)
X2 = KMeans(n_clusters=3, random_state=51).fit_transform(X)
assert_array_equal(X1, X2)
def test_predict_equal_labels():
km = KMeans(random_state=13, n_jobs=1, n_init=1, max_iter=1)
km.fit(X)
assert_array_equal(km.predict(X), km.labels_)
def test_n_init():
# Check that increasing the number of inits increases the quality
n_runs = 5
n_init_range = [1, 5, 10]
inertia = np.zeros((len(n_init_range), n_runs))
for i, n_init in enumerate(n_init_range):
for j in range(n_runs):
km = KMeans(n_clusters=n_clusters, init="random", n_init=n_init,
random_state=j).fit(X)
inertia[i, j] = km.inertia_
inertia = inertia.mean(axis=1)
failure_msg = ("Inertia %r should be decreasing"
" when n_init is increasing.") % list(inertia)
for i in range(len(n_init_range) - 1):
assert_true(inertia[i] >= inertia[i + 1], failure_msg)
def test_k_means_function():
# test calling the k_means function directly
# catch output
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters,
verbose=True)
finally:
sys.stdout = old_stdout
centers = cluster_centers
assert_equal(centers.shape, (n_clusters, n_features))
labels = labels
assert_equal(np.unique(labels).shape[0], n_clusters)
# check that the label assignment is perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(inertia, 0.0)
# check warning when centers are passed
assert_warns(RuntimeWarning, k_means, X, n_clusters=n_clusters,
init=centers)
# too many clusters desired
assert_raises(ValueError, k_means, X, n_clusters=X.shape[0] + 1)
def test_x_squared_norms_init_centroids():
"""Test that x_squared_norms can be None in _init_centroids"""
from sklearn.cluster.k_means_ import _init_centroids
X_norms = np.sum(X**2, axis=1)
precompute = _init_centroids(
X, 3, "k-means++", random_state=0, x_squared_norms=X_norms)
assert_array_equal(
precompute,
_init_centroids(X, 3, "k-means++", random_state=0))
def test_max_iter_error():
km = KMeans(max_iter=-1)
assert_raise_message(ValueError, 'Number of iterations should be', km.fit, X)
| bsd-3-clause |
Fafou/Sick-Beard | sickbeard/sab.py | 40 | 8100 | # Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import urllib, httplib
import datetime
import sickbeard
from lib import MultipartPostHandler
import urllib2, cookielib
try:
import json
except ImportError:
from lib import simplejson as json
from sickbeard.common import USER_AGENT
from sickbeard import logger
from sickbeard.exceptions import ex
def sendNZB(nzb):
"""
Sends an NZB to SABnzbd via the API.
nzb: The NZBSearchResult object to send to SAB
"""
# set up a dict with the URL params in it
params = {}
if sickbeard.SAB_USERNAME != None:
params['ma_username'] = sickbeard.SAB_USERNAME
if sickbeard.SAB_PASSWORD != None:
params['ma_password'] = sickbeard.SAB_PASSWORD
if sickbeard.SAB_APIKEY != None:
params['apikey'] = sickbeard.SAB_APIKEY
if sickbeard.SAB_CATEGORY != None:
params['cat'] = sickbeard.SAB_CATEGORY
# if it aired recently make it high priority
for curEp in nzb.episodes:
if datetime.date.today() - curEp.airdate <= datetime.timedelta(days=7):
params['priority'] = 1
# if it's a normal result we just pass SAB the URL
if nzb.resultType == "nzb":
# for newzbin results send the ID to sab specifically
if nzb.provider.getID() == 'newzbin':
id = nzb.provider.getIDFromURL(nzb.url)
if not id:
logger.log("Unable to send NZB to sab, can't find ID in URL " + str(nzb.url), logger.ERROR)
return False
params['mode'] = 'addid'
params['name'] = id
else:
params['mode'] = 'addurl'
params['name'] = nzb.url
# if we get a raw data result we want to upload it to SAB
elif nzb.resultType == "nzbdata":
params['mode'] = 'addfile'
multiPartParams = {"nzbfile": (nzb.name + ".nzb", nzb.extraInfo[0])}
url = sickbeard.SAB_HOST + "api?" + urllib.urlencode(params)
logger.log(u"Sending NZB to SABnzbd")
logger.log(u"URL: " + url, logger.DEBUG)
try:
# if we have the URL to an NZB then we've built up the SAB API URL already so just call it
if nzb.resultType == "nzb":
f = urllib.urlopen(url)
# if we are uploading the NZB data to SAB then we need to build a little POST form and send it
elif nzb.resultType == "nzbdata":
cookies = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies),
MultipartPostHandler.MultipartPostHandler)
req = urllib2.Request(url,
multiPartParams,
headers={'User-Agent': USER_AGENT})
f = opener.open(req)
except (EOFError, IOError), e:
logger.log(u"Unable to connect to SAB: " + ex(e), logger.ERROR)
return False
except httplib.InvalidURL, e:
logger.log(u"Invalid SAB host, check your config: " + ex(e), logger.ERROR)
return False
# this means we couldn't open the connection or something just as bad
if f == None:
logger.log(u"No data returned from SABnzbd, NZB not sent", logger.ERROR)
return False
# if we opened the URL connection then read the result from SAB
try:
result = f.readlines()
except Exception, e:
logger.log(u"Error trying to get result from SAB, NZB not sent: " + ex(e), logger.ERROR)
return False
# SAB shouldn't return a blank result; this most likely (but not always) means that it timed out and didn't receive the NZB
if len(result) == 0:
logger.log(u"No data returned from SABnzbd, NZB not sent", logger.ERROR)
return False
# massage the result a little bit
sabText = result[0].strip()
logger.log(u"Result text from SAB: " + sabText, logger.DEBUG)
# do some crude parsing of the result text to determine what SAB said
if sabText == "ok":
logger.log(u"NZB sent to SAB successfully", logger.DEBUG)
return True
elif sabText == "Missing authentication":
logger.log(u"Incorrect username/password sent to SAB, NZB not sent", logger.ERROR)
return False
else:
logger.log(u"Unknown failure sending NZB to sab. Return text is: " + sabText, logger.ERROR)
return False
def _checkSabResponse(f):
try:
result = f.readlines()
except Exception, e:
logger.log(u"Error trying to get result from SAB" + ex(e), logger.ERROR)
return False, "Error from SAB"
if len(result) == 0:
logger.log(u"No data returned from SABnzbd, NZB not sent", logger.ERROR)
return False, "No data from SAB"
sabText = result[0].strip()
sabJson = {}
try:
sabJson = json.loads(sabText)
except ValueError, e:
pass
if sabText == "Missing authentication":
logger.log(u"Incorrect username/password sent to SAB", logger.ERROR)
return False, "Incorrect username/password sent to SAB"
elif 'error' in sabJson:
logger.log(sabJson['error'], logger.ERROR)
return False, sabJson['error']
else:
return True, sabText
def _sabURLOpenSimple(url):
try:
f = urllib.urlopen(url)
except (EOFError, IOError), e:
logger.log(u"Unable to connect to SAB: " + ex(e), logger.ERROR)
return False, "Unable to connect"
except httplib.InvalidURL, e:
logger.log(u"Invalid SAB host, check your config: " + ex(e), logger.ERROR)
return False, "Invalid SAB host"
if f == None:
logger.log(u"No data returned from SABnzbd", logger.ERROR)
return False, "No data returned from SABnzbd"
else:
return True, f
def getSabAccesMethod(host=None, username=None, password=None, apikey=None):
url = host + "api?mode=auth"
result, f = _sabURLOpenSimple(url)
if not result:
return False, f
result, sabText = _checkSabResponse(f)
if not result:
return False, sabText
return True, sabText
def testAuthentication(host=None, username=None, password=None, apikey=None):
"""
Sends a simple API request to SAB to determine if the given connection information is correct
host: The host where SAB is running (incl port)
username: The username to use for the HTTP request
password: The password to use for the HTTP request
apikey: The API key to provide to SAB
Returns: A tuple containing the success boolean and a message
"""
# build up the URL parameters
params = {}
params['mode'] = 'queue'
params['output'] = 'json'
params['ma_username'] = username
params['ma_password'] = password
params['apikey'] = apikey
url = host + "api?" + urllib.urlencode(params)
# send the test request
logger.log(u"SABnzbd test URL: " + url, logger.DEBUG)
result, f = _sabURLOpenSimple(url)
if not result:
return False, f
# check the result and determine if it's good or not
result, sabText = _checkSabResponse(f)
if not result:
return False, sabText
return True, "Success"
| gpl-3.0 |
40223249-1/2015cd_midterm2 | static/Brython3.1.1-20150328-091302/Lib/unittest/runner.py | 637 | 7485 | """Running tests"""
import sys
import time
import warnings
from . import result
from .signals import registerResult
__unittest = True
class _WritelnDecorator(object):
"""Used to decorate file-like objects with a handy 'writeln' method"""
def __init__(self,stream):
self.stream = stream
def __getattr__(self, attr):
if attr in ('stream', '__getstate__'):
raise AttributeError(attr)
return getattr(self.stream,attr)
def writeln(self, arg=None):
if arg:
self.write(arg)
self.write('\n') # text-mode streams translate to \r\n if needed
class TextTestResult(result.TestResult):
"""A test result class that can print formatted text results to a stream.
Used by TextTestRunner.
"""
separator1 = '=' * 70
separator2 = '-' * 70
def __init__(self, stream, descriptions, verbosity):
super(TextTestResult, self).__init__(stream, descriptions, verbosity)
self.stream = stream
self.showAll = verbosity > 1
self.dots = verbosity == 1
self.descriptions = descriptions
def getDescription(self, test):
doc_first_line = test.shortDescription()
if self.descriptions and doc_first_line:
return '\n'.join((str(test), doc_first_line))
else:
return str(test)
def startTest(self, test):
super(TextTestResult, self).startTest(test)
if self.showAll:
self.stream.write(self.getDescription(test))
self.stream.write(" ... ")
self.stream.flush()
def addSuccess(self, test):
super(TextTestResult, self).addSuccess(test)
if self.showAll:
self.stream.writeln("ok")
elif self.dots:
self.stream.write('.')
self.stream.flush()
def addError(self, test, err):
super(TextTestResult, self).addError(test, err)
if self.showAll:
self.stream.writeln("ERROR")
elif self.dots:
self.stream.write('E')
self.stream.flush()
def addFailure(self, test, err):
super(TextTestResult, self).addFailure(test, err)
if self.showAll:
self.stream.writeln("FAIL")
elif self.dots:
self.stream.write('F')
self.stream.flush()
def addSkip(self, test, reason):
super(TextTestResult, self).addSkip(test, reason)
if self.showAll:
self.stream.writeln("skipped {0!r}".format(reason))
elif self.dots:
self.stream.write("s")
self.stream.flush()
def addExpectedFailure(self, test, err):
super(TextTestResult, self).addExpectedFailure(test, err)
if self.showAll:
self.stream.writeln("expected failure")
elif self.dots:
self.stream.write("x")
self.stream.flush()
def addUnexpectedSuccess(self, test):
super(TextTestResult, self).addUnexpectedSuccess(test)
if self.showAll:
self.stream.writeln("unexpected success")
elif self.dots:
self.stream.write("u")
self.stream.flush()
def printErrors(self):
if self.dots or self.showAll:
self.stream.writeln()
self.printErrorList('ERROR', self.errors)
self.printErrorList('FAIL', self.failures)
def printErrorList(self, flavour, errors):
for test, err in errors:
self.stream.writeln(self.separator1)
self.stream.writeln("%s: %s" % (flavour,self.getDescription(test)))
self.stream.writeln(self.separator2)
self.stream.writeln("%s" % err)
class TextTestRunner(object):
"""A test runner class that displays results in textual form.
It prints out the names of tests as they are run, errors as they
occur, and a summary of the results at the end of the test run.
"""
resultclass = TextTestResult
def __init__(self, stream=None, descriptions=True, verbosity=1,
failfast=False, buffer=False, resultclass=None, warnings=None):
if stream is None:
stream = sys.stderr
self.stream = _WritelnDecorator(stream)
self.descriptions = descriptions
self.verbosity = verbosity
self.failfast = failfast
self.buffer = buffer
self.warnings = warnings
if resultclass is not None:
self.resultclass = resultclass
def _makeResult(self):
return self.resultclass(self.stream, self.descriptions, self.verbosity)
def run(self, test):
"Run the given test case or test suite."
result = self._makeResult()
registerResult(result)
result.failfast = self.failfast
result.buffer = self.buffer
with warnings.catch_warnings():
if self.warnings:
# if self.warnings is set, use it to filter all the warnings
warnings.simplefilter(self.warnings)
# if the filter is 'default' or 'always', special-case the
# warnings from the deprecated unittest methods to show them
# no more than once per module, because they can be fairly
# noisy. The -Wd and -Wa flags can be used to bypass this
# only when self.warnings is None.
if self.warnings in ['default', 'always']:
warnings.filterwarnings('module',
category=DeprecationWarning,
message='Please use assert\w+ instead.')
startTime = time.time()
startTestRun = getattr(result, 'startTestRun', None)
if startTestRun is not None:
startTestRun()
try:
test(result)
finally:
stopTestRun = getattr(result, 'stopTestRun', None)
if stopTestRun is not None:
stopTestRun()
stopTime = time.time()
timeTaken = stopTime - startTime
result.printErrors()
if hasattr(result, 'separator2'):
self.stream.writeln(result.separator2)
run = result.testsRun
self.stream.writeln("Ran %d test%s in %.3fs" %
(run, run != 1 and "s" or "", timeTaken))
self.stream.writeln()
expectedFails = unexpectedSuccesses = skipped = 0
try:
results = map(len, (result.expectedFailures,
result.unexpectedSuccesses,
result.skipped))
except AttributeError:
pass
else:
expectedFails, unexpectedSuccesses, skipped = results
infos = []
if not result.wasSuccessful():
self.stream.write("FAILED")
failed, errored = len(result.failures), len(result.errors)
if failed:
infos.append("failures=%d" % failed)
if errored:
infos.append("errors=%d" % errored)
else:
self.stream.write("OK")
if skipped:
infos.append("skipped=%d" % skipped)
if expectedFails:
infos.append("expected failures=%d" % expectedFails)
if unexpectedSuccesses:
infos.append("unexpected successes=%d" % unexpectedSuccesses)
if infos:
self.stream.writeln(" (%s)" % (", ".join(infos),))
else:
self.stream.write("\n")
return result
| agpl-3.0 |
apixandru/intellij-community | python/helpers/pydev/third_party/pep8/lib2to3/lib2to3/fixes/fix_print.py | 326 | 2865 | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for print.
Change:
'print' into 'print()'
'print ...' into 'print(...)'
'print ... ,' into 'print(..., end=" ")'
'print >>x, ...' into 'print(..., file=x)'
No changes are applied if print_function is imported from __future__
"""
# Local imports
from .. import patcomp
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, Comma, String, is_tuple
parend_expr = patcomp.compile_pattern(
"""atom< '(' [atom|STRING|NAME] ')' >"""
)
class FixPrint(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
simple_stmt< any* bare='print' any* > | print_stmt
"""
def transform(self, node, results):
assert results
bare_print = results.get("bare")
if bare_print:
# Special-case print all by itself
bare_print.replace(Call(Name(u"print"), [],
prefix=bare_print.prefix))
return
assert node.children[0] == Name(u"print")
args = node.children[1:]
if len(args) == 1 and parend_expr.match(args[0]):
# We don't want to keep sticking parens around an
# already-parenthesised expression.
return
sep = end = file = None
if args and args[-1] == Comma():
args = args[:-1]
end = " "
if args and args[0] == pytree.Leaf(token.RIGHTSHIFT, u">>"):
assert len(args) >= 2
file = args[1].clone()
args = args[3:] # Strip a possible comma after the file expression
# Now synthesize a print(args, sep=..., end=..., file=...) node.
l_args = [arg.clone() for arg in args]
if l_args:
l_args[0].prefix = u""
if sep is not None or end is not None or file is not None:
if sep is not None:
self.add_kwarg(l_args, u"sep", String(repr(sep)))
if end is not None:
self.add_kwarg(l_args, u"end", String(repr(end)))
if file is not None:
self.add_kwarg(l_args, u"file", file)
n_stmt = Call(Name(u"print"), l_args)
n_stmt.prefix = node.prefix
return n_stmt
def add_kwarg(self, l_nodes, s_kwd, n_expr):
# XXX All this prefix-setting may lose comments (though rarely)
n_expr.prefix = u""
n_argument = pytree.Node(self.syms.argument,
(Name(s_kwd),
pytree.Leaf(token.EQUAL, u"="),
n_expr))
if l_nodes:
l_nodes.append(Comma())
n_argument.prefix = u" "
l_nodes.append(n_argument)
| apache-2.0 |
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247972723/PyQt4/QtGui/__init__/QVector4D.py | 2 | 6594 | # encoding: utf-8
# module PyQt4.QtGui
# from /usr/lib/python2.7/dist-packages/PyQt4/QtGui.so
# by generator 1.135
# no doc
# imports
import PyQt4.QtCore as __PyQt4_QtCore
class QVector4D(): # skipped bases: <type 'sip.simplewrapper'>
"""
QVector4D()
QVector4D(float, float, float, float)
QVector4D(QPoint)
QVector4D(QPointF)
QVector4D(QVector2D)
QVector4D(QVector2D, float, float)
QVector4D(QVector3D)
QVector4D(QVector3D, float)
QVector4D(QVector4D)
"""
def dotProduct(self, QVector4D, QVector4D_1): # real signature unknown; restored from __doc__
""" QVector4D.dotProduct(QVector4D, QVector4D) -> float """
return 0.0
def isNull(self): # real signature unknown; restored from __doc__
""" QVector4D.isNull() -> bool """
return False
def length(self): # real signature unknown; restored from __doc__
""" QVector4D.length() -> float """
return 0.0
def lengthSquared(self): # real signature unknown; restored from __doc__
""" QVector4D.lengthSquared() -> float """
return 0.0
def normalize(self): # real signature unknown; restored from __doc__
""" QVector4D.normalize() """
pass
def normalized(self): # real signature unknown; restored from __doc__
""" QVector4D.normalized() -> QVector4D """
return QVector4D
def setW(self, p_float): # real signature unknown; restored from __doc__
""" QVector4D.setW(float) """
pass
def setX(self, p_float): # real signature unknown; restored from __doc__
""" QVector4D.setX(float) """
pass
def setY(self, p_float): # real signature unknown; restored from __doc__
""" QVector4D.setY(float) """
pass
def setZ(self, p_float): # real signature unknown; restored from __doc__
""" QVector4D.setZ(float) """
pass
def toPoint(self): # real signature unknown; restored from __doc__
""" QVector4D.toPoint() -> QPoint """
pass
def toPointF(self): # real signature unknown; restored from __doc__
""" QVector4D.toPointF() -> QPointF """
pass
def toVector2D(self): # real signature unknown; restored from __doc__
""" QVector4D.toVector2D() -> QVector2D """
return QVector2D
def toVector2DAffine(self): # real signature unknown; restored from __doc__
""" QVector4D.toVector2DAffine() -> QVector2D """
return QVector2D
def toVector3D(self): # real signature unknown; restored from __doc__
""" QVector4D.toVector3D() -> QVector3D """
return QVector3D
def toVector3DAffine(self): # real signature unknown; restored from __doc__
""" QVector4D.toVector3DAffine() -> QVector3D """
return QVector3D
def w(self): # real signature unknown; restored from __doc__
""" QVector4D.w() -> float """
return 0.0
def x(self): # real signature unknown; restored from __doc__
""" QVector4D.x() -> float """
return 0.0
def y(self): # real signature unknown; restored from __doc__
""" QVector4D.y() -> float """
return 0.0
def z(self): # real signature unknown; restored from __doc__
""" QVector4D.z() -> float """
return 0.0
def __add__(self, y): # real signature unknown; restored from __doc__
""" x.__add__(y) <==> x+y """
pass
def __div__(self, y): # real signature unknown; restored from __doc__
""" x.__div__(y) <==> x/y """
pass
def __eq__(self, y): # real signature unknown; restored from __doc__
""" x.__eq__(y) <==> x==y """
pass
def __ge__(self, y): # real signature unknown; restored from __doc__
""" x.__ge__(y) <==> x>=y """
pass
def __gt__(self, y): # real signature unknown; restored from __doc__
""" x.__gt__(y) <==> x>y """
pass
def __iadd__(self, y): # real signature unknown; restored from __doc__
""" x.__iadd__(y) <==> x+=y """
pass
def __idiv__(self, y): # real signature unknown; restored from __doc__
""" x.__idiv__(y) <==> x/=y """
pass
def __imul__(self, y): # real signature unknown; restored from __doc__
""" x.__imul__(y) <==> x*=y """
pass
def __init__(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
pass
def __isub__(self, y): # real signature unknown; restored from __doc__
""" x.__isub__(y) <==> x-=y """
pass
def __itruediv__(self, y): # real signature unknown; restored from __doc__
""" x.__itruediv__(y) <==> x/y """
pass
def __le__(self, y): # real signature unknown; restored from __doc__
""" x.__le__(y) <==> x<=y """
pass
def __lt__(self, y): # real signature unknown; restored from __doc__
""" x.__lt__(y) <==> x<y """
pass
def __mul__(self, y): # real signature unknown; restored from __doc__
""" x.__mul__(y) <==> x*y """
pass
def __neg__(self): # real signature unknown; restored from __doc__
""" x.__neg__() <==> -x """
pass
def __ne__(self, y): # real signature unknown; restored from __doc__
""" x.__ne__(y) <==> x!=y """
pass
def __radd__(self, y): # real signature unknown; restored from __doc__
""" x.__radd__(y) <==> y+x """
pass
def __rdiv__(self, y): # real signature unknown; restored from __doc__
""" x.__rdiv__(y) <==> y/x """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
def __rmul__(self, y): # real signature unknown; restored from __doc__
""" x.__rmul__(y) <==> y*x """
pass
def __rsub__(self, y): # real signature unknown; restored from __doc__
""" x.__rsub__(y) <==> y-x """
pass
def __rtruediv__(self, y): # real signature unknown; restored from __doc__
""" x.__rtruediv__(y) <==> y/x """
pass
def __sub__(self, y): # real signature unknown; restored from __doc__
""" x.__sub__(y) <==> x-y """
pass
def __truediv__(self, y): # real signature unknown; restored from __doc__
""" x.__truediv__(y) <==> x/y """
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
| gpl-2.0 |
jiumem/cgt | cgt/tests/test_optimizers.py | 14 | 3817 | import cgt
import cgt.nn as nn
from cgt.tests import across_configs
import numpy as np
# Torch values obtained via this script: https://gist.github.com/ebenolson/931e879ed38f257253d2
torch_values = {'sgd': [0.81707280688755, 0.6648326359915, 0.5386151140949],
'momentum': [0.817072808743, 0.664832651615, 0.538615107536],
'nesterov_momentum': [0.817072808743, 0.664832651615, 0.538615107536],
'adagrad': [0.725280165672, 0.621351599693, 0.546548962593],
'rmsprop': [0.940282583237, 0.920392453671, 0.903152763844],
'adadelta': [0.991312503815, 0.988132655621, 0.985075354576]}
scales = [0.1, 0.2, 0.3]
def f(X, scale):
return (scale*X**2).sum()
@across_configs
def test_sgd():
results = []
for scale in scales:
A = cgt.shared(1.0)
B = cgt.shared(1.0)
updates = nn.sgd(f(A, scale) + f(B, scale), [A, B], learning_rate=0.1)
do_update = cgt.function([], [], updates=updates)
for _ in range(10):
do_update()
assert np.allclose(A.op.get_value(), B.op.get_value())
results.append(A.op.get_value().copy())
assert np.allclose(results, torch_values['sgd'])
@across_configs
def test_momentum():
results = []
for scale in scales:
A = cgt.shared(1.0)
B = cgt.shared(1.0)
updates = nn.momentum(f(A, scale) + f(B, scale), [A, B], learning_rate=0.1, momentum=0.5)
do_update = cgt.function([], [], updates=updates)
for _ in range(10):
do_update()
assert np.allclose(A.op.get_value(), B.op.get_value())
results.append(A.op.get_value().copy())
assert np.allclose(results, torch_values['momentum'])
@across_configs
def test_nesterov_momentum():
results = []
for scale in scales:
A = cgt.shared(1.0)
B = cgt.shared(1.0)
updates = nn.momentum(f(A, scale) + f(B, scale), [A, B], learning_rate=0.1, momentum=0.5)
do_update = cgt.function([], [], updates=updates)
for _ in range(10):
do_update()
assert np.allclose(A.op.get_value(), B.op.get_value())
results.append(A.op.get_value().copy())
assert np.allclose(results, torch_values['nesterov_momentum'])
@across_configs
def test_adagrad():
results = []
for scale in scales:
A = cgt.shared(1.0)
B = cgt.shared(1.0)
updates = nn.adagrad(f(A, scale) + f(B, scale), [A, B], learning_rate=0.1)
do_update = cgt.function([], [], updates=updates)
for _ in range(10):
do_update()
assert np.allclose(A.op.get_value(), B.op.get_value())
results.append(A.op.get_value().copy())
assert np.allclose(results, torch_values['adagrad'])
@across_configs
def test_rmsprop():
results = []
for scale in scales:
A = cgt.shared(1.0)
B = cgt.shared(1.0)
updates = nn.rmsprop(f(A, scale) + f(B, scale), [A, B], learning_rate=0.01)
do_update = cgt.function([], [], updates=updates)
for _ in range(10):
do_update()
assert np.allclose(A.op.get_value(), B.op.get_value())
results.append(A.op.get_value().copy())
assert np.allclose(results, torch_values['rmsprop'])
@across_configs
def test_adadelta():
results = []
for scale in scales:
A = cgt.shared(1.0)
B = cgt.shared(1.0)
updates = nn.adadelta(f(A, scale) + f(B, scale), [A, B])
do_update = cgt.function([], [], updates=updates)
for _ in range(10):
do_update()
assert np.allclose(A.op.get_value(), B.op.get_value())
results.append(A.op.get_value().copy())
assert np.allclose(results, torch_values['adadelta'])
if __name__ == "__main__":
import nose
nose.runmodule()
| mit |
silvau/Addons_Odoo | cfd_mx/wizard/reporte_mensual_wizard.py | 1 | 4286 | # -*- encoding: utf-8 -*-
############################################################################
# Module for OpenERP, Open Source Management Solution
#
# Copyright (c) 2013 Zenpar - http://www.zeval.com.mx/
# All Rights Reserved.
############################################################################
# Coded by: jsolorzano@zeval.com.mx
# Manager: Orlando Zentella ozentella@zeval.com.mx
############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
import base64
from datetime import date, datetime
from calendar import monthrange
class reporte_mensual_wizard(osv.TransientModel):
_name = 'cfd_mx.reporte.mensual.wizard'
def _invoice_to_txt(self, invoice):
campos = [
invoice.partner_id.vat or '',
invoice.serie or '',
invoice.internal_number,
str(invoice.anoAprobacion)+str(invoice.noAprobacion),
invoice.date_invoice,
"%.2f"%invoice.amount_total,
"%.2f"%invoice.amount_tax,
(invoice.state == 'cancel' and '0') or '1',
(invoice.type == 'out_invoice' and 'I') or (invoice.type == 'out_refund' and 'E'),
'',
'',
''
]
return '|' + '|'.join(campos) + '|'
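# For illustration only (hypothetical values, not from the original module), a
# generated record is a single pipe-delimited line such as:
# |XAXX010101000|A|1234|20091234567|2013-05-14|1160.00|160.00|1|I||||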
def action_reporte(self, cr, uid, ids, context=None):
this = self.browse(cr, uid, ids)[0]
txt = "hola"
out = base64.encodestring(txt)
vat = self.pool.get("res.users").browse(cr, uid, uid).partner_id.vat
fname = "1%s%02d%4d"%(vat, this.mes, this.ano)
fecha_inicio = "%d-%02d-01"%(this.ano, this.mes)
ultimo_dia_mes = monthrange(this.ano, this.mes)[1]
fecha_fin = "%d-%02d-%d"%(this.ano, this.mes, ultimo_dia_mes)
invoices = self.pool.get('account.invoice').search(cr, uid, [
('date_invoice', '>=', fecha_inicio),
('date_invoice', '<=', fecha_fin),
('state', 'in', ['cancel','open','paid'])
])
txt = ''
for invoice in self.pool.get('account.invoice').browse(cr, uid, invoices):
txt += self._invoice_to_txt(invoice) + "\n"
out = base64.encodestring(txt)
self.write(cr, uid, ids, {'state':'get', 'data':out, 'fname': fname}, context=context)
return {
'type': 'ir.actions.act_window',
'name': 'Reporte mensual SAT',
'res_model': 'cfd_mx.reporte.mensual.wizard',
'view_mode': 'form',
'view_type': 'form',
'res_id': this.id,
'views': [(False, 'form')],
'target': 'new',
}
_columns = {
'state': fields.selection((
('choose','choose'), # choose the period
('get','get'), # get the generated file
)),
'mes': fields.selection([
(1,'Enero'),
(2,'Febrero'),
(3,'Marzo'),
(4,'Abril'),
(5,'Mayo'),
(6,'Junio'),
(7,'Julio'),
(8,'Agosto'),
(9,'Septiembre'),
(10,'Octubre'),
(11,'Noviembre'),
(12,'Diciembre'),
], string="Mes"),
'ano': fields.integer("Año"),
'data': fields.binary('Archivo', readonly=True),
'fname': fields.char("Nombre", size=128)
}
_defaults = {
'state' : 'choose',
'mes': 1,
'ano': lambda *a: int(date.today().strftime("%Y"))
}
| gpl-2.0 |
hocinebendou/bika.gsoc | bika/lims/browser/accreditation.py | 3 | 4223 | # encoding=utf-8
from Products.CMFPlone.utils import safe_unicode
from bika.lims.controlpanel.bika_analysisservices import AnalysisServicesView
from bika.lims import bikaMessageFactory as _
from bika.lims.utils import t
from plone.app.content.browser.interfaces import IFolderContentsView
from zope.interface import implements
class AccreditationView(AnalysisServicesView):
"""
>>> portal = layer['portal']
>>> portal_url = portal.absolute_url()
>>> from plone.app.testing import SITE_OWNER_NAME
>>> from plone.app.testing import SITE_OWNER_PASSWORD
>>> browser = layer['getBrowser'](portal)
>>> browser.open(portal_url+"/accreditation")
>>> 'SAI is the' in browser.contents
True
"""
implements(IFolderContentsView)
def __init__(self, context, request):
super(AccreditationView, self).__init__(context, request)
self.contentFilter = {'portal_type': 'AnalysisService',
'sort_on': 'sortable_title',
'getAccredited': True,
'inactive_state': 'active'}
self.context_actions = {}
self.icon = self.portal_url + "/++resource++bika.lims.images/accredited_big.png"
self.title = self.context.translate(_("Accreditation"))
lab = context.bika_setup.laboratory
accredited = lab.getLaboratoryAccredited()
self.mapping = {'lab_is_accredited': accredited,
'lab_name': safe_unicode(lab.getName()),
'lab_country': safe_unicode(lab.getPhysicalAddress().get('country', '')),
'confidence': safe_unicode(lab.getConfidence()),
'accreditation_body_abbr': safe_unicode(lab.getAccreditationBody()),
'accreditation_body_name': safe_unicode(lab.getAccreditationBodyURL()),
'accreditation_standard': safe_unicode(lab.getAccreditation()),
'accreditation_reference': safe_unicode(lab.getAccreditationReference())
}
if accredited:
self.description = t(_(safe_unicode(lab.getAccreditationPageHeader()),
mapping=self.mapping
))
else:
self.description = t(_("The lab is not accredited, or accreditation has "
"not been configured. "))
msg = t(_("All Accredited analysis services are listed here."))
self.description = "%s<p><br/>%s</p>" % (self.description, msg)
self.show_select_column = False
request.set('disable_border', 1)
self.columns = {
'Title': {'title': _('Service'), 'sortable': False},
'Keyword': {'title': _('Keyword'), 'sortable': False},
'Category': {'title': _('Category'), 'sortable': False},
'Department': {'title': _('Department'), 'sortable': False},
'Instrument': {'title': _('Instrument'), 'sortable': False},
'Unit': {'title': _('Unit'), 'sortable': False},
'Price': {'title': _('Price'), 'sortable': False},
'MaxTimeAllowed': {'title': _('Max Time'), 'sortable': False},
'DuplicateVariation': {'title': _('Dup Var'), 'sortable': False},
'Calculation': {'title': _('Calculation'), 'sortable': False},
}
self.review_states = [
{'id': 'default',
'title': _('All'),
'contentFilter': {},
'transitions': [{'id': 'empty'}, ], # none
'columns': ['Title',
'Keyword',
'Category',
'Price',
'MaxTimeAllowed',
'DuplicateVariation',
],
},
]
if not self.context.bika_setup.getShowPrices():
self.review_states[0]['columns'].remove('Price')
def selected_cats(self, items):
"""return a list of all categories with accredited services
"""
cats = []
for item in items:
if 'category' in item and item['category'] not in cats:
cats.append(item['category'])
return cats
| mit |
sem-geologist/hyperspy | hyperspy/io_plugins/netcdf.py | 4 | 6678 | # -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import os
import logging
import numpy as np
_logger = logging.getLogger(__name__)
no_netcdf = False
try:
from netCDF4 import Dataset
which_netcdf = 'netCDF4'
except BaseException:
try:
from netCDF3 import Dataset
which_netcdf = 'netCDF3'
except BaseException:
try:
from Scientific.IO.NetCDF import NetCDFFile as Dataset
which_netcdf = 'Scientific Python'
except BaseException:
no_netcdf = True
# Plugin characteristics
# ----------------------
format_name = 'netCDF'
description = ''
full_support = True
file_extensions = ('nc', 'NC')
default_extension = 0
# Writing features
writes = False
# ----------------------
attrib2netcdf = \
{
'energyorigin': 'energy_origin',
'energyscale': 'energy_scale',
'energyunits': 'energy_units',
'xorigin': 'x_origin',
'xscale': 'x_scale',
'xunits': 'x_units',
'yorigin': 'y_origin',
'yscale': 'y_scale',
'yunits': 'y_units',
'zorigin': 'z_origin',
'zscale': 'z_scale',
'zunits': 'z_units',
'exposure': 'exposure',
'title': 'title',
'binning': 'binning',
'readout_frequency': 'readout_frequency',
'ccd_height': 'ccd_height',
'blanking': 'blanking'
}
acquisition2netcdf = \
{
'exposure': 'exposure',
'binning': 'binning',
'readout_frequency': 'readout_frequency',
'ccd_height': 'ccd_height',
'blanking': 'blanking',
'gain': 'gain',
'pppc': 'pppc',
}
treatments2netcdf = \
{
'dark_current': 'dark_current',
'readout': 'readout',
}
def file_reader(filename, *args, **kwds):
if no_netcdf is True:
raise ImportError("No netCDF library installed. "
"To read EELSLab netcdf files install "
"one of the following packages:"
"netCDF4, netCDF3, netcdf, scientific")
ncfile = Dataset(filename, 'r')
    if (hasattr(ncfile, 'file_format_version') and
            ncfile.file_format_version == 'EELSLab 0.1'):
        dictionary = nc_hyperspy_reader_0dot1(
            ncfile,
            filename,
            *args,
            **kwds)
    else:
        # Close the file handle before reporting the unsupported format.
        ncfile.close()
        raise IOError('Unsupported netCDF file')
    return dictionary,
def nc_hyperspy_reader_0dot1(ncfile, filename, *args, **kwds):
calibration_dict, acquisition_dict, treatments_dict = {}, {}, {}
dc = ncfile.variables['data_cube']
data = dc[:]
    # Copy the acquisition history over when the file provides one.
    if hasattr(ncfile, 'history'):
        calibration_dict['history'] = eval(ncfile.history)
for attrib in attrib2netcdf.items():
if hasattr(dc, attrib[1]):
value = eval('dc.' + attrib[1])
if isinstance(value, np.ndarray):
calibration_dict[attrib[0]] = value[0]
else:
calibration_dict[attrib[0]] = value
else:
_logger.warning("Warning: the attribute '%s' is not defined in "
"the file '%s'", attrib[0], filename)
for attrib in acquisition2netcdf.items():
if hasattr(dc, attrib[1]):
value = eval('dc.' + attrib[1])
if isinstance(value, np.ndarray):
acquisition_dict[attrib[0]] = value[0]
else:
acquisition_dict[attrib[0]] = value
else:
_logger.warning("Warning: the attribute '%s' is not defined in "
"the file '%s'", attrib[0], filename)
for attrib in treatments2netcdf.items():
if hasattr(dc, attrib[1]):
treatments_dict[attrib[0]] = eval('dc.' + attrib[1])
else:
_logger.warning("Warning: the attribute '%s' is not defined in "
"the file '%s'", attrib[0], filename)
original_metadata = {'record_by': ncfile.type,
'calibration': calibration_dict,
'acquisition': acquisition_dict,
'treatments': treatments_dict}
ncfile.close()
# Now we'll map some parameters
record_by = 'image' if original_metadata[
'record_by'] == 'image' else 'spectrum'
if record_by == 'image':
dim = len(data.shape)
names = ['Z', 'Y', 'X'][3 - dim:]
scaleskeys = ['zscale', 'yscale', 'xscale']
originskeys = ['zorigin', 'yorigin', 'xorigin']
unitskeys = ['zunits', 'yunits', 'xunits']
elif record_by == 'spectrum':
dim = len(data.shape)
names = ['Y', 'X', 'Energy'][3 - dim:]
scaleskeys = ['yscale', 'xscale', 'energyscale']
originskeys = ['yorigin', 'xorigin', 'energyorigin']
unitskeys = ['yunits', 'xunits', 'energyunits']
# The images are recorded in the Fortran order
data = data.T.copy()
try:
scales = [calibration_dict[key] for key in scaleskeys[3 - dim:]]
except KeyError:
scales = [1, 1, 1][3 - dim:]
try:
origins = [calibration_dict[key] for key in originskeys[3 - dim:]]
except KeyError:
origins = [0, 0, 0][3 - dim:]
try:
units = [calibration_dict[key] for key in unitskeys[3 - dim:]]
except KeyError:
units = ['', '', '']
axes = [
{
'size': int(data.shape[i]),
'index_in_array': i,
'name': names[i],
'scale': scales[i],
'offset': origins[i],
'units': units[i], }
for i in range(dim)]
metadata = {'General': {}, 'Signal': {}}
metadata['General']['original_filename'] = os.path.split(filename)[1]
metadata["Signal"]['record_by'] = record_by
metadata["General"]['signal_type'] = ""
dictionary = {
'data': data,
'axes': axes,
'metadata': metadata,
'original_metadata': original_metadata,
}
return dictionary
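# Hedged usage sketch, not part of the plugin: file_reader() above returns a
# one-element tuple holding the dictionary built by nc_hyperspy_reader_0dot1().
# The helper name and file name are illustrative and assume an EELSLab 0.1
# netCDF file plus one of the netCDF backends listed at the top of this module.
def _example_read(path='old_eelslab_file.nc'):
    signal_dict, = file_reader(path)
    print('data shape: %s' % (signal_dict['data'].shape,))
    for axis in signal_dict['axes']:
        print('%(name)s: scale=%(scale)s offset=%(offset)s units=%(units)s' % axis)
    return signal_dict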
| gpl-3.0 |
blackzw/openwrt_sdk_dev1 | staging_dir/target-mips_r2_uClibc-0.9.33.2/usr/lib/python2.7/idlelib/ClassBrowser.py | 91 | 6369 | """Class browser.
XXX TO DO:
- reparse when source changed (maybe just a button would be OK?)
(or recheck on window popup)
- add popup menu with more options (e.g. doc strings, base classes, imports)
- show function argument list? (have to do pattern matching on source)
- should the classes and methods lists also be in the module's menu bar?
- add base classes to class browser tree
"""
import os
import sys
import pyclbr
from idlelib import PyShell
from idlelib.WindowList import ListedToplevel
from idlelib.TreeWidget import TreeNode, TreeItem, ScrolledCanvas
from idlelib.configHandler import idleConf
class ClassBrowser:
def __init__(self, flist, name, path):
# XXX This API should change, if the file doesn't end in ".py"
# XXX the code here is bogus!
self.name = name
self.file = os.path.join(path[0], self.name + ".py")
self.init(flist)
def close(self, event=None):
self.top.destroy()
self.node.destroy()
def init(self, flist):
self.flist = flist
# reset pyclbr
pyclbr._modules.clear()
# create top
self.top = top = ListedToplevel(flist.root)
top.protocol("WM_DELETE_WINDOW", self.close)
top.bind("<Escape>", self.close)
self.settitle()
top.focus_set()
# create scrolled canvas
theme = idleConf.GetOption('main','Theme','name')
background = idleConf.GetHighlight(theme, 'normal')['background']
sc = ScrolledCanvas(top, bg=background, highlightthickness=0, takefocus=1)
sc.frame.pack(expand=1, fill="both")
item = self.rootnode()
self.node = node = TreeNode(sc.canvas, None, item)
node.update()
node.expand()
def settitle(self):
self.top.wm_title("Class Browser - " + self.name)
self.top.wm_iconname("Class Browser")
def rootnode(self):
return ModuleBrowserTreeItem(self.file)
class ModuleBrowserTreeItem(TreeItem):
def __init__(self, file):
self.file = file
def GetText(self):
return os.path.basename(self.file)
def GetIconName(self):
return "python"
def GetSubList(self):
sublist = []
for name in self.listclasses():
item = ClassBrowserTreeItem(name, self.classes, self.file)
sublist.append(item)
return sublist
def OnDoubleClick(self):
if os.path.normcase(self.file[-3:]) != ".py":
return
if not os.path.exists(self.file):
return
PyShell.flist.open(self.file)
def IsExpandable(self):
return os.path.normcase(self.file[-3:]) == ".py"
def listclasses(self):
dir, file = os.path.split(self.file)
name, ext = os.path.splitext(file)
if os.path.normcase(ext) != ".py":
return []
try:
dict = pyclbr.readmodule_ex(name, [dir] + sys.path)
except ImportError, msg:
return []
items = []
self.classes = {}
for key, cl in dict.items():
if cl.module == name:
s = key
if hasattr(cl, 'super') and cl.super:
supers = []
for sup in cl.super:
if type(sup) is type(''):
sname = sup
else:
sname = sup.name
if sup.module != cl.module:
sname = "%s.%s" % (sup.module, sname)
supers.append(sname)
s = s + "(%s)" % ", ".join(supers)
items.append((cl.lineno, s))
self.classes[s] = cl
items.sort()
list = []
for item, s in items:
list.append(s)
return list
class ClassBrowserTreeItem(TreeItem):
def __init__(self, name, classes, file):
self.name = name
self.classes = classes
self.file = file
try:
self.cl = self.classes[self.name]
except (IndexError, KeyError):
self.cl = None
self.isfunction = isinstance(self.cl, pyclbr.Function)
def GetText(self):
if self.isfunction:
return "def " + self.name + "(...)"
else:
return "class " + self.name
def GetIconName(self):
if self.isfunction:
return "python"
else:
return "folder"
def IsExpandable(self):
if self.cl:
try:
return not not self.cl.methods
except AttributeError:
return False
def GetSubList(self):
if not self.cl:
return []
sublist = []
for name in self.listmethods():
item = MethodBrowserTreeItem(name, self.cl, self.file)
sublist.append(item)
return sublist
def OnDoubleClick(self):
if not os.path.exists(self.file):
return
edit = PyShell.flist.open(self.file)
if hasattr(self.cl, 'lineno'):
lineno = self.cl.lineno
edit.gotoline(lineno)
def listmethods(self):
if not self.cl:
return []
items = []
for name, lineno in self.cl.methods.items():
items.append((lineno, name))
items.sort()
list = []
for item, name in items:
list.append(name)
return list
class MethodBrowserTreeItem(TreeItem):
def __init__(self, name, cl, file):
self.name = name
self.cl = cl
self.file = file
def GetText(self):
return "def " + self.name + "(...)"
def GetIconName(self):
return "python" # XXX
def IsExpandable(self):
return 0
def OnDoubleClick(self):
if not os.path.exists(self.file):
return
edit = PyShell.flist.open(self.file)
edit.gotoline(self.cl.methods[self.name])
def main():
try:
file = __file__
except NameError:
file = sys.argv[0]
if sys.argv[1:]:
file = sys.argv[1]
else:
file = sys.argv[0]
dir, file = os.path.split(file)
name = os.path.splitext(file)[0]
ClassBrowser(PyShell.flist, name, [dir])
if sys.stdin is sys.__stdin__:
mainloop()
if __name__ == "__main__":
main()
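# Hedged sketch, not part of idlelib: the browser above is driven by
# pyclbr.readmodule_ex(), which returns pyclbr.Class/pyclbr.Function
# descriptions keyed by name. A standalone look at that call, with an
# illustrative helper name; module name and search path are supplied by
# the caller.
def _list_toplevel_defs(module_name, search_path):
    descriptions = pyclbr.readmodule_ex(module_name, search_path + sys.path)
    for name, desc in sorted(descriptions.items(),
                             key=lambda pair: pair[1].lineno):
        kind = "def" if isinstance(desc, pyclbr.Function) else "class"
        print("%s %s (line %d)" % (kind, name, desc.lineno))

# Example (path is illustrative):
# _list_toplevel_defs("ClassBrowser", [os.path.dirname(__file__)])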
| gpl-2.0 |
btrd/anomaly_detection | kmean/kMeanClusterer.py | 1 | 4409 | import random
import math
import json
import numpy as np
from normalization import Normalizer
from cluster import Cluster
class KMeanClusterer():
def __init__(self, observations, classes, k, n):
self.FIELDSNOTUSE = 2
self.clusterNumber = k
self.clusters = []
self.observations = observations
self.n = n
self.initialization()
self.lastClusters = []
self.classes = classes
while self.assignement():
for c in self.clusters:
c.updateCentroid()
for cluster in self.clusters:
if cluster.observations.shape[0] == 0:
self.lastClusters.append([[],[]])
else:
cluster.sortObservations()
corrects, anomalies = cluster.getAnomalies(self.n)
classCorrects = corrects[:,-2].astype(int)
if anomalies == [] :
classAnomalies = []
else:
classAnomalies = anomalies[:,-2].astype(int)
tmptab = [np.bincount(classAnomalies), np.bincount(classCorrects)]
self.lastClusters.append(tmptab)
def initialization(self):
for i in xrange(0, self.clusterNumber):
c = Cluster(i, len(self.observations[0]))
self.clusters.append(c)
i = 0
for obs in self.observations:
obs = np.append(obs, 0)
self.clusters[i % self.clusterNumber].addObservation(obs, 0)
i += 1
for c in self.clusters:
c.updateCentroid()
c.updateDist()
def assignement(self):
res = False
for cluster in self.clusters:
delObs = []
for i in xrange(0, cluster.observations.shape[0]):
obs = cluster.observations[i]
dist, nearestCluster = self.nearestCluster(obs)
if not nearestCluster.name == cluster.name:
nearestCluster.addObservation(obs, dist)
delObs.append(i)
res = True
cluster.deleteObservation(delObs)
return res
def nearestCluster(self, obs):
res = self.clusters[0]
minDist = self.computeDistance(obs, res.centroid)
for cluster in self.clusters:
newDist = self.computeDistance(obs, cluster.centroid)
if newDist < minDist:
minDist = newDist
res = cluster
		return minDist, res
def computeDistance(self, obs, centroid):
res = 0
for i in xrange(0, len(centroid) - self.FIELDSNOTUSE):
res += ((obs[i] - centroid[i]) ** 2)
return math.sqrt(res)
def jsonify(self):
res = '{"clusters":['
i = 0
for cluster in self.lastClusters:
if i > 0:
res += ','
res += '{"stats":[{"anomalies":['
k = 0
for j in xrange(0, len(cluster[0])):
if cluster[0][j] > 0:
if k > 0:
res += ','
res += '{"label":"' + str(self.classes[j]) + '", "value":' + str(cluster[0][j]) + '}'
k += 1
res += ']},{"corrects":['
k = 0
for j in xrange(0, len(cluster[1])):
if cluster[1][j] > 0:
if k > 0:
res += ','
res += '{"label":"' + str(self.classes[j]) + '", "value":' + str(cluster[1][j]) + '}'
k += 1
res += ']}]}'
i += 1
res += '],"N":' + str(self.n) + '}'
return json.loads(res)
if __name__ == "__main__":
# datafile = "kddcup.data_10_percent.csv"
# fields = [0, 4, 5, 22, 24, 25, 28, 31, 32, 35, 37, 38]
# header = False
# fieldClass = 41
# k = 23
# n = 20
datafile = "kddcup.data_1000.csv"
header = False
fields = [0, 4, 5, 22, 24, 25, 28, 31, 32, 35, 37, 38]
fieldClass = 41
k = 17
n = 20
# datafile = "iris.csv"
# fields = [0, 1, 2, 3]
# fieldClass = 4
# header = True
# k = 3
# n = 50
norm = Normalizer(datafile, header)
res = norm.run(fields, fieldClass)
classes = norm.classes
kMeanClusterer = KMeanClusterer(res, classes, k, n)
print json.dumps(kMeanClusterer.jsonify(), indent=2, separators=(',', ': '))
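# Hedged sketch, not part of the clusterer (names are illustrative): the
# distance used by KMeanClusterer.computeDistance() is a plain Euclidean
# distance over the feature columns, skipping the trailing bookkeeping fields
# (class label and stored distance).
def euclidean_feature_distance(obs, centroid, fields_not_used=2):
	usable = len(centroid) - fields_not_used
	total = 0.0
	for i in xrange(usable):
		total += (obs[i] - centroid[i]) ** 2
	return math.sqrt(total)

# euclidean_feature_distance([1.0, 2.0, 0, 0], [4.0, 6.0, 0, 0]) == 5.0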
| mit |
krismcfarlin/todo_angular_endpoints_sockets | bp_admin/users.py | 7 | 3990 | # -*- coding: utf-8 -*-
import webapp2
from google.appengine.datastore.datastore_query import Cursor
from google.appengine.ext import ndb
from collections import OrderedDict, Counter
from wtforms import fields
from bp_includes import forms
from bp_includes.lib.basehandler import BaseHandler
class AdminUserGeoChartHandler(BaseHandler):
def get(self):
users = self.user_model.query().fetch(projection=['country'])
users_by_country = Counter()
for user in users:
if user.country:
users_by_country[user.country] += 1
params = {
"data": users_by_country.items()
}
return self.render_template('admin_users_geochart.html', **params)
class EditProfileForm(forms.EditProfileForm):
activated = fields.BooleanField('Activated')
class AdminUserListHandler(BaseHandler):
def get(self):
p = self.request.get('p')
q = self.request.get('q')
c = self.request.get('c')
forward = True if p not in ['prev'] else False
cursor = Cursor(urlsafe=c)
if q:
qry = self.user_model.query(ndb.OR(self.user_model.last_name == q.lower(),
self.user_model.email == q.lower(),
self.user_model.username == q.lower()))
else:
qry = self.user_model.query()
PAGE_SIZE = 50
if forward:
users, next_cursor, more = qry.order(self.user_model.key).fetch_page(PAGE_SIZE, start_cursor=cursor)
if next_cursor and more:
self.view.next_cursor = next_cursor
if c:
self.view.prev_cursor = cursor.reversed()
else:
users, next_cursor, more = qry.order(-self.user_model.key).fetch_page(PAGE_SIZE, start_cursor=cursor)
users = list(reversed(users))
if next_cursor and more:
self.view.prev_cursor = next_cursor
self.view.next_cursor = cursor.reversed()
def pager_url(p, cursor):
params = OrderedDict()
if q:
params['q'] = q
if p in ['prev']:
params['p'] = p
if cursor:
params['c'] = cursor.urlsafe()
return self.uri_for('admin-users-list', **params)
self.view.pager_url = pager_url
self.view.q = q
params = {
"list_columns": [('username', 'Username'),
('name', 'Name'),
('last_name', 'Last Name'),
('email', 'Email'),
('country', 'Country'),
('tz', 'TimeZone')],
"users": users,
"count": qry.count()
}
return self.render_template('admin_users_list.html', **params)
class AdminUserEditHandler(BaseHandler):
def get_or_404(self, user_id):
try:
user = self.user_model.get_by_id(long(user_id))
if user:
return user
except ValueError:
pass
self.abort(404)
def edit(self, user_id):
if self.request.POST:
user = self.get_or_404(user_id)
if self.form.validate():
self.form.populate_obj(user)
user.put()
self.add_message("Changes saved!", 'success')
return self.redirect_to("admin-user-edit", user_id=user_id)
else:
self.add_message("Could not save changes!", 'error')
else:
user = self.get_or_404(user_id)
self.form.process(obj=user)
params = {
'user': user
}
return self.render_template('admin_user_edit.html', **params)
@webapp2.cached_property
def form(self):
f = EditProfileForm(self)
f.country.choices = self.countries_tuple
f.tz.choices = self.tz
return f
| lgpl-3.0 |
pkuyym/Paddle | python/paddle/fluid/tests/unittests/test_regularizer.py | 5 | 4053 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import paddle.fluid.framework as framework
import paddle.fluid.optimizer as optimizer
import paddle.fluid.regularizer as regularizer
from paddle.fluid.backward import append_backward
class TestL2DecayRegularizer(unittest.TestCase):
def test_l2decay_regularizer(self):
program = framework.Program()
block = program.global_block()
mul_x = block.create_parameter(
dtype="float32",
shape=[5, 10],
lod_level=0,
name="mul.x",
regularizer=regularizer.L2DecayRegularizer(0.5))
self.assertTrue(mul_x.regularizer is not None)
self.assertTrue(
isinstance(mul_x.regularizer, regularizer.L2DecayRegularizer))
mul_y = block.create_var(
dtype="float32", shape=[10, 8], lod_level=0, name="mul.y")
mul_out = block.create_var(
dtype="float32", shape=[5, 8], lod_level=0, name="mul.out")
block.append_op(
type="mul",
inputs={"X": mul_x,
"Y": mul_y},
outputs={"Out": mul_out},
attrs={"x_num_col_dims": 1})
mean_out = block.create_var(
dtype="float32", shape=[1], lod_level=0, name="mean.out")
block.append_op(
type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
params_grads = append_backward(mean_out)
self.assertEqual(len(params_grads), 1)
count_ops = len(block.ops)
params_grads = optimizer.append_regularization_ops(params_grads)
self.assertEqual(len(params_grads), 1)
self.assertEqual(len(block.ops), count_ops + 2)
self.assertEqual(block.ops[-1].type, 'elementwise_add')
self.assertEqual(block.ops[-2].type, 'scale')
class TestL1DecayRegularizer(unittest.TestCase):
    def test_l1decay_regularizer(self):
program = framework.Program()
block = program.global_block()
mul_x = block.create_parameter(
dtype="float32",
shape=[5, 10],
lod_level=0,
name="mul.x",
regularizer=regularizer.L1DecayRegularizer(0.5))
self.assertTrue(mul_x.regularizer is not None)
self.assertTrue(
isinstance(mul_x.regularizer, regularizer.L1DecayRegularizer))
mul_y = block.create_var(
dtype="float32", shape=[10, 8], lod_level=0, name="mul.y")
mul_out = block.create_var(
dtype="float32", shape=[5, 8], lod_level=0, name="mul.out")
block.append_op(
type="mul",
inputs={"X": mul_x,
"Y": mul_y},
outputs={"Out": mul_out},
attrs={"x_num_col_dims": 1})
mean_out = block.create_var(
dtype="float32", shape=[1], lod_level=0, name="mean.out")
block.append_op(
type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
params_grads = append_backward(mean_out)
self.assertEqual(len(params_grads), 1)
count_ops = len(block.ops)
params_grads = optimizer.append_regularization_ops(params_grads)
self.assertEqual(len(params_grads), 1)
self.assertEqual(len(block.ops), count_ops + 3)
self.assertEqual(block.ops[-1].type, 'elementwise_add')
self.assertEqual(block.ops[-2].type, 'scale')
self.assertEqual(block.ops[-3].type, 'sign')
if __name__ == '__main__':
unittest.main()
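# Hedged sketch in plain numpy, not the Paddle API (names are illustrative):
# the operators the tests above expect implement weight decay on the gradient.
# For L2 decay the term added to the gradient is coeff * param (hence the
# scale + elementwise_add ops); for L1 decay it is coeff * sign(param)
# (hence sign + scale + elementwise_add).
import numpy as np

def decayed_gradient(grad, param, coeff=0.5, kind='l2'):
    if kind == 'l2':
        return grad + coeff * param
    return grad + coeff * np.sign(param)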
| apache-2.0 |
kennysgithub/sm-p607t-kernel | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
try:
syscalls[common_comm][common_pid][id] += 1
except TypeError:
syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events by comm/pid:\n\n",
print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id, val in sorted(syscalls[comm][pid].iteritems(), \
key = lambda(k, v): (v, k), reverse = True):
print " %-38s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
msimacek/freeipa | ipalib/errors.py | 2 | 45284 | # Authors:
# Jason Gerard DeRose <jderose@redhat.com>
#
# Copyright (C) 2008 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Custom exception classes (some which are RPC transparent).
`PrivateError` and its subclasses are custom IPA exceptions that will *never* be
forwarded in a Remote Procedure Call (RPC) response.
On the other hand, `PublicError` and its subclasses can be forwarded in an RPC
response. These public errors each carry a unique integer error code as well as
a gettext translated error message (translated at the time the exception is
raised). The purpose of the public errors is to relay information about
*expected* user errors, service availability errors, and so on. They should
*never* be used for *unexpected* programmatic or run-time errors.
For security reasons it is *extremely* important that arbitrary exceptions *not*
be forwarded in an RPC response. Unexpected exceptions can easily contain
compromising information in their error messages. Any time the server catches
any exception that isn't a `PublicError` subclass, it should raise an
`InternalError`, which itself always has the same, static error message (and
therefore cannot be populated with information about the true exception).
The public errors are arranged into five main blocks of error code ranges:
============= ========================================
Error codes Exceptions
============= ========================================
1000 - 1999 `AuthenticationError` and its subclasses
2000 - 2999 `AuthorizationError` and its subclasses
3000 - 3999 `InvocationError` and its subclasses
4000 - 4999 `ExecutionError` and its subclasses
5000 - 5999 `GenericError` and its subclasses
============= ========================================
Within these five blocks some sub-ranges are already allocated for certain types
of error messages, while others are reserved for future use. Here are the
current block assignments:
- **900-5999** `PublicError` and its subclasses
- **901 - 907** Assigned to special top-level public errors
- **908 - 999** *Reserved for future use*
- **1000 - 1999** `AuthenticationError` and its subclasses
- **1001 - 1099** Open for general authentication errors
- **1100 - 1199** `KerberosError` and its subclasses
- **1200 - 1299** `SessionError` and its subclasses
- **1300 - 1999** *Reserved for future use*
- **2000 - 2999** `AuthorizationError` and its subclasses
- **2001 - 2099** Open for general authorization errors
- **2100 - 2199** `ACIError` and its subclasses
- **2200 - 2999** *Reserved for future use*
- **3000 - 3999** `InvocationError` and its subclasses
- **3001 - 3099** Open for general invocation errors
- **3100 - 3199** *Reserved for future use*
- **4000 - 4999** `ExecutionError` and its subclasses
- **4001 - 4099** Open for general execution errors
- **4100 - 4199** `BuiltinError` and its subclasses
- **4200 - 4299** `LDAPError` and its subclasses
- **4300 - 4399** `CertificateError` and its subclasses
- **4400 - 4999** *Reserved for future use*
- **5000 - 5999** `GenericError` and its subclasses
- **5001 - 5099** Open for generic errors
- **5100 - 5999** *Reserved for future use*
"""
from ipalib.text import ngettext as ungettext
from ipalib import messages
from ipaplatform.paths import paths
class PrivateError(StandardError):
"""
Base class for exceptions that are *never* forwarded in an RPC response.
"""
format = ''
def __init__(self, **kw):
self.msg = self.format % kw
self.kw = kw
for (key, value) in kw.items():
assert not hasattr(self, key), 'conflicting kwarg %s.%s = %r' % (
self.__class__.__name__, key, value,
)
setattr(self, key, value)
StandardError.__init__(self, self.msg)
class SubprocessError(PrivateError):
"""
Raised when ``subprocess.call()`` returns a non-zero exit status.
This custom exception is needed because Python 2.4 doesn't have the
``subprocess.CalledProcessError`` exception (which was added in Python 2.5).
For example:
>>> raise SubprocessError(returncode=2, argv=('ls', '-lh', '/no-foo/'))
Traceback (most recent call last):
...
SubprocessError: return code 2 from ('ls', '-lh', '/no-foo/')
The exit code of the sub-process is available via the ``returncode``
instance attribute. For example:
>>> e = SubprocessError(returncode=1, argv=(paths.BIN_FALSE,))
>>> e.returncode
1
>>> e.argv # argv is also available
('/bin/false',)
"""
format = 'return code %(returncode)d from %(argv)r'
class PluginSubclassError(PrivateError):
"""
Raised when a plugin doesn't subclass from an allowed base.
For example:
>>> raise PluginSubclassError(plugin='bad', bases=('base1', 'base2'))
Traceback (most recent call last):
...
PluginSubclassError: 'bad' not subclass of any base in ('base1', 'base2')
"""
format = '%(plugin)r not subclass of any base in %(bases)r'
class PluginDuplicateError(PrivateError):
"""
Raised when the same plugin class is registered more than once.
For example:
>>> raise PluginDuplicateError(plugin='my_plugin')
Traceback (most recent call last):
...
PluginDuplicateError: 'my_plugin' was already registered
"""
format = '%(plugin)r was already registered'
class PluginOverrideError(PrivateError):
"""
Raised when a plugin overrides another without using ``override=True``.
For example:
>>> raise PluginOverrideError(base='Command', name='env', plugin='my_env')
Traceback (most recent call last):
...
PluginOverrideError: unexpected override of Command.env with 'my_env'
"""
format = 'unexpected override of %(base)s.%(name)s with %(plugin)r'
class PluginMissingOverrideError(PrivateError):
"""
Raised when a plugin overrides another that has not been registered.
For example:
>>> raise PluginMissingOverrideError(base='Command', name='env', plugin='my_env')
Traceback (most recent call last):
...
PluginMissingOverrideError: Command.env not registered, cannot override with 'my_env'
"""
format = '%(base)s.%(name)s not registered, cannot override with %(plugin)r'
class SkipPluginModule(PrivateError):
"""
Raised to abort the loading of a plugin module.
"""
format = '%(reason)s'
class PluginsPackageError(PrivateError):
"""
Raised when ``package.plugins`` is a module instead of a sub-package.
"""
format = '%(name)s must be sub-package, not module: %(file)r'
##############################################################################
# Public errors:
_texts = []
def _(message):
_texts.append(message)
return message
class PublicError(StandardError):
"""
**900** Base class for exceptions that can be forwarded in an RPC response.
"""
def __init__(self, format=None, message=None, **kw):
messages.process_message_arguments(self, format, message, **kw)
super(PublicError, self).__init__(self.msg)
errno = 900
rval = 1
format = None
class VersionError(PublicError):
"""
**901** Raised when client and server versions are incompatible.
For example:
>>> raise VersionError(cver='2.0', sver='2.1', server='https://localhost')
Traceback (most recent call last):
...
VersionError: 2.0 client incompatible with 2.1 server at 'https://localhost'
"""
errno = 901
format = _("%(cver)s client incompatible with %(sver)s server at '%(server)s'")
class UnknownError(PublicError):
"""
**902** Raised when client does not know error it caught from server.
For example:
>>> raise UnknownError(code=57, server='localhost', error=u'a new error')
...
Traceback (most recent call last):
...
UnknownError: unknown error 57 from localhost: a new error
"""
errno = 902
format = _('unknown error %(code)d from %(server)s: %(error)s')
class InternalError(PublicError):
"""
**903** Raised to conceal a non-public exception.
For example:
>>> raise InternalError()
Traceback (most recent call last):
...
InternalError: an internal error has occurred
"""
errno = 903
format = _('an internal error has occurred')
def __init__(self, message=None):
"""
Security issue: ignore any information given to constructor.
"""
PublicError.__init__(self)
class ServerInternalError(PublicError):
"""
**904** Raised when client catches an `InternalError` from server.
For example:
>>> raise ServerInternalError(server='https://localhost')
Traceback (most recent call last):
...
ServerInternalError: an internal error has occurred on server at 'https://localhost'
"""
errno = 904
format = _("an internal error has occurred on server at '%(server)s'")
class CommandError(PublicError):
"""
**905** Raised when an unknown command is called.
For example:
>>> raise CommandError(name='foobar')
Traceback (most recent call last):
...
CommandError: unknown command 'foobar'
"""
errno = 905
format = _("unknown command '%(name)s'")
class ServerCommandError(PublicError):
"""
**906** Raised when client catches a `CommandError` from server.
For example:
>>> e = CommandError(name='foobar')
>>> raise ServerCommandError(error=e.message, server='https://localhost')
Traceback (most recent call last):
...
ServerCommandError: error on server 'https://localhost': unknown command 'foobar'
"""
errno = 906
format = _("error on server '%(server)s': %(error)s")
class NetworkError(PublicError):
"""
**907** Raised when a network connection cannot be created.
For example:
>>> raise NetworkError(uri='ldap://localhost:389', error=_(u'Connection refused'))
Traceback (most recent call last):
...
NetworkError: cannot connect to 'ldap://localhost:389': Connection refused
"""
errno = 907
format = _("cannot connect to '%(uri)s': %(error)s")
class ServerNetworkError(PublicError):
"""
**908** Raised when client catches a `NetworkError` from server.
"""
errno = 908
format = _("error on server '%(server)s': %(error)s")
class JSONError(PublicError):
"""
    **909** Raised when the server received a malformed JSON-RPC request.
"""
errno = 909
format = _('Invalid JSON-RPC request: %(error)s')
class XMLRPCMarshallError(PublicError):
"""
**910** Raised when the XML-RPC lib cannot marshall the request
For example:
>>> raise XMLRPCMarshallError(error=_('int exceeds XML-RPC limits'))
Traceback (most recent call last):
...
XMLRPCMarshallError: error marshalling data for XML-RPC transport: int exceeds XML-RPC limits
"""
errno = 910
format = _('error marshalling data for XML-RPC transport: %(error)s')
class RefererError(PublicError):
"""
**911** Raised when the request does not contain an HTTP referer
For example:
>>> raise RefererError(referer='referer')
Traceback (most recent call last):
...
RefererError: Missing or invalid HTTP Referer, referer
"""
errno = 911
format = _('Missing or invalid HTTP Referer, %(referer)s')
##############################################################################
# 1000 - 1999: Authentication errors
class AuthenticationError(PublicError):
"""
**1000** Base class for authentication errors (*1000 - 1999*).
"""
errno = 1000
class KerberosError(AuthenticationError):
"""
**1100** Base class for Kerberos authentication errors (*1100 - 1199*).
For example:
>>> raise KerberosError(major=_('Unspecified GSS failure. Minor code may provide more information'), minor=_('No credentials cache found'))
Traceback (most recent call last):
...
KerberosError: Kerberos error: Unspecified GSS failure. Minor code may provide more information/No credentials cache found
"""
errno = 1100
format= _('Kerberos error: %(major)s/%(minor)s')
class CCacheError(KerberosError):
"""
    **1101** Raised when the server does not receive Kerberos credentials.
For example:
>>> raise CCacheError()
Traceback (most recent call last):
...
CCacheError: did not receive Kerberos credentials
"""
errno = 1101
format = _('did not receive Kerberos credentials')
class ServiceError(KerberosError):
"""
**1102** Raised when service is not found in Kerberos DB.
For example:
>>> raise ServiceError(service='HTTP@localhost')
Traceback (most recent call last):
...
ServiceError: Service 'HTTP@localhost' not found in Kerberos database
"""
errno = 1102
format = _("Service '%(service)s' not found in Kerberos database")
class NoCCacheError(KerberosError):
"""
**1103** Raised when a client attempts to use Kerberos without a ccache.
For example:
>>> raise NoCCacheError()
Traceback (most recent call last):
...
NoCCacheError: No credentials cache found
"""
errno = 1103
format = _('No credentials cache found')
class TicketExpired(KerberosError):
"""
**1104** Raised when a client attempts to use an expired ticket
For example:
>>> raise TicketExpired()
Traceback (most recent call last):
...
TicketExpired: Ticket expired
"""
errno = 1104
format = _('Ticket expired')
class BadCCachePerms(KerberosError):
"""
**1105** Raised when a client has bad permissions on their ccache
For example:
>>> raise BadCCachePerms()
Traceback (most recent call last):
...
BadCCachePerms: Credentials cache permissions incorrect
"""
errno = 1105
format = _('Credentials cache permissions incorrect')
class BadCCacheFormat(KerberosError):
"""
    **1106** Raised when a client has a misformatted ccache
For example:
>>> raise BadCCacheFormat()
Traceback (most recent call last):
...
BadCCacheFormat: Bad format in credentials cache
"""
errno = 1106
format = _('Bad format in credentials cache')
class CannotResolveKDC(KerberosError):
"""
**1107** Raised when the KDC can't be resolved
For example:
>>> raise CannotResolveKDC()
Traceback (most recent call last):
...
CannotResolveKDC: Cannot resolve KDC for requested realm
"""
errno = 1107
format = _('Cannot resolve KDC for requested realm')
class SessionError(AuthenticationError):
"""
**1200** Base class for Session errors (*1200 - 1299*).
    """
errno = 1200
format= _('Session error')
class InvalidSessionPassword(SessionError):
"""
**1201** Raised when we cannot obtain a TGT for a principal.
"""
errno = 1201
format= _('Principal %(principal)s cannot be authenticated: %(message)s')
class PasswordExpired(InvalidSessionPassword):
"""
**1202** Raised when we cannot obtain a TGT for a principal because the password is expired.
"""
errno = 1202
##############################################################################
# 2000 - 2999: Authorization errors
class AuthorizationError(PublicError):
"""
**2000** Base class for authorization errors (*2000 - 2999*).
"""
errno = 2000
class ACIError(AuthorizationError):
"""
**2100** Base class for ACI authorization errors (*2100 - 2199*).
"""
errno = 2100
format = _('Insufficient access: %(info)s')
##############################################################################
# 3000 - 3999: Invocation errors
class InvocationError(PublicError):
"""
**3000** Base class for command invocation errors (*3000 - 3999*).
"""
errno = 3000
class EncodingError(InvocationError):
"""
**3001** Raised when received text is incorrectly encoded.
"""
errno = 3001
class BinaryEncodingError(InvocationError):
"""
**3002** Raised when received binary data is incorrectly encoded.
"""
errno = 3002
class ZeroArgumentError(InvocationError):
"""
**3003** Raised when a command is called with arguments but takes none.
For example:
>>> raise ZeroArgumentError(name='ping')
Traceback (most recent call last):
...
ZeroArgumentError: command 'ping' takes no arguments
"""
errno = 3003
format = _("command '%(name)s' takes no arguments")
class MaxArgumentError(InvocationError):
"""
**3004** Raised when a command is called with too many arguments.
For example:
>>> raise MaxArgumentError(name='user_add', count=2)
Traceback (most recent call last):
...
MaxArgumentError: command 'user_add' takes at most 2 arguments
"""
errno = 3004
def __init__(self, message=None, **kw):
if message is None:
format = ungettext(
"command '%(name)s' takes at most %(count)d argument",
"command '%(name)s' takes at most %(count)d arguments",
kw['count']
)
else:
format = None
InvocationError.__init__(self, format, message, **kw)
class OptionError(InvocationError):
"""
**3005** Raised when a command is called with unknown options.
"""
errno = 3005
class OverlapError(InvocationError):
"""
**3006** Raised when arguments and options overlap.
For example:
>>> raise OverlapError(names=['givenname', 'login'])
Traceback (most recent call last):
...
OverlapError: overlapping arguments and options: ['givenname', 'login']
"""
errno = 3006
format = _("overlapping arguments and options: %(names)s")
class RequirementError(InvocationError):
"""
**3007** Raised when a required parameter is not provided.
For example:
>>> raise RequirementError(name='givenname')
Traceback (most recent call last):
...
RequirementError: 'givenname' is required
"""
errno = 3007
format = _("'%(name)s' is required")
class ConversionError(InvocationError):
"""
**3008** Raised when parameter value can't be converted to correct type.
For example:
>>> raise ConversionError(name='age', error=_(u'must be an integer'))
Traceback (most recent call last):
...
ConversionError: invalid 'age': must be an integer
"""
errno = 3008
format = _("invalid '%(name)s': %(error)s")
class ValidationError(InvocationError):
"""
**3009** Raised when a parameter value fails a validation rule.
For example:
>>> raise ValidationError(name='sn', error=_(u'can be at most 128 characters'))
Traceback (most recent call last):
...
ValidationError: invalid 'sn': can be at most 128 characters
"""
errno = 3009
format = _("invalid '%(name)s': %(error)s")
class NoSuchNamespaceError(InvocationError):
"""
**3010** Raised when an unknown namespace is requested.
For example:
>>> raise NoSuchNamespaceError(name='Plugins')
Traceback (most recent call last):
...
NoSuchNamespaceError: api has no such namespace: 'Plugins'
"""
errno = 3010
format = _("api has no such namespace: '%(name)s'")
class PasswordMismatch(InvocationError):
"""
    **3011** Raised when the password and password confirmation don't match.
"""
errno = 3011
format = _('Passwords do not match')
class NotImplementedError(InvocationError):
"""
    **3012** Raised when a function hasn't been implemented.
"""
errno = 3012
format = _('Command not implemented')
class NotConfiguredError(InvocationError):
"""
    **3013** Raised when there is no configuration.
"""
errno = 3013
format = _('Client is not configured. Run ipa-client-install.')
class PromptFailed(InvocationError):
"""
    **3014** Raised when an interactive prompt failed.
"""
errno = 3014
format = _('Could not get %(name)s interactively')
class DeprecationError(InvocationError):
"""
    **3015** Raised when a command has been deprecated.
For example:
>>> raise DeprecationError(name='hbacrule_add_sourcehost')
Traceback (most recent call last):
...
DeprecationError: Command 'hbacrule_add_sourcehost' has been deprecated
"""
errno = 3015
format = _("Command '%(name)s' has been deprecated")
class NotAForestRootError(InvocationError):
"""
    **3016** Raised when an attempt to establish trust is made against a non-root domain.
    The forest root domain has the same name as the forest itself.
For example:
>>> raise NotAForestRootError(forest='example.test', domain='jointops.test')
Traceback (most recent call last):
...
NotAForestRootError: Domain 'jointops.test' is not a root domain for forest 'example.test'
"""
errno = 3016
format = _("Domain '%(domain)s' is not a root domain for forest '%(forest)s'")
##############################################################################
# 4000 - 4999: Execution errors
class ExecutionError(PublicError):
"""
**4000** Base class for execution errors (*4000 - 4999*).
"""
errno = 4000
class NotFound(ExecutionError):
"""
**4001** Raised when an entry is not found.
For example:
>>> raise NotFound(reason='no such user')
Traceback (most recent call last):
...
NotFound: no such user
"""
errno = 4001
rval = 2
format = _('%(reason)s')
class DuplicateEntry(ExecutionError):
"""
**4002** Raised when an entry already exists.
For example:
>>> raise DuplicateEntry
Traceback (most recent call last):
...
DuplicateEntry: This entry already exists
"""
errno = 4002
format = _('This entry already exists')
class HostService(ExecutionError):
"""
**4003** Raised when a host service principal is requested
For example:
>>> raise HostService
Traceback (most recent call last):
...
HostService: You must enroll a host in order to create a host service
"""
errno = 4003
format = _('You must enroll a host in order to create a host service')
class MalformedServicePrincipal(ExecutionError):
"""
**4004** Raised when a service principal is not of the form: service/fully-qualified host name
For example:
>>> raise MalformedServicePrincipal(reason=_('missing service'))
Traceback (most recent call last):
...
MalformedServicePrincipal: Service principal is not of the form: service/fully-qualified host name: missing service
"""
errno = 4004
format = _('Service principal is not of the form: service/fully-qualified host name: %(reason)s')
class RealmMismatch(ExecutionError):
"""
**4005** Raised when the requested realm does not match the IPA realm
For example:
>>> raise RealmMismatch
Traceback (most recent call last):
...
RealmMismatch: The realm for the principal does not match the realm for this IPA server
"""
errno = 4005
format = _('The realm for the principal does not match the realm for this IPA server')
class RequiresRoot(ExecutionError):
"""
**4006** Raised when a command requires the unix super-user to run
For example:
>>> raise RequiresRoot
Traceback (most recent call last):
...
RequiresRoot: This command requires root access
"""
errno = 4006
format = _('This command requires root access')
class AlreadyPosixGroup(ExecutionError):
"""
**4007** Raised when a group is already a posix group
For example:
>>> raise AlreadyPosixGroup
Traceback (most recent call last):
...
AlreadyPosixGroup: This is already a posix group
"""
errno = 4007
format = _('This is already a posix group')
class MalformedUserPrincipal(ExecutionError):
"""
**4008** Raised when a user principal is not of the form: user@REALM
For example:
>>> raise MalformedUserPrincipal(principal='jsmith@@EXAMPLE.COM')
Traceback (most recent call last):
...
MalformedUserPrincipal: Principal is not of the form user@REALM: 'jsmith@@EXAMPLE.COM'
"""
errno = 4008
format = _("Principal is not of the form user@REALM: '%(principal)s'")
class AlreadyActive(ExecutionError):
"""
**4009** Raised when an entry is made active that is already active
For example:
>>> raise AlreadyActive()
Traceback (most recent call last):
...
AlreadyActive: This entry is already enabled
"""
errno = 4009
format = _('This entry is already enabled')
class AlreadyInactive(ExecutionError):
"""
**4010** Raised when an entry is made inactive that is already inactive
For example:
>>> raise AlreadyInactive()
Traceback (most recent call last):
...
AlreadyInactive: This entry is already disabled
"""
errno = 4010
format = _('This entry is already disabled')
class HasNSAccountLock(ExecutionError):
"""
**4011** Raised when an entry has the nsAccountLock attribute set
For example:
>>> raise HasNSAccountLock()
Traceback (most recent call last):
...
HasNSAccountLock: This entry cannot be enabled or disabled
"""
errno = 4011
format = _('This entry cannot be enabled or disabled')
class NotGroupMember(ExecutionError):
"""
    **4012** Raised when trying to remove an entry that is not a member of the group
For example:
>>> raise NotGroupMember()
Traceback (most recent call last):
...
NotGroupMember: This entry is not a member
"""
errno = 4012
format = _('This entry is not a member')
class RecursiveGroup(ExecutionError):
"""
**4013** Raised when a group is added as a member of itself
For example:
>>> raise RecursiveGroup()
Traceback (most recent call last):
...
RecursiveGroup: A group may not be a member of itself
"""
errno = 4013
format = _('A group may not be a member of itself')
class AlreadyGroupMember(ExecutionError):
"""
    **4014** Raised when trying to re-add an entry that is already a member of the group
For example:
>>> raise AlreadyGroupMember()
Traceback (most recent call last):
...
AlreadyGroupMember: This entry is already a member
"""
errno = 4014
format = _('This entry is already a member')
class Base64DecodeError(ExecutionError):
"""
    **4015** Raised when a base64-encoded blob cannot be decoded
For example:
>>> raise Base64DecodeError(reason=_('Incorrect padding'))
Traceback (most recent call last):
...
Base64DecodeError: Base64 decoding failed: Incorrect padding
"""
errno = 4015
format = _('Base64 decoding failed: %(reason)s')
class RemoteRetrieveError(ExecutionError):
"""
**4016** Raised when retrieving data from a remote server fails
For example:
>>> raise RemoteRetrieveError(reason=_("Failed to get certificate chain."))
Traceback (most recent call last):
...
RemoteRetrieveError: Failed to get certificate chain.
"""
errno = 4016
format = _('%(reason)s')
class SameGroupError(ExecutionError):
"""
**4017** Raised when adding a group as a member of itself
For example:
>>> raise SameGroupError()
Traceback (most recent call last):
...
SameGroupError: A group may not be added as a member of itself
"""
errno = 4017
format = _('A group may not be added as a member of itself')
class DefaultGroupError(ExecutionError):
"""
**4018** Raised when removing the default user group
For example:
>>> raise DefaultGroupError()
Traceback (most recent call last):
...
DefaultGroupError: The default users group cannot be removed
"""
errno = 4018
format = _('The default users group cannot be removed')
class DNSNotARecordError(ExecutionError):
"""
**4019** Raised when a hostname is not a DNS A/AAAA record
For example:
>>> raise DNSNotARecordError()
Traceback (most recent call last):
...
DNSNotARecordError: Host does not have corresponding DNS A/AAAA record
"""
errno = 4019
format = _('Host does not have corresponding DNS A/AAAA record')
class ManagedGroupError(ExecutionError):
"""
**4020** Raised when a managed group is deleted
For example:
>>> raise ManagedGroupError()
Traceback (most recent call last):
...
ManagedGroupError: Deleting a managed group is not allowed. It must be detached first.
"""
errno = 4020
format = _('Deleting a managed group is not allowed. It must be detached first.')
class ManagedPolicyError(ExecutionError):
"""
**4021** Raised when password policy is assigned to a managed group
For example:
>>> raise ManagedPolicyError()
Traceback (most recent call last):
...
ManagedPolicyError: A managed group cannot have a password policy.
"""
errno = 4021
format = _('A managed group cannot have a password policy.')
class FileError(ExecutionError):
"""
**4022** Errors when dealing with files
For example:
>>> raise FileError(reason=_("cannot write file \'test\'"))
Traceback (most recent call last):
...
FileError: cannot write file 'test'
"""
errno = 4022
format = _('%(reason)s')
class NoCertificateError(ExecutionError):
"""
**4023** Raised when trying to retrieve a certificate that doesn't exist.
For example:
>>> raise NoCertificateError(entry='ipa.example.com')
Traceback (most recent call last):
...
NoCertificateError: 'ipa.example.com' doesn't have a certificate.
"""
errno = 4023
format = _('\'%(entry)s\' doesn\'t have a certificate.')
class ManagedGroupExistsError(ExecutionError):
"""
    **4024** Raised when adding a user whose managed (private) group already exists
For example:
>>> raise ManagedGroupExistsError(group=u'engineering')
Traceback (most recent call last):
...
ManagedGroupExistsError: Unable to create private group. A group 'engineering' already exists.
"""
errno = 4024
format = _('Unable to create private group. A group \'%(group)s\' already exists.')
class ReverseMemberError(ExecutionError):
"""
**4025** Raised when verifying that all reverse members have been added or removed.
For example:
>>> raise ReverseMemberError(verb=_('added'), exc=_("Group 'foo' not found."))
Traceback (most recent call last):
...
ReverseMemberError: A problem was encountered when verifying that all members were added: Group 'foo' not found.
"""
errno = 4025
format = _('A problem was encountered when verifying that all members were %(verb)s: %(exc)s')
class AttrValueNotFound(ExecutionError):
"""
**4026** Raised when an Attribute/Value pair is not found.
For example:
>>> raise AttrValueNotFound(attr='ipasudoopt', value='authenticate')
Traceback (most recent call last):
...
AttrValueNotFound: ipasudoopt does not contain 'authenticate'
"""
errno = 4026
rval = 1
format = _('%(attr)s does not contain \'%(value)s\'')
class SingleMatchExpected(ExecutionError):
"""
**4027** Raised when a search should return a single match
For example:
>>> raise SingleMatchExpected(found=9)
Traceback (most recent call last):
...
SingleMatchExpected: The search criteria was not specific enough. Expected 1 and found 9.
"""
errno = 4027
rval = 1
format = _('The search criteria was not specific enough. Expected 1 and found %(found)d.')
class AlreadyExternalGroup(ExecutionError):
"""
**4028** Raised when a group is already an external member group
For example:
>>> raise AlreadyExternalGroup
Traceback (most recent call last):
...
AlreadyExternalGroup: This group already allows external members
"""
errno = 4028
format = _('This group already allows external members')
class ExternalGroupViolation(ExecutionError):
"""
**4029** Raised when a group is already an external member group
    and an attempt is made to use it as a posix group
For example:
>>> raise ExternalGroupViolation
Traceback (most recent call last):
...
ExternalGroupViolation: This group cannot be posix because it is external
"""
errno = 4029
format = _('This group cannot be posix because it is external')
class PosixGroupViolation(ExecutionError):
"""
**4030** Raised when a group is already a posix group
and cannot be converted to external
For example:
>>> raise PosixGroupViolation
Traceback (most recent call last):
...
PosixGroupViolation: This is already a posix group and cannot be converted to external one
"""
errno = 4030
format = _('This is already a posix group and cannot be converted to external one')
class EmptyResult(NotFound):
"""
    **4031** Raised when an LDAP search returned no results.
For example:
>>> raise EmptyResult(reason='no matching entry found')
Traceback (most recent call last):
...
EmptyResult: no matching entry found
"""
errno = 4031
class InvalidDomainLevelError(ExecutionError):
"""
    **4032** Raised when an operation could not be completed due to an invalid
domain level.
For example:
>>> raise InvalidDomainLevelError(reason='feature requires domain level 4')
Traceback (most recent call last):
...
InvalidDomainLevelError: feature requires domain level 4
"""
errno = 4032
format = _('%(reason)s')
class BuiltinError(ExecutionError):
"""
**4100** Base class for builtin execution errors (*4100 - 4199*).
"""
errno = 4100
class HelpError(BuiltinError):
"""
**4101** Raised when requesting help for an unknown topic.
For example:
>>> raise HelpError(topic='newfeature')
Traceback (most recent call last):
...
HelpError: no command nor help topic 'newfeature'
"""
errno = 4101
format = _("no command nor help topic '%(topic)s'")
class LDAPError(ExecutionError):
"""
**4200** Base class for LDAP execution errors (*4200 - 4299*).
"""
errno = 4200
class MidairCollision(ExecutionError):
"""
**4201** Raised when a change collides with another change
For example:
>>> raise MidairCollision()
Traceback (most recent call last):
...
MidairCollision: change collided with another change
"""
errno = 4201
format = _('change collided with another change')
class EmptyModlist(ExecutionError):
"""
**4202** Raised when an LDAP update makes no changes
For example:
>>> raise EmptyModlist()
Traceback (most recent call last):
...
EmptyModlist: no modifications to be performed
"""
errno = 4202
format = _('no modifications to be performed')
class DatabaseError(ExecutionError):
"""
**4203** Raised when an LDAP error is not otherwise handled
For example:
>>> raise DatabaseError(desc=_("Can't contact LDAP server"), info=_('Info goes here'))
Traceback (most recent call last):
...
DatabaseError: Can't contact LDAP server: Info goes here
"""
errno = 4203
format = _('%(desc)s: %(info)s')
class LimitsExceeded(ExecutionError):
"""
**4204** Raised when search limits are exceeded.
For example:
>>> raise LimitsExceeded()
Traceback (most recent call last):
...
LimitsExceeded: limits exceeded for this query
"""
errno = 4204
format = _('limits exceeded for this query')
class ObjectclassViolation(ExecutionError):
"""
**4205** Raised when an entry is missing a required attribute or objectclass
For example:
>>> raise ObjectclassViolation(info=_('attribute "krbPrincipalName" not allowed'))
Traceback (most recent call last):
...
ObjectclassViolation: attribute "krbPrincipalName" not allowed
"""
errno = 4205
format = _('%(info)s')
class NotAllowedOnRDN(ExecutionError):
"""
**4206** Raised when an RDN value is modified.
For example:
>>> raise NotAllowedOnRDN()
Traceback (most recent call last):
...
NotAllowedOnRDN: modifying primary key is not allowed
"""
errno = 4206
format = _('modifying primary key is not allowed')
class OnlyOneValueAllowed(ExecutionError):
"""
**4207** Raised when trying to set more than one value to single-value attributes
For example:
    >>> raise OnlyOneValueAllowed(attr='ipasearchtimelimit')
Traceback (most recent call last):
...
OnlyOneValueAllowed: ipasearchtimelimit: Only one value allowed.
"""
errno = 4207
format = _('%(attr)s: Only one value allowed.')
class InvalidSyntax(ExecutionError):
"""
    **4208** Raised when a value does not match the required syntax
For example:
    >>> raise InvalidSyntax(attr='ipahomesrootdir')
    Traceback (most recent call last):
    ...
    InvalidSyntax: ipahomesrootdir: Invalid syntax.
"""
errno = 4208
format = _('%(attr)s: Invalid syntax.')
class BadSearchFilter(ExecutionError):
"""
**4209** Raised when an invalid LDAP search filter is used
For example:
>>> raise BadSearchFilter(info=_('invalid syntax'))
Traceback (most recent call last):
...
BadSearchFilter: Bad search filter invalid syntax
"""
errno = 4209
format = _('Bad search filter %(info)s')
class NotAllowedOnNonLeaf(ExecutionError):
"""
**4210** Raised when operation is not allowed on a non-leaf entry
For example:
>>> raise NotAllowedOnNonLeaf()
Traceback (most recent call last):
...
NotAllowedOnNonLeaf: Not allowed on non-leaf entry
"""
errno = 4210
format = _('Not allowed on non-leaf entry')
class DatabaseTimeout(DatabaseError):
"""
**4211** Raised when an LDAP call times out
For example:
>>> raise DatabaseTimeout()
Traceback (most recent call last):
...
DatabaseTimeout: LDAP timeout
"""
errno = 4211
format = _('LDAP timeout')
class DNSDataMismatch(ExecutionError):
"""
    **4212** Raised when a DNS query did not return the expected answer
    within the configured time limit.
For example:
>>> raise DNSDataMismatch(expected="zone3.test. 86400 IN A 192.0.2.1", \
got="zone3.test. 86400 IN A 192.168.1.1")
Traceback (most recent call last):
...
DNSDataMismatch: DNS check failed: Expected {zone3.test. 86400 IN A 192.0.2.1} got {zone3.test. 86400 IN A 192.168.1.1}
"""
errno = 4212
format = _('DNS check failed: Expected {%(expected)s} got {%(got)s}')
class TaskTimeout(DatabaseError):
"""
**4213** Raised when an LDAP task times out
For example:
>>> raise TaskTimeout(task='Automember', task_dn='')
Traceback (most recent call last):
...
TaskTimeout: Automember LDAP task timeout, Task DN: ''
"""
errno = 4213
format = _("%(task)s LDAP task timeout, Task DN: '%(task_dn)s'")
class CertificateError(ExecutionError):
"""
**4300** Base class for Certificate execution errors (*4300 - 4399*).
"""
errno = 4300
class CertificateOperationError(CertificateError):
"""
**4301** Raised when a certificate operation cannot be completed
For example:
>>> raise CertificateOperationError(error=_(u'bad serial number'))
Traceback (most recent call last):
...
CertificateOperationError: Certificate operation cannot be completed: bad serial number
"""
errno = 4301
format = _('Certificate operation cannot be completed: %(error)s')
class CertificateFormatError(CertificateError):
"""
**4302** Raised when a certificate is badly formatted
For example:
    >>> raise CertificateFormatError(error=_(u'improperly formatted DER-encoded certificate'))
Traceback (most recent call last):
...
    CertificateFormatError: Certificate format error: improperly formatted DER-encoded certificate
"""
errno = 4302
format = _('Certificate format error: %(error)s')
class MutuallyExclusiveError(ExecutionError):
"""
    **4303** Raised when an operation would result in setting two attributes which are mutually exclusive.
For example:
>>> raise MutuallyExclusiveError(reason=_(u'hosts may not be added when hostcategory=all'))
Traceback (most recent call last):
...
MutuallyExclusiveError: hosts may not be added when hostcategory=all
"""
errno = 4303
format = _('%(reason)s')
class NonFatalError(ExecutionError):
"""
**4304** Raised when part of an operation succeeds and the part that failed isn't critical.
For example:
>>> raise NonFatalError(reason=_(u'The host was added but the DNS update failed'))
Traceback (most recent call last):
...
NonFatalError: The host was added but the DNS update failed
"""
errno = 4304
format = _('%(reason)s')
class AlreadyRegisteredError(ExecutionError):
"""
**4305** Raised when registering a user that is already registered.
For example:
>>> raise AlreadyRegisteredError()
Traceback (most recent call last):
...
AlreadyRegisteredError: Already registered
"""
errno = 4305
format = _('Already registered')
class NotRegisteredError(ExecutionError):
"""
**4306** Raised when not registered and a registration is required
For example:
>>> raise NotRegisteredError()
Traceback (most recent call last):
...
NotRegisteredError: Not registered yet
"""
errno = 4306
format = _('Not registered yet')
class DependentEntry(ExecutionError):
"""
**4307** Raised when an entry being deleted has dependencies
For example:
>>> raise DependentEntry(label=u'SELinux User Map', key=u'test', dependent=u'test1')
Traceback (most recent call last):
...
DependentEntry: test cannot be deleted because SELinux User Map test1 requires it
"""
errno = 4307
format = _('%(key)s cannot be deleted because %(label)s %(dependent)s requires it')
class LastMemberError(ExecutionError):
"""
    **4308** Raised when an entry being deleted or disabled is the last member of a protected group
For example:
>>> raise LastMemberError(key=u'admin', label=u'group', container=u'admins')
Traceback (most recent call last):
...
LastMemberError: admin cannot be deleted or disabled because it is the last member of group admins
"""
errno = 4308
format = _('%(key)s cannot be deleted or disabled because it is the last member of %(label)s %(container)s')
class ProtectedEntryError(ExecutionError):
"""
    **4309** Raised when a protected entry is deleted or modified in a forbidden way
For example:
>>> raise ProtectedEntryError(label=u'group', key=u'admins', reason=_(u'privileged group'))
Traceback (most recent call last):
...
ProtectedEntryError: group admins cannot be deleted/modified: privileged group
"""
errno = 4309
format = _('%(label)s %(key)s cannot be deleted/modified: %(reason)s')
class CertificateInvalidError(CertificateError):
"""
**4310** Raised when a certificate is not valid
For example:
>>> raise CertificateInvalidError(name=_(u'CA'))
Traceback (most recent call last):
...
CertificateInvalidError: CA certificate is not valid
"""
errno = 4310
format = _('%(name)s certificate is not valid')
##############################################################################
# 5000 - 5999: Generic errors
class GenericError(PublicError):
"""
**5000** Base class for errors that don't fit elsewhere (*5000 - 5999*).
"""
errno = 5000
public_errors = tuple(sorted(
messages.iter_messages(globals(), PublicError), key=lambda E: E.errno))
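# Illustrative helper (an assumption made for this write-up, not part of the
# upstream module): since ``public_errors`` above is sorted by errno, a simple
# scan maps a numeric error code back to its class.
def _example_class_for_errno(errno):
    """Return the PublicError subclass registered for ``errno``, or None."""
    for error_class in public_errors:
        if error_class.errno == errno:
            return error_class
    return None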
if __name__ == '__main__':
messages.print_report('public errors', public_errors)
| gpl-3.0 |
incnone/necrobot | necrobot/race/racestats.py | 1 | 7328 | import math
from necrobot.race import racedb
from necrobot.util import console, racetime
from necrobot.util.necrodancer.character import NDChar
from necrobot.util.singleton import Singleton
class CharacterStats(object):
def __init__(self, ndchar):
self._ndchar = ndchar
self.number_of_races = 0
self.mean = 0
self.var = 0
self.winrate = 0
self.has_wins = False
@property
def ndchar(self) -> NDChar:
return self._ndchar
@property
def charname(self) -> str:
return self.ndchar.name
@property
def stdev(self) -> float:
return math.sqrt(self.var)
@property
def mean_str(self) -> str:
if self.has_wins:
return racetime.to_str(int(self.mean))
else:
return '--'
@property
def stdev_str(self) -> str:
if self.has_wins:
return racetime.to_str(int(self.stdev))
else:
return '--'
def barf(self) -> None:
console.info('{0:>10} {1:>5} {2:>9} {3:>9} {4:>6}\n'.format(
self.charname,
self.number_of_races,
self.mean_str,
self.stdev_str,
int(self.winrate * 100)))
class GeneralStats(object):
def __init__(self):
self._charstats = []
@property
def infotext(self) -> str:
info_text = '{0:>10} {1:<5} {2:<9} {3:<9} {4}\n'.format('', 'Races', 'Avg', 'Stdev', 'Clear%')
for char in sorted(self._charstats, key=lambda c: c.number_of_races, reverse=True):
info_text += '{0:>10} {1:>5} {2:>9} {3:>9} {4:>6}\n'.format(
char.charname,
char.number_of_races,
char.mean_str,
char.stdev_str,
int(char.winrate*100))
return info_text[:-1]
def insert_charstats(self, char: CharacterStats) -> None:
self._charstats.append(char)
def get_charstats(self, char: NDChar) -> CharacterStats:
for c in self._charstats:
if c.ndchar == char:
return c
return CharacterStats(char)
class StatCache(object, metaclass=Singleton):
class CachedStats(object):
def __init__(self):
self.last_race_number_amplified = 0 # The number of the last race when amplified was cached
self.last_race_number_base = 0 # The number of the last race when base was cached
self.amplified_stats = GeneralStats()
self.base_stats = GeneralStats()
def __init__(self):
        self._cache = {}  # Map from discord IDs to CachedStats
async def get_general_stats(self, user_id, amplified) -> GeneralStats:
last_race_number = await racedb.get_largest_race_number(user_id=user_id)
# Check whether we have an up-to-date cached version, and if so, return it
cached_data = self.CachedStats()
if user_id in self._cache:
cached_data = self._cache[user_id]
if amplified:
if cached_data.last_race_number_amplified == last_race_number:
return cached_data.amplified_stats
else:
if cached_data.last_race_number_base == last_race_number:
return cached_data.base_stats
# If here, the cache is out-of-date
general_stats = GeneralStats()
for row in await racedb.get_allzones_race_numbers(user_id=user_id, amplified=amplified):
char = NDChar.fromstr(row[0])
charstats = CharacterStats(char)
charstats.number_of_races = int(row[1])
total_time = 0
total_squared_time = 0
number_of_wins = 0
number_of_forfeits = 0
for stat_row in await racedb.get_all_racedata(user_id=user_id, char_name=char.name, amplified=amplified):
if int(stat_row[1]) == -2: # finish
time = int(stat_row[0])
total_time += time
total_squared_time += time * time
number_of_wins += 1
else:
number_of_forfeits += 1
if number_of_wins > 0:
charstats.mean = total_time / number_of_wins
if number_of_wins > 1:
charstats.has_wins = True
charstats.var = \
(total_squared_time / (number_of_wins-1)) - charstats.mean * total_time/(number_of_wins-1)
if number_of_wins + number_of_forfeits > 0:
charstats.winrate = number_of_wins / (number_of_wins + number_of_forfeits)
general_stats.insert_charstats(charstats)
# Update the cache
if amplified:
cached_data.last_race_number_amplified = last_race_number
cached_data.amplified_stats = general_stats
else:
cached_data.last_race_number_base = last_race_number
cached_data.base_stats = general_stats
self._cache[user_id] = cached_data
# Return
return general_stats
async def get_general_stats(user_id: int, amplified: bool) -> GeneralStats:
return await StatCache().get_general_stats(user_id, amplified)
async def get_character_stats(user_id: int, ndchar: NDChar, amplified: bool) -> CharacterStats:
general_stats = await StatCache().get_general_stats(user_id, amplified)
return general_stats.get_charstats(ndchar)
async def get_winrates(user_id_1: int, user_id_2: int, ndchar: NDChar, amplified: bool) -> tuple or None:
stats_1 = await get_character_stats(user_id_1, ndchar, amplified)
stats_2 = await get_character_stats(user_id_2, ndchar, amplified)
if not stats_1.has_wins or not stats_2.has_wins:
return None
m2_minus_m1 = stats_2.mean - stats_1.mean
sum_var = stats_1.var + stats_2.var
erf_arg = m2_minus_m1 / math.sqrt(2*sum_var)
if m2_minus_m1 > 0:
winrate_of_1_if_both_finish = (1.0 + math.erf(erf_arg))/2.0
else:
winrate_of_1_if_both_finish = (1.0 - math.erf(-erf_arg))/2.0
both_finish_prob = stats_1.winrate * stats_2.winrate
neither_finish_prob = (1-stats_1.winrate)*(1-stats_2.winrate)
winrate_of_1 = winrate_of_1_if_both_finish*both_finish_prob + (stats_1.winrate - both_finish_prob)
winrate_of_2 = (1.0-winrate_of_1_if_both_finish)*both_finish_prob + (stats_2.winrate - both_finish_prob)
return winrate_of_1, winrate_of_2, neither_finish_prob
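# Sketch added for illustration (not part of the original bot): get_winrates
# above models each racer's finish time as an independent normal distribution.
# With means m1, m2 and variances v1, v2, the probability that racer 1
# finishes first, given that both racers finish, is
# Phi((m2 - m1) / sqrt(v1 + v2)), which is what the erf expression computes.
# Restated on its own:
def _example_head_to_head_probability(m1: float, v1: float, m2: float, v2: float) -> float:
    """P(racer 1 beats racer 2 | both finish) under the normal model."""
    return (1.0 + math.erf((m2 - m1) / math.sqrt(2 * (v1 + v2)))) / 2.0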
async def get_most_races_infotext(ndchar: NDChar, limit: int) -> str:
most_races = await racedb.get_most_races_leaderboard(str(ndchar), limit)
infotext = '{0:>20} {1:>6} {2:>6}\n'.format('', 'Base', 'Amp')
for row in most_races:
infotext += '{0:>20.20} {1:>6} {2:>6}\n'.format(row[0], row[2], row[3])
return infotext
async def get_fastest_times_infotext(ndchar: NDChar, amplified: bool, limit: int) -> str:
fastest_times = await racedb.get_fastest_times_leaderboard(str(ndchar), amplified, limit)
infotext = '{0:>20} {1:<9} {2:<9} {3:<13}\n'.format('', 'Time (rta)', 'Seed', 'Date')
for row in fastest_times:
infotext += '{0:>20.20} {1:>9} {2:>9} {3:>13}\n'.format(
row[0],
racetime.to_str(int(row[1])),
row[2],
row[3].strftime("%b %d, %Y"))
return infotext
| mit |
nguyentu1602/statsmodels | statsmodels/tools/dump2module.py | 33 | 6960 | '''Save a set of numpy arrays to a python module file that can be imported
Author : Josef Perktold
'''
from __future__ import print_function
from statsmodels.compat.python import iterkeys
import numpy as np
class HoldIt(object):
'''Class to write numpy arrays into a python module
    Calling save on an instance of this class writes all attributes of the
instance into a module file. For details see the save method.
'''
def __init__(self, name):
self.name = name
def save(self, what=None, filename=None, header=True, useinstance=True,
comment=None, print_options=None):
'''write attributes of this instance to python module given by filename
Parameters
----------
what : list or None
list of attributes that are added to the module. If None (default)
then all attributes in __dict__ that do not start with an underline
will be saved.
filename : string
specifies filename with path. If the file does not exist, it will be
        created. If the file already exists, then the new data will be
appended to the file.
header : bool
If true, then the imports of the module and the class definition are
written before writing the data.
useinstance : bool
If true, then the data in the module are attached to an instance of a
holder class. If false, then each array will be saved as separate
variable.
comment : string
If comment is not empty then this string will be attached as a
description comment to the data instance in the saved module.
print_options : dict or None
The print_options for the numpy arrays will be updated with this.
see notes
Notes
-----
The content of an numpy array are written using repr, which can be
controlled with the np.set_printoptions. The numpy default is updated
with: precision=20, linewidth=100, nanstr='nan', infstr='inf'
This should provide enough precision for double floating point numbers.
If one array has more than 1000 elements, then threshold should be
overwritten by the user, see keyword argument print_options.
'''
print_opt_old = np.get_printoptions()
print_opt = dict(precision=20, linewidth=100, nanstr='nan',
infstr='inf')
if print_options:
print_opt.update(print_options)
np.set_printoptions(**print_opt)
#precision corrects for non-scientific notation
if what is None:
            what = [i for i in self.__dict__ if i[0] != '_']
if header:
txt = ['import numpy as np\n'
'from numpy import array, rec, inf, nan\n\n']
if useinstance:
txt.append('class Holder(object):\n pass\n\n')
else:
txt = []
if useinstance:
txt.append('%s = Holder()' % self.name)
prefix = '%s.' % self.name
else:
prefix = ''
        if comment is not None:
txt.append("%scomment = '%s'" % (prefix, comment))
for x in what:
txt.append('%s%s = %s' % (prefix, x, repr(getattr(self,x))))
txt.extend(['','']) #add empty lines at end
        if filename is not None:
            with open(filename, 'a+') as fout:
                fout.write('\n'.join(txt))
np.set_printoptions(**print_opt_old)
self._filename = filename
self._useinstance = useinstance
self._what = what
return txt
def verify(self):
'''load the saved module and verify the data
This tries several ways of comparing the saved and the attached data,
but might not work for all possible data structures.
Returns
-------
all_correct : bool
true if no differences are found, for floating point numbers
rtol=1e-16, atol=1e-16 is used to determine equality (allclose)
correctli : list
list of attribute names that compare as equal
incorrectli : list
list of attribute names that did not compare as equal, either
because they differ or because the comparison does not handle the
data structure correctly
'''
module = __import__(self._filename.replace('.py',''))
if not self._useinstance:
raise NotImplementedError('currently only implemented when'
'useinstance is true')
data = getattr(module, self.name)
correctli = []
incorrectli = []
for d in self._what:
            self_item = getattr(self, d)
            saved_item = getattr(data, d)
            #print(d)
            #try simple equality
            correct = np.all(self_item == saved_item)
            #try allclose
            if not correct and not self_item.dtype == np.dtype('object'):
                correct = np.allclose(self_item, saved_item,
                                      rtol=1e-16, atol=1e-16)
                if not correct:
                    import warnings
                    warnings.warn("inexact precision in " + d, RuntimeWarning)
            #try iterating, if object array
            if not correct and self_item.dtype == np.dtype('object'):
                correlem = [np.all(self_item.item()[k] == saved_item.item()[k])
                            for k in iterkeys(self_item.item())]
                correct = all(correlem)
            if correct:
                correctli.append(d)
            else:
                #print(d, "wrong")
                incorrectli.append(d)
return len(incorrectli)==0, correctli, incorrectli
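# Minimal usage sketch (illustrative only; the file name and attribute names
# below are invented for this example and are not part of the original
# module): attach arrays to a HoldIt instance and dump them to an importable
# module file.
def _example_dump(filename='example_results_module.py'):
    res = HoldIt('example_results')
    res.params = np.array([1.0, 2.0, 3.0])
    res.cov = np.eye(3)
    return res.save(filename=filename, header=True,
                    comment='illustrative dump')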
if __name__ == '__main__':
data = np.load(r"E:\Josef\eclipsegworkspace\statsmodels-josef-experimental-030\dist\statsmodels-0.3.0dev_with_Winhelp_a2\statsmodels-0.3.0dev\scikits\statsmodels\tsa\vector_ar\tests\results\vars_results.npz")
res_var = HoldIt('var_results')
for d in data:
setattr(res_var, d, data[d])
np.set_printoptions(precision=120, linewidth=100)
res_var.save(filename='testsave.py', header=True,
comment='VAR test data converted from vars_results.npz')
import testsave
for d in data:
print(d)
correct = np.all(data[d] == getattr(testsave.var_results, d))
if not correct and not data[d].dtype == np.dtype('object'):
correct = np.allclose(data[d], getattr(testsave.var_results, d),
rtol=1e-16, atol=1e-16)
if not correct: print("inexact precision")
if not correct:
correlem =[np.all(data[d].item()[k] ==
getattr(testsave.var_results, d).item()[k])
for k in iterkeys(data[d].item())]
if not correlem:
print(d, "wrong")
print(res_var.verify())
| bsd-3-clause |
aduchate/bigcouch | couchjs/scons/scons-local-2.0.1/SCons/Tool/dvi.py | 61 | 2388 | """SCons.Tool.dvi
Common DVI Builder definition for various other Tool modules that use it.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/dvi.py 5134 2010/08/16 23:02:40 bdeegan"
import SCons.Builder
import SCons.Tool
DVIBuilder = None
def generate(env):
try:
env['BUILDERS']['DVI']
except KeyError:
global DVIBuilder
if DVIBuilder is None:
# The suffix is hard-coded to '.dvi', not configurable via a
# construction variable like $DVISUFFIX, because the output
# file name is hard-coded within TeX.
DVIBuilder = SCons.Builder.Builder(action = {},
source_scanner = SCons.Tool.LaTeXScanner,
suffix = '.dvi',
emitter = {},
source_ext_match = None)
env['BUILDERS']['DVI'] = DVIBuilder
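# Illustrative note (an assumption based on how the TeX-related SCons tools
# consume this skeleton; not part of this file): the DVI builder is created
# with empty action and emitter maps on purpose, and tools such as tex or
# latex are expected to fill them in later, roughly along these lines:
#
#   SCons.Tool.dvi.generate(env)
#   bld = env['BUILDERS']['DVI']
#   bld.add_action('.tex', TeXLaTeXAction)    # action object defined elsewhere
#   bld.add_emitter('.tex', tex_eps_emitter)  # emitter defined elsewhere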
def exists(env):
# This only puts a skeleton Builder in place, so if someone
# references this Tool directly, it's always "available."
return 1
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
ccsplit/spiderfoot | ext/dns/ipv6.py | 26 | 4976 | # Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""IPv6 helper functions."""
import re
import dns.exception
import dns.ipv4
_leading_zero = re.compile(r'0+([0-9a-f]+)')
def inet_ntoa(address):
"""Convert a network format IPv6 address into text.
@param address: the binary address
@type address: string
@rtype: string
@raises ValueError: the address isn't 16 bytes long
"""
if len(address) != 16:
raise ValueError("IPv6 addresses are 16 bytes long")
hex = address.encode('hex_codec')
chunks = []
i = 0
l = len(hex)
while i < l:
chunk = hex[i : i + 4]
# strip leading zeros. we do this with an re instead of
# with lstrip() because lstrip() didn't support chars until
# python 2.2.2
m = _leading_zero.match(chunk)
if not m is None:
chunk = m.group(1)
chunks.append(chunk)
i += 4
#
# Compress the longest subsequence of 0-value chunks to ::
#
best_start = 0
best_len = 0
start = -1
last_was_zero = False
for i in xrange(8):
if chunks[i] != '0':
if last_was_zero:
end = i
current_len = end - start
if current_len > best_len:
best_start = start
best_len = current_len
last_was_zero = False
elif not last_was_zero:
start = i
last_was_zero = True
if last_was_zero:
end = 8
current_len = end - start
if current_len > best_len:
best_start = start
best_len = current_len
if best_len > 1:
if best_start == 0 and \
(best_len == 6 or
best_len == 5 and chunks[5] == 'ffff'):
# We have an embedded IPv4 address
if best_len == 6:
prefix = '::'
else:
prefix = '::ffff:'
hex = prefix + dns.ipv4.inet_ntoa(address[12:])
else:
hex = ':'.join(chunks[:best_start]) + '::' + \
':'.join(chunks[best_start + best_len:])
else:
hex = ':'.join(chunks)
return hex
_v4_ending = re.compile(r'(.*):(\d+\.\d+\.\d+\.\d+)$')
_colon_colon_start = re.compile(r'::.*')
_colon_colon_end = re.compile(r'.*::$')
def inet_aton(text):
"""Convert a text format IPv6 address into network format.
@param text: the textual address
@type text: string
@rtype: string
@raises dns.exception.SyntaxError: the text was not properly formatted
"""
#
# Our aim here is not something fast; we just want something that works.
#
if text == '::':
text = '0::'
#
# Get rid of the icky dot-quad syntax if we have it.
#
m = _v4_ending.match(text)
if not m is None:
b = dns.ipv4.inet_aton(m.group(2))
text = "%s:%02x%02x:%02x%02x" % (m.group(1), ord(b[0]), ord(b[1]),
ord(b[2]), ord(b[3]))
#
# Try to turn '::<whatever>' into ':<whatever>'; if no match try to
# turn '<whatever>::' into '<whatever>:'
#
m = _colon_colon_start.match(text)
if not m is None:
text = text[1:]
else:
m = _colon_colon_end.match(text)
if not m is None:
text = text[:-1]
#
# Now canonicalize into 8 chunks of 4 hex digits each
#
chunks = text.split(':')
l = len(chunks)
if l > 8:
raise dns.exception.SyntaxError
seen_empty = False
canonical = []
for c in chunks:
if c == '':
if seen_empty:
raise dns.exception.SyntaxError
seen_empty = True
for i in xrange(0, 8 - l + 1):
canonical.append('0000')
else:
lc = len(c)
if lc > 4:
raise dns.exception.SyntaxError
if lc != 4:
c = ('0' * (4 - lc)) + c
canonical.append(c)
if l < 8 and not seen_empty:
raise dns.exception.SyntaxError
text = ''.join(canonical)
#
# Finally we can go to binary.
#
try:
return text.decode('hex_codec')
except TypeError:
raise dns.exception.SyntaxError
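# Illustrative round-trip check, added for this write-up (not part of the
# upstream module); it only uses the two helpers defined above. Under the
# Python 2 runtime this module targets, both helpers operate on byte strings.
def _example_roundtrip():
    """Round-trip a couple of textual addresses through inet_aton/inet_ntoa."""
    for text in ('::1', '2001:db8::ff00:42:8329'):
        assert inet_ntoa(inet_aton(text)) == text
    return True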
| gpl-2.0 |
ProfessionalIT/maxigenios-website | sdk/google_appengine/lib/django-1.5/django/contrib/gis/geos/prototypes/__init__.py | 314 | 1305 | """
This module contains all of the GEOS ctypes function prototypes. Each
prototype handles the interaction between the GEOS library and Python
via ctypes.
"""
# Coordinate sequence routines.
from django.contrib.gis.geos.prototypes.coordseq import (create_cs, get_cs,
cs_clone, cs_getordinate, cs_setordinate, cs_getx, cs_gety, cs_getz,
cs_setx, cs_sety, cs_setz, cs_getsize, cs_getdims)
# Geometry routines.
from django.contrib.gis.geos.prototypes.geom import (from_hex, from_wkb, from_wkt,
create_point, create_linestring, create_linearring, create_polygon, create_collection,
destroy_geom, get_extring, get_intring, get_nrings, get_geomn, geom_clone,
geos_normalize, geos_type, geos_typeid, geos_get_srid, geos_set_srid,
get_dims, get_num_coords, get_num_geoms,
to_hex, to_wkb, to_wkt)
# Miscellaneous routines.
from django.contrib.gis.geos.prototypes.misc import *
# Predicates
from django.contrib.gis.geos.prototypes.predicates import (geos_hasz, geos_isempty,
geos_isring, geos_issimple, geos_isvalid, geos_contains, geos_crosses,
    geos_disjoint, geos_equals, geos_equalsexact, geos_intersects,
    geos_overlaps, geos_relatepattern, geos_touches, geos_within)
# Topology routines
from django.contrib.gis.geos.prototypes.topology import *
| mit |
siemens/django-mantis-actionables | mantis_actionables/migrations/0012_auto_20150224_1253.py | 2 | 3098 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
('dingos', '0005_AddTaggingHistory'),
('mantis_actionables', '0011_singletonobservable_actionable_tags_cache'),
]
operations = [
migrations.RemoveField(
model_name='importinfo',
name='comment',
),
migrations.RemoveField(
model_name='importinfo',
name='user',
),
migrations.AddField(
model_name='importinfo',
name='create_timestamp',
field=models.DateTimeField(default=datetime.datetime(2015, 2, 24, 12, 52, 32, 263422), auto_now_add=True),
preserve_default=False,
),
migrations.AddField(
model_name='importinfo',
name='creating_action',
field=models.ForeignKey(related_name=b'import_infos', default=0, to='mantis_actionables.Action'),
preserve_default=False,
),
migrations.AddField(
model_name='importinfo',
name='description',
field=models.TextField(default=b'', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='importinfo',
name='name',
field=models.CharField(default=b'Unnamed', help_text=b"Name of the information object, usually auto generated.\n from type and facts flagged as 'naming'.", max_length=255, editable=False, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='importinfo',
name='namespace',
field=models.ForeignKey(default=0, to='dingos.IdentifierNameSpace'),
preserve_default=False,
),
migrations.AddField(
model_name='importinfo',
name='related_threatactor',
field=models.CharField(default='', max_length=255, blank=True),
preserve_default=False,
),
migrations.AddField(
model_name='importinfo',
name='timestamp',
field=models.DateTimeField(default=datetime.datetime(2015, 2, 24, 12, 53, 49, 294188), auto_now_add=True),
preserve_default=False,
),
migrations.AddField(
model_name='importinfo',
name='uid',
field=models.SlugField(default=b'', max_length=255),
preserve_default=True,
),
migrations.AddField(
model_name='importinfo',
name='uri',
field=models.URLField(default=b'', help_text=b'URI pointing to further\n information concerning this\n import, e.g., the HTML\n report of a malware analysis\n through Cuckoo or similar.', blank=True),
preserve_default=True,
),
]
| gpl-2.0 |
Kazade/NeHe-Website | google_appengine/lib/django-1.5/tests/regressiontests/utils/decorators.py | 115 | 3920 | from django.http import HttpResponse
from django.template import Template, Context
from django.template.response import TemplateResponse
from django.test import TestCase, RequestFactory
from django.utils.decorators import decorator_from_middleware
class ProcessViewMiddleware(object):
def process_view(self, request, view_func, view_args, view_kwargs):
pass
process_view_dec = decorator_from_middleware(ProcessViewMiddleware)
@process_view_dec
def process_view(request):
return HttpResponse()
class ClassProcessView(object):
def __call__(self, request):
return HttpResponse()
class_process_view = process_view_dec(ClassProcessView())
class FullMiddleware(object):
def process_request(self, request):
request.process_request_reached = True
    def process_view(self, request, view_func, view_args, view_kwargs):
request.process_view_reached = True
def process_template_response(self, request, response):
request.process_template_response_reached = True
return response
def process_response(self, request, response):
# This should never receive unrendered content.
request.process_response_content = response.content
request.process_response_reached = True
return response
full_dec = decorator_from_middleware(FullMiddleware)
class DecoratorFromMiddlewareTests(TestCase):
"""
Tests for view decorators created using
``django.utils.decorators.decorator_from_middleware``.
"""
rf = RequestFactory()
def test_process_view_middleware(self):
"""
Test a middleware that implements process_view.
"""
process_view(self.rf.get('/'))
def test_callable_process_view_middleware(self):
"""
Test a middleware that implements process_view, operating on a callable class.
"""
class_process_view(self.rf.get('/'))
def test_full_dec_normal(self):
"""
Test that all methods of middleware are called for normal HttpResponses
"""
@full_dec
def normal_view(request):
t = Template("Hello world")
return HttpResponse(t.render(Context({})))
request = self.rf.get('/')
response = normal_view(request)
self.assertTrue(getattr(request, 'process_request_reached', False))
self.assertTrue(getattr(request, 'process_view_reached', False))
# process_template_response must not be called for HttpResponse
self.assertFalse(getattr(request, 'process_template_response_reached', False))
self.assertTrue(getattr(request, 'process_response_reached', False))
def test_full_dec_templateresponse(self):
"""
Test that all methods of middleware are called for TemplateResponses in
the right sequence.
"""
@full_dec
def template_response_view(request):
t = Template("Hello world")
return TemplateResponse(request, t, {})
request = self.rf.get('/')
response = template_response_view(request)
self.assertTrue(getattr(request, 'process_request_reached', False))
self.assertTrue(getattr(request, 'process_view_reached', False))
self.assertTrue(getattr(request, 'process_template_response_reached', False))
# response must not be rendered yet.
self.assertFalse(response._is_rendered)
# process_response must not be called until after response is rendered,
# otherwise some decorators like csrf_protect and gzip_page will not
# work correctly. See #16004
self.assertFalse(getattr(request, 'process_response_reached', False))
response.render()
self.assertTrue(getattr(request, 'process_response_reached', False))
# Check that process_response saw the rendered content
self.assertEqual(request.process_response_content, b"Hello world")
| bsd-3-clause |
adrianholovaty/django | django/contrib/formtools/tests/forms.py | 99 | 1164 | from django import forms
from django.contrib.formtools.wizard import FormWizard
from django.http import HttpResponse
class Page1(forms.Form):
name = forms.CharField(max_length=100)
thirsty = forms.NullBooleanField()
class Page2(forms.Form):
address1 = forms.CharField(max_length=100)
address2 = forms.CharField(max_length=100)
class Page3(forms.Form):
random_crap = forms.CharField(max_length=100)
class ContactWizard(FormWizard):
def done(self, request, form_list):
return HttpResponse("")
class TestForm(forms.Form):
field1 = forms.CharField()
field1_ = forms.CharField()
bool1 = forms.BooleanField(required=False)
class HashTestForm(forms.Form):
name = forms.CharField()
bio = forms.CharField()
class HashTestBlankForm(forms.Form):
name = forms.CharField(required=False)
bio = forms.CharField(required=False)
class WizardPageOneForm(forms.Form):
field = forms.CharField()
class WizardPageTwoForm(forms.Form):
field = forms.CharField()
class WizardPageTwoAlternativeForm(forms.Form):
field = forms.CharField()
class WizardPageThreeForm(forms.Form):
field = forms.CharField()
| bsd-3-clause |
erjohnso/ansible | lib/ansible/modules/network/aci/aci_intf_policy_lldp.py | 22 | 3973 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_intf_policy_lldp
short_description: Manage LLDP interface policies on Cisco ACI fabrics (lldp:IfPol)
description:
- Manage LLDP interface policies on Cisco ACI fabrics.
- More information from the internal APIC class
I(lldp:IfPol) at U(https://developer.cisco.com/media/mim-ref/MO-lldpIfPol.html).
author:
- Swetha Chunduri (@schunduri)
- Dag Wieers (@dagwieers)
- Jacob McGill (@jmcgill298)
version_added: '2.4'
requirements:
- ACI Fabric 1.0(3f)+
options:
lldp_policy:
description:
- The LLDP interface policy name.
required: yes
aliases: [ name ]
description:
description:
- The description for the LLDP interface policy name.
aliases: [ descr ]
receive_state:
description:
- Enable or disable Receive state (FIXME!)
required: yes
choices: [ disabled, enabled ]
default: enabled
transmit_state:
description:
- Enable or Disable Transmit state (FIXME!)
required: false
choices: [ disabled, enabled ]
default: enabled
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
'''
# FIXME: Add more, better examples
EXAMPLES = r'''
- aci_intf_policy_lldp:
hostname: '{{ hostname }}'
username: '{{ username }}'
password: '{{ password }}'
lldp_policy: '{{ lldp_policy }}'
description: '{{ description }}'
receive_state: '{{ receive_state }}'
transmit_state: '{{ transmit_state }}'
'''
RETURN = r'''
#
'''
from ansible.module_utils.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
argument_spec = aci_argument_spec
argument_spec.update(
        lldp_policy=dict(type='str', required=False, aliases=['name']),
description=dict(type='str', aliases=['descr']),
receive_state=dict(type='str', choices=['disabled', 'enabled']),
transmit_state=dict(type='str', choices=['disabled', 'enabled']),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
method=dict(type='str', choices=['delete', 'get', 'post'], aliases=['action'], removed_in_version='2.6'), # Deprecated starting from v2.6
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['lldp_policy']],
['state', 'present', ['lldp_policy']],
],
)
lldp_policy = module.params['lldp_policy']
description = module.params['description']
receive_state = module.params['receive_state']
transmit_state = module.params['transmit_state']
state = module.params['state']
aci = ACIModule(module)
aci.construct_url(root_class='lldp_policy')
aci.get_existing()
if state == 'present':
# Filter out module parameters with null values
aci.payload(
aci_class='lldpIfPol',
class_config=dict(
name=lldp_policy,
descr=description,
adminRxSt=receive_state,
adminTxSt=transmit_state,
),
)
# Generate config diff which will be used as POST request body
aci.get_diff(aci_class='lldpIfPol')
# Submit changes if module not in check_mode and the proposed is different than existing
aci.post_config()
elif state == 'absent':
aci.delete_config()
module.exit_json(**aci.result)
if __name__ == "__main__":
main()
| gpl-3.0 |
geary/claslite | web/app/lib/simplejson/tests/test_encode_basestring_ascii.py | 95 | 2301 | from unittest import TestCase
import simplejson.encoder
CASES = [
(u'/\\"\ucafe\ubabe\uab98\ufcde\ubcda\uef4a\x08\x0c\n\r\t`1~!@#$%^&*()_+-=[]{}|;:\',./<>?', '"/\\\\\\"\\ucafe\\ubabe\\uab98\\ufcde\\ubcda\\uef4a\\b\\f\\n\\r\\t`1~!@#$%^&*()_+-=[]{}|;:\',./<>?"'),
(u'\u0123\u4567\u89ab\ucdef\uabcd\uef4a', '"\\u0123\\u4567\\u89ab\\ucdef\\uabcd\\uef4a"'),
(u'controls', '"controls"'),
(u'\x08\x0c\n\r\t', '"\\b\\f\\n\\r\\t"'),
(u'{"object with 1 member":["array with 1 element"]}', '"{\\"object with 1 member\\":[\\"array with 1 element\\"]}"'),
(u' s p a c e d ', '" s p a c e d "'),
(u'\U0001d120', '"\\ud834\\udd20"'),
(u'\u03b1\u03a9', '"\\u03b1\\u03a9"'),
('\xce\xb1\xce\xa9', '"\\u03b1\\u03a9"'),
(u'\u03b1\u03a9', '"\\u03b1\\u03a9"'),
('\xce\xb1\xce\xa9', '"\\u03b1\\u03a9"'),
(u'\u03b1\u03a9', '"\\u03b1\\u03a9"'),
(u'\u03b1\u03a9', '"\\u03b1\\u03a9"'),
(u"`1~!@#$%^&*()_+-={':[,]}|;.</>?", '"`1~!@#$%^&*()_+-={\':[,]}|;.</>?"'),
(u'\x08\x0c\n\r\t', '"\\b\\f\\n\\r\\t"'),
(u'\u0123\u4567\u89ab\ucdef\uabcd\uef4a', '"\\u0123\\u4567\\u89ab\\ucdef\\uabcd\\uef4a"'),
]
class TestEncodeBaseStringAscii(TestCase):
def test_py_encode_basestring_ascii(self):
self._test_encode_basestring_ascii(simplejson.encoder.py_encode_basestring_ascii)
def test_c_encode_basestring_ascii(self):
if not simplejson.encoder.c_encode_basestring_ascii:
return
self._test_encode_basestring_ascii(simplejson.encoder.c_encode_basestring_ascii)
def _test_encode_basestring_ascii(self, encode_basestring_ascii):
fname = encode_basestring_ascii.__name__
for input_string, expect in CASES:
result = encode_basestring_ascii(input_string)
#self.assertEquals(result, expect,
# '{0!r} != {1!r} for {2}({3!r})'.format(
# result, expect, fname, input_string))
self.assertEquals(result, expect,
'%r != %r for %s(%r)' % (result, expect, fname, input_string))
def test_sorted_dict(self):
items = [('one', 1), ('two', 2), ('three', 3), ('four', 4), ('five', 5)]
s = simplejson.dumps(dict(items), sort_keys=True)
self.assertEqual(s, '{"five": 5, "four": 4, "one": 1, "three": 3, "two": 2}')
| unlicense |
pygeek/django | tests/modeltests/delete/models.py | 115 | 3670 | from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class R(models.Model):
is_default = models.BooleanField(default=False)
def __str__(self):
return "%s" % self.pk
get_default_r = lambda: R.objects.get_or_create(is_default=True)[0]
class S(models.Model):
r = models.ForeignKey(R)
class T(models.Model):
s = models.ForeignKey(S)
class U(models.Model):
t = models.ForeignKey(T)
class RChild(R):
pass
class A(models.Model):
name = models.CharField(max_length=30)
auto = models.ForeignKey(R, related_name="auto_set")
auto_nullable = models.ForeignKey(R, null=True,
related_name='auto_nullable_set')
setvalue = models.ForeignKey(R, on_delete=models.SET(get_default_r),
related_name='setvalue')
setnull = models.ForeignKey(R, on_delete=models.SET_NULL, null=True,
related_name='setnull_set')
setdefault = models.ForeignKey(R, on_delete=models.SET_DEFAULT,
default=get_default_r, related_name='setdefault_set')
setdefault_none = models.ForeignKey(R, on_delete=models.SET_DEFAULT,
default=None, null=True, related_name='setnull_nullable_set')
cascade = models.ForeignKey(R, on_delete=models.CASCADE,
related_name='cascade_set')
cascade_nullable = models.ForeignKey(R, on_delete=models.CASCADE, null=True,
related_name='cascade_nullable_set')
protect = models.ForeignKey(R, on_delete=models.PROTECT, null=True)
donothing = models.ForeignKey(R, on_delete=models.DO_NOTHING, null=True,
related_name='donothing_set')
child = models.ForeignKey(RChild, related_name="child")
child_setnull = models.ForeignKey(RChild, on_delete=models.SET_NULL, null=True,
related_name="child_setnull")
# A OneToOneField is just a ForeignKey unique=True, so we don't duplicate
# all the tests; just one smoke test to ensure on_delete works for it as
# well.
o2o_setnull = models.ForeignKey(R, null=True,
on_delete=models.SET_NULL, related_name="o2o_nullable_set")
def create_a(name):
a = A(name=name)
for name in ('auto', 'auto_nullable', 'setvalue', 'setnull', 'setdefault',
'setdefault_none', 'cascade', 'cascade_nullable', 'protect',
'donothing', 'o2o_setnull'):
r = R.objects.create()
setattr(a, name, r)
a.child = RChild.objects.create()
a.child_setnull = RChild.objects.create()
a.save()
return a
class M(models.Model):
m2m = models.ManyToManyField(R, related_name="m_set")
m2m_through = models.ManyToManyField(R, through="MR",
related_name="m_through_set")
m2m_through_null = models.ManyToManyField(R, through="MRNull",
related_name="m_through_null_set")
class MR(models.Model):
m = models.ForeignKey(M)
r = models.ForeignKey(R)
class MRNull(models.Model):
m = models.ForeignKey(M)
r = models.ForeignKey(R, null=True, on_delete=models.SET_NULL)
class Avatar(models.Model):
desc = models.TextField(null=True)
class User(models.Model):
avatar = models.ForeignKey(Avatar, null=True)
class HiddenUser(models.Model):
r = models.ForeignKey(R, related_name="+")
class HiddenUserProfile(models.Model):
user = models.ForeignKey(HiddenUser)
class M2MTo(models.Model):
pass
class M2MFrom(models.Model):
m2m = models.ManyToManyField(M2MTo)
class Parent(models.Model):
pass
class Child(Parent):
pass
class Base(models.Model):
pass
class RelToBase(models.Model):
base = models.ForeignKey(Base, on_delete=models.DO_NOTHING)
| bsd-3-clause |
ujnzxw/oh-my-earth | manage_momentum.py | 1 | 1653 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
'''
Created on Jun 04, 2017
@author ujnzxw <ujnzxw@gmail.com>
'''
import urllib,re,sys,os,random,json
from config import momentumdash_output_file
from utils import set_background, get_desktop_environment
class MomentumdashManager():
'''
momentumdash.com background image manager
'''
def __init__(self):
pass
def get_bg_picture(self):
'''
get background picture from https://momentumdash.com
'''
'''
background image json file:
https://momentumdash.com/app/backgrounds.json
'''
        json_url = "https://momentumdash.com/app/backgrounds.json"
        response = urllib.urlopen(json_url)
        json_data = json.loads(response.read())
        # pick a random entry from however many backgrounds the JSON lists
        index = random.randint(0, len(json_data['backgrounds']) - 1)
        filename = json_data['backgrounds'][index]['filename']
imgurl = 'https://momentumdash.com/backgrounds/' + filename
urllib.urlretrieve(imgurl, momentumdash_output_file)
def run(self):
''' main function '''
''' get background picture from momentumdash.com '''
print("Updating momentumdash image...")
self.get_bg_picture()
print("\nSaving to '%s'..." % (momentumdash_output_file))
''' set background picture as wallpaper '''
''' scaled, wallpaper, stretched, spanned '''
if not set_background(momentumdash_output_file, "stretched"):
exit("Your desktop environment '{}' is not supported.".format(get_desktop_environment()))
print("Done!")
if __name__ == "__main__":
b = MomentumdashManager()
b.run()
| mit |
RanadeepPolavarapu/kuma | vendor/packages/pygments/formatters/__init__.py | 44 | 3597 | # -*- coding: utf-8 -*-
"""
pygments.formatters
~~~~~~~~~~~~~~~~~~~
Pygments formatters.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import sys
import types
import fnmatch
from os.path import basename
from pygments.formatters._mapping import FORMATTERS
from pygments.plugin import find_plugin_formatters
from pygments.util import ClassNotFound, itervalues
__all__ = ['get_formatter_by_name', 'get_formatter_for_filename',
'get_all_formatters'] + list(FORMATTERS)
_formatter_cache = {} # classes by name
_pattern_cache = {}
def _fn_matches(fn, glob):
"""Return whether the supplied file name fn matches pattern filename."""
if glob not in _pattern_cache:
pattern = _pattern_cache[glob] = re.compile(fnmatch.translate(glob))
return pattern.match(fn)
return _pattern_cache[glob].match(fn)
def _load_formatters(module_name):
"""Load a formatter (and all others in the module too)."""
mod = __import__(module_name, None, None, ['__all__'])
for formatter_name in mod.__all__:
cls = getattr(mod, formatter_name)
_formatter_cache[cls.name] = cls
def get_all_formatters():
"""Return a generator for all formatter classes."""
# NB: this returns formatter classes, not info like get_all_lexers().
for info in itervalues(FORMATTERS):
if info[1] not in _formatter_cache:
_load_formatters(info[0])
yield _formatter_cache[info[1]]
for _, formatter in find_plugin_formatters():
yield formatter
def find_formatter_class(alias):
"""Lookup a formatter by alias.
Returns None if not found.
"""
for module_name, name, aliases, _, _ in itervalues(FORMATTERS):
if alias in aliases:
if name not in _formatter_cache:
_load_formatters(module_name)
return _formatter_cache[name]
for _, cls in find_plugin_formatters():
if alias in cls.aliases:
return cls
def get_formatter_by_name(_alias, **options):
"""Lookup and instantiate a formatter by alias.
Raises ClassNotFound if not found.
"""
cls = find_formatter_class(_alias)
if cls is None:
raise ClassNotFound("no formatter found for name %r" % _alias)
return cls(**options)
def get_formatter_for_filename(fn, **options):
"""Lookup and instantiate a formatter by filename pattern.
Raises ClassNotFound if not found.
"""
fn = basename(fn)
for modname, name, _, filenames, _ in itervalues(FORMATTERS):
for filename in filenames:
if _fn_matches(fn, filename):
if name not in _formatter_cache:
_load_formatters(modname)
return _formatter_cache[name](**options)
for cls in find_plugin_formatters():
for filename in cls.filenames:
if _fn_matches(fn, filename):
return cls(**options)
raise ClassNotFound("no formatter found for file name %r" % fn)
class _automodule(types.ModuleType):
"""Automatically import formatters."""
def __getattr__(self, name):
info = FORMATTERS.get(name)
if info:
_load_formatters(info[0])
cls = _formatter_cache[info[1]]
setattr(self, name, cls)
return cls
raise AttributeError(name)
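# Illustrative helper (added for this write-up, not part of upstream
# Pygments): formatters can be looked up either by class or by alias; with the
# standard mapping the 'html' alias resolves to HtmlFormatter. The module swap
# performed just below additionally makes attribute access on this package
# lazy, so formatter classes are only imported when first touched.
def _example_lazy_lookup(alias='html'):
    """Return True if class lookup and instance creation agree for ``alias``."""
    cls = find_formatter_class(alias)
    instance = get_formatter_by_name(alias)
    return cls is not None and type(instance) is cls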
oldmod = sys.modules[__name__]
newmod = _automodule(__name__)
newmod.__dict__.update(oldmod.__dict__)
sys.modules[__name__] = newmod
del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types
| mpl-2.0 |
Lightmatter/django-inlineformfield | .tox/py27/lib/python2.7/site-packages/django/contrib/staticfiles/utils.py | 114 | 1976 | import os
import fnmatch
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
def matches_patterns(path, patterns=None):
"""
Return True or False depending on whether the ``path`` should be
ignored (if it matches any pattern in ``ignore_patterns``).
"""
if patterns is None:
patterns = []
for pattern in patterns:
if fnmatch.fnmatchcase(path, pattern):
return True
return False
def get_files(storage, ignore_patterns=None, location=''):
"""
Recursively walk the storage directories yielding the paths
of all files that should be copied.
"""
if ignore_patterns is None:
ignore_patterns = []
directories, files = storage.listdir(location)
for fn in files:
if matches_patterns(fn, ignore_patterns):
continue
if location:
fn = os.path.join(location, fn)
yield fn
for dir in directories:
if matches_patterns(dir, ignore_patterns):
continue
if location:
dir = os.path.join(location, dir)
for fn in get_files(storage, ignore_patterns, dir):
yield fn
def check_settings(base_url=None):
"""
Checks if the staticfiles settings have sane values.
"""
if base_url is None:
base_url = settings.STATIC_URL
if not base_url:
raise ImproperlyConfigured(
"You're using the staticfiles app "
"without having set the required STATIC_URL setting.")
if settings.MEDIA_URL == base_url:
raise ImproperlyConfigured("The MEDIA_URL and STATIC_URL "
"settings must have different values")
if ((settings.MEDIA_ROOT and settings.STATIC_ROOT) and
(settings.MEDIA_ROOT == settings.STATIC_ROOT)):
raise ImproperlyConfigured("The MEDIA_ROOT and STATIC_ROOT "
"settings must have different values")
| mit |
herod2k/buildroot-linux-kernel-m6 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
Y_OFFSET = 100
RECT_HEIGHT = 100
RECT_SPACE = 50
EVENT_MARKING_WIDTH = 5
def __init__(self, sched_tracer, title, parent = None, id = -1):
wx.Frame.__init__(self, parent, id, title)
(self.screen_width, self.screen_height) = wx.GetDisplaySize()
self.screen_width -= 10
self.screen_height -= 10
self.zoom = 0.5
self.scroll_scale = 20
self.sched_tracer = sched_tracer
self.sched_tracer.set_root_win(self)
(self.ts_start, self.ts_end) = sched_tracer.interval()
self.update_width_virtual()
self.nr_rects = sched_tracer.nr_rectangles() + 1
self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
# whole window panel
self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
# scrollable container
self.scroll = wx.ScrolledWindow(self.panel)
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
self.scroll.EnableScrolling(True, True)
self.scroll.SetFocus()
# scrollable drawing area
self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Fit()
self.Fit()
self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
self.txt = None
self.Show(True)
def us_to_px(self, val):
return val / (10 ** 3) * self.zoom
def px_to_us(self, val):
return (val / self.zoom) * (10 ** 3)
def scroll_start(self):
(x, y) = self.scroll.GetViewStart()
return (x * self.scroll_scale, y * self.scroll_scale)
def scroll_start_us(self):
(x, y) = self.scroll_start()
return self.px_to_us(x)
def paint_rectangle_zone(self, nr, color, top_color, start, end):
offset_px = self.us_to_px(start - self.ts_start)
width_px = self.us_to_px(end - self.ts_start)
offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
width_py = RootFrame.RECT_HEIGHT
dc = self.dc
if top_color is not None:
(r, g, b) = top_color
top_color = wx.Colour(r, g, b)
brush = wx.Brush(top_color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
width_py -= RootFrame.EVENT_MARKING_WIDTH
offset_py += RootFrame.EVENT_MARKING_WIDTH
(r ,g, b) = color
color = wx.Colour(r, g, b)
brush = wx.Brush(color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
def update_rectangles(self, dc, start, end):
start += self.ts_start
end += self.ts_start
self.sched_tracer.fill_zone(start, end)
def on_paint(self, event):
dc = wx.PaintDC(self.scroll_panel)
self.dc = dc
width = min(self.width_virtual, self.screen_width)
(x, y) = self.scroll_start()
start = self.px_to_us(x)
end = self.px_to_us(x + width)
self.update_rectangles(dc, start, end)
def rect_from_ypixel(self, y):
y -= RootFrame.Y_OFFSET
rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
return -1
return rect
def update_summary(self, txt):
if self.txt:
self.txt.Destroy()
self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
def on_mouse_down(self, event):
(x, y) = event.GetPositionTuple()
rect = self.rect_from_ypixel(y)
if rect == -1:
return
t = self.px_to_us(x) + self.ts_start
self.sched_tracer.mouse_down(rect, t)
def update_width_virtual(self):
self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
def __zoom(self, x):
self.update_width_virtual()
(xpos, ypos) = self.scroll.GetViewStart()
xpos = self.us_to_px(x) / self.scroll_scale
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
self.Refresh()
def zoom_in(self):
x = self.scroll_start_us()
self.zoom *= 2
self.__zoom(x)
def zoom_out(self):
x = self.scroll_start_us()
self.zoom /= 2
self.__zoom(x)
def on_key_press(self, event):
key = event.GetRawKeyCode()
if key == ord("+"):
self.zoom_in()
return
if key == ord("-"):
self.zoom_out()
return
key = event.GetKeyCode()
(x, y) = self.scroll.GetViewStart()
if key == wx.WXK_RIGHT:
self.scroll.Scroll(x + 1, y)
elif key == wx.WXK_LEFT:
self.scroll.Scroll(x - 1, y)
elif key == wx.WXK_DOWN:
self.scroll.Scroll(x, y + 1)
elif key == wx.WXK_UP:
self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
BeDjango/intef-openedx | common/lib/xmodule/xmodule/modulestore/tests/test_split_w_old_mongo.py | 46 | 6683 | import datetime
import random
import unittest
import uuid
from nose.plugins.attrib import attr
import mock
from opaque_keys.edx.locator import CourseLocator, BlockUsageLocator
from xmodule.modulestore import ModuleStoreEnum
from xmodule.x_module import XModuleMixin
from xmodule.modulestore.inheritance import InheritanceMixin
from xmodule.modulestore.mongo import DraftMongoModuleStore
from xmodule.modulestore.split_mongo.split import SplitMongoModuleStore
from xmodule.modulestore.tests.mongo_connection import MONGO_PORT_NUM, MONGO_HOST
from xmodule.modulestore.tests.utils import MemoryCache
@attr('mongo')
class SplitWMongoCourseBootstrapper(unittest.TestCase):
"""
Helper for tests which need to construct split mongo & old mongo based courses to get interesting internal structure.
Override _create_course and after invoking the super() _create_course, have it call _create_item for
each xblock you want in the course.
This class ensures the db gets created, opened, and cleaned up in addition to creating the course
Defines the following attrs on self:
* user_id: a random non-registered mock user id
* split_mongo: a pointer to the split mongo instance
* draft_mongo: a pointer to the old draft instance
* split_course_key (CourseLocator): of the new course
* old_course_key: the SlashSpecifiedCourseKey for the course
"""
# Snippet of what would be in the django settings envs file
db_config = {
'host': MONGO_HOST,
'port': MONGO_PORT_NUM,
'db': 'test_xmodule',
}
modulestore_options = {
'default_class': 'xmodule.raw_module.RawDescriptor',
'fs_root': '',
'render_template': mock.Mock(return_value=""),
'xblock_mixins': (InheritanceMixin, XModuleMixin)
}
split_course_key = CourseLocator('test_org', 'test_course', 'runid', branch=ModuleStoreEnum.BranchName.draft)
def setUp(self):
self.db_config['collection'] = 'modulestore{0}'.format(uuid.uuid4().hex[:5])
self.user_id = random.getrandbits(32)
super(SplitWMongoCourseBootstrapper, self).setUp()
self.split_mongo = SplitMongoModuleStore(
None,
self.db_config,
**self.modulestore_options
)
self.addCleanup(self.split_mongo.db.connection.close)
self.addCleanup(self.tear_down_split)
self.draft_mongo = DraftMongoModuleStore(
None, self.db_config, branch_setting_func=lambda: ModuleStoreEnum.Branch.draft_preferred,
metadata_inheritance_cache_subsystem=MemoryCache(),
**self.modulestore_options
)
self.addCleanup(self.tear_down_mongo)
self.old_course_key = None
self.runtime = None
self._create_course()
def tear_down_split(self):
"""
Remove the test collections, close the db connection
"""
split_db = self.split_mongo.db
split_db.drop_collection(split_db.course_index.proxied_object)
split_db.drop_collection(split_db.structures.proxied_object)
split_db.drop_collection(split_db.definitions.proxied_object)
def tear_down_mongo(self):
"""
Remove the test collections, close the db connection
"""
split_db = self.split_mongo.db
# old_mongo doesn't give a db attr, but all of the dbs are the same
split_db.drop_collection(self.draft_mongo.collection.proxied_object)
def _create_item(self, category, name, data, metadata, parent_category, parent_name, draft=True, split=True):
"""
Create the item of the given category and block id in split and old mongo, add it to the optional
parent. The parent category is only needed because old mongo requires it for the id.
Note: if draft = False, it will create the draft and then publish it; so, it will overwrite any
existing draft for both the new item and the parent
"""
location = self.old_course_key.make_usage_key(category, name)
self.draft_mongo.create_item(
self.user_id,
location.course_key,
location.block_type,
block_id=location.block_id,
definition_data=data,
metadata=metadata,
runtime=self.runtime
)
if not draft:
self.draft_mongo.publish(location, self.user_id)
if isinstance(data, basestring):
fields = {'data': data}
else:
fields = data.copy()
fields.update(metadata)
if parent_name:
# add child to parent in mongo
parent_location = self.old_course_key.make_usage_key(parent_category, parent_name)
parent = self.draft_mongo.get_item(parent_location)
parent.children.append(location)
self.draft_mongo.update_item(parent, self.user_id)
if not draft:
self.draft_mongo.publish(parent_location, self.user_id)
# create child for split
if split:
self.split_mongo.create_child(
self.user_id,
BlockUsageLocator(
course_key=self.split_course_key,
block_type=parent_category,
block_id=parent_name
),
category,
block_id=name,
fields=fields
)
else:
if split:
self.split_mongo.create_item(
self.user_id,
self.split_course_key,
category,
block_id=name,
fields=fields
)
def _create_course(self, split=True):
"""
* some detached items
* some attached children
* some orphans
"""
metadata = {
'start': datetime.datetime(2000, 3, 13, 4),
'display_name': 'Migration test course',
}
data = {
'wiki_slug': 'test_course_slug'
}
fields = metadata.copy()
fields.update(data)
if split:
# split requires the course to be created separately from creating items
self.split_mongo.create_course(
self.split_course_key.org, self.split_course_key.course, self.split_course_key.run, self.user_id, fields=fields, root_block_id='runid'
)
old_course = self.draft_mongo.create_course(self.split_course_key.org, 'test_course', 'runid', self.user_id, fields=fields)
self.old_course_key = old_course.id
self.runtime = old_course.runtime
| agpl-3.0 |