repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
hoh/Billabong | billabong/tests/test_add.py | 1 | 2236 | # Copyright (c) 2015 "Hugo Herter http://hugoherter.com"
#
# This file is part of Billabong.
#
# Intercom is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Test file import into Billabong."""
import pytest
import json
from datetime import datetime
from billabong import billabong
from billabong.utils import json_handler
from billabong.settings import inventory, stores
STORE = stores[0]
HASH = "fc7d4f43945d94c874415e3bd9a6e181f8c84f8a36f586389405e391c01e48b2"
def test_add_file():
"""Test adding a file with a defined key."""
# Test using a know replicable :key:
meta = billabong.add_file('hello.txt', key=b'0'*32)
assert meta
assert meta['hash'].startswith('sha256-')
assert meta['hash'] == 'sha256-' + HASH
assert meta['info']['path']
assert meta['info']['filename']
assert meta['timestamp'] > 1450000000
assert isinstance(meta['datetime'], datetime)
inventory.delete(meta['id'])
STORE.delete(meta['blob'])
def test_add_random_key():
"""Test adding a file with no defined key."""
meta = billabong.add_file('lorem.txt')
assert meta
inventory.delete(meta['id'])
STORE.delete(meta['blob'])
def test_add_file_not_found():
"""Test that adding a file that does not exist fails."""
with pytest.raises(FileNotFoundError):
billabong.add_file('does not exist.txt')
def test_add_file_json():
"""Test that the a record can is JSON serializable."""
meta = billabong.add_file('hello.txt', key=b'0'*32)
assert len(json.dumps(meta, default=json_handler)) > 1 # JSON serializable
inventory.delete(meta['id'])
STORE.delete(meta['blob'])
| agpl-3.0 |
spectralDNS/shenfun | demo/NavierStokes.py | 1 | 1882 | """
Simple spectral Navier-Stokes solver
Not implemented for efficiency. For efficiency use the Navier-Stokes
solver in the https://github.com/spectralDNS/spectralDNS repository
"""
import numpy as np
from shenfun import *
nu = 0.000625
end_time = 0.1
dt = 0.01
N = (2**5, 2**5, 2**5)
V0 = FunctionSpace(N[0], 'F', dtype='D')
V1 = FunctionSpace(N[1], 'F', dtype='D')
V2 = FunctionSpace(N[2], 'F', dtype='d')
T = TensorProductSpace(comm, (V0, V1, V2), **{'planner_effort': 'FFTW_MEASURE'})
TV = VectorSpace(T)
u = TrialFunction(T)
v = TestFunction(T)
U = Array(TV)
U_hat = Function(TV)
P_hat = Function(T)
curl_hat = Function(TV)
W = Array(TV)
curl_ = Array(TV)
A = inner(grad(u), grad(v))
def LinearRHS(self, u, **params):
return nu*div(grad(u))
def NonlinearRHS(self, U, U_hat, dU, **params):
global TV, curl_hat, curl_, P_hat, W
curl_hat = project(curl(U_hat), TV, output_array=curl_hat)
curl_ = TV.backward(curl_hat, curl_)
U = U_hat.backward(U)
W[:] = np.cross(U, curl_, axis=0) # Nonlinear term in physical space
dU = project(W, TV, output_array=dU) # dU = W.forward(dU)
P_hat = A.solve(inner(div(dU), v), P_hat)
dU += inner(grad(P_hat), TestFunction(TV))
return dU
if __name__ == '__main__':
X = T.local_mesh(True)
for i, integrator in enumerate((ETD, RK4, ETDRK4)):
# Initialization
U[0] = np.sin(X[0])*np.cos(X[1])*np.cos(X[2])
U[1] = -np.cos(X[0])*np.sin(X[1])*np.cos(X[2])
U[2] = 0
U_hat = TV.forward(U, U_hat)
# Solve
integ = integrator(TV, L=LinearRHS, N=NonlinearRHS)
U_hat = integ.solve(U, U_hat, dt, (0, end_time))
# Check accuracy
U = U_hat.backward(U)
k = comm.reduce(0.5*np.sum(U*U)/np.prod(np.array(N)))
if comm.Get_rank() == 0:
assert np.round(k - 0.124953117517, (4, 7, 7)[i]) == 0
| bsd-2-clause |
datafolklabs/cement | cement/core/plugin.py | 1 | 1713 | """Cement core plugins module."""
from abc import abstractmethod
from ..core.interface import Interface
from ..core.handler import Handler
from ..utils.misc import minimal_logger
LOG = minimal_logger(__name__)
class PluginInterface(Interface):
"""
This class defines the Plugin Interface. Handlers that implement this
interface must provide the methods and attributes defined below. In
general, most implementations should sub-class from the provided
:class:`PluginHandler` base class as a starting point.
"""
class Meta:
#: String identifier of the interface.
interface = 'plugin'
@abstractmethod
def load_plugin(plugin_name):
"""
Load a plugin whose name is ``plugin_name``.
Args:
plugin_name (str): The name of the plugin to load.
"""
pass # pragma: nocover
@abstractmethod
def load_plugins(self, plugins):
"""
Load all plugins from ``plugins``.
Args:
plugins (list): A list of plugin names to load.
"""
pass # pragma: nocover
@abstractmethod
def get_loaded_plugins(self):
"""Returns a list of plugins that have been loaded."""
pass # pragma: nocover
@abstractmethod
def get_enabled_plugins(self):
"""Returns a list of plugins that are enabled in the config."""
pass # pragma: nocover
@abstractmethod
def get_disabled_plugins(self):
"""Returns a list of plugins that are disabled in the config."""
pass # pragma: nocover
class PluginHandler(PluginInterface, Handler):
"""
Plugin handler implementation.
"""
pass # pragma: nocover
| bsd-3-clause |
xinjiguaike/edx-platform | common/djangoapps/student/migrations/0010_auto__chg_field_courseenrollment_course_id.py | 188 | 9725 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'CourseEnrollment.course_id'
db.alter_column('student_courseenrollment', 'course_id', self.gf('django.db.models.fields.CharField')(max_length=255))
def backwards(self, orm):
# Changing field 'CourseEnrollment.course_id'
db.alter_column('student_courseenrollment', 'course_id', self.gf('django.db.models.fields.IntegerField')())
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'student.courseenrollment': {
'Meta': {'object_name': 'CourseEnrollment'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.pendingemailchange': {
'Meta': {'object_name': 'PendingEmailChange'},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_email': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.pendingnamechange': {
'Meta': {'object_name': 'PendingNameChange'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'rationale': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.registration': {
'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.userprofile': {
'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"},
'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'meta': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'student.usertestgroup': {
'Meta': {'object_name': 'UserTestGroup'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'})
}
}
complete_apps = ['student']
| agpl-3.0 |
gokudomatic/cobiv | cobiv/modules/database/sqlitedb/search/customtablestrategy.py | 1 | 6029 | import time
from cobiv.modules.database.sqlitedb.search.defaultstrategy import DefaultSearchStrategy
class CustomTableStrategy(DefaultSearchStrategy):
"""Common search strategy for custom tables. Given a custom table with a least a file key column, the strategy maps
automatically all the specified fields of the table to be sortable and searchable"""
def __init__(self, tablename, fields, file_key="file_key"):
"""Constructor
:param tablename: Name of the SQL table
:param fields: list of columns of the table to map as searchable and sortable
:param file_key: column name that is the foreign key to the file id
"""
super(CustomTableStrategy, self).__init__()
self.tablename = tablename
self.file_key_name = file_key
self.fields = fields
self.operator_functions = {
'in': [self.prepare_in, self.parse_in, self.join_query_core_tags],
'%': [self.prepare_in, self.parse_partial, self.join_in_query_core_tags],
'>': [self.prepare_greater_than, self.parse_greater_than, self.join_query_core_tags],
'<': [self.prepare_lower_than, self.parse_lower_than, self.join_query_core_tags],
'>=': [self.prepare_greater_than, self.parse_greater_equals, self.join_query_core_tags],
'<=': [self.prepare_lower_than, self.parse_lower_equals, self.join_query_core_tags],
'><': [self.prepare_in, self.parse_between, self.join_query_core_tags],
'YY': [self.prepare_in, self.parse_in_year, self.join_query_core_tags],
'YM': [self.prepare_in, self.parse_in_year_month, self.join_query_core_tags],
'YMD': [self.prepare_in, self.parse_in_year_month_day, self.join_query_core_tags]
}
def prepare(self, is_excluding, lists, kind, fn, values):
if not self.tablename in lists:
lists[self.tablename] = ({}, {})
to_include, to_exclude = lists[self.tablename]
self.prepare_function(to_exclude if is_excluding else to_include, fn, kind, values)
def process(self, lists, subqueries):
if not self.tablename in lists:
return
to_include, to_exclude = lists[self.tablename]
subquery = ""
if len(to_include) > 0:
for kind in to_include:
for fn in to_include[kind]:
values = to_include[kind][fn]
subquery += " and " * (len(subquery) > 0)
subquery += self.render_function(fn, kind, values, False)
if len(to_exclude) > 0:
for kind in to_exclude:
for fn in to_exclude[kind]:
values = to_exclude[kind][fn]
subquery += " and " * (len(subquery) > 0)
subquery += "not " + self.render_function(fn, kind, values, True)
subqueries.append((False, "select %s from %s where %s" % (self.file_key_name, self.tablename, subquery)))
def is_managing_kind(self, kind):
return kind in self.fields
def prepare_in(self, crit_list, fn, kind, values):
if not fn in crit_list[kind]:
crit_list[kind][fn] = []
crit_list[kind][fn].append(values)
def parse_in(self, kind, values_set):
result = ""
for values in values_set:
result += self.add_query(result, " and ", '%s in ("%s")' % (kind, '", "'.join(values)))
return result
def parse_partial(self, kind, values_set):
result = ""
for value in values_set:
result += self.add_query(result, " or ", '%s like "%s"' % (kind, value))
return result
def parse_greater_than(self, kind, value):
return 'cast(%s as float)>%s' % (kind, value)
def parse_lower_than(self, kind, value):
return 'cast(%s as float)<%s' % (kind, value)
def parse_greater_equals(self, kind, value):
return 'cast(%s as float)>=%s' % (kind, value)
def parse_lower_equals(self, kind, value):
return 'cast(%s as float)<=%s' % (kind, value)
def parse_between(self, kind, sets_values):
result = ''
for values in sets_values:
it = iter(values)
subquery = ''
for val_from in it:
val_to = next(it)
subquery += self.add_query(subquery, ' or ',
'cast(%s as float) between %s and %s' % (kind, val_from, val_to))
result += self.add_query(result, ' and ', '(%s)' % subquery)
return result
def parse_in_date(self, kind, sets_values, fn_from, fn_to):
subquery = ''
for values in sets_values:
for value in values:
date_from = fn_from(value)
date_to = fn_to(value)
subquery += self.add_query(subquery, ' or ', 'cast(%s as float) between %s and %s' % (kind,
time.mktime(
date_from.timetuple()),
time.mktime(
date_to.timetuple())))
return "(%s)" % subquery
# Joins
def join_query_core_tags(self, fn, kind, values, is_except=False):
return fn(kind, values)
def join_in_query_core_tags(self, fn, kind, valueset, is_except=False):
query = ''
joiner = ' or ' if is_except else ' and '
for values in valueset:
query += self.add_query(query, joiner, fn(kind, values))
return "%s (%s)" % ("not" if is_except else "", query)
def get_sort_query(self, kind, order, is_number):
return kind + ' desc' * order
def get_sort_field(self, kind, order, is_number):
return kind
| mit |
rizumu/django | tests/shortcuts/views.py | 87 | 2274 | from django.shortcuts import render, render_to_response
from django.template import RequestContext
def render_to_response_view(request):
return render_to_response('shortcuts/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
})
def render_to_response_view_with_multiple_templates(request):
return render_to_response([
'shortcuts/no_such_template.html',
'shortcuts/render_test.html',
], {
'foo': 'FOO',
'bar': 'BAR',
})
def render_to_response_view_with_content_type(request):
return render_to_response('shortcuts/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, content_type='application/x-rendertest')
def render_to_response_view_with_status(request):
return render_to_response('shortcuts/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, status=403)
def render_to_response_view_with_using(request):
using = request.GET.get('using')
return render_to_response('shortcuts/using.html', using=using)
def context_processor(request):
return {'bar': 'context processor output'}
def render_to_response_with_context_instance_misuse(request):
context_instance = RequestContext(request, {}, processors=[context_processor])
# Incorrect -- context_instance should be passed as a keyword argument.
return render_to_response('shortcuts/render_test.html', context_instance)
def render_view(request):
return render(request, 'shortcuts/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
})
def render_view_with_multiple_templates(request):
return render(request, [
'shortcuts/no_such_template.html',
'shortcuts/render_test.html',
], {
'foo': 'FOO',
'bar': 'BAR',
})
def render_view_with_content_type(request):
return render(request, 'shortcuts/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, content_type='application/x-rendertest')
def render_view_with_status(request):
return render(request, 'shortcuts/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, status=403)
def render_view_with_using(request):
using = request.GET.get('using')
return render(request, 'shortcuts/using.html', using=using)
| bsd-3-clause |
hojel/youtube-dl | youtube_dl/extractor/channel9.py | 27 | 11365 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
parse_filesize,
qualities,
)
class Channel9IE(InfoExtractor):
'''
Common extractor for channel9.msdn.com.
The type of provided URL (video or playlist) is determined according to
meta Search.PageType from web page HTML rather than URL itself, as it is
not always possible to do.
'''
IE_DESC = 'Channel 9'
IE_NAME = 'channel9'
_VALID_URL = r'https?://(?:www\.)?channel9\.msdn\.com/(?P<contentpath>.+)/?'
_TESTS = [
{
'url': 'http://channel9.msdn.com/Events/TechEd/Australia/2013/KOS002',
'md5': 'bbd75296ba47916b754e73c3a4bbdf10',
'info_dict': {
'id': 'Events/TechEd/Australia/2013/KOS002',
'ext': 'mp4',
'title': 'Developer Kick-Off Session: Stuff We Love',
'description': 'md5:c08d72240b7c87fcecafe2692f80e35f',
'duration': 4576,
'thumbnail': 're:http://.*\.jpg',
'session_code': 'KOS002',
'session_day': 'Day 1',
'session_room': 'Arena 1A',
'session_speakers': ['Ed Blankenship', 'Andrew Coates', 'Brady Gaster', 'Patrick Klug', 'Mads Kristensen'],
},
},
{
'url': 'http://channel9.msdn.com/posts/Self-service-BI-with-Power-BI-nuclear-testing',
'md5': 'b43ee4529d111bc37ba7ee4f34813e68',
'info_dict': {
'id': 'posts/Self-service-BI-with-Power-BI-nuclear-testing',
'ext': 'mp4',
'title': 'Self-service BI with Power BI - nuclear testing',
'description': 'md5:d1e6ecaafa7fb52a2cacdf9599829f5b',
'duration': 1540,
'thumbnail': 're:http://.*\.jpg',
'authors': ['Mike Wilmot'],
},
},
{
# low quality mp4 is best
'url': 'https://channel9.msdn.com/Events/CPP/CppCon-2015/Ranges-for-the-Standard-Library',
'info_dict': {
'id': 'Events/CPP/CppCon-2015/Ranges-for-the-Standard-Library',
'ext': 'mp4',
'title': 'Ranges for the Standard Library',
'description': 'md5:2e6b4917677af3728c5f6d63784c4c5d',
'duration': 5646,
'thumbnail': 're:http://.*\.jpg',
},
'params': {
'skip_download': True,
},
}
]
_RSS_URL = 'http://channel9.msdn.com/%s/RSS'
def _formats_from_html(self, html):
FORMAT_REGEX = r'''
(?x)
<a\s+href="(?P<url>[^"]+)">(?P<quality>[^<]+)</a>\s*
<span\s+class="usage">\((?P<note>[^\)]+)\)</span>\s*
(?:<div\s+class="popup\s+rounded">\s*
<h3>File\s+size</h3>\s*(?P<filesize>.*?)\s*
</div>)? # File size part may be missing
'''
quality = qualities((
'MP3', 'MP4',
'Low Quality WMV', 'Low Quality MP4',
'Mid Quality WMV', 'Mid Quality MP4',
'High Quality WMV', 'High Quality MP4'))
formats = [{
'url': x.group('url'),
'format_id': x.group('quality'),
'format_note': x.group('note'),
'format': '%s (%s)' % (x.group('quality'), x.group('note')),
'filesize_approx': parse_filesize(x.group('filesize')),
'quality': quality(x.group('quality')),
'vcodec': 'none' if x.group('note') == 'Audio only' else None,
} for x in list(re.finditer(FORMAT_REGEX, html))]
self._sort_formats(formats)
return formats
def _extract_title(self, html):
title = self._html_search_meta('title', html, 'title')
if title is None:
title = self._og_search_title(html)
TITLE_SUFFIX = ' (Channel 9)'
if title is not None and title.endswith(TITLE_SUFFIX):
title = title[:-len(TITLE_SUFFIX)]
return title
def _extract_description(self, html):
DESCRIPTION_REGEX = r'''(?sx)
<div\s+class="entry-content">\s*
<div\s+id="entry-body">\s*
(?P<description>.+?)\s*
</div>\s*
</div>
'''
m = re.search(DESCRIPTION_REGEX, html)
if m is not None:
return m.group('description')
return self._html_search_meta('description', html, 'description')
def _extract_duration(self, html):
m = re.search(r'"length": *"(?P<hours>\d{2}):(?P<minutes>\d{2}):(?P<seconds>\d{2})"', html)
return ((int(m.group('hours')) * 60 * 60) + (int(m.group('minutes')) * 60) + int(m.group('seconds'))) if m else None
def _extract_slides(self, html):
m = re.search(r'<a href="(?P<slidesurl>[^"]+)" class="slides">Slides</a>', html)
return m.group('slidesurl') if m is not None else None
def _extract_zip(self, html):
m = re.search(r'<a href="(?P<zipurl>[^"]+)" class="zip">Zip</a>', html)
return m.group('zipurl') if m is not None else None
def _extract_avg_rating(self, html):
m = re.search(r'<p class="avg-rating">Avg Rating: <span>(?P<avgrating>[^<]+)</span></p>', html)
return float(m.group('avgrating')) if m is not None else 0
def _extract_rating_count(self, html):
m = re.search(r'<div class="rating-count">\((?P<ratingcount>[^<]+)\)</div>', html)
return int(self._fix_count(m.group('ratingcount'))) if m is not None else 0
def _extract_view_count(self, html):
m = re.search(r'<li class="views">\s*<span class="count">(?P<viewcount>[^<]+)</span> Views\s*</li>', html)
return int(self._fix_count(m.group('viewcount'))) if m is not None else 0
def _extract_comment_count(self, html):
m = re.search(r'<li class="comments">\s*<a href="#comments">\s*<span class="count">(?P<commentcount>[^<]+)</span> Comments\s*</a>\s*</li>', html)
return int(self._fix_count(m.group('commentcount'))) if m is not None else 0
def _fix_count(self, count):
return int(str(count).replace(',', '')) if count is not None else None
def _extract_authors(self, html):
m = re.search(r'(?s)<li class="author">(.*?)</li>', html)
if m is None:
return None
return re.findall(r'<a href="/Niners/[^"]+">([^<]+)</a>', m.group(1))
def _extract_session_code(self, html):
m = re.search(r'<li class="code">\s*(?P<code>.+?)\s*</li>', html)
return m.group('code') if m is not None else None
def _extract_session_day(self, html):
m = re.search(r'<li class="day">\s*<a href="/Events/[^"]+">(?P<day>[^<]+)</a>\s*</li>', html)
return m.group('day').strip() if m is not None else None
def _extract_session_room(self, html):
m = re.search(r'<li class="room">\s*(?P<room>.+?)\s*</li>', html)
return m.group('room') if m is not None else None
def _extract_session_speakers(self, html):
return re.findall(r'<a href="/Events/Speakers/[^"]+">([^<]+)</a>', html)
def _extract_content(self, html, content_path):
# Look for downloadable content
formats = self._formats_from_html(html)
slides = self._extract_slides(html)
zip_ = self._extract_zip(html)
# Nothing to download
if len(formats) == 0 and slides is None and zip_ is None:
self._downloader.report_warning('None of recording, slides or zip are available for %s' % content_path)
return
# Extract meta
title = self._extract_title(html)
description = self._extract_description(html)
thumbnail = self._og_search_thumbnail(html)
duration = self._extract_duration(html)
avg_rating = self._extract_avg_rating(html)
rating_count = self._extract_rating_count(html)
view_count = self._extract_view_count(html)
comment_count = self._extract_comment_count(html)
common = {
'_type': 'video',
'id': content_path,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'avg_rating': avg_rating,
'rating_count': rating_count,
'view_count': view_count,
'comment_count': comment_count,
}
result = []
if slides is not None:
d = common.copy()
d.update({'title': title + '-Slides', 'url': slides})
result.append(d)
if zip_ is not None:
d = common.copy()
d.update({'title': title + '-Zip', 'url': zip_})
result.append(d)
if len(formats) > 0:
d = common.copy()
d.update({'title': title, 'formats': formats})
result.append(d)
return result
def _extract_entry_item(self, html, content_path):
contents = self._extract_content(html, content_path)
if contents is None:
return contents
if len(contents) > 1:
raise ExtractorError('Got more than one entry')
result = contents[0]
result['authors'] = self._extract_authors(html)
return result
def _extract_session(self, html, content_path):
contents = self._extract_content(html, content_path)
if contents is None:
return contents
session_meta = {
'session_code': self._extract_session_code(html),
'session_day': self._extract_session_day(html),
'session_room': self._extract_session_room(html),
'session_speakers': self._extract_session_speakers(html),
}
for content in contents:
content.update(session_meta)
return self.playlist_result(contents)
def _extract_list(self, content_path):
rss = self._download_xml(self._RSS_URL % content_path, content_path, 'Downloading RSS')
entries = [self.url_result(session_url.text, 'Channel9')
for session_url in rss.findall('./channel/item/link')]
title_text = rss.find('./channel/title').text
return self.playlist_result(entries, content_path, title_text)
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
content_path = mobj.group('contentpath')
webpage = self._download_webpage(url, content_path, 'Downloading web page')
page_type_m = re.search(r'<meta name="WT.entryid" content="(?P<pagetype>[^:]+)[^"]+"/>', webpage)
if page_type_m is not None:
page_type = page_type_m.group('pagetype')
if page_type == 'Entry': # Any 'item'-like page, may contain downloadable content
return self._extract_entry_item(webpage, content_path)
elif page_type == 'Session': # Event session page, may contain downloadable content
return self._extract_session(webpage, content_path)
elif page_type == 'Event':
return self._extract_list(content_path)
else:
raise ExtractorError('Unexpected WT.entryid %s' % page_type, expected=True)
else: # Assuming list
return self._extract_list(content_path)
| unlicense |
timothycrosley/WebBot | instant_templates/update_webbot_appengine/pygments/styles/vim.py | 26 | 1976 | # -*- coding: utf-8 -*-
"""
pygments.styles.vim
~~~~~~~~~~~~~~~~~~~
A highlighting style for Pygments, inspired by vim.
:copyright: Copyright 2006-2012 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace, Token
class VimStyle(Style):
"""
Styles somewhat like vim 7.0
"""
background_color = "#000000"
highlight_color = "#222222"
default_style = "#cccccc"
styles = {
Token: "#cccccc",
Whitespace: "",
Comment: "#000080",
Comment.Preproc: "",
Comment.Special: "bold #cd0000",
Keyword: "#cdcd00",
Keyword.Declaration: "#00cd00",
Keyword.Namespace: "#cd00cd",
Keyword.Pseudo: "",
Keyword.Type: "#00cd00",
Operator: "#3399cc",
Operator.Word: "#cdcd00",
Name: "",
Name.Class: "#00cdcd",
Name.Builtin: "#cd00cd",
Name.Exception: "bold #666699",
Name.Variable: "#00cdcd",
String: "#cd0000",
Number: "#cd00cd",
Generic.Heading: "bold #000080",
Generic.Subheading: "bold #800080",
Generic.Deleted: "#cd0000",
Generic.Inserted: "#00cd00",
Generic.Error: "#FF0000",
Generic.Emph: "italic",
Generic.Strong: "bold",
Generic.Prompt: "bold #000080",
Generic.Output: "#888",
Generic.Traceback: "#04D",
Error: "border:#FF0000"
}
| gpl-2.0 |
anirudhSK/chromium | chrome/test/functional/ispy/server/gs_bucket.py | 13 | 1932 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Implementation of CloudBucket using Google Cloud Storage as the backend."""
import os
import sys
import cloudstorage
from common import cloud_bucket
class GoogleCloudStorageBucket(cloud_bucket.BaseCloudBucket):
  """Subclass of cloud_bucket.CloudBucket with actual GS commands.

  All paths are relative to the bucket root; _full_path() converts them to
  the absolute "/bucket/path" form the cloudstorage library expects.
  """

  def __init__(self, bucket):
    """Initializes the bucket.

    Args:
      bucket: the name of the bucket to connect to.
    """
    self.bucket = '/' + bucket

  def _full_path(self, path):
    """Returns the absolute GS path ("/bucket/relative/path") for *path*."""
    return self.bucket + '/' + path.lstrip('/')

  # override
  def UploadFile(self, path, contents, content_type):
    """Writes *contents* to *path* with the given Content-Type."""
    gs_file = cloudstorage.open(
        self._full_path(path), 'w', content_type=content_type)
    # Close in a finally block so the handle is not leaked if write() fails.
    try:
      gs_file.write(contents)
    finally:
      gs_file.close()

  # override
  def DownloadFile(self, path):
    """Returns the contents of the file at *path*.

    Raises:
      Exception: annotated with the full GS path when the file cannot be
          opened or read.
    """
    try:
      gs_file = cloudstorage.open(self._full_path(path), 'r')
      # Inner finally guarantees the handle is closed even if read() fails;
      # any error (including from close) is still wrapped with the path.
      try:
        return gs_file.read()
      finally:
        gs_file.close()
    except Exception as e:
      raise Exception('%s: %s' % (self._full_path(path), str(e)))

  # override
  def UpdateFile(self, path, contents):
    """Overwrites an existing file; raises FileNotFoundError if absent."""
    # NOTE(review): existence check and write are not atomic; a concurrent
    # delete between them would recreate the file instead of failing.
    if not self.FileExists(path):
      raise cloud_bucket.FileNotFoundError
    gs_file = cloudstorage.open(self._full_path(path), 'w')
    try:
      gs_file.write(contents)
    finally:
      gs_file.close()

  # override
  def RemoveFile(self, path):
    """Deletes the file at *path*."""
    cloudstorage.delete(self._full_path(path))

  # override
  def FileExists(self, path):
    """Returns True iff a file exists at *path*."""
    try:
      cloudstorage.stat(self._full_path(path))
    except cloudstorage.NotFoundError:
      return False
    return True

  # override
  def GetImageURL(self, path):
    """Returns the app-relative URL serving the image stored at *path*."""
    return '/image?file_path=%s' % path

  # override
  def GetAllPaths(self, prefix):
    """Yields bucket-relative paths of all files whose path starts with
    *prefix* (lazily, as a generator)."""
    return (f.filename[len(self.bucket) + 1:] for f in
            cloudstorage.listbucket(self.bucket, prefix=prefix))
| bsd-3-clause |
adambrenecki/django | django/test/simple.py | 6 | 8799 | """
This module is pending deprecation as of Django 1.6 and will be removed in
version 1.8.
"""
from importlib import import_module
import json
import re
import unittest as real_unittest
import warnings
from django.db.models import get_app, get_apps
from django.test import _doctest as doctest
from django.test import runner
from django.test.utils import compare_xml, strip_quotes
# django.utils.unittest is deprecated, but so is django.test.simple,
# and the latter will be removed before the former.
from django.utils import unittest
from django.utils.module_loading import module_has_submodule
__all__ = ('DjangoTestSuiteRunner',)
warnings.warn(
    "The django.test.simple module and DjangoTestSuiteRunner are deprecated; "
    "use django.test.runner.DiscoverRunner instead.",
    DeprecationWarning)

# The module name for tests outside models.py
TEST_MODULE = 'tests'


def normalize_long_ints(s):
    """Strip the ``L`` suffix from Python 2 long literals (``22L`` -> ``22``)
    so numerically equal doctest output compares equal."""
    return re.sub(r'(?<![\w])(\d+)L(?![\w])', '\\1', s)


def normalize_decimals(s):
    """Rewrite ``Decimal('...')`` reprs to the double-quoted form so Decimal
    doctest output is comparable regardless of the __repr__ change made in
    Python 2.6."""
    return re.sub(r"Decimal\('(\d+(\.\d*)?)'\)",
                  lambda m: "Decimal(\"%s\")" % m.groups()[0], s)
class OutputChecker(doctest.OutputChecker):
    """Doctest output checker that also accepts numerically equivalent,
    XML-equivalent or JSON-equivalent output, not only textually identical
    output."""

    def check_output(self, want, got, optionflags):
        """
        The entry method for doctest output checking. Accepts the output if
        any of the child checkers does.
        """
        checkers = (
            self.check_output_default,
            self.check_output_numeric,
            self.check_output_xml,
            self.check_output_json,
        )
        return any(checker(want, got, optionflags) for checker in checkers)

    def check_output_default(self, want, got, optionflags):
        """
        Delegate to doctest's builtin comparator - not perfect, but good for
        most purposes.
        """
        return doctest.OutputChecker.check_output(self, want, got, optionflags)

    def check_output_numeric(self, want, got, optionflags):
        """Compare after normalizing long-integer suffixes (22L == 22) and
        Decimal reprs, since doctest's exact string comparison would treat
        those numerically equal values as different."""
        normalized_want = normalize_decimals(normalize_long_ints(want))
        normalized_got = normalize_decimals(normalize_long_ints(got))
        return doctest.OutputChecker.check_output(
            self, normalized_want, normalized_got, optionflags)

    def check_output_xml(self, want, got, optionflags):
        """Accept the output if want and got are equivalent XML documents."""
        try:
            return compare_xml(want, got)
        except Exception:
            return False

    def check_output_json(self, want, got, optionflags):
        """
        Accept the output if want and got decode to equal JSON data.
        """
        want, got = strip_quotes(want, got)
        try:
            want_data = json.loads(want)
            got_data = json.loads(got)
        except Exception:
            return False
        return want_data == got_data
class DocTestRunner(doctest.DocTestRunner):
    """Doctest runner that always enables ELLIPSIS, so doctests may elide
    parts of their expected output with ``...``."""
    def __init__(self, *args, **kwargs):
        doctest.DocTestRunner.__init__(self, *args, **kwargs)
        # Set after base init so the flag cannot be overridden by callers.
        self.optionflags = doctest.ELLIPSIS
# Shared checker instance used by every doctest suite built in this module.
doctestOutputChecker = OutputChecker()
def get_tests(app_module):
    """Return the 'tests' module that lives alongside *app_module*.

    Returns None when no such module exists.  When the tests module exists
    but itself fails to import, the original ImportError is re-raised so the
    failure is not silently swallowed.
    """
    name_parts = app_module.__name__.split('.')
    package_parts, tail = name_parts[:-1], name_parts[-1]
    try:
        return import_module('.'.join(package_parts + [TEST_MODULE]))
    except ImportError:
        # app_module points at either a models.py file or models/__init__.py,
        # so the tests module is in the same directory or one level up.
        if tail == 'models':
            search_root = import_module('.'.join(package_parts))
        else:
            search_root = app_module
        if module_has_submodule(search_root, TEST_MODULE):
            # The tests module exists, so the ImportError came from code
            # inside it - propagate.
            raise
        return None
def make_doctest(module):
    """Build a doctest suite for *module* using this module's lenient output
    checker and ELLIPSIS-enabled runner.

    Raises ValueError (from DocTestSuite) when the module has no doctests.
    """
    suite = doctest.DocTestSuite(
        module,
        checker=doctestOutputChecker,
        runner=DocTestRunner,
    )
    return suite
def _add_module_tests(suite, module):
    """Add *module*'s unit tests and doctests to *suite* (shared helper for
    the models module and the parallel tests module)."""
    # If the module has a suite() factory, use it.  Otherwise build the test
    # suite ourselves with the default loader.
    if hasattr(module, 'suite'):
        suite.addTest(module.suite())
    else:
        suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(module))
    # Doctests are collected in either case.
    try:
        suite.addTest(make_doctest(module))
    except ValueError:
        # No doctests in the module.
        pass


def build_suite(app_module):
    """
    Create a complete Django test suite for the provided application module.

    Collects unit tests and doctests from the models module itself and, when
    one exists, from the parallel 'tests' module.
    """
    suite = unittest.TestSuite()
    # Load unit and doctests from the models.py module.
    _add_module_tests(suite, app_module)
    # Check to see if a separate 'tests' module exists parallel to the
    # models module.
    test_module = get_tests(app_module)
    if test_module:
        _add_module_tests(suite, test_module)
    return suite
def build_test(label):
    """
    Construct a test case with the specified label. Label should be of the
    form model.TestClass or model.TestClass.test_method. Returns an
    instantiated test or test suite corresponding to the label provided.

    Raises ValueError for a malformed label or one that matches no test.
    """
    parts = label.split('.')
    if len(parts) < 2 or len(parts) > 3:
        raise ValueError("Test label '%s' should be of the form app.TestCase "
                         "or app.TestCase.test_method" % label)
    #
    # First, look for TestCase instances with a name that matches
    #
    app_module = get_app(parts[0])
    test_module = get_tests(app_module)
    TestClass = getattr(app_module, parts[1], None)
    # Couldn't find the test class in models.py; look in tests.py
    if TestClass is None:
        if test_module:
            TestClass = getattr(test_module, parts[1], None)
    # issubclass() raises TypeError when TestClass is still None (or is not a
    # class); the except below deliberately uses that to fall through to the
    # doctest search.
    try:
        if issubclass(TestClass, (unittest.TestCase, real_unittest.TestCase)):
            if len(parts) == 2: # label is app.TestClass
                try:
                    return unittest.TestLoader().loadTestsFromTestCase(
                        TestClass)
                except TypeError:
                    raise ValueError(
                        "Test label '%s' does not refer to a test class"
                        % label)
            else: # label is app.TestClass.test_method
                return TestClass(parts[2])
    except TypeError:
        # TestClass isn't a TestClass - it must be a method or normal class
        pass
    #
    # If there isn't a TestCase, look for a doctest that matches
    #
    tests = []
    for module in app_module, test_module:
        try:
            # make_doctest raises ValueError when the module has no doctests;
            # test_module may also be None here.
            doctests = make_doctest(module)
            # Now iterate over the suite, looking for doctests whose name
            # matches the pattern that was given
            for test in doctests:
                if test._dt_test.name in (
                        '%s.%s' % (module.__name__, '.'.join(parts[1:])),
                        '%s.__test__.%s' % (
                            module.__name__, '.'.join(parts[1:]))):
                    tests.append(test)
        except ValueError:
            # No doctests found.
            pass
    # If no tests were found, then we were given a bad test label.
    if not tests:
        raise ValueError("Test label '%s' does not refer to a test" % label)
    # Construct a suite out of the tests that matched.
    return unittest.TestSuite(tests)
class DjangoTestSuiteRunner(runner.DiscoverRunner):
    """Deprecated runner that builds suites per installed application from
    ``app[.TestCase[.test_method]]`` labels."""

    def build_suite(self, test_labels, extra_tests=None, **kwargs):
        """Assemble the suite from the given labels, falling back to every
        installed app when no labels are supplied; extra_tests are appended
        and the result is reordered with TestCases first."""
        suite = unittest.TestSuite()
        if test_labels:
            for label in test_labels:
                if '.' in label:
                    # Dotted label: a specific TestCase or test method.
                    suite.addTest(build_test(label))
                else:
                    # Bare label: every test in that application.
                    suite.addTest(build_suite(get_app(label)))
        else:
            for app in get_apps():
                suite.addTest(build_suite(app))
        for extra in (extra_tests or []):
            suite.addTest(extra)
        return runner.reorder_suite(suite, (unittest.TestCase,))
| bsd-3-clause |
ricardaw/pismdev | util/pism_config_editor.py | 5 | 7572 | #!/usr/bin/env python
# import modules:
## @package pism_config_editor
##
## A script simplifying creating configuration files to use with PISM's -config_override option.
##
## Does not take any command-line options; the only argument is a name of a
## NetCDF configuration file to start from. Run
## \verbatim
## pism_config_editor.py lib/pism_config.nc
## \endverbatim
## to edit lib/pism_config.nc or create a file based on lib/pism_config.nc
##
## \verbatim
## macbook:pism> pism_config_editor.py lib/pism_config.nc
## PISM config file editor: using attributes from 'pism_config' in 'lib/pism_config.nc'.
##
## # Please enter a parameter name or hit Return to save your changes.
## # You can also hit 'tab' for completions.
## >
## \endverbatim
## Next, start typing the name of a flag or parameter you want to change; hit [tab] to complete:
## \verbatim
## > sta[tab][tab]
## standard_gravity start_year
## > sta
## \endverbatim
## typing "n[tab][return]" produces:
## \verbatim
## > standard_gravity
##
## # Documentation: m s-2; acceleration due to gravity on Earth geoid
## # Current value: standard_gravity = 9.8100000000000005
## # New value: standard_gravity =
## \endverbatim
## enter the new value (10, for example), press [return]; you would see
## \verbatim
## # New value set: standard_gravity = 10.0
##
## ## List of changes so far:
## ## standard_gravity = 10.0
##
## # Please enter a parameter name or hit Return to save your changes.
## # You can also hit 'tab' for completions.
## >
## \endverbatim
##
## Now you can select a different parameter or hit [return] to save to a file:
## \verbatim
## Please enter the file name to save to or hit Return to save to the original file (lib/pism_config.nc).
## > g_equals_10.nc
## \endverbatim
## Next, press [return] if you edited a PISM config file containing \b all the
## parameters or type "pism_overrides[return]" to create a config to use with -config_override.
## \verbatim
## > pism_overrides
## # Created variable pism_overrides in g_equals_10.nc.
## Done.
## \endverbatim
import sys
from numpy import double
try:
import readline
except:
print "GNU readline library is not available."
sys.exit(0)
try:
from netCDF4 import Dataset as NC
except:
from netCDF3 import Dataset as NC
def list_completer(text, state, list):
    """Readline completer helper: return the state-th completion for *text*.

    Candidates are the entries of *list* (note: parameter name kept for
    backward compatibility even though it shadows the builtin) that start
    with *text*; entries ending in "_doc" are documenting strings and are
    skipped.  Returns None once *state* is past the last match, as the
    readline completer protocol requires.
    """
    # Comprehension instead of filter(lambda(x): ...): the tuple-parameter
    # lambda is Python-2-only syntax, and the comprehension works everywhere.
    matches = [x for x in list
               if x.startswith(text) and not x.endswith("_doc")]
    if state >= len(matches):
        return None
    return matches[state]
def edit_attr(dict, attr):
    """Interactively prompt for a new value of dict[attr] and return it.

    Shows the documenting string (dict[attr + "_doc"]) when present.  An
    empty answer keeps the current value.  Numeric input is converted to
    float; anything else is kept as a string.  Tab completion is disabled
    for the duration of the prompt and restored afterwards.
    """
    # Save/disable the outer completer so plain text entry works here.
    completer = readline.get_completer()
    readline.set_completer(None)
    current_value = dict[attr]
    try:
        print "\n# Documentation: %s" % dict[attr + "_doc"]
    except:
        # No "_doc" entry for this attribute - silently skip.
        pass
    print "# Current value: %s = %s" % (attr, str(current_value))
    while True:
        new_value = raw_input("# New value: %s = " % attr)
        if new_value == "":
            new_value = current_value
            print "# Using the current value (%s)" % str(current_value)
            break
        try:
            new_value = double(new_value) # try interpreting as a number
        except:
            pass # leave as a string
        break
    # Restore the caller's completer before returning.
    readline.set_completer(completer)
    return new_value
def main_loop(dict):
    """Main interactive loop: repeatedly prompt for parameter names to edit.

    Returns a {name: new_value} mapping of the edits made; an empty input
    line ends the loop.  Unknown parameter names are reported but do not
    abort the session.
    """
    changes = {}
    while True:
        print "\n# Please enter a parameter name or hit Return to save your changes.\n# You can also hit 'tab' for completions."
        attr = raw_input("> ")
        if attr == "":
            break
        try:
            old_value = dict[attr]
            new_value = edit_attr(dict, attr)
            changes[attr] = new_value
            # Only report when the value actually changed.
            if (old_value != new_value):
                print "# New value set: %s = %s\n" % (attr, str(new_value))
        except:
            print "ERROR: attribute '%s' was not found." % attr
    print "## List of changes so far:"
    for each in changes.keys():
        print "## %s = %s" % (each, str(changes[each]))
    return changes
def read(filename):
    """Reads attributes from a file.

    Opens the NetCDF file *filename*, locates the 'pism_config' or
    'pism_overrides' variable and returns (varname, attributes_dict).
    Exits the process when the file cannot be opened or neither variable
    exists.
    """
    try:
        nc = NC(filename)
    except:
        print "ERROR: can't open %s" % filename
        sys.exit(0)
    names = ['pism_config', 'pism_overrides']
    varname = None
    var = None
    # If both variables are present, the last one found wins
    # ('pism_overrides' takes precedence).
    for name in names:
        try:
            var = nc.variables[name]
            varname = name
        except:
            pass
    if var == None:
        print "ERROR: can't find 'pism_config' or 'pism_overrides' in '%s'." % filename
        sys.exit(0)
    # Copy all NetCDF attributes of the variable into a plain dict.
    attrs = var.ncattrs()
    dict = {}
    for each in attrs:
        dict[each] = getattr(var, each)
    nc.close()
    return (varname, dict)
def save(dict, changes, default_filename, default_varname):
    """Saves attributes stored in the dictionary changes, adding doc-strings from dict.

    Interactively asks for the output file name and variable name (empty
    input keeps the defaults).  Appends to an existing NetCDF file or
    creates a new NETCDF3_CLASSIC one.  Returns True on success, False when
    the file cannot be opened.
    """
    readline.set_completer(None)
    print "\nPlease enter the file name to save to or hit Return to save to the original file (%s)." % default_filename
    filename = raw_input("> ")
    if filename == "":
        filename = default_filename
    def varname_completer(text, state):
        # Completer offering only the two valid variable names.
        names = ['pism_config', 'pism_overrides']
        matches = filter(lambda(x): x.startswith(text), names)
        # NOTE(review): state < 2 with fewer matches raises IndexError;
        # readline swallows completer exceptions, but this looks unintended.
        if state < 2:
            return matches[state]
        else:
            return None
    readline.set_completer(varname_completer)
    print "# Please enter the variable name to use or hit Return to use '%s'." % default_varname
    varname = raw_input("> ")
    if varname == "":
        varname = default_varname
    try:
        nc = NC(filename, 'a') # append
    except:
        try:
            nc = NC(filename, 'w',format='NETCDF3_CLASSIC') # if not found, then create
        except:
            print "ERROR: can't open '%s'." % filename
            return False
    try:
        var = nc.variables[varname]
    except:
        var = nc.createVariable(varname, 'b')
        print "# Created variable %s in %s." % (varname, filename)
    # Write each changed attribute, carrying over its "_doc" string when the
    # source configuration has one.
    for each in changes.keys():
        try:
            doc = each + "_doc"
            setattr(var, doc, dict[doc])
        except:
            pass
        setattr(var, each, changes[each])
    nc.close()
    return True
from optparse import OptionParser
# --- Script entry point: parse arguments, edit interactively, then save. ---
parser = OptionParser()
parser.usage = """Run "%prog config.nc"
to edit config.nc or create a new configuration file (such as an "overrides" file)
based on config.nc"""
parser.description = "This scrips simplifies creating a customized PISM configuration file."
(options, args) = parser.parse_args()
if (len(args) != 1):
    print "Please specify an input file. Exiting..."
    sys.exit(1)
# Get the input filename:
try:
    filename = args[0]
except:
    sys.exit(0)
# Read attributes:
varname, dict = read(filename)
print "PISM config file editor: using attributes from '%s' in '%s'." % (varname, filename)
# Set up tab completion:
def complete(text, state):
    # Adapter binding the attribute names of this file to list_completer.
    return list_completer(text, state, dict.keys())
readline.parse_and_bind("tab: complete")
readline.set_completer(complete)
# Process user input:
changes = main_loop(dict)
if changes == {}:
    # Nothing was edited; nothing to save.
    sys.exit(0)
# Save to a file:
while True:
    result = save(dict, changes, filename, varname)
    if result == True:
        print "Done."
        break
    # save() failed (could not open the file) - offer a retry.
    print "Do you want to try a different file name? [y/n]"
    answer = raw_input()
    if answer not in ["y", "Y", "yes", "Yes", "YES"]:
        break
| gpl-2.0 |
smaty1/scrapy | scrapy/utils/response.py | 40 | 3133 | """
This module provides some useful functions for working with
scrapy.http.Response objects
"""
import os
import re
import weakref
import webbrowser
import tempfile
from twisted.web import http
from twisted.web.http import RESPONSES
from scrapy.utils.python import to_bytes
from w3lib import html
from scrapy.utils.decorators import deprecated
@deprecated
def body_or_str(*a, **kw):
    """Deprecated pass-through to scrapy.utils.iterators._body_or_str."""
    # Imported lazily - presumably to avoid a circular import with
    # scrapy.utils.iterators; confirm before moving to module level.
    from scrapy.utils.iterators import _body_or_str
    return _body_or_str(*a, **kw)
# Weak-keyed so cached entries disappear when their response is collected.
_baseurl_cache = weakref.WeakKeyDictionary()
def get_base_url(response):
    """Return the base url of the given response, joined with the response url"""
    try:
        return _baseurl_cache[response]
    except KeyError:
        # Only the leading 4096 characters are scanned: a <base> tag must
        # appear in <head>, near the top of the document.
        text = response.body_as_unicode()[0:4096]
        base_url = html.get_base_url(text, response.url, response.encoding)
        _baseurl_cache[response] = base_url
        return base_url
_noscript_re = re.compile(u'<noscript>.*?</noscript>', re.IGNORECASE | re.DOTALL)
_script_re = re.compile(u'<script.*?>.*?</script>', re.IGNORECASE | re.DOTALL)
# Weak-keyed per-response memoization of the parsed meta-refresh value.
_metaref_cache = weakref.WeakKeyDictionary()
def get_meta_refresh(response):
    """Parse the http-equiv refresh parameter from the given response.

    Only the first 4096 characters of the body are inspected, after
    removing <script>/<noscript> sections so refresh tags inside them are
    ignored.  Results are cached per response.
    """
    try:
        return _metaref_cache[response]
    except KeyError:
        text = response.body_as_unicode()[0:4096]
        for pattern in (_noscript_re, _script_re):
            text = pattern.sub(u'', text)
        _metaref_cache[response] = html.get_meta_refresh(
            text, response.url, response.encoding)
        return _metaref_cache[response]
def response_status_message(status):
    """Return status code plus status text descriptive message

    >>> response_status_message(200)
    '200 OK'
    >>> response_status_message(404)
    '404 Not Found'
    """
    message = http.responses.get(int(status))
    if message is None:
        # Non-standard/unknown codes used to render as e.g. "573 None";
        # give them a readable placeholder instead.
        message = 'Unknown Status'
    return '%s %s' % (status, message)
def response_httprepr(response):
    """Return raw HTTP representation (as bytes) of the given response.

    Provided only for reference: this is not the exact stream of bytes that
    was received (Twisted does not expose that).
    """
    parts = [
        b"HTTP/1.1 ",
        to_bytes(str(response.status)),
        b" ",
        to_bytes(RESPONSES.get(response.status, b'')),
        b"\r\n",
    ]
    if response.headers:
        parts.append(response.headers.to_string() + b"\r\n")
    parts.append(b"\r\n")
    parts.append(response.body)
    return b"".join(parts)
def open_in_browser(response, _openfunc=webbrowser.open):
    """Open the given response in a local web browser, populating the <base>
    tag for external links to work

    Writes the body to a temporary .html/.txt file and opens it with
    *_openfunc* (injectable for testing).  Raises TypeError for response
    types that are neither HtmlResponse nor TextResponse.
    """
    from scrapy.http import HtmlResponse, TextResponse
    # XXX: this implementation is a bit dirty and could be improved
    body = response.body
    if isinstance(response, HtmlResponse):
        # Inject a <base> tag (unless one exists) so relative links resolve
        # against the original URL rather than the local temp file.
        if b'<base' not in body:
            repl = '<head><base href="%s">' % response.url
            body = body.replace(b'<head>', to_bytes(repl))
        ext = '.html'
    elif isinstance(response, TextResponse):
        ext = '.txt'
    else:
        raise TypeError("Unsupported response type: %s" %
                        response.__class__.__name__)
    # The temp file is deliberately not removed: the browser opens it
    # asynchronously after this function returns.
    fd, fname = tempfile.mkstemp(ext)
    os.write(fd, body)
    os.close(fd)
    return _openfunc("file://%s" % fname)
| bsd-3-clause |
gsmartway/odoo | addons/account_sequence/account_sequence.py | 338 | 2534 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_move(osv.osv):
    """Extend account.move with an internal sequence number assigned when
    the move is posted."""
    _inherit = 'account.move'
    _columns = {
        'internal_sequence_number': fields.char('Internal Number',
            readonly=True, copy=False,
            help='Internal Sequence Number'),
    }
    def post(self, cr, uid, ids, context=None):
        """Post the moves, then stamp each one whose journal defines an
        internal sequence with that sequence's next number."""
        obj_sequence = self.pool.get('ir.sequence')
        res = super(account_move, self).post(cr, uid, ids, context=context)
        seq_no = False
        for move in self.browse(cr, uid, ids, context=context):
            if move.journal_id.internal_sequence_id:
                # Consume the next number of the journal's internal sequence.
                seq_no = obj_sequence.next_by_id(cr, uid, move.journal_id.internal_sequence_id.id, context=context)
                if seq_no:
                    self.write(cr, uid, [move.id], {'internal_sequence_number': seq_no})
        return res
class account_journal(osv.osv):
    """Extend account.journal with an optional internal numbering sequence."""
    _inherit = "account.journal"
    _columns = {
        'internal_sequence_id': fields.many2one('ir.sequence', 'Internal Sequence', help="This sequence will be used to maintain the internal number for the journal entries related to this journal."),
    }
class account_move_line(osv.osv):
    """Expose the parent move's internal sequence number on each line via a
    related (read-through) field."""
    _inherit = "account.move.line"
    _columns = {
        'internal_sequence_number': fields.related('move_id','internal_sequence_number', type='char', relation='account.move', help='Internal Sequence Number', string='Internal Number'),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
40223119/2015cda | static/Brython3.1.1-20150328-091302/Lib/test/pystone.py | 718 | 7379 | #! /usr/bin/python3.3
"""
"PYSTONE" Benchmark Program
Version: Python/1.1 (corresponds to C/1.1 plus 2 Pystone fixes)
Author: Reinhold P. Weicker, CACM Vol 27, No 10, 10/84 pg. 1013.
Translated from ADA to C by Rick Richardson.
Every method to preserve ADA-likeness has been used,
at the expense of C-ness.
Translated from C to Python by Guido van Rossum.
Version History:
Version 1.1 corrects two bugs in version 1.0:
First, it leaked memory: in Proc1(), NextRecord ends
up having a pointer to itself. I have corrected this
by zapping NextRecord.PtrComp at the end of Proc1().
Second, Proc3() used the operator != to compare a
record to None. This is rather inefficient and not
true to the intention of the original benchmark (where
a pointer comparison to None is intended; the !=
operator attempts to find a method __cmp__ to do value
comparison of the record). Version 1.1 runs 5-10
percent faster than version 1.0, so benchmark figures
of different versions can't be compared directly.
"""
LOOPS = 50000
from time import clock
__version__ = "1.1"
[Ident1, Ident2, Ident3, Ident4, Ident5] = range(1, 6)
class Record:
    """Mutable record type mirroring the ADA benchmark's record."""

    def __init__(self, PtrComp = None, Discr = 0, EnumComp = 0,
                       IntComp = 0, StringComp = 0):
        self.PtrComp = PtrComp
        self.Discr = Discr
        self.EnumComp = EnumComp
        self.IntComp = IntComp
        self.StringComp = StringComp

    def copy(self):
        """Return a shallow duplicate of this record."""
        # Attribute names match the constructor parameters exactly, so the
        # instance dict can be splatted straight back into the constructor.
        return Record(**vars(self))
# C-style boolean constants, kept from the C translation.
TRUE = 1
FALSE = 0
def main(loops=LOOPS):
    """Run the benchmark and print elapsed time and pystones/second."""
    benchtime, stones = pystones(loops)
    print("Pystone(%s) time for %d passes = %g" % \
          (__version__, loops, benchtime))
    print("This machine benchmarks at %g pystones/second" % stones)
def pystones(loops=LOOPS):
    """Return (benchmark_time_seconds, pystones_per_second) for *loops* passes."""
    return Proc0(loops)
# Global benchmark state, mirroring Dhrystone's globals.
IntGlob = 0
BoolGlob = FALSE
Char1Glob = '\0'
Char2Glob = '\0'
Array1Glob = [0]*51
# 51x51 matrix built from independent copies of Array1Glob's row.
Array2Glob = [x[:] for x in [Array1Glob]*51]
PtrGlb = None
PtrGlbNext = None
def Proc0(loops=LOOPS):
    """Main measurement procedure: run *loops* benchmark iterations and
    return (benchtime_seconds, loops_per_second)."""
    global IntGlob
    global BoolGlob
    global Char1Glob
    global Char2Glob
    global Array1Glob
    global Array2Glob
    global PtrGlb
    global PtrGlbNext
    # Time an empty loop first; its overhead is subtracted at the end.
    starttime = clock()
    for i in range(loops):
        pass
    nulltime = clock() - starttime
    # Set up the linked pair of records and fixture data.
    PtrGlbNext = Record()
    PtrGlb = Record()
    PtrGlb.PtrComp = PtrGlbNext
    PtrGlb.Discr = Ident1
    PtrGlb.EnumComp = Ident3
    PtrGlb.IntComp = 40
    PtrGlb.StringComp = "DHRYSTONE PROGRAM, SOME STRING"
    String1Loc = "DHRYSTONE PROGRAM, 1'ST STRING"
    Array2Glob[8][7] = 10
    starttime = clock()
    for i in range(loops):
        Proc5()
        Proc4()
        IntLoc1 = 2
        IntLoc2 = 3
        String2Loc = "DHRYSTONE PROGRAM, 2'ND STRING"
        EnumLoc = Ident2
        BoolGlob = not Func2(String1Loc, String2Loc)
        while IntLoc1 < IntLoc2:
            IntLoc3 = 5 * IntLoc1 - IntLoc2
            IntLoc3 = Proc7(IntLoc1, IntLoc2)
            IntLoc1 = IntLoc1 + 1
        Proc8(Array1Glob, Array2Glob, IntLoc1, IntLoc3)
        PtrGlb = Proc1(PtrGlb)
        CharIndex = 'A'
        while CharIndex <= Char2Glob:
            if EnumLoc == Func1(CharIndex, 'C'):
                EnumLoc = Proc6(Ident1)
            CharIndex = chr(ord(CharIndex)+1)
        IntLoc3 = IntLoc2 * IntLoc1
        # NOTE: "/" is true division under Python 3; the operands here are
        # 6 and 3, so the result is exact.
        IntLoc2 = IntLoc3 / IntLoc1
        IntLoc2 = 7 * (IntLoc3 - IntLoc2) - IntLoc1
        IntLoc1 = Proc2(IntLoc1)
    benchtime = clock() - starttime - nulltime
    # Guard against a zero measured time on very fast machines/clocks.
    if benchtime == 0.0:
        loopsPerBenchtime = 0.0
    else:
        loopsPerBenchtime = (loops / benchtime)
    return benchtime, loopsPerBenchtime
def Proc1(PtrParIn):
    """Pointer-manipulation kernel: rebuild the record chain hanging off
    PtrParIn from a copy of the global PtrGlb and return the (possibly
    replaced) input record."""
    PtrParIn.PtrComp = NextRecord = PtrGlb.copy()
    PtrParIn.IntComp = 5
    NextRecord.IntComp = PtrParIn.IntComp
    NextRecord.PtrComp = PtrParIn.PtrComp
    NextRecord.PtrComp = Proc3(NextRecord.PtrComp)
    if NextRecord.Discr == Ident1:
        NextRecord.IntComp = 6
        NextRecord.EnumComp = Proc6(PtrParIn.EnumComp)
        NextRecord.PtrComp = PtrGlb.PtrComp
        NextRecord.IntComp = Proc7(NextRecord.IntComp, 10)
    else:
        PtrParIn = NextRecord.copy()
    # Version 1.1 fix: zap the self-reference so the record chain does not
    # leak memory (see the module docstring).
    NextRecord.PtrComp = None
    return PtrParIn
def Proc2(IntParIO):
    """Arithmetic kernel; relies on Char1Glob being 'A' (set by Proc5):
    otherwise EnumLoc would be referenced before assignment."""
    IntLoc = IntParIO + 10
    while 1:
        if Char1Glob == 'A':
            IntLoc = IntLoc - 1
            IntParIO = IntLoc - IntGlob
            EnumLoc = Ident1
        # Always true on the first pass when Char1Glob == 'A'.
        if EnumLoc == Ident1:
            break
    return IntParIO
def Proc3(PtrParOut):
    """Return PtrGlb's pointee (or the input unchanged when PtrGlb is None,
    also setting IntGlob = 100); always refreshes PtrGlb.IntComp."""
    global IntGlob
    if PtrGlb is not None:
        PtrParOut = PtrGlb.PtrComp
    else:
        IntGlob = 100
    # NOTE: executed unconditionally - raises AttributeError if PtrGlb is
    # None; the benchmark always sets PtrGlb before calling this.
    PtrGlb.IntComp = Proc7(10, IntGlob)
    return PtrParOut
def Proc4():
    """Boolean/char kernel: sets the global Char2Glob to 'B'."""
    global Char2Glob
    # BoolLoc is deliberately computed and discarded (benchmark workload).
    BoolLoc = Char1Glob == 'A'
    BoolLoc = BoolLoc or BoolGlob
    Char2Glob = 'B'
def Proc5():
    """Reset globals: Char1Glob to 'A' (required by Proc2) and BoolGlob to
    FALSE."""
    global Char1Glob
    global BoolGlob
    Char1Glob = 'A'
    BoolGlob = FALSE
def Proc6(EnumParIn):
    """Enumeration-mapping kernel: translate EnumParIn to another Ident
    value (Ident2 depends on the global IntGlob)."""
    EnumParOut = EnumParIn
    if not Func3(EnumParIn):
        EnumParOut = Ident4
    if EnumParIn == Ident1:
        EnumParOut = Ident1
    elif EnumParIn == Ident2:
        if IntGlob > 100:
            EnumParOut = Ident1
        else:
            EnumParOut = Ident4
    elif EnumParIn == Ident3:
        EnumParOut = Ident2
    elif EnumParIn == Ident4:
        # Ident4 maps to whatever the Func3 check above chose.
        pass
    elif EnumParIn == Ident5:
        EnumParOut = Ident3
    return EnumParOut
def Proc7(IntParI1, IntParI2):
    """Arithmetic kernel: return IntParI2 + IntParI1 + 2, computed through
    the benchmark's two temporaries."""
    increment = IntParI1 + 2
    result = IntParI2 + increment
    return result
def Proc8(Array1Par, Array2Par, IntParI1, IntParI2):
    """Array-manipulation kernel: write fixed benchmark values into the 1-D
    and 2-D arrays around index IntParI1 + 5; also sets the module global
    IntGlob to 5."""
    global IntGlob
    base = IntParI1 + 5
    Array1Par[base] = IntParI2
    Array1Par[base + 1] = Array1Par[base]
    Array1Par[base + 30] = base
    for col in range(base, base + 2):
        Array2Par[base][col] = base
    Array2Par[base][base - 1] += 1
    Array2Par[base + 20][base] = Array1Par[base]
    IntGlob = 5
def Func1(CharPar1, CharPar2):
    """Character comparison kernel: Ident1 when the chars differ, Ident2
    when they are equal (redundant copies are part of the workload)."""
    CharLoc1 = CharPar1
    CharLoc2 = CharLoc1
    if CharLoc2 != CharPar2:
        return Ident1
    else:
        return Ident2
def Func2(StrParI1, StrParI2):
    """String comparison kernel; returns TRUE/FALSE.

    NOTE: the while loop only terminates when StrParI1[1] != StrParI2[2]
    (Func1 returning Ident1); the benchmark's fixed strings guarantee this.
    """
    IntLoc = 1
    while IntLoc <= 1:
        if Func1(StrParI1[IntLoc], StrParI2[IntLoc+1]) == Ident1:
            CharLoc = 'A'
            IntLoc = IntLoc + 1
    if CharLoc >= 'W' and CharLoc <= 'Z':
        IntLoc = 7
    # CharLoc is always 'A' here, so this branch falls through to the
    # lexicographic comparison of the two strings.
    if CharLoc == 'X':
        return TRUE
    else:
        if StrParI1 > StrParI2:
            IntLoc = IntLoc + 7
            return TRUE
        else:
            return FALSE
def Func3(EnumParIn):
    """Return TRUE iff EnumParIn equals Ident3."""
    EnumLoc = EnumParIn
    if EnumLoc == Ident3: return TRUE
    return FALSE
if __name__ == '__main__':
    import sys
    def error(msg):
        # Print a usage message to stderr and exit with status 100.
        print(msg, end=' ', file=sys.stderr)
        print("usage: %s [number_of_loops]" % sys.argv[0], file=sys.stderr)
        sys.exit(100)
    nargs = len(sys.argv) - 1
    if nargs > 1:
        error("%d arguments are too many;" % nargs)
    elif nargs == 1:
        # Optional argument: number of benchmark iterations.
        try: loops = int(sys.argv[1])
        except ValueError:
            error("Invalid argument %r;" % sys.argv[1])
    else:
        loops = LOOPS
    main(loops)
| gpl-3.0 |
t794104/ansible | test/units/modules/storage/netapp/test_na_ontap_job_schedule.py | 43 | 8947 | # (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
''' unit tests for Ansible module: na_ontap_job_schedule '''
from __future__ import print_function
import json
import pytest
from units.compat import unittest
from units.compat.mock import patch, Mock
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
import ansible.module_utils.netapp as netapp_utils
from ansible.modules.storage.netapp.na_ontap_job_schedule \
import NetAppONTAPJob as job_module # module under test
if not netapp_utils.has_netapp_lib():
pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
def set_module_args(args):
    """Serialize *args* into the stdin payload AnsibleModule reads during
    module creation, so the next module instantiation picks them up."""
    serialized = json.dumps({'ANSIBLE_MODULE_ARGS': args})
    basic._ANSIBLE_ARGS = to_bytes(serialized)  # pylint: disable=protected-access
class AnsibleExitJson(Exception):
    """Exception class to be raised by module.exit_json and caught by the test case"""
    # args[0] carries exit_json's kwargs (including 'changed') for asserts.
    pass
class AnsibleFailJson(Exception):
    """Exception class to be raised by module.fail_json and caught by the test case"""
    # args[0] carries fail_json's kwargs (including 'msg' and 'failed').
    pass
def exit_json(*args, **kwargs):  # pylint: disable=unused-argument
    """Replacement for AnsibleModule.exit_json: raise AnsibleExitJson
    carrying the kwargs (with 'changed' defaulted to False) instead of
    exiting the process."""
    kwargs.setdefault('changed', False)
    raise AnsibleExitJson(kwargs)
def fail_json(*args, **kwargs):  # pylint: disable=unused-argument
    """Replacement for AnsibleModule.fail_json: raise AnsibleFailJson
    carrying the kwargs plus failed=True instead of exiting the process."""
    payload = dict(kwargs, failed=True)
    raise AnsibleFailJson(payload)
class MockONTAPConnection(object):
    ''' mock server connection to ONTAP host '''
    def __init__(self, kind=None, data=None):
        ''' save arguments '''
        # kind selects which canned reply invoke_successfully builds;
        # data supplies the job details used by the builders.
        self.kind = kind
        self.params = data
        self.xml_in = None
        self.xml_out = None
    def invoke_successfully(self, xml, enable_tunneling):  # pylint: disable=unused-argument
        ''' mock invoke_successfully returning xml data '''
        self.xml_in = xml
        if self.kind == 'job':
            xml = self.build_job_schedule_cron_info(self.params)
        elif self.kind == 'job_multiple':
            xml = self.build_job_schedule_multiple_cron_info(self.params)
        # TODO: mock invoke_elem for autosupport calls
        elif self.kind == 'vserver':
            # NOTE(review): build_vserver_info is not defined on this class;
            # the 'vserver' kind would raise AttributeError if ever used.
            xml = self.build_vserver_info()
        self.xml_out = xml
        return xml
    def autosupport_log(self):
        ''' Mock autosupport log method, returns None '''
        return None
    @staticmethod
    def build_job_schedule_cron_info(job_details):
        ''' build xml data for vserser-info '''
        xml = netapp_utils.zapi.NaElement('xml')
        # Single-schedule reply with one cron-minute entry.
        attributes = {
            'num-records': 1,
            'attributes-list': {
                'job-schedule-cron-info': {
                    'job-schedule-name': job_details['name'],
                    'job-schedule-cron-minute': {
                        'cron-minute': job_details['minutes']
                    }
                }
            }
        }
        xml.translate_struct(attributes)
        return xml
    @staticmethod
    def build_job_schedule_multiple_cron_info(job_details):
        ''' build xml data for vserser-info '''
        print("CALLED MULTIPLE BUILD")
        xml = netapp_utils.zapi.NaElement('xml')
        # Reply with multiple cron-minute and cron-month entries, used to
        # exercise list handling in get_job_schedule.
        attributes = {
            'num-records': 1,
            'attributes-list': {
                'job-schedule-cron-info': {
                    'job-schedule-name': job_details['name'],
                    'job-schedule-cron-minute': [
                        {'cron-minute': '25'},
                        {'cron-minute': '35'}
                    ],
                    'job-schedule-cron-month': [
                        {'cron-month': '5'},
                        {'cron-month': '10'}
                    ]
                }
            }
        }
        xml.translate_struct(attributes)
        return xml
class TestMyModule(unittest.TestCase):
    ''' Unit tests for na_ontap_job_schedule '''
    def setUp(self):
        # Route AnsibleModule's exit_json/fail_json to the exception-raising
        # fakes above so pytest.raises can capture the module's result.
        self.mock_module_helper = patch.multiple(basic.AnsibleModule,
                                                 exit_json=exit_json,
                                                 fail_json=fail_json)
        self.mock_module_helper.start()
        self.addCleanup(self.mock_module_helper.stop)
        # Canned job details consumed by MockONTAPConnection's builders.
        self.mock_job = {
            'name': 'test_job',
            'minutes': '25'
        }
    def mock_args(self):
        # Minimal argument set accepted by the na_ontap_job_schedule module.
        return {
            'name': self.mock_job['name'],
            'job_minutes': [self.mock_job['minutes']],
            'hostname': 'test',
            'username': 'test_user',
            'password': 'test_pass!'
        }
    def get_job_mock_object(self, kind=None):
        """
        Helper method to return an na_ontap_job_schedule object
        :param kind: passes this param to MockONTAPConnection()
        :return: na_ontap_job_schedule object
        """
        job_obj = job_module()
        job_obj.autosupport_log = Mock(return_value=None)
        if kind is None:
            job_obj.server = MockONTAPConnection()
        else:
            job_obj.server = MockONTAPConnection(kind=kind, data=self.mock_job)
        return job_obj
    def test_module_fail_when_required_args_missing(self):
        ''' required arguments are reported as errors '''
        with pytest.raises(AnsibleFailJson) as exc:
            set_module_args({})
            job_module()
        print('Info: %s' % exc.value.args[0]['msg'])
    def test_get_nonexistent_job(self):
        ''' Test if get_job_schedule returns None for non-existent job '''
        # kind=None: the mock connection returns no records.
        set_module_args(self.mock_args())
        result = self.get_job_mock_object().get_job_schedule()
        assert result is None
    def test_get_existing_job(self):
        ''' Test if get_job_schedule retuns job details for existing job '''
        data = self.mock_args()
        set_module_args(data)
        result = self.get_job_mock_object('job').get_job_schedule()
        assert result['name'] == self.mock_job['name']
        assert result['job_minutes'] == data['job_minutes']
    def test_get_existing_job_multiple_minutes(self):
        ''' Test if get_job_schedule retuns job details for existing job '''
        # Exercises list-valued cron-minute/cron-month entries.
        set_module_args(self.mock_args())
        result = self.get_job_mock_object('job_multiple').get_job_schedule()
        print(str(result))
        assert result['name'] == self.mock_job['name']
        assert result['job_minutes'] == ['25', '35']
        assert result['job_months'] == ['5', '10']
    def test_create_error_missing_param(self):
        ''' Test if create throws an error if job_minutes is not specified'''
        data = self.mock_args()
        del data['job_minutes']
        set_module_args(data)
        with pytest.raises(AnsibleFailJson) as exc:
            self.get_job_mock_object('job').create_job_schedule()
        msg = 'Error: missing required parameter job_minutes for create'
        assert exc.value.args[0]['msg'] == msg
    def test_successful_create(self):
        ''' Test successful create '''
        set_module_args(self.mock_args())
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_job_mock_object().apply()
        assert exc.value.args[0]['changed']
    def test_create_idempotency(self):
        ''' Test create idempotency '''
        # kind='job': the schedule already exists, so apply() reports no change.
        set_module_args(self.mock_args())
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_job_mock_object('job').apply()
        assert not exc.value.args[0]['changed']
    def test_successful_delete(self):
        ''' Test delete existing job '''
        data = self.mock_args()
        data['state'] = 'absent'
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_job_mock_object('job').apply()
        assert exc.value.args[0]['changed']
    def test_delete_idempotency(self):
        ''' Test delete idempotency '''
        data = self.mock_args()
        data['state'] = 'absent'
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_job_mock_object().apply()
        assert not exc.value.args[0]['changed']
    def test_successful_modify(self):
        ''' Test successful modify job_minutes '''
        data = self.mock_args()
        data['job_minutes'] = ['20']
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_job_mock_object('job').apply()
        assert exc.value.args[0]['changed']
    def test_modify_idempotency(self):
        ''' Test modify idempotency '''
        data = self.mock_args()
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_job_mock_object('job').apply()
        assert not exc.value.args[0]['changed']
| gpl-3.0 |
hchen1202/django-react | virtualenv/lib/python3.6/site-packages/pyparsing.py | 77 | 231039 | # module pyparsing.py
#
# Copyright (c) 2003-2016 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__doc__ = \
"""
pyparsing module - Classes and methods to define and execute parsing grammars
The pyparsing module is an alternative approach to creating and executing simple grammars,
vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you
don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
provides a library of classes that you use to construct the grammar directly in Python.
Here is a program to parse "Hello, World!" (or any greeting of the form
C{"<salutation>, <addressee>!"}), built up using L{Word}, L{Literal}, and L{And} elements
(L{'+'<ParserElement.__add__>} operator gives L{And} expressions, strings are auto-converted to
L{Literal} expressions)::
from pyparsing import Word, alphas
# define grammar of a greeting
greet = Word(alphas) + "," + Word(alphas) + "!"
hello = "Hello, World!"
print (hello, "->", greet.parseString(hello))
The program outputs the following::
Hello, World! -> ['Hello', ',', 'World', '!']
The Python representation of the grammar is quite readable, owing to the self-explanatory
class names, and the use of '+', '|' and '^' operators.
The L{ParseResults} object returned from L{ParserElement.parseString<ParserElement.parseString>} can be accessed as a nested list, a dictionary, or an
object with named attributes.
The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
- extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
"""
__version__ = "2.2.0"
__versionTime__ = "06 Mar 2017 02:06 UTC"
__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>"
import string
from weakref import ref as wkref
import copy
import sys
import warnings
import re
import sre_constants
import collections
import pprint
import traceback
import types
from datetime import datetime
try:
from _thread import RLock
except ImportError:
from threading import RLock
try:
from collections import OrderedDict as _OrderedDict
except ImportError:
try:
from ordereddict import OrderedDict as _OrderedDict
except ImportError:
_OrderedDict = None
#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )

# Explicit public API of the module (consumed by `from pyparsing import *`).
__all__ = [
'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter',
'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',
'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums',
'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno',
'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity',
'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr', 'withClass',
'CloseMatch', 'tokenMap', 'pyparsing_common',
]
# Python 2/3 compatibility shims: normalize names (basestring, unichr, range,
# _ustr) so the rest of the module can be version-agnostic.
system_version = tuple(sys.version_info)[:3]
PY_3 = system_version[0] == 3
if PY_3:
    _MAX_INT = sys.maxsize
    basestring = str
    unichr = chr
    _ustr = str

    # build list of single arg builtins, that can be used as parse actions
    singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max]
else:
    # Python 2 branch: these names (xrange, unicode, __builtin__) only exist on 2.x.
    _MAX_INT = sys.maxint
    range = xrange

    def _ustr(obj):
        """Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
           str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
           then < returns the unicode object | encodes it with the default encoding | ... >.
        """
        if isinstance(obj,unicode):
            return obj
        try:
            # If this works, then _ustr(obj) has the same behaviour as str(obj), so
            # it won't break any existing code.
            return str(obj)
        except UnicodeEncodeError:
            # Else encode it
            # NOTE(review): relies on the Regex class defined later in this module
            # being importable at call time.
            ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace')
            xmlcharref = Regex(r'&#\d+;')
            xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:])
            return xmlcharref.transformString(ret)

    # build list of single arg builtins, tolerant of Python version, that can be used as parse actions
    singleArgBuiltins = []
    import __builtin__
    for fname in "sum len sorted reversed list tuple set any all min max".split():
        try:
            singleArgBuiltins.append(getattr(__builtin__,fname))
        except AttributeError:
            continue

# concrete type of a generator expression, used for isinstance checks
_generatorType = type((y for y in range(1)))
def _xml_escape(data):
"""Escape &, <, >, ", ', etc. in a string of data."""
# ampersand must be replaced first
from_symbols = '&><"\''
to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split())
for from_,to_ in zip(from_symbols, to_symbols):
data = data.replace(from_, to_)
return data
class _Constants(object):
pass
# Commonly-used character classes for building Word expressions.
alphas = string.ascii_uppercase + string.ascii_lowercase  # ASCII letters A-Za-z
nums = "0123456789"
hexnums = nums + "ABCDEFabcdef"  # hexadecimal digits
alphanums = alphas + nums
_bslash = chr(92)  # backslash, spelled via chr() to avoid escaping confusion
printables = "".join(c for c in string.printable if c not in string.whitespace)
class ParseBaseException(Exception):
    """Base exception class for all parsing runtime exceptions."""

    # Performance tuning: a *lot* of these get constructed during parsing,
    # so keep this constructor as small and fast as possible.
    def __init__( self, pstr, loc=0, msg=None, elem=None ):
        self.loc = loc
        if msg is None:
            # single-argument form: the string passed is the message itself
            self.msg, self.pstr = pstr, ""
        else:
            self.msg, self.pstr = msg, pstr
        self.parserElement = elem
        self.args = (pstr, loc, msg)

    @classmethod
    def _from_exception(cls, pe):
        """
        Internal factory to build one type of ParseException from another -
        avoids __init__ signature conflicts among subclasses.
        """
        return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)

    def __getattr__( self, aname ):
        """Computed attributes, by name:
           - lineno - returns the line number of the exception text
           - col - returns the column number of the exception text
           - line - returns the line containing the exception text
        """
        if aname == "lineno":
            return lineno( self.loc, self.pstr )
        if aname in ("col", "column"):
            return col( self.loc, self.pstr )
        if aname == "line":
            return line( self.loc, self.pstr )
        raise AttributeError(aname)

    def __str__( self ):
        return "%s (at char %d), (line:%d, col:%d)" % \
               ( self.msg, self.loc, self.lineno, self.column )

    def __repr__( self ):
        return _ustr(self)

    def markInputline( self, markerString = ">!<" ):
        """Extracts the exception line from the input string, and marks
           the location of the exception with a special symbol.
        """
        line_str = self.line
        line_column = self.column - 1
        if markerString:
            line_str = line_str[:line_column] + markerString + line_str[line_column:]
        return line_str.strip()

    def __dir__(self):
        return "lineno col line".split() + dir(type(self))
class ParseException(ParseBaseException):
    """
    Exception raised when a parse expression fails to match; supported
    attributes by name are:
     - lineno - returns the line number of the exception text
     - col - returns the column number of the exception text
     - line - returns the line containing the exception text

    Example::
        try:
            Word(nums).setName("integer").parseString("ABC")
        except ParseException as pe:
            print(pe)
            print("column: {}".format(pe.col))

    prints::
       Expected integer (at char 0), (line:1, col:1)
        column: 1
    """
    pass
class ParseFatalException(ParseBaseException):
    """User-throwable exception raised when inconsistent parse content is
       found; halts all parsing immediately."""
    pass
class ParseSyntaxException(ParseFatalException):
    """Identical to L{ParseFatalException}, but raised internally when an
       L{ErrorStop<And._ErrorStop>} ('-' operator) indicates that parsing must
       stop immediately because an unbacktrackable syntax error was found."""
    pass
#~ class ReparseException(ParseBaseException):
#~ """Experimental class - parse actions can raise this exception to cause
#~ pyparsing to reparse the input string:
#~ - with a modified input string, and/or
#~ - with a modified start location
#~ Set the values of the ReparseException in the constructor, and raise the
#~ exception in a parse action to cause pyparsing to use the new string/location.
#~ Setting the values as None causes no change to be made.
#~ """
#~ def __init_( self, newstring, restartLoc ):
#~ self.newParseText = newstring
#~ self.reparseLoc = restartLoc
class RecursiveGrammarException(Exception):
    """Exception raised by L{ParserElement.validate} if the grammar could be improperly recursive."""

    def __init__( self, parseElementList ):
        # remember the chain of elements that formed the recursive cycle
        self.parseElementTrace = parseElementList

    def __str__( self ):
        return "RecursiveGrammarException: %s" % self.parseElementTrace
class _ParseResultsWithOffset(object):
def __init__(self,p1,p2):
self.tup = (p1,p2)
def __getitem__(self,i):
return self.tup[i]
def __repr__(self):
return repr(self.tup[0])
def setOffset(self,i):
self.tup = (self.tup[0],i)
class ParseResults(object):
    """
    Structured parse results, to provide multiple means of access to the parsed data:
       - as a list (C{len(results)})
       - by list index (C{results[0], results[1]}, etc.)
       - by attribute (C{results.<resultsName>} - see L{ParserElement.setResultsName})

    Example::
        integer = Word(nums)
        date_str = (integer.setResultsName("year") + '/'
                    + integer.setResultsName("month") + '/'
                    + integer.setResultsName("day"))
        # equivalent form:
        # date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
        # parseString returns a ParseResults object
        result = date_str.parseString("1999/12/31")
        def test(s, fn=repr):
            print("%s -> %s" % (s, fn(eval(s))))
        test("list(result)")
        test("result[0]")
        test("result['month']")
        test("result.day")
        test("'month' in result")
        test("'minutes' in result")
        test("result.dump()", str)
    prints::
        list(result) -> ['1999', '/', '12', '/', '31']
        result[0] -> '1999'
        result['month'] -> '12'
        result.day -> '31'
        'month' in result -> True
        'minutes' in result -> False
        result.dump() -> ['1999', '/', '12', '/', '31']
        - day: 1999
        - month: 12
        - year: 1999
    """
    # Two-phase construction: __new__ may return an existing ParseResults
    # unchanged; __doinit tells __init__ (which Python calls regardless)
    # whether initialization still needs to run.
    def __new__(cls, toklist=None, name=None, asList=True, modal=True ):
        if isinstance(toklist, cls):
            return toklist
        retobj = object.__new__(cls)
        retobj.__doinit = True
        return retobj
    # Performance tuning: we construct a *lot* of these, so keep this
    # constructor as small and fast as possible
    def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance ):
        if self.__doinit:
            self.__doinit = False
            self.__name = None
            self.__parent = None
            self.__accumNames = {}
            self.__asList = asList
            self.__modal = modal
            if toklist is None:
                toklist = []
            if isinstance(toklist, list):
                self.__toklist = toklist[:]
            elif isinstance(toklist, _generatorType):
                self.__toklist = list(toklist)
            else:
                self.__toklist = [toklist]
            self.__tokdict = dict()
        # a non-empty name registers this result under that results name
        if name is not None and name:
            if not modal:
                self.__accumNames[name] = 0
            if isinstance(name,int):
                name = _ustr(name) # will always return a str, but use _ustr for consistency
            self.__name = name
            if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None,'',[])):
                if isinstance(toklist,basestring):
                    toklist = [ toklist ]
                if asList:
                    if isinstance(toklist,ParseResults):
                        self[name] = _ParseResultsWithOffset(toklist.copy(),0)
                    else:
                        self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)
                    self[name].__name = name
                else:
                    try:
                        self[name] = toklist[0]
                    except (KeyError,TypeError,IndexError):
                        self[name] = toklist
    def __getitem__( self, i ):
        # int/slice -> positional access; anything else -> named-result access
        if isinstance( i, (int,slice) ):
            return self.__toklist[i]
        else:
            if i not in self.__accumNames:
                return self.__tokdict[i][-1][0]
            else:
                return ParseResults([ v[0] for v in self.__tokdict[i] ])
    def __setitem__( self, k, v, isinstance=isinstance ):
        if isinstance(v,_ParseResultsWithOffset):
            self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
            sub = v[0]
        elif isinstance(k,(int,slice)):
            self.__toklist[k] = v
            sub = v
        else:
            self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
            sub = v
        if isinstance(sub,ParseResults):
            # weak ref avoids a parent<->child reference cycle
            sub.__parent = wkref(self)
    def __delitem__( self, i ):
        if isinstance(i,(int,slice)):
            mylen = len( self.__toklist )
            del self.__toklist[i]

            # convert int to slice
            if isinstance(i, int):
                if i < 0:
                    i += mylen
                i = slice(i, i+1)
            # get removed indices
            removed = list(range(*i.indices(mylen)))
            removed.reverse()
            # fixup indices in token dictionary
            for name,occurrences in self.__tokdict.items():
                for j in removed:
                    for k, (value, position) in enumerate(occurrences):
                        occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
        else:
            del self.__tokdict[i]
    def __contains__( self, k ):
        return k in self.__tokdict
    def __len__( self ): return len( self.__toklist )
    def __bool__(self): return ( not not self.__toklist )
    __nonzero__ = __bool__
    def __iter__( self ): return iter( self.__toklist )
    def __reversed__( self ): return iter( self.__toklist[::-1] )
    # Internal iteration helpers; exposed under version-appropriate public
    # names (keys/values/items vs iterkeys/itervalues/iteritems) below.
    def _iterkeys( self ):
        if hasattr(self.__tokdict, "iterkeys"):
            return self.__tokdict.iterkeys()
        else:
            return iter(self.__tokdict)
    def _itervalues( self ):
        return (self[k] for k in self._iterkeys())
    def _iteritems( self ):
        return ((k, self[k]) for k in self._iterkeys())
    if PY_3:
        keys = _iterkeys
        """Returns an iterator of all named result keys (Python 3.x only)."""

        values = _itervalues
        """Returns an iterator of all named result values (Python 3.x only)."""

        items = _iteritems
        """Returns an iterator of all named result key-value tuples (Python 3.x only)."""

    else:
        iterkeys = _iterkeys
        """Returns an iterator of all named result keys (Python 2.x only)."""

        itervalues = _itervalues
        """Returns an iterator of all named result values (Python 2.x only)."""

        iteritems = _iteritems
        """Returns an iterator of all named result key-value tuples (Python 2.x only)."""

        def keys( self ):
            """Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x)."""
            return list(self.iterkeys())

        def values( self ):
            """Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x)."""
            return list(self.itervalues())

        def items( self ):
            """Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x)."""
            return list(self.iteritems())

    def haskeys( self ):
        """Since keys() returns an iterator, this method is helpful in bypassing
           code that looks for the existence of any defined results names."""
        return bool(self.__tokdict)

    def pop( self, *args, **kwargs):
        """
        Removes and returns item at specified index (default=C{last}).
        Supports both C{list} and C{dict} semantics for C{pop()}. If passed no
        argument or an integer argument, it will use C{list} semantics
        and pop tokens from the list of parsed tokens. If passed a
        non-integer argument (most likely a string), it will use C{dict}
        semantics and pop the corresponding value from any defined
        results names. A second default return value argument is
        supported, just as in C{dict.pop()}.

        Example::
            def remove_first(tokens):
                tokens.pop(0)
            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
            print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321']

            label = Word(alphas)
            patt = label("LABEL") + OneOrMore(Word(nums))
            print(patt.parseString("AAB 123 321").dump())

            # Use pop() in a parse action to remove named result (note that corresponding value is not
            # removed from list form of results)
            def remove_LABEL(tokens):
                tokens.pop("LABEL")
                return tokens
            patt.addParseAction(remove_LABEL)
            print(patt.parseString("AAB 123 321").dump())
        prints::
            ['AAB', '123', '321']
            - LABEL: AAB
            ['AAB', '123', '321']
        """
        if not args:
            args = [-1]
        for k,v in kwargs.items():
            if k == 'default':
                args = (args[0], v)
            else:
                raise TypeError("pop() got an unexpected keyword argument '%s'" % k)
        if (isinstance(args[0], int) or
                        len(args) == 1 or
                        args[0] in self):
            index = args[0]
            ret = self[index]
            del self[index]
            return ret
        else:
            defaultvalue = args[1]
            return defaultvalue

    def get(self, key, defaultValue=None):
        """
        Returns named result matching the given key, or if there is no
        such name, then returns the given C{defaultValue} or C{None} if no
        C{defaultValue} is specified.

        Similar to C{dict.get()}.

        Example::
            integer = Word(nums)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")

            result = date_str.parseString("1999/12/31")
            print(result.get("year")) # -> '1999'
            print(result.get("hour", "not specified")) # -> 'not specified'
            print(result.get("hour")) # -> None
        """
        if key in self:
            return self[key]
        else:
            return defaultValue

    def insert( self, index, insStr ):
        """
        Inserts new element at location index in the list of parsed tokens.

        Similar to C{list.insert()}.

        Example::
            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']

            # use a parse action to insert the parse location in the front of the parsed results
            def insert_locn(locn, tokens):
                tokens.insert(0, locn)
            print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321']
        """
        self.__toklist.insert(index, insStr)
        # fixup indices in token dictionary
        for name,occurrences in self.__tokdict.items():
            for k, (value, position) in enumerate(occurrences):
                occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))

    def append( self, item ):
        """
        Add single element to end of ParseResults list of elements.

        Example::
            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']

            # use a parse action to compute the sum of the parsed integers, and add it to the end
            def append_sum(tokens):
                tokens.append(sum(map(int, tokens)))
            print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444]
        """
        self.__toklist.append(item)

    def extend( self, itemseq ):
        """
        Add sequence of elements to end of ParseResults list of elements.

        Example::
            patt = OneOrMore(Word(alphas))

            # use a parse action to append the reverse of the matched strings, to make a palindrome
            def make_palindrome(tokens):
                tokens.extend(reversed([t[::-1] for t in tokens]))
                return ''.join(tokens)
            print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
        """
        if isinstance(itemseq, ParseResults):
            self += itemseq
        else:
            self.__toklist.extend(itemseq)

    def clear( self ):
        """
        Clear all elements and results names.
        """
        del self.__toklist[:]
        self.__tokdict.clear()

    def __getattr__( self, name ):
        try:
            return self[name]
        except KeyError:
            return ""
        # NOTE(review): everything below is unreachable dead code - the
        # try/except above always returns. Kept byte-for-byte for fidelity.
        if name in self.__tokdict:
            if name not in self.__accumNames:
                return self.__tokdict[name][-1][0]
            else:
                return ParseResults([ v[0] for v in self.__tokdict[name] ])
        else:
            return ""

    def __add__( self, other ):
        ret = self.copy()
        ret += other
        return ret

    def __iadd__( self, other ):
        # merge named results, shifting the other side's offsets past our tokens
        if other.__tokdict:
            offset = len(self.__toklist)
            addoffset = lambda a: offset if a<0 else a+offset
            otheritems = other.__tokdict.items()
            otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
                                for (k,vlist) in otheritems for v in vlist]
            for k,v in otherdictitems:
                self[k] = v
                if isinstance(v[0],ParseResults):
                    v[0].__parent = wkref(self)
        self.__toklist += other.__toklist
        self.__accumNames.update( other.__accumNames )
        return self

    def __radd__(self, other):
        if isinstance(other,int) and other == 0:
            # useful for merging many ParseResults using sum() builtin
            return self.copy()
        else:
            # this may raise a TypeError - so be it
            return other + self

    def __repr__( self ):
        return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )

    def __str__( self ):
        return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']'

    def _asStringList( self, sep='' ):
        # flatten nested ParseResults into a single list of strings
        out = []
        for item in self.__toklist:
            if out and sep:
                out.append(sep)
            if isinstance( item, ParseResults ):
                out += item._asStringList()
            else:
                out.append( _ustr(item) )
        return out

    def asList( self ):
        """
        Returns the parse results as a nested list of matching tokens, all converted to strings.

        Example::
            patt = OneOrMore(Word(alphas))
            result = patt.parseString("sldkj lsdkj sldkj")
            # even though the result prints in string-like form, it is actually a pyparsing ParseResults
            print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']

            # Use asList() to create an actual list
            result_list = result.asList()
            print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
        """
        return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist]

    def asDict( self ):
        """
        Returns the named parse results as a nested dictionary.

        Example::
            integer = Word(nums)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")

            result = date_str.parseString('12/31/1999')
            print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})

            result_dict = result.asDict()
            print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}

            # even though a ParseResults supports dict-like access, sometime you just need to have a dict
            import json
            print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
            print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}
        """
        if PY_3:
            item_fn = self.items
        else:
            item_fn = self.iteritems

        def toItem(obj):
            if isinstance(obj, ParseResults):
                if obj.haskeys():
                    return obj.asDict()
                else:
                    return [toItem(v) for v in obj]
            else:
                return obj

        return dict((k,toItem(v)) for k,v in item_fn())

    def copy( self ):
        """
        Returns a new copy of a C{ParseResults} object.
        """
        ret = ParseResults( self.__toklist )
        ret.__tokdict = self.__tokdict.copy()
        ret.__parent = self.__parent
        ret.__accumNames.update( self.__accumNames )
        ret.__name = self.__name
        return ret

    def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
        """
        (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.
        """
        nl = "\n"
        out = []
        namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items()
                                                            for v in vlist)
        nextLevelIndent = indent + "  "

        # collapse out indents if formatting is not desired
        if not formatted:
            indent = ""
            nextLevelIndent = ""
            nl = ""

        selfTag = None
        if doctag is not None:
            selfTag = doctag
        else:
            if self.__name:
                selfTag = self.__name

        if not selfTag:
            if namedItemsOnly:
                return ""
            else:
                selfTag = "ITEM"

        out += [ nl, indent, "<", selfTag, ">" ]

        for i,res in enumerate(self.__toklist):
            if isinstance(res,ParseResults):
                if i in namedItems:
                    out += [ res.asXML(namedItems[i],
                                        namedItemsOnly and doctag is None,
                                        nextLevelIndent,
                                        formatted)]
                else:
                    out += [ res.asXML(None,
                                        namedItemsOnly and doctag is None,
                                        nextLevelIndent,
                                        formatted)]
            else:
                # individual token, see if there is a name for it
                resTag = None
                if i in namedItems:
                    resTag = namedItems[i]
                if not resTag:
                    if namedItemsOnly:
                        continue
                    else:
                        resTag = "ITEM"
                xmlBodyText = _xml_escape(_ustr(res))
                out += [ nl, nextLevelIndent, "<", resTag, ">",
                                                xmlBodyText,
                                                "</", resTag, ">" ]

        out += [ nl, indent, "</", selfTag, ">" ]
        return "".join(out)

    def __lookup(self,sub):
        # reverse lookup: find the results name under which `sub` was stored
        for k,vlist in self.__tokdict.items():
            for v,loc in vlist:
                if sub is v:
                    return k
        return None

    def getName(self):
        r"""
        Returns the results name for this token expression. Useful when several
        different expressions might match at a particular location.

        Example::
            integer = Word(nums)
            ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
            house_number_expr = Suppress('#') + Word(nums, alphanums)
            user_data = (Group(house_number_expr)("house_number")
                        | Group(ssn_expr)("ssn")
                        | Group(integer)("age"))
            user_info = OneOrMore(user_data)

            result = user_info.parseString("22 111-22-3333 #221B")
            for item in result:
                print(item.getName(), ':', item[0])
        prints::
            age : 22
            ssn : 111-22-3333
            house_number : 221B
        """
        if self.__name:
            return self.__name
        elif self.__parent:
            par = self.__parent()
            if par:
                return par.__lookup(self)
            else:
                return None
        elif (len(self) == 1 and
               len(self.__tokdict) == 1 and
               next(iter(self.__tokdict.values()))[0][1] in (0,-1)):
            return next(iter(self.__tokdict.keys()))
        else:
            return None

    def dump(self, indent='', depth=0, full=True):
        """
        Diagnostic method for listing out the contents of a C{ParseResults}.
        Accepts an optional C{indent} argument so that this string can be embedded
        in a nested display of other data.

        Example::
            integer = Word(nums)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")

            result = date_str.parseString('12/31/1999')
            print(result.dump())
        prints::
            ['12', '/', '31', '/', '1999']
            - day: 1999
            - month: 31
            - year: 12
        """
        out = []
        NL = '\n'
        out.append( indent+_ustr(self.asList()) )
        if full:
            if self.haskeys():
                items = sorted((str(k), v) for k,v in self.items())
                for k,v in items:
                    if out:
                        out.append(NL)
                    out.append( "%s%s- %s: " % (indent,('  '*depth), k) )
                    if isinstance(v,ParseResults):
                        if v:
                            out.append( v.dump(indent,depth+1) )
                        else:
                            out.append(_ustr(v))
                    else:
                        out.append(repr(v))
            elif any(isinstance(vv,ParseResults) for vv in self):
                v = self
                for i,vv in enumerate(v):
                    if isinstance(vv,ParseResults):
                        out.append("\n%s%s[%d]:\n%s%s%s" % (indent,('  '*(depth)),i,indent,('  '*(depth+1)),vv.dump(indent,depth+1) ))
                    else:
                        out.append("\n%s%s[%d]:\n%s%s%s" % (indent,('  '*(depth)),i,indent,('  '*(depth+1)),_ustr(vv)))

        return "".join(out)

    def pprint(self, *args, **kwargs):
        """
        Pretty-printer for parsed results as a list, using the C{pprint} module.
        Accepts additional positional or keyword args as defined for the
        C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint})

        Example::
            ident = Word(alphas, alphanums)
            num = Word(nums)
            func = Forward()
            term = ident | num | Group('(' + func + ')')
            func <<= ident + Group(Optional(delimitedList(term)))
            result = func.parseString("fna a,b,(fnb c,d,200),100")
            result.pprint(width=40)
        prints::
            ['fna',
             ['a',
              'b',
              ['(', 'fnb', ['c', 'd', '200'], ')'],
              '100']]
        """
        pprint.pprint(self.asList(), *args, **kwargs)

    # add support for pickle protocol
    def __getstate__(self):
        return ( self.__toklist,
                 ( self.__tokdict.copy(),
                   self.__parent is not None and self.__parent() or None,
                   self.__accumNames,
                   self.__name ) )

    def __setstate__(self,state):
        self.__toklist = state[0]
        (self.__tokdict,
         par,
         inAccumNames,
         self.__name) = state[1]
        self.__accumNames = {}
        self.__accumNames.update(inAccumNames)
        if par is not None:
            # restore the parent link as a weak reference
            self.__parent = wkref(par)
        else:
            self.__parent = None

    def __getnewargs__(self):
        return self.__toklist, self.__name, self.__asList, self.__modal

    def __dir__(self):
        return (dir(type(self)) + list(self.keys()))
# Register ParseResults as a virtual MutableMapping so isinstance() checks
# succeed; it implements the mapping protocol without inheriting from the ABC.
# The collections.MutableMapping alias was deprecated since Python 3.3 and
# removed in Python 3.10, so go through collections.abc when it exists.
try:
    collections.abc.MutableMapping.register(ParseResults)
except AttributeError:  # Python 2.x: collections has no `abc` attribute
    collections.MutableMapping.register(ParseResults)
def col (loc,strg):
    """Returns current column within a string, counting newlines as line separators.
   The first column is number 1.

   Note: the default parsing behavior is to expand tabs in the input string
   before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
   on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
   consistent view of the parsed string, the parse location, and line and column
   positions within the parsed string.
   """
    # a loc sitting just past a newline is column 1 of the next line
    if 0 < loc < len(strg) and strg[loc-1] == '\n':
        return 1
    return loc - strg.rfind("\n", 0, loc)
def lineno(loc,strg):
    """Returns current line number within a string, counting newlines as line separators.
   The first line is number 1.

   Note: the default parsing behavior is to expand tabs in the input string
   before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
   on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
   consistent view of the parsed string, the parse location, and line and column
   positions within the parsed string.
   """
    # line number == newlines seen before loc, plus one
    return 1 + strg.count("\n", 0, loc)
def line( loc, strg ):
    """Returns the line of text containing loc within a string, counting newlines as line separators.
       """
    start = strg.rfind("\n", 0, loc) + 1
    end = strg.find("\n", loc)
    # no trailing newline: the line runs to the end of the string
    return strg[start:end] if end >= 0 else strg[start:]
def _defaultStartDebugAction( instring, loc, expr ):
    """Default debug hook fired before an expression attempts a match."""
    print("Match %s at loc %s(%d,%d)" % (_ustr(expr), _ustr(loc),
                                         lineno(loc, instring), col(loc, instring)))
def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
    """Default debug hook fired after an expression matched successfully."""
    print("Matched %s -> %s" % (_ustr(expr), str(toks.asList())))
def _defaultExceptionDebugAction( instring, loc, expr, exc ):
    # Default "match failed" debug hook: show the raised parse exception.
    print ("Exception raised:" + _ustr(exc))
def nullDebugAction(*args):
    """No-op debug action; install in place of a default action to suppress
    debugging output during parsing."""
    return None
# Only works on Python 3.x - nonlocal is toxic to Python 2 installs
#~ 'decorator to trim function calls to match the arity of the target'
#~ def _trim_arity(func, maxargs=3):
#~ if func in singleArgBuiltins:
#~ return lambda s,l,t: func(t)
#~ limit = 0
#~ foundArity = False
#~ def wrapper(*args):
#~ nonlocal limit,foundArity
#~ while 1:
#~ try:
#~ ret = func(*args[limit:])
#~ foundArity = True
#~ return ret
#~ except TypeError:
#~ if limit == maxargs or foundArity:
#~ raise
#~ limit += 1
#~ continue
#~ return wrapper
# this version is Python 2.x-3.x cross-compatible
'decorator to trim function calls to match the arity of the target'
def _trim_arity(func, maxargs=2):
    """Wrap ``func`` so it can always be invoked as ``fn(s, loc, toks)``,
    regardless of how many of those arguments ``func`` actually accepts.

    The wrapper probes by calling with progressively fewer leading arguments
    (latching the working arity in the mutable cells ``limit``/``foundArity``,
    a Python-2-compatible substitute for ``nonlocal``).  A TypeError raised
    *inside* the user function is distinguished from our own wrong-arity call
    by comparing the traceback's innermost frame against a synthesized
    (filename, lineno) of the call site in ``wrapper`` below.
    """
    if func in singleArgBuiltins:
        # builtins such as int/len only take one argument; pass tokens only
        return lambda s,l,t: func(t)
    limit = [0]           # number of leading args to drop on each call
    foundArity = [False]  # becomes True once a call signature has succeeded
    # traceback return data structure changed in Py3.5 - normalize back to plain tuples
    if system_version[:2] >= (3,5):
        def extract_stack(limit=0):
            # special handling for Python 3.5.0 - extra deep call stack by 1
            offset = -3 if system_version == (3,5,0) else -2
            frame_summary = traceback.extract_stack(limit=-offset+limit-1)[offset]
            return [(frame_summary.filename, frame_summary.lineno)]
        def extract_tb(tb, limit=0):
            frames = traceback.extract_tb(tb, limit=limit)
            frame_summary = frames[-1]
            return [(frame_summary.filename, frame_summary.lineno)]
    else:
        extract_stack = traceback.extract_stack
        extract_tb = traceback.extract_tb
    # synthesize what would be returned by traceback.extract_stack at the call to
    # user's parse action 'func', so that we don't incur call penalty at parse time
    LINE_DIFF = 6
    # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND
    # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
    this_line = extract_stack(limit=2)[-1]
    pa_call_line_synth = (this_line[0], this_line[1]+LINE_DIFF)

    def wrapper(*args):
        while 1:
            try:
                ret = func(*args[limit[0]:])
                foundArity[0] = True
                return ret
            except TypeError:
                # re-raise TypeErrors if they did not come from our arity testing
                if foundArity[0]:
                    raise
                else:
                    try:
                        tb = sys.exc_info()[-1]
                        if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth:
                            raise
                    finally:
                        del tb
                if limit[0] <= maxargs:
                    limit[0] += 1
                    continue
                raise
    # copy func name to wrapper for sensible debug output
    func_name = "<parse action>"
    try:
        func_name = getattr(func, '__name__',
                            getattr(func, '__class__').__name__)
    except Exception:
        func_name = str(func)
    wrapper.__name__ = func_name
    return wrapper
class ParserElement(object):
    """Abstract base level parser element class."""
    # whitespace characters skipped before matching, by default; overridable
    # per-instance (setWhitespaceChars) or globally (setDefaultWhitespaceChars)
    DEFAULT_WHITE_CHARS = " \n\t\r"
    # when True, parse exceptions propagate with pyparsing's internal stack
    # frames instead of being re-raised from the public entry points
    verbose_stacktrace = False
@staticmethod
def setDefaultWhitespaceChars( chars ):
    r"""
    Overrides the default whitespace chars

    Example::
        # default whitespace chars are space, <TAB> and newline
        OneOrMore(Word(alphas)).parseString("abc def\nghi jkl")  # -> ['abc', 'def', 'ghi', 'jkl']

        # change to just treat newline as significant
        ParserElement.setDefaultWhitespaceChars(" \t")
        OneOrMore(Word(alphas)).parseString("abc def\nghi jkl")  # -> ['abc', 'def']
    """
    # copied into each new instance by __init__ (and re-copied by copy() when
    # copyDefaultWhiteChars is set)
    ParserElement.DEFAULT_WHITE_CHARS = chars
@staticmethod
def inlineLiteralsUsing(cls):
    """
    Set class to be used for inclusion of string literals into a parser.

    Example::
        # default literal class used is Literal
        integer = Word(nums)
        date_str = integer("year") + '/' + integer("month") + '/' + integer("day")

        date_str.parseString("1999/12/31")  # -> ['1999', '/', '12', '/', '31']

        # change to Suppress
        ParserElement.inlineLiteralsUsing(Suppress)
        date_str = integer("year") + '/' + integer("month") + '/' + integer("day")

        date_str.parseString("1999/12/31")  # -> ['1999', '12', '31']
    """
    # used by the operator overloads (__add__, __or__, ...) to promote plain
    # strings into parser expressions
    ParserElement._literalStringClass = cls
def __init__( self, savelist=False ):
    """Initialize state shared by all parser elements; ``savelist`` marks
    whether matched tokens should be grouped as a list in the results."""
    self.parseAction = list()    # callbacks run after a successful match
    self.failAction = None       # optional callback run when this element fails
    #~ self.name = "<unknown>"  # don't define self.name, let subclasses try/except upcall
    self.strRepr = None          # cached string representation
    self.resultsName = None
    self.saveAsList = savelist
    self.skipWhitespace = True   # skip leading whitespace before matching
    self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
    self.copyDefaultWhiteChars = True
    self.mayReturnEmpty = False # used when checking for left-recursion
    self.keepTabs = False
    self.ignoreExprs = list()    # expressions (e.g. comments) skipped during matching
    self.debug = False
    self.streamlined = False
    self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
    self.errmsg = ""
    self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
    self.debugActions = ( None, None, None ) #custom debug actions
    self.re = None               # compiled regex, where a subclass uses one -- TODO confirm
    self.callPreparse = True # used to avoid redundant calls to preParse
    self.callDuringTry = False   # run parse actions even during lookahead/alternate testing
def copy( self ):
    """Return a duplicate of this C{ParserElement}, sharing nothing mutable.

    Useful for attaching different parse actions to copies of the same
    parsing pattern.  C{expr()} is an equivalent shorthand for C{expr.copy()}.

    Example::
        integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
        integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K")
        integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")

        print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))
    prints::
        [5120, 100, 655360, 268435456]
    """
    dup = copy.copy(self)
    # give the duplicate its own action/ignore lists so later edits don't
    # leak back into the original
    dup.parseAction = list(self.parseAction)
    dup.ignoreExprs = list(self.ignoreExprs)
    if self.copyDefaultWhiteChars:
        dup.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
    return dup
def setName( self, name ):
    """
    Define name for this expression, makes debugging and exception messages clearer.

    Example::
        Word(nums).parseString("ABC")  # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)
        Word(nums).setName("integer").parseString("ABC")  # -> Exception: Expected integer (at char 0), (line:1, col:1)
    """
    self.name = name
    self.errmsg = "Expected " + self.name
    # keep any pre-built exception object in sync with the new message
    if hasattr(self, "exception"):
        self.exception.msg = self.errmsg
    return self
def setResultsName( self, name, listAllMatches=False ):
    """
    Define name for referencing matching tokens as a nested attribute
    of the returned parse results.

    NOTE: this returns a *copy* of the original C{ParserElement} object;
    this is so that the client can define a basic element, such as an
    integer, and reference it in multiple places with different names.

    You can also set results names using the abbreviated syntax
    C{expr("name")} in place of C{expr.setResultsName("name")}; a trailing
    C{"*"} on the name is shorthand for C{listAllMatches=True}.

    Example::
        date_str = (integer.setResultsName("year") + '/'
                    + integer.setResultsName("month") + '/'
                    + integer.setResultsName("day"))

        # equivalent form:
        date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
    """
    ret = self.copy()
    # trailing '*' on the name means "accumulate all matches"
    if name.endswith("*"):
        name = name[:-1]
        listAllMatches = True
    ret.resultsName = name
    ret.modalResults = not listAllMatches
    return ret
def setBreak(self,breakFlag = True):
    """Method to invoke the Python pdb debugger when this element is
    about to be parsed. Set C{breakFlag} to True to enable, False to
    disable.
    """
    if breakFlag:
        # wrap the bound parse method so every attempt drops into pdb first
        _parseMethod = self._parse
        def breaker(instring, loc, doActions=True, callPreParse=True):
            import pdb
            pdb.set_trace()
            return _parseMethod( instring, loc, doActions, callPreParse )
        # stash the original so setBreak(False) can restore it later
        breaker._originalParseMethod = _parseMethod
        self._parse = breaker
    else:
        if hasattr(self._parse,"_originalParseMethod"):
            self._parse = self._parse._originalParseMethod
    return self
def setParseAction( self, *fns, **kwargs ):
    """
    Replace this expression's parse actions with C{fns}.

    Each parse action is a callable taking 0-3 arguments, invoked as
    C{fn(s,loc,toks)}, C{fn(loc,toks)}, C{fn(toks)}, or C{fn()}, where:
     - s    = the original string being parsed (see note below)
     - loc  = the location of the matching substring
     - toks = the matched tokens, as a C{L{ParseResults}} object

    If a parse action returns a non-None value, that value replaces the
    matched tokens; otherwise the tokens are left unchanged.

    Optional keyword arguments:
     - callDuringTry = (default=C{False}) run the action during lookaheads
       and alternate testing as well

    Note: the default parsing behavior is to expand tabs in the input string
    before starting the parsing process. See L{I{parseString}<parseString>} for more information
    on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
    consistent view of the parsed string, the parse location, and line and column
    positions within the parsed string.

    Example::
        integer = Word(nums)
        date_str = integer + '/' + integer + '/' + integer

        date_str.parseString("1999/12/31")  # -> ['1999', '/', '12', '/', '31']

        # use parse action to convert to ints at parse time
        integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
        date_str = integer + '/' + integer + '/' + integer

        # note that integer fields are now ints, not strings
        date_str.parseString("1999/12/31")  # -> [1999, '/', 12, '/', 31]
    """
    # normalize every action to the 3-argument calling convention up front
    self.parseAction = [_trim_arity(fn) for fn in fns]
    self.callDuringTry = kwargs.get("callDuringTry", False)
    return self
def addParseAction( self, *fns, **kwargs ):
    """
    Append one or more parse actions to this expression's existing list.
    See L{I{setParseAction}<setParseAction>} for calling conventions, and
    the examples in L{I{copy}<copy>}.
    """
    self.parseAction.extend(_trim_arity(fn) for fn in fns)
    self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
    return self
def addCondition(self, *fns, **kwargs):
    """Add a boolean predicate function to expression's list of parse actions. See
    L{I{setParseAction}<setParseAction>} for function call signatures. Unlike C{setParseAction},
    functions passed to C{addCondition} need to return boolean success/fail of the condition.

    Optional keyword arguments:
     - message = define a custom message to be used in the raised exception
     - fatal   = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException

    Example::
        integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
        year_int = integer.copy()
        year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
        date_str = year_int + '/' + integer + '/' + integer

        result = date_str.parseString("1999/12/31")  # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)
    """
    msg = kwargs.get("message", "failed user-defined condition")
    exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException
    for fn in fns:
        # BUGFIX: bind the (pre-trimmed) fn as a default argument.  The
        # previous closure captured the loop variable 'fn' late, so when
        # several conditions were passed in one call, every generated 'pa'
        # evaluated only the LAST function in fns.  Trimming once here also
        # avoids re-running _trim_arity on every parse attempt.
        def pa(s, l, t, fn=_trim_arity(fn)):
            if not bool(fn(s, l, t)):
                raise exc_type(s, l, msg)
        self.parseAction.append(pa)
    self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
    return self
def setFailAction( self, fn ):
    """Define action to perform if parsing fails at this expression.
    Fail acton fn is a callable function that takes the arguments
    C{fn(s,loc,expr,err)} where:
     - s = string being parsed
     - loc = location where expression match was attempted and failed
     - expr = the parse expression that failed
     - err = the exception thrown
    The function returns no value.  It may throw C{L{ParseFatalException}}
    if it is desired to stop parsing immediately."""
    # invoked from _parseNoCache when a ParseBaseException is raised
    self.failAction = fn
    return self
def _skipIgnorables( self, instring, loc ):
    """Advance ``loc`` past any text matched by the expressions registered
    via ``ignore()``; keeps cycling through them until none match."""
    exprsFound = True
    while exprsFound:
        exprsFound = False
        for e in self.ignoreExprs:
            try:
                # consume repeated occurrences of this ignorable expression
                while 1:
                    loc,dummy = e._parse( instring, loc )
                    exprsFound = True
            except ParseException:
                # this ignorable no longer matches; outer loop retries the
                # full set in case another one now applies
                pass
    return loc
def preParse( self, instring, loc ):
    """Advance ``loc`` past ignorable expressions and leading whitespace,
    returning the position where real matching should begin."""
    if self.ignoreExprs:
        loc = self._skipIgnorables(instring, loc)
    if self.skipWhitespace:
        white = self.whiteChars
        end = len(instring)
        while loc < end and instring[loc] in white:
            loc += 1
    return loc
def parseImpl( self, instring, loc, doActions=True ):
    # Default implementation: match empty at the current location.
    # Subclasses override this with their actual matching logic; the
    # contract is to return (new location, matched tokens) or raise.
    return loc, []
def postParse( self, instring, loc, tokenlist ):
    # Hook for subclasses to massage the matched tokens; default is identity.
    return tokenlist
#~ @profile
def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
    """Core parse driver: preParse (skip whitespace/ignorables), parseImpl,
    postParse, then run any attached parse actions; returns
    (new location, ParseResults).  Debug and fail actions are fired along
    the way when configured.  Installed as ``_parse`` unless packrat
    caching substitutes ``_parseCache``."""
    debugging = ( self.debug ) #and doActions )

    if debugging or self.failAction:
        #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
        if (self.debugActions[0] ):
            self.debugActions[0]( instring, loc, self )
        if callPreParse and self.callPreparse:
            preloc = self.preParse( instring, loc )
        else:
            preloc = loc
        tokensStart = preloc
        try:
            try:
                loc,tokens = self.parseImpl( instring, preloc, doActions )
            except IndexError:
                # subclasses flagged mayIndexError can run off the end of the
                # string; normalize that to a ParseException at end-of-text
                raise ParseException( instring, len(instring), self.errmsg, self )
        except ParseBaseException as err:
            #~ print ("Exception raised:", err)
            if self.debugActions[2]:
                self.debugActions[2]( instring, tokensStart, self, err )
            if self.failAction:
                self.failAction( instring, tokensStart, self, err )
            raise
    else:
        # fast path (no debugging / fail action): same flow, but the
        # IndexError guard is skipped when the subclass promises not to
        # index past the end and loc is safely inside the string
        if callPreParse and self.callPreparse:
            preloc = self.preParse( instring, loc )
        else:
            preloc = loc
        tokensStart = preloc
        if self.mayIndexError or loc >= len(instring):
            try:
                loc,tokens = self.parseImpl( instring, preloc, doActions )
            except IndexError:
                raise ParseException( instring, len(instring), self.errmsg, self )
        else:
            loc,tokens = self.parseImpl( instring, preloc, doActions )

    tokens = self.postParse( instring, loc, tokens )

    retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
    if self.parseAction and (doActions or self.callDuringTry):
        if debugging:
            try:
                for fn in self.parseAction:
                    tokens = fn( instring, tokensStart, retTokens )
                    if tokens is not None:
                        # a parse action returning non-None replaces the tokens
                        retTokens = ParseResults( tokens,
                                                  self.resultsName,
                                                  asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
                                                  modal=self.modalResults )
            except ParseBaseException as err:
                #~ print "Exception raised in user parse action:", err
                if (self.debugActions[2] ):
                    self.debugActions[2]( instring, tokensStart, self, err )
                raise
        else:
            for fn in self.parseAction:
                tokens = fn( instring, tokensStart, retTokens )
                if tokens is not None:
                    retTokens = ParseResults( tokens,
                                              self.resultsName,
                                              asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
                                              modal=self.modalResults )

    if debugging:
        #~ print ("Matched",self,"->",retTokens.asList())
        if (self.debugActions[1] ):
            self.debugActions[1]( instring, tokensStart, loc, self, retTokens )

    return loc, retTokens
def tryParse( self, instring, loc ):
    """Attempt a match at ``loc`` without running parse actions; return the
    end location.  A fatal parse exception is downgraded to an ordinary
    ParseException so callers can treat it as a normal mismatch."""
    try:
        endloc, _ = self._parse(instring, loc, doActions=False)
    except ParseFatalException:
        raise ParseException( instring, loc, self.errmsg, self)
    return endloc
def canParseNext(self, instring, loc):
    """Return True if this expression would match at ``loc``, else False."""
    try:
        self.tryParse(instring, loc)
        return True
    except (ParseException, IndexError):
        return False
class _UnboundedCache(object):
    """Packrat cache with no size limit; entries live until resetCache().
    Implemented with closures over a local dict (no attribute lookups in the
    hot path), bound onto the instance via types.MethodType."""
    def __init__(self):
        cache = {}
        # unique sentinel object distinguishes "missing" from a stored None
        self.not_in_cache = not_in_cache = object()

        def get(self, key):
            return cache.get(key, not_in_cache)

        def set(self, key, value):
            cache[key] = value

        def clear(self):
            cache.clear()

        def cache_len(self):
            return len(cache)

        # bind the closures as instance methods
        self.get = types.MethodType(get, self)
        self.set = types.MethodType(set, self)
        self.clear = types.MethodType(clear, self)
        self.__len__ = types.MethodType(cache_len, self)
if _OrderedDict is not None:
    class _FifoCache(object):
        """Bounded packrat cache: evicts the oldest entries once more than
        ``size`` items are stored (FIFO), using OrderedDict insertion order."""
        def __init__(self, size):
            self.not_in_cache = not_in_cache = object()
            cache = _OrderedDict()

            def get(self, key):
                return cache.get(key, not_in_cache)

            def set(self, key, value):
                cache[key] = value
                while len(cache) > size:
                    try:
                        cache.popitem(False)  # pop oldest (FIFO) entry
                    except KeyError:
                        pass

            def clear(self):
                cache.clear()

            def cache_len(self):
                return len(cache)

            # bind the closures as instance methods
            self.get = types.MethodType(get, self)
            self.set = types.MethodType(set, self)
            self.clear = types.MethodType(clear, self)
            self.__len__ = types.MethodType(cache_len, self)
else:
    class _FifoCache(object):
        """Fallback bounded cache for Pythons without OrderedDict: a plain
        dict plus a deque of keys in insertion order approximates FIFO
        eviction."""
        def __init__(self, size):
            self.not_in_cache = not_in_cache = object()
            cache = {}
            key_fifo = collections.deque([], size)

            def get(self, key):
                return cache.get(key, not_in_cache)

            def set(self, key, value):
                cache[key] = value
                # NOTE(review): re-setting an existing key appends a duplicate
                # fifo entry, so eviction here is approximate -- presumably
                # acceptable for a memo cache; confirm
                while len(key_fifo) > size:
                    cache.pop(key_fifo.popleft(), None)
                key_fifo.append(key)

            def clear(self):
                cache.clear()
                key_fifo.clear()

            def cache_len(self):
                return len(cache)

            # bind the closures as instance methods
            self.get = types.MethodType(get, self)
            self.set = types.MethodType(set, self)
            self.clear = types.MethodType(clear, self)
            self.__len__ = types.MethodType(cache_len, self)
# argument cache for optimizing repeated calls when backtracking through recursive expressions
packrat_cache = {} # this is set later by enabledPackrat(); this is here so that resetCache() doesn't fail
packrat_cache_lock = RLock()   # serializes cache access across threads
packrat_cache_stats = [0, 0]   # [hit count, miss count] -- see HIT/MISS in _parseCache
# this method gets repeatedly called during backtracking with the same arguments -
# we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
    """Packrat-memoized front end for _parseNoCache; installed as ``_parse``
    by enablePackrat().  Memoizes both successful results and parse
    exceptions, keyed on (element, string, location, flags)."""
    HIT, MISS = 0, 1
    lookup = (self, instring, loc, callPreParse, doActions)
    with ParserElement.packrat_cache_lock:
        cache = ParserElement.packrat_cache
        value = cache.get(lookup)
        if value is cache.not_in_cache:
            ParserElement.packrat_cache_stats[MISS] += 1
            try:
                value = self._parseNoCache(instring, loc, doActions, callPreParse)
            except ParseBaseException as pe:
                # cache a copy of the exception, without the traceback
                cache.set(lookup, pe.__class__(*pe.args))
                raise
            else:
                # store a copy of the results so later hits can't be mutated
                # by callers of this (original) return value
                cache.set(lookup, (value[0], value[1].copy()))
                return value
        else:
            ParserElement.packrat_cache_stats[HIT] += 1
            if isinstance(value, Exception):
                raise value
            return (value[0], value[1].copy())
# default parse entry point; enablePackrat() replaces this with _parseCache
_parse = _parseNoCache

@staticmethod
def resetCache():
    # empty the packrat cache and zero the hit/miss counters
    ParserElement.packrat_cache.clear()
    ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats)

# set True (permanently) by enablePackrat()
_packratEnabled = False
@staticmethod
def enablePackrat(cache_size_limit=128):
    """Enables "packrat" parsing, which adds memoizing to the parsing logic.
    Repeated parse attempts at the same string location (which happens
    often in many complex grammars) can immediately return a cached value,
    instead of re-executing parsing/validating code.  Memoizing is done of
    both valid results and parsing exceptions.

    Parameters:
     - cache_size_limit - (default=C{128}) - if an integer value is provided
       will limit the size of the packrat cache; if None is passed, then
       the cache size will be unbounded; if 0 is passed, the cache will
       be effectively disabled.

    This speedup may break existing programs that use parse actions that
    have side-effects.  For this reason, packrat parsing is disabled when
    you first import pyparsing.  To activate the packrat feature, your
    program must call the class method C{ParserElement.enablePackrat()}.  If
    your program uses C{psyco} to "compile as you go", you must call
    C{enablePackrat} before calling C{psyco.full()}.  If you do not do this,
    Python will crash.  For best results, call C{enablePackrat()} immediately
    after importing pyparsing.

    Example::
        import pyparsing
        pyparsing.ParserElement.enablePackrat()
    """
    if not ParserElement._packratEnabled:
        ParserElement._packratEnabled = True
        # choose the cache implementation, then swap the parse entry point
        if cache_size_limit is None:
            ParserElement.packrat_cache = ParserElement._UnboundedCache()
        else:
            ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit)
        ParserElement._parse = ParserElement._parseCache
def parseString( self, instring, parseAll=False ):
    """
    Execute the parse expression with the given string.
    This is the main interface to the client code, once the complete
    expression has been built.

    If you want the grammar to require that the entire input string be
    successfully parsed, then set C{parseAll} to True (equivalent to ending
    the grammar with C{L{StringEnd()}}).

    Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
    in order to report proper column numbers in parse actions.
    If the input string contains tabs and
    the grammar uses parse actions that use the C{loc} argument to index into the
    string being parsed, you can ensure you have a consistent view of the input
    string by:
     - calling C{parseWithTabs} on your grammar before calling C{parseString}
       (see L{I{parseWithTabs}<parseWithTabs>})
     - define your parse action using the full C{(s,loc,toks)} signature, and
       reference the input string using the parse action's C{s} argument
     - explictly expand the tabs in your input string before calling
       C{parseString}

    Example::
        Word('a').parseString('aaaaabaaa')  # -> ['aaaaa']
        Word('a').parseString('aaaaabaaa', parseAll=True)  # -> Exception: Expected end of text
    """
    ParserElement.resetCache()
    if not self.streamlined:
        self.streamline()
        #~ self.saveAsList = True
    for e in self.ignoreExprs:
        e.streamline()
    if not self.keepTabs:
        instring = instring.expandtabs()
    try:
        loc, tokens = self._parse( instring, 0 )
        if parseAll:
            # consume trailing whitespace/ignorables, then require end-of-text
            loc = self.preParse( instring, loc )
            se = Empty() + StringEnd()
            se._parse( instring, loc )
    except ParseBaseException as exc:
        if ParserElement.verbose_stacktrace:
            raise
        else:
            # catch and re-raise exception from here, clears out pyparsing internal stack trace
            raise exc
    else:
        return tokens
def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ):
    """
    Scan the input string for expression matches.  Each match will return the
    matching tokens, start location, and end location.  May be called with optional
    C{maxMatches} argument, to clip scanning after 'n' matches are found.  If
    C{overlap} is specified, then overlapping matches will be reported.

    Note that the start and end locations are reported relative to the string
    being parsed.  See L{I{parseString}<parseString>} for more information on parsing
    strings with embedded tabs.

    Example::
        source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
        print(source)
        for tokens,start,end in Word(alphas).scanString(source):
            print(' '*start + '^'*(end-start))
            print(' '*start + tokens[0])

    prints::
        sldjf123lsdjjkf345sldkjf879lkjsfd987
        ^^^^^
        sldjf
                ^^^^^^^
                lsdjjkf
                          ^^^^^^
                          sldkjf
                                   ^^^^^^
                                   lkjsfd
    """
    if not self.streamlined:
        self.streamline()
    for e in self.ignoreExprs:
        e.streamline()

    if not self.keepTabs:
        instring = _ustr(instring).expandtabs()
    instrlen = len(instring)
    loc = 0
    # hoist bound methods out of the scan loop
    preparseFn = self.preParse
    parseFn = self._parse
    ParserElement.resetCache()
    matches = 0
    try:
        while loc <= instrlen and matches < maxMatches:
            try:
                preloc = preparseFn( instring, loc )
                nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
            except ParseException:
                # no match here; resume just past the preparse point
                # NOTE(review): if preparseFn itself raised, preloc would be
                # unbound here -- presumably preParse cannot raise
                # ParseException in practice; confirm
                loc = preloc+1
            else:
                if nextLoc > loc:
                    matches += 1
                    yield tokens, preloc, nextLoc
                    if overlap:
                        # NOTE(review): gates on preparse of loc (nextloc) but
                        # then jumps to the match end (nextLoc) -- confirm
                        # this asymmetry is intentional
                        nextloc = preparseFn( instring, loc )
                        if nextloc > loc:
                            loc = nextLoc
                        else:
                            loc += 1
                    else:
                        loc = nextLoc
                else:
                    # zero-width match: step forward to guarantee progress
                    loc = preloc+1
    except ParseBaseException as exc:
        if ParserElement.verbose_stacktrace:
            raise
        else:
            # catch and re-raise exception from here, clears out pyparsing internal stack trace
            raise exc
def transformString( self, instring ):
    """
    Extension to C{L{scanString}}, to modify matching text with modified tokens that may
    be returned from a parse action.  To use C{transformString}, define a grammar and
    attach a parse action to it that modifies the returned token list.
    Invoking C{transformString()} on a target string will then scan for matches,
    and replace the matched text patterns according to the logic in the parse
    action.  C{transformString()} returns the resulting transformed string.

    Example::
        wd = Word(alphas)
        wd.setParseAction(lambda toks: toks[0].title())

        print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))
    Prints::
        Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
    """
    out = []
    lastE = 0
    # force preservation of <TAB>s, to minimize unwanted transformation of string, and to
    # keep string locs straight between transformString and scanString
    self.keepTabs = True
    try:
        for t,s,e in self.scanString( instring ):
            # copy the unmatched text between matches verbatim
            out.append( instring[lastE:s] )
            if t:
                # append the (possibly action-modified) tokens in place of the match
                if isinstance(t,ParseResults):
                    out += t.asList()
                elif isinstance(t,list):
                    out += t
                else:
                    out.append(t)
            lastE = e
        out.append(instring[lastE:])
        out = [o for o in out if o]
        return "".join(map(_ustr,_flatten(out)))
    except ParseBaseException as exc:
        if ParserElement.verbose_stacktrace:
            raise
        else:
            # catch and re-raise exception from here, clears out pyparsing internal stack trace
            raise exc
def searchString( self, instring, maxMatches=_MAX_INT ):
    """
    Extension to C{L{scanString}} that returns just the matched tokens,
    collected into a single C{ParseResults}.  The optional C{maxMatches}
    argument clips the search after 'n' matches.

    Example::
        # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
        cap_word = Word(alphas.upper(), alphas.lower())

        print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))

        # the sum() builtin can be used to merge results into a single ParseResults object
        print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")))
    prints::
        [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]
        ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']
    """
    try:
        found = [tokens for tokens, _, _ in self.scanString(instring, maxMatches)]
        return ParseResults(found)
    except ParseBaseException as exc:
        if ParserElement.verbose_stacktrace:
            raise
        # catch and re-raise exception from here, clears out pyparsing internal stack trace
        raise exc
def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False):
    """
    Generator method to split a string using the given expression as a separator.
    May be called with optional C{maxsplit} argument, to limit the number of splits;
    and the optional C{includeSeparators} argument (default=C{False}), if the separating
    matching text should be included in the split results.

    Example::
        punc = oneOf(list(".,;:/-!?"))
        print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
    prints::
        ['This', ' this', '', ' this sentence', ' is badly punctuated', '']
    """
    # (cleanup: removed the unused local 'splits' counter -- maxMatches
    # already caps the number of separator matches)
    last = 0
    for t, s, e in self.scanString(instring, maxMatches=maxsplit):
        yield instring[last:s]          # text since the previous separator
        if includeSeparators:
            yield t[0]                  # the matched separator text itself
        last = e
    yield instring[last:]               # trailing remainder
def __add__(self, other ):
    """
    Implementation of + operator - returns C{L{And}}.  Plain strings are
    promoted to parser literals first.

    Example::
        greet = Word(alphas) + "," + Word(alphas) + "!"
        hello = "Hello, World!"
        print (hello, "->", greet.parseString(hello))
    Prints::
        Hello, World! -> ['Hello', ',', 'World', '!']
    """
    if isinstance(other, basestring):
        other = ParserElement._literalStringClass(other)
    if isinstance(other, ParserElement):
        return And([self, other])
    # incompatible operand: warn and decline the combination
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __radd__(self, other ):
    """
    Implementation of + operator when left operand is not a C{L{ParserElement}}
    """
    if isinstance(other, basestring):
        other = ParserElement._literalStringClass(other)
    if isinstance(other, ParserElement):
        return other + self
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __sub__(self, other):
    """
    Implementation of - operator, returns C{L{And}} with error stop
    (no backtracking past this point once the left side has matched).
    """
    if isinstance(other, basestring):
        other = ParserElement._literalStringClass(other)
    if isinstance(other, ParserElement):
        return self + And._ErrorStop() + other
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __rsub__(self, other ):
    """
    Implementation of - operator when left operand is not a C{L{ParserElement}}
    """
    if isinstance(other, basestring):
        other = ParserElement._literalStringClass(other)
    if isinstance(other, ParserElement):
        return other - self
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __mul__(self,other):
    """
    Implementation of * operator, allows use of C{expr * 3} in place of
    C{expr + expr + expr}.  Expressions may also me multiplied by a 2-integer
    tuple, similar to C{{min,max}} multipliers in regular expressions.  Tuples
    may also include C{None} as in:
     - C{expr*(n,None)} or C{expr*(n,)} is equivalent
          to C{expr*n + L{ZeroOrMore}(expr)}
          (read as "at least n instances of C{expr}")
     - C{expr*(None,n)} is equivalent to C{expr*(0,n)}
          (read as "0 to n instances of C{expr}")
     - C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)}
     - C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)}

    Note that C{expr*(None,n)} does not raise an exception if
    more than n exprs exist in the input stream; that is,
    C{expr*(None,n)} does not enforce a maximum number of expr
    occurrences.  If this behavior is desired, then write
    C{expr*(None,n) + ~expr}
    """
    if isinstance(other,int):
        minElements, optElements = other,0
    elif isinstance(other,tuple):
        # pad short tuples with None, then normalize the special forms
        other = (other + (None, None))[:2]
        if other[0] is None:
            other = (0, other[1])
        if isinstance(other[0],int) and other[1] is None:
            # (n, None): n required, then unlimited repeats
            if other[0] == 0:
                return ZeroOrMore(self)
            if other[0] == 1:
                return OneOrMore(self)
            else:
                return self*other[0] + ZeroOrMore(self)
        elif isinstance(other[0],int) and isinstance(other[1],int):
            # (min, max): max-min become optional repetitions
            minElements, optElements = other
            optElements -= minElements
        else:
            raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1]))
    else:
        raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other))

    if minElements < 0:
        raise ValueError("cannot multiply ParserElement by negative value")
    if optElements < 0:
        raise ValueError("second tuple value must be greater or equal to first tuple value")
    if minElements == optElements == 0:
        raise ValueError("cannot multiply ParserElement by 0 or (0,0)")

    if (optElements):
        # build Optional(expr + Optional(expr + ...)) for the optional tail
        def makeOptionalList(n):
            if n>1:
                return Optional(self + makeOptionalList(n-1))
            else:
                return Optional(self)
        if minElements:
            if minElements == 1:
                ret = self + makeOptionalList(optElements)
            else:
                ret = And([self]*minElements) + makeOptionalList(optElements)
        else:
            ret = makeOptionalList(optElements)
    else:
        if minElements == 1:
            ret = self
        else:
            ret = And([self]*minElements)
    return ret
def __rmul__(self, other):
    """Implementation of * with the multiplier on the left; same semantics
    as C{expr * n} (delegates to L{__mul__})."""
    return self * other
def __or__(self, other ):
    """
    Implementation of | operator - returns C{L{MatchFirst}}
    """
    if isinstance(other, basestring):
        other = ParserElement._literalStringClass(other)
    if isinstance(other, ParserElement):
        return MatchFirst([self, other])
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __ror__(self, other ):
    """
    Implementation of | operator when left operand is not a C{L{ParserElement}}
    """
    if isinstance(other, basestring):
        other = ParserElement._literalStringClass(other)
    if isinstance(other, ParserElement):
        return other | self
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __xor__(self, other ):
    """
    Implementation of ^ operator - returns C{L{Or}}
    """
    if isinstance(other, basestring):
        other = ParserElement._literalStringClass(other)
    if isinstance(other, ParserElement):
        return Or([self, other])
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __rxor__(self, other ):
    """
    Implementation of ^ operator when left operand is not a C{L{ParserElement}}
    """
    if isinstance(other, basestring):
        other = ParserElement._literalStringClass(other)
    if isinstance(other, ParserElement):
        return other ^ self
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __and__(self, other ):
    """
    Implementation of & operator - returns C{L{Each}}
    """
    if isinstance(other, basestring):
        other = ParserElement._literalStringClass(other)
    if isinstance(other, ParserElement):
        return Each([self, other])
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __rand__(self, other ):
    """
    Implementation of & operator when left operand is not a C{L{ParserElement}}
    """
    if isinstance(other, basestring):
        other = ParserElement._literalStringClass(other)
    if isinstance(other, ParserElement):
        return other & self
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __invert__( self ):
"""
Implementation of ~ operator - returns C{L{NotAny}}
"""
return NotAny( self )
def __call__(self, name=None):
"""
Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}.
If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be
passed as C{True}.
If C{name} is omitted, same as calling C{L{copy}}.
Example::
# these are equivalent
userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")
"""
if name is not None:
return self.setResultsName(name)
else:
return self.copy()
def suppress( self ):
"""
Suppresses the output of this C{ParserElement}; useful to keep punctuation from
cluttering up returned output.
"""
return Suppress( self )
def leaveWhitespace( self ):
"""
Disables the skipping of whitespace before matching the characters in the
C{ParserElement}'s defined pattern. This is normally only used internally by
the pyparsing module, but may be needed in some whitespace-sensitive grammars.
"""
self.skipWhitespace = False
return self
def setWhitespaceChars( self, chars ):
"""
Overrides the default whitespace chars
"""
self.skipWhitespace = True
self.whiteChars = chars
self.copyDefaultWhiteChars = False
return self
def parseWithTabs( self ):
"""
Overrides default behavior to expand C{<TAB>}s to spaces before parsing the input string.
Must be called before C{parseString} when the input grammar contains elements that
match C{<TAB>} characters.
"""
self.keepTabs = True
return self
def ignore( self, other ):
"""
Define expression to be ignored (e.g., comments) while doing pattern
matching; may be called repeatedly, to define multiple comment or other
ignorable patterns.
Example::
patt = OneOrMore(Word(alphas))
patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj']
patt.ignore(cStyleComment)
patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']
"""
if isinstance(other, basestring):
other = Suppress(other)
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
self.ignoreExprs.append(other)
else:
self.ignoreExprs.append( Suppress( other.copy() ) )
return self
def setDebugActions( self, startAction, successAction, exceptionAction ):
"""
Enable display of debugging messages while doing pattern matching.
"""
self.debugActions = (startAction or _defaultStartDebugAction,
successAction or _defaultSuccessDebugAction,
exceptionAction or _defaultExceptionDebugAction)
self.debug = True
return self
def setDebug( self, flag=True ):
"""
Enable display of debugging messages while doing pattern matching.
Set C{flag} to True to enable, False to disable.
Example::
wd = Word(alphas).setName("alphaword")
integer = Word(nums).setName("numword")
term = wd | integer
# turn on debugging for wd
wd.setDebug()
OneOrMore(term).parseString("abc 123 xyz 890")
prints::
Match alphaword at loc 0(1,1)
Matched alphaword -> ['abc']
Match alphaword at loc 3(1,4)
Exception raised:Expected alphaword (at char 4), (line:1, col:5)
Match alphaword at loc 7(1,8)
Matched alphaword -> ['xyz']
Match alphaword at loc 11(1,12)
Exception raised:Expected alphaword (at char 12), (line:1, col:13)
Match alphaword at loc 15(1,16)
Exception raised:Expected alphaword (at char 15), (line:1, col:16)
The output shown is that produced by the default debug actions - custom debug actions can be
specified using L{setDebugActions}. Prior to attempting
to match the C{wd} expression, the debugging message C{"Match <exprname> at loc <n>(<line>,<col>)"}
is shown. Then if the parse succeeds, a C{"Matched"} message is shown, or an C{"Exception raised"}
message is shown. Also note the use of L{setName} to assign a human-readable name to the expression,
which makes debugging and exception messages easier to understand - for instance, the default
name created for the C{Word} expression without calling C{setName} is C{"W:(ABCD...)"}.
"""
if flag:
self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )
else:
self.debug = False
return self
    def __str__( self ):
        # Display name of this expression (set via setName or auto-generated by subclasses).
        return self.name
    def __repr__( self ):
        # repr intentionally mirrors str; _ustr handles py2 unicode vs py3 str.
        return _ustr(self)
def streamline( self ):
self.streamlined = True
self.strRepr = None
return self
    def checkRecursion( self, parseElementList ):
        # Default no-op: a leaf expression cannot recurse.  Presumably overridden
        # by compound expression classes to walk their contained expressions --
        # those overrides are outside this view; confirm.
        pass
def validate( self, validateTrace=[] ):
"""
Check defined expressions for valid structure, check for infinite recursive definitions.
"""
self.checkRecursion( [] )
def parseFile( self, file_or_filename, parseAll=False ):
"""
Execute the parse expression on the given file or filename.
If a filename is specified (instead of a file object),
the entire file is opened, read, and closed before parsing.
"""
try:
file_contents = file_or_filename.read()
except AttributeError:
with open(file_or_filename, "r") as f:
file_contents = f.read()
try:
return self.parseString(file_contents, parseAll)
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def __eq__(self,other):
if isinstance(other, ParserElement):
return self is other or vars(self) == vars(other)
elif isinstance(other, basestring):
return self.matches(other)
else:
return super(ParserElement,self)==other
    def __ne__(self,other):
        # Defined as the strict negation of == (py2 does not derive != from __eq__).
        return not (self == other)
    def __hash__(self):
        # Identity-based hash (required because defining __eq__ suppresses the
        # default __hash__ in py3).  NOTE(review): __eq__ can report two distinct
        # instances equal (vars comparison) while their hashes differ -- these
        # objects are not safe as dict keys relying on that equality; confirm
        # whether that is intended.
        return hash(id(self))
    def __req__(self,other):
        # NOTE(review): __req__ is not a real Python special method; apparently a
        # legacy/symmetry helper.  Delegates to ==.
        return self == other
    def __rne__(self,other):
        # NOTE(review): __rne__ is not a real Python special method; apparently a
        # legacy/symmetry helper.  Delegates to ==.
        return not (self == other)
def matches(self, testString, parseAll=True):
"""
Method for quick testing of a parser against a test string. Good for simple
inline microtests of sub expressions while building up larger parser.
Parameters:
- testString - to test against this expression for a match
- parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
Example::
expr = Word(nums)
assert expr.matches("100")
"""
try:
self.parseString(_ustr(testString), parseAll=parseAll)
return True
except ParseBaseException:
return False
    def runTests(self, tests, parseAll=True, comment='#', fullDump=True, printResults=True, failureTests=False):
        """
        Execute the parse expression on a series of test strings, showing each
        test, the parsed results or where the parse failed. Quick and easy way to
        run a parse expression against a list of sample strings.
        Parameters:
         - tests - a list of separate test strings, or a multiline string of test strings
         - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
         - comment - (default=C{'#'}) - expression for indicating embedded comments in the test
              string; pass None to disable comment filtering
         - fullDump - (default=C{True}) - dump results as list followed by results names in nested outline;
              if False, only dump nested list
         - printResults - (default=C{True}) prints test output to stdout
         - failureTests - (default=C{False}) indicates if these tests are expected to fail parsing
        Returns: a (success, results) tuple, where success indicates that all tests succeeded
        (or failed if C{failureTests} is True), and the results contain a list of lines of each
        test's output
        Example::
            number_expr = pyparsing_common.number.copy()
            result = number_expr.runTests('''
                # unsigned integer
                100
                # negative integer
                -100
                # float with scientific notation
                6.02e23
                # integer with scientific notation
                1e-12
                ''')
            print("Success" if result[0] else "Failed!")
            result = number_expr.runTests('''
                # stray character
                100Z
                # missing leading digit before '.'
                -.100
                # too many '.'
                3.14.159
                ''', failureTests=True)
            print("Success" if result[0] else "Failed!")
        prints::
            # unsigned integer
            100
            [100]
            # negative integer
            -100
            [-100]
            # float with scientific notation
            6.02e23
            [6.02e+23]
            # integer with scientific notation
            1e-12
            [1e-12]
            Success
            # stray character
            100Z
               ^
            FAIL: Expected end of text (at char 3), (line:1, col:4)
            # missing leading digit before '.'
            -.100
            ^
            FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)
            # too many '.'
            3.14.159
                ^
            FAIL: Expected end of text (at char 4), (line:1, col:5)
            Success
        Each test string must be on a single line. If you want to test a string that spans multiple
        lines, create a test like this::
            expr.runTest(r"this is a test\\n of strings that spans \\n 3 lines")
        (Note that this is a raw string literal, you must include the leading 'r'.)
        """
        # Normalize inputs: a multiline string becomes a list of stripped lines,
        # and a string comment marker becomes a Literal expression.
        if isinstance(tests, basestring):
            tests = list(map(str.strip, tests.rstrip().splitlines()))
        if isinstance(comment, basestring):
            comment = Literal(comment)
        allResults = []
        comments = []   # comment lines accumulated until the next test line
        success = True
        for t in tests:
            # Comment lines (and blank lines following comments) are buffered and
            # echoed above the next real test.
            if comment is not None and comment.matches(t, False) or comments and not t:
                comments.append(t)
                continue
            if not t:
                continue
            out = ['\n'.join(comments), t]
            comments = []
            try:
                # allow literal r'\n' in a single-line test to stand for a newline
                t = t.replace(r'\n','\n')
                result = self.parseString(t, parseAll=parseAll)
                out.append(result.dump(full=fullDump))
                # a parse success only counts as overall success when failures
                # were not expected
                success = success and not failureTests
            except ParseBaseException as pe:
                fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""
                if '\n' in t:
                    # multi-line test: show the offending line, then a caret under it
                    out.append(line(pe.loc, t))
                    out.append(' '*(col(pe.loc,t)-1) + '^' + fatal)
                else:
                    out.append(' '*pe.loc + '^' + fatal)
                out.append("FAIL: " + str(pe))
                success = success and failureTests
                result = pe
            except Exception as exc:
                # non-parse errors (e.g. from parse actions) are reported but do
                # not abort the remaining tests
                out.append("FAIL-EXCEPTION: " + str(exc))
                success = success and failureTests
                result = exc
            if printResults:
                if fullDump:
                    out.append('')
                print('\n'.join(out))
            allResults.append((t, result))
        return success, allResults
class Token(ParserElement):
    """
    Abstract base class for atomic (non-compound) matching patterns.
    """
    def __init__( self ):
        # atomic tokens never keep a per-match results list
        ParserElement.__init__( self, savelist=False )
class Empty(Token):
    """
    A zero-width token that matches at every position without consuming input.
    """
    def __init__( self ):
        super(Empty,self).__init__()
        self.name = "Empty"
        self.mayIndexError = False
        self.mayReturnEmpty = True
class NoMatch(Token):
    """
    A token that can never match, at any position.
    """
    def __init__( self ):
        super(NoMatch,self).__init__()
        self.name = "NoMatch"
        self.errmsg = "Unmatchable token"
        self.mayIndexError = False
        self.mayReturnEmpty = True

    def parseImpl( self, instring, loc, doActions=True ):
        # unconditionally fail
        raise ParseException(instring, loc, self.errmsg, self)
class Literal(Token):
    """
    Token to exactly match a specified string.
    Example::
        Literal('blah').parseString('blah')  # -> ['blah']
        Literal('blah').parseString('blahfooblah') # -> ['blah']
        Literal('blah').parseString('bla')  # -> Exception: Expected "blah"
    For case-insensitive matching, use L{CaselessLiteral}.
    For keyword matching (force word break before and after the matched string),
    use L{Keyword} or L{CaselessKeyword}.
    """
    def __init__( self, matchString ):
        super(Literal,self).__init__()
        self.match = matchString
        self.matchLen = len(matchString)
        try:
            self.firstMatchChar = matchString[0]
        except IndexError:
            # Degrade a zero-length literal into an Empty token by swapping the
            # instance's class in place; the attributes assigned below still
            # apply to the (now-Empty) instance.
            warnings.warn("null string passed to Literal; use Empty() instead",
                            SyntaxWarning, stacklevel=2)
            self.__class__ = Empty
        self.name = '"%s"' % _ustr(self.match)
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = False
        self.mayIndexError = False
    # Performance tuning: this routine gets called a *lot*
    # if this is a single character match string and the first character matches,
    # short-circuit as quickly as possible, and avoid calling startswith
    #~ @profile
    def parseImpl( self, instring, loc, doActions=True ):
        # Cheap first-character comparison before the full startswith check;
        # single-character literals skip startswith entirely.
        if (instring[loc] == self.firstMatchChar and
            (self.matchLen==1 or instring.startswith(self.match,loc)) ):
            return loc+self.matchLen, self.match
        raise ParseException(instring, loc, self.errmsg, self)
_L = Literal  # backward-compatible short alias for Literal
ParserElement._literalStringClass = Literal  # class used by ParserElement operators to auto-convert plain-string operands
class Keyword(Token):
    """
    Token to exactly match a specified string as a keyword, that is, it must be
    immediately followed by a non-keyword character.  Compare with C{L{Literal}}:
     - C{Literal("if")} will match the leading C{'if'} in C{'ifAndOnlyIf'}.
     - C{Keyword("if")} will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'}
    Accepts two optional constructor arguments in addition to the keyword string:
     - C{identChars} is a string of characters that would be valid identifier characters,
          defaulting to all alphanumerics + "_" and "$"
     - C{caseless} allows case-insensitive matching, default is C{False}.
    Example::
        Keyword("start").parseString("start")  # -> ['start']
        Keyword("start").parseString("starting")  # -> Exception
    For case-insensitive matching, use L{CaselessKeyword}.
    """
    # characters considered part of an identifier, used for the word-boundary tests
    DEFAULT_KEYWORD_CHARS = alphanums+"_$"
    def __init__( self, matchString, identChars=None, caseless=False ):
        super(Keyword,self).__init__()
        if identChars is None:
            identChars = Keyword.DEFAULT_KEYWORD_CHARS
        self.match = matchString
        self.matchLen = len(matchString)
        try:
            self.firstMatchChar = matchString[0]
        except IndexError:
            warnings.warn("null string passed to Keyword; use Empty() instead",
                            SyntaxWarning, stacklevel=2)
        self.name = '"%s"' % self.match
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = False
        self.mayIndexError = False
        self.caseless = caseless
        if caseless:
            # caseless comparisons are done against upper-cased copies
            self.caselessmatch = matchString.upper()
            identChars = identChars.upper()
        self.identChars = set(identChars)
    def parseImpl( self, instring, loc, doActions=True ):
        # Both branches test three things: the keyword text itself, that the
        # character *after* it is not an identifier character (or string end),
        # and that the character *before* it is not an identifier character
        # (or string start).
        if self.caseless:
            if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
                 (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
                 (loc == 0 or instring[loc-1].upper() not in self.identChars) ):
                return loc+self.matchLen, self.match
        else:
            if (instring[loc] == self.firstMatchChar and
                (self.matchLen==1 or instring.startswith(self.match,loc)) and
                (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
                (loc == 0 or instring[loc-1] not in self.identChars) ):
                return loc+self.matchLen, self.match
        raise ParseException(instring, loc, self.errmsg, self)
    def copy(self):
        # NOTE(review): copy() resets identChars to the *class default* rather
        # than preserving this instance's identChars -- looks intentional (ties
        # copies to setDefaultKeywordChars), but confirm.
        c = super(Keyword,self).copy()
        c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
        return c
    @staticmethod
    def setDefaultKeywordChars( chars ):
        """Overrides the default Keyword chars
        """
        Keyword.DEFAULT_KEYWORD_CHARS = chars
class CaselessLiteral(Literal):
    """
    Case-insensitive variant of L{Literal}: matches the given string regardless
    of the letter case in the input, but the parsed result is always the match
    string exactly as originally given (NOT the case found in the input).
    Example::
        OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD']
    (Contrast with example for L{CaselessKeyword}.)
    """
    def __init__( self, matchString ):
        # compare against an upper-cased copy; remember the original for results
        super(CaselessLiteral,self).__init__( matchString.upper() )
        self.returnString = matchString
        self.name = "'%s'" % self.returnString
        self.errmsg = "Expected " + self.name

    def parseImpl( self, instring, loc, doActions=True ):
        end = loc + self.matchLen
        if instring[loc:end].upper() == self.match:
            return end, self.returnString
        raise ParseException(instring, loc, self.errmsg, self)
class CaselessKeyword(Keyword):
    """
    Caseless version of L{Keyword}.
    Example::
        OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD']
    (Contrast with example for L{CaselessLiteral}.)
    """
    def __init__( self, matchString, identChars=None ):
        super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )
    def parseImpl( self, instring, loc, doActions=True ):
        # Bug fix: besides requiring a non-identifier character (or string end)
        # *after* the keyword, also require a non-identifier character (or
        # string start) *before* it -- the same leading word-boundary test that
        # Keyword.parseImpl performs in its caseless branch.  Without it, this
        # override could match a keyword embedded at a mid-word scan position.
        if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
             (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
             (loc == 0 or instring[loc-1].upper() not in self.identChars) ):
            return loc+self.matchLen, self.match
        raise ParseException(instring, loc, self.errmsg, self)
class CloseMatch(Token):
    """
    A variation on L{Literal} which matches "close" matches, that is,
    strings with at most 'n' mismatching characters. C{CloseMatch} takes parameters:
     - C{match_string} - string to be matched
     - C{maxMismatches} - (C{default=1}) maximum number of mismatches allowed to count as a match
    The results from a successful parse will contain the matched text from the input string and the following named results:
     - C{mismatches} - a list of the positions within the match_string where mismatches were found
     - C{original} - the original match_string used to compare against the input string
    If C{mismatches} is an empty list, then the match was an exact match.
    Example::
        patt = CloseMatch("ATCATCGAATGGA")
        patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
        patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)
        # exact match
        patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})
        # close match allowing up to 2 mismatches
        patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2)
        patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
    """
    def __init__(self, match_string, maxMismatches=1):
        super(CloseMatch,self).__init__()
        self.name = match_string
        self.match_string = match_string
        self.maxMismatches = maxMismatches
        self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches)
        self.mayIndexError = False
        self.mayReturnEmpty = False
    def parseImpl( self, instring, loc, doActions=True ):
        start = loc
        instrlen = len(instring)
        maxloc = start + len(self.match_string)
        # only attempt a match if enough input remains for the full pattern
        if maxloc <= instrlen:
            match_string = self.match_string
            match_stringloc = 0
            mismatches = []
            maxMismatches = self.maxMismatches
            # compare character by character, collecting mismatch positions;
            # bail out early once the mismatch budget is exceeded
            for match_stringloc,s_m in enumerate(zip(instring[loc:maxloc], self.match_string)):
                src,mat = s_m
                if src != mat:
                    mismatches.append(match_stringloc)
                    if len(mismatches) > maxMismatches:
                        break
            else:
                # Bug fix: match_stringloc is an offset within the compared
                # slice, so the new parse location must be based at `start`.
                # The original "loc = match_stringloc + 1" returned a wrong end
                # location (and wrong matched text) whenever the match began at
                # loc > 0.
                loc = start + match_stringloc + 1
                results = ParseResults([instring[start:loc]])
                results['original'] = self.match_string
                results['mismatches'] = mismatches
                return loc, results
        raise ParseException(instring, loc, self.errmsg, self)
class Word(Token):
    """
    Token for matching words composed of allowed character sets.
    Defined with string containing all allowed initial characters,
    an optional string containing allowed body characters (if omitted,
    defaults to the initial character set), and an optional minimum,
    maximum, and/or exact length.  The default value for C{min} is 1 (a
    minimum value < 1 is not valid); the default values for C{max} and C{exact}
    are 0, meaning no maximum or exact length restriction. An optional
    C{excludeChars} parameter can list characters that might be found in
    the input C{bodyChars} string; useful to define a word of all printables
    except for one or two characters, for instance.
    L{srange} is useful for defining custom character set strings for defining
    C{Word} expressions, using range notation from regular expression character sets.
    A common mistake is to use C{Word} to match a specific literal string, as in
    C{Word("Address")}. Remember that C{Word} uses the string argument to define
    I{sets} of matchable characters. This expression would match "Add", "AAA",
    "dAred", or any other word made up of the characters 'A', 'd', 'r', 'e', and 's'.
    To match an exact literal string, use L{Literal} or L{Keyword}.
    pyparsing includes helper strings for building Words:
     - L{alphas}
     - L{nums}
     - L{alphanums}
     - L{hexnums}
     - L{alphas8bit} (alphabetic characters in ASCII range 128-255 - accented, tilded, umlauted, etc.)
     - L{punc8bit} (non-alphabetic characters in ASCII range 128-255 - currency, symbols, superscripts, diacriticals, etc.)
     - L{printables} (any non-whitespace character)
    Example::
        # a word composed of digits
        integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))
        # a word with a leading capital, and zero or more lowercase
        capital_word = Word(alphas.upper(), alphas.lower())
        # hostnames are alphanumeric, with leading alpha, and '-'
        hostname = Word(alphas, alphanums+'-')
        # roman numeral (not a strict parser, accepts invalid mix of characters)
        roman = Word("IVXLCDM")
        # any string of non-whitespace characters, except for ','
        csv_value = Word(printables, excludeChars=",")
    """
    def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ):
        super(Word,self).__init__()
        # strip excluded characters out of both character sets up front
        if excludeChars:
            initChars = ''.join(c for c in initChars if c not in excludeChars)
            if bodyChars:
                bodyChars = ''.join(c for c in bodyChars if c not in excludeChars)
        # keep the original strings (for repr/regex) and set versions (for fast lookup)
        self.initCharsOrig = initChars
        self.initChars = set(initChars)
        if bodyChars :
            self.bodyCharsOrig = bodyChars
            self.bodyChars = set(bodyChars)
        else:
            # body characters default to the initial character set
            self.bodyCharsOrig = initChars
            self.bodyChars = set(initChars)
        self.maxSpecified = max > 0
        if min < 1:
            raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")
        self.minLen = min
        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT
        if exact > 0:
            # exact overrides both min and max
            self.maxLen = exact
            self.minLen = exact
        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayIndexError = False
        self.asKeyword = asKeyword
        # Fast path: when there are no length constraints (and no space chars,
        # which _escapeRegexRangeChars cannot represent here), precompile an
        # equivalent regular expression and let the re module do the scan.
        if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
            if self.bodyCharsOrig == self.initCharsOrig:
                self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
            elif len(self.initCharsOrig) == 1:
                self.reString = "%s[%s]*" % \
                                      (re.escape(self.initCharsOrig),
                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
            else:
                self.reString = "[%s][%s]*" % \
                                      (_escapeRegexRangeChars(self.initCharsOrig),
                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
            if self.asKeyword:
                # surround with word-boundary assertions
                self.reString = r"\b"+self.reString+r"\b"
            try:
                self.re = re.compile( self.reString )
            except Exception:
                # fall back to the manual character-scan implementation
                self.re = None
    def parseImpl( self, instring, loc, doActions=True ):
        # regex fast path, if one was successfully compiled in __init__
        if self.re:
            result = self.re.match(instring,loc)
            if not result:
                raise ParseException(instring, loc, self.errmsg, self)
            loc = result.end()
            return loc, result.group()
        # manual scan: first character must be in initChars...
        if not(instring[ loc ] in self.initChars):
            raise ParseException(instring, loc, self.errmsg, self)
        start = loc
        loc += 1
        instrlen = len(instring)
        bodychars = self.bodyChars
        # ...then consume body characters up to maxLen (or end of input)
        maxloc = start + self.maxLen
        maxloc = min( maxloc, instrlen )
        while loc < maxloc and instring[loc] in bodychars:
            loc += 1
        throwException = False
        if loc - start < self.minLen:
            throwException = True
        # with an explicit max, the word must not be followed by more body chars
        if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
            throwException = True
        if self.asKeyword:
            # keyword mode: require non-body characters (or string edges) on both sides
            if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
                throwException = True
        if throwException:
            raise ParseException(instring, loc, self.errmsg, self)
        return loc, instring[start:loc]
    def __str__( self ):
        try:
            return super(Word,self).__str__()
        except Exception:
            pass
        if self.strRepr is None:
            # abbreviate long character sets for readability
            def charsAsStr(s):
                if len(s)>4:
                    return s[:4]+"..."
                else:
                    return s
            if ( self.initCharsOrig != self.bodyCharsOrig ):
                self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
            else:
                self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
        return self.strRepr
class Regex(Token):
    r"""
    Token for matching strings that match a given regular expression.
    Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module.
    If the given regex contains named groups (defined using C{(?P<name>...)}), these will be preserved as
    named parse results.
    Example::
        realnum = Regex(r"[+-]?\d+\.\d*")
        date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
        # ref: http://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
        roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
    """
    compiledREtype = type(re.compile("[A-Z]"))  # runtime type of a precompiled pattern, for the isinstance check below
    def __init__( self, pattern, flags=0):
        """The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags."""
        super(Regex,self).__init__()
        if isinstance(pattern, basestring):
            if not pattern:
                warnings.warn("null string passed to Regex; use Empty() instead",
                        SyntaxWarning, stacklevel=2)
            self.pattern = pattern
            self.flags = flags
            try:
                self.re = re.compile(self.pattern, self.flags)
                self.reString = self.pattern
            except sre_constants.error:
                # warn with the offending pattern before propagating the error
                warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
                    SyntaxWarning, stacklevel=2)
                raise
        elif isinstance(pattern, Regex.compiledREtype):
            # accept an already-compiled pattern; flags cannot be re-applied here
            self.re = pattern
            self.pattern = \
                self.reString = str(pattern)
            self.flags = flags
        else:
            raise ValueError("Regex may only be constructed with a string or a compiled RE object")
        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayIndexError = False
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        result = self.re.match(instring,loc)
        if not result:
            raise ParseException(instring, loc, self.errmsg, self)
        loc = result.end()
        # expose any named groups as named parse results
        d = result.groupdict()
        ret = ParseResults(result.group())
        if d:
            for k in d:
                ret[k] = d[k]
        return loc,ret
    def __str__( self ):
        try:
            return super(Regex,self).__str__()
        except Exception:
            pass
        if self.strRepr is None:
            self.strRepr = "Re:(%s)" % repr(self.pattern)
        return self.strRepr
class QuotedString(Token):
    r"""
    Token for matching strings that are delimited by quoting characters.
    Defined with the following parameters:
        - quoteChar - string of one or more characters defining the quote delimiting string
        - escChar - character to escape quotes, typically backslash (default=C{None})
        - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=C{None})
        - multiline - boolean indicating whether quotes can span multiple lines (default=C{False})
        - unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True})
        - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar)
        - convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) to actual whitespace (default=C{True})
    Example::
        qs = QuotedString('"')
        print(qs.searchString('lsjdf "This is the quote" sldjf'))
        complex_qs = QuotedString('{{', endQuoteChar='}}')
        print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf'))
        sql_qs = QuotedString('"', escQuote='""')
        print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
    prints::
        [['This is the quote']]
        [['This is the "quote"']]
        [['This is the quote with "embedded" quotes']]
    """
    def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True):
        super(QuotedString,self).__init__()
        # remove white space from quote chars - wont work anyway
        quoteChar = quoteChar.strip()
        if not quoteChar:
            warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
            raise SyntaxError()
        if endQuoteChar is None:
            endQuoteChar = quoteChar
        else:
            endQuoteChar = endQuoteChar.strip()
            if not endQuoteChar:
                warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
                raise SyntaxError()
        self.quoteChar = quoteChar
        self.quoteCharLen = len(quoteChar)
        self.firstQuoteChar = quoteChar[0]
        self.endQuoteChar = endQuoteChar
        self.endQuoteCharLen = len(endQuoteChar)
        self.escChar = escChar
        self.escQuote = escQuote
        self.unquoteResults = unquoteResults
        self.convertWhitespaceEscapes = convertWhitespaceEscapes
        # Build a single regex for the whole quoted string.  The body starts as
        # "any char that is not the end-quote's first char or the escape char";
        # alternatives for multi-char end quotes, escaped quotes, and escaped
        # characters are appended below, then the pattern is closed with the
        # end-quote string itself.
        if multiline:
            self.flags = re.MULTILINE | re.DOTALL
            self.pattern = r'%s(?:[^%s%s]' % \
                ( re.escape(self.quoteChar),
                  _escapeRegexRangeChars(self.endQuoteChar[0]),
                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
        else:
            self.flags = 0
            # single-line mode additionally excludes raw newlines from the body
            self.pattern = r'%s(?:[^%s\n\r%s]' % \
                ( re.escape(self.quoteChar),
                  _escapeRegexRangeChars(self.endQuoteChar[0]),
                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
        if len(self.endQuoteChar) > 1:
            # allow prefixes of a multi-char end quote, as long as the full end
            # quote does not follow (longest prefix first)
            self.pattern += (
                '|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
                                               _escapeRegexRangeChars(self.endQuoteChar[i]))
                                    for i in range(len(self.endQuoteChar)-1,0,-1)) + ')'
                )
        if escQuote:
            self.pattern += (r'|(?:%s)' % re.escape(escQuote))
        if escChar:
            self.pattern += (r'|(?:%s.)' % re.escape(escChar))
            # pattern used later (in parseImpl) to strip the escape char from escaped characters
            self.escCharReplacePattern = re.escape(self.escChar)+"(.)"
        self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
        try:
            self.re = re.compile(self.pattern, self.flags)
            self.reString = self.pattern
        except sre_constants.error:
            warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
                SyntaxWarning, stacklevel=2)
            raise
        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayIndexError = False
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        # quick first-character test before running the full regex
        result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None
        if not result:
            raise ParseException(instring, loc, self.errmsg, self)
        loc = result.end()
        ret = result.group()
        if self.unquoteResults:
            # strip off quotes
            ret = ret[self.quoteCharLen:-self.endQuoteCharLen]
            if isinstance(ret,basestring):
                # replace escaped whitespace
                if '\\' in ret and self.convertWhitespaceEscapes:
                    ws_map = {
                        r'\t' : '\t',
                        r'\n' : '\n',
                        r'\f' : '\f',
                        r'\r' : '\r',
                    }
                    for wslit,wschar in ws_map.items():
                        ret = ret.replace(wslit, wschar)
                # replace escaped characters
                if self.escChar:
                    ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)
                # replace escaped quotes
                if self.escQuote:
                    ret = ret.replace(self.escQuote, self.endQuoteChar)
        return loc, ret
    def __str__( self ):
        try:
            return super(QuotedString,self).__str__()
        except Exception:
            pass
        if self.strRepr is None:
            self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
        return self.strRepr
class CharsNotIn(Token):
    """
    Token matching runs of characters that are I{not} in a given set.
    Whitespace is included in the matched text unless it is listed in the
    exclusion set.  Takes the string of disallowed characters, plus optional
    C{min}, C{max}, and C{exact} length constraints; C{min} defaults to 1
    (a minimum < 1 is invalid), and C{max}/C{exact} default to 0 (no limit).
    Example::
        # define a comma-separated-value as anything that is not a ','
        csv_value = CharsNotIn(',')
        print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213"))
    prints::
        ['dkls', 'lsdkjf', 's12 34', '@!#', '213']
    """
    def __init__( self, notChars, min=1, max=0, exact=0 ):
        super(CharsNotIn,self).__init__()
        # whitespace is significant here, so never skip it
        self.skipWhitespace = False
        self.notChars = notChars
        if min < 1:
            raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted")
        self.minLen = min
        self.maxLen = max if max > 0 else _MAX_INT
        if exact > 0:
            # exact overrides both bounds
            self.maxLen = exact
            self.minLen = exact
        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = ( self.minLen == 0 )
        self.mayIndexError = False

    def parseImpl( self, instring, loc, doActions=True ):
        if instring[loc] in self.notChars:
            raise ParseException(instring, loc, self.errmsg, self)
        begin = loc
        loc += 1
        forbidden = self.notChars
        limit = min( begin + self.maxLen, len(instring) )
        while loc < limit and instring[loc] not in forbidden:
            loc += 1
        if loc - begin < self.minLen:
            raise ParseException(instring, loc, self.errmsg, self)
        return loc, instring[begin:loc]

    def __str__( self ):
        try:
            return super(CharsNotIn, self).__str__()
        except Exception:
            pass
        if self.strRepr is None:
            shown = self.notChars
            if len(shown) > 4:
                self.strRepr = "!W:(%s...)" % shown[:4]
            else:
                self.strRepr = "!W:(%s)" % shown
        return self.strRepr
class White(Token):
    """
    Matches runs of whitespace characters.  Pyparsing normally skips
    whitespace; use this class when whitespace itself is structurally
    significant.  C{ws} lists the characters to match (default
    C{" \\t\\r\\n"}); C{min}, C{max}, and C{exact} behave as in C{L{Word}}.
    """
    # printable labels used to build this token's display name
    whiteStrs = {
        " " : "<SPC>",
        "\t": "<TAB>",
        "\n": "<LF>",
        "\r": "<CR>",
        "\f": "<FF>",
        }
    def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
        super(White,self).__init__()
        self.matchWhite = ws
        # the characters matched here must not also be skipped as ordinary whitespace
        self.setWhitespaceChars( "".join(c for c in self.whiteChars if c not in self.matchWhite) )
        #~ self.leaveWhitespace()
        self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite))
        self.mayReturnEmpty = True
        self.errmsg = "Expected " + self.name
        self.minLen = exact if exact > 0 else min
        self.maxLen = exact if exact > 0 else (max if max > 0 else _MAX_INT)

    def parseImpl( self, instring, loc, doActions=True ):
        if instring[loc] not in self.matchWhite:
            raise ParseException(instring, loc, self.errmsg, self)
        begin = loc
        loc += 1
        limit = min( begin + self.maxLen, len(instring) )
        while loc < limit and instring[loc] in self.matchWhite:
            loc += 1
        if loc - begin < self.minLen:
            raise ParseException(instring, loc, self.errmsg, self)
        return loc, instring[begin:loc]
class _PositionToken(Token):
    """Abstract base for zero-width tokens that test a parse position
    rather than consuming input text."""
    def __init__( self ):
        super(_PositionToken,self).__init__()
        # position tokens consume no characters and can never over-index
        self.mayReturnEmpty = True
        self.mayIndexError = False
        # use the concrete subclass name in error messages and repr
        self.name = type(self).__name__
class GoToColumn(_PositionToken):
    """
    Token to advance to a specific column of input text; useful for tabular report scraping.
    """
    def __init__( self, colno ):
        super(GoToColumn,self).__init__()
        # target (1-based) column number
        self.col = colno
    def preParse( self, instring, loc ):
        # advance over ignorables and whitespace until reaching the target
        # column, stopping early at end of input or a non-whitespace char
        if col(loc,instring) != self.col:
            instrlen = len(instring)
            if self.ignoreExprs:
                loc = self._skipIgnorables( instring, loc )
            while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
                loc += 1
        return loc
    def parseImpl( self, instring, loc, doActions=True ):
        # fail if the current position is already beyond the requested column
        thiscol = col( loc, instring )
        if thiscol > self.col:
            raise ParseException( instring, loc, "Text not in expected column", self )
        newloc = loc + self.col - thiscol
        # return the raw text skipped over to reach the column as the token
        ret = instring[ loc: newloc ]
        return newloc, ret
class LineStart(_PositionToken):
    """
    Zero-width matcher that succeeds only when the current parse position is
    in column 1, i.e. at the beginning of a line within the parse string.
    Example: C{(LineStart() + 'AAA' + restOfLine)} matches only lines that
    begin with 'AAA'.
    """
    def __init__( self ):
        super(LineStart,self).__init__()
        self.errmsg = "Expected start of line"
    def parseImpl( self, instring, loc, doActions=True ):
        # col() is 1-based: anything other than column 1 is a failure
        if col(loc, instring) != 1:
            raise ParseException(instring, loc, self.errmsg, self)
        return loc, []
class LineEnd(_PositionToken):
    """
    Zero-or-one-width matcher that succeeds at the end of a line within the
    parse string: either on a newline character (which it consumes and
    returns) or at the very end of the input.
    """
    def __init__( self ):
        super(LineEnd,self).__init__()
        # skip ordinary whitespace, but not the newline we are looking for
        self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
        self.errmsg = "Expected end of line"
    def parseImpl( self, instring, loc, doActions=True ):
        # end of input counts as end of line; advance past the end so a
        # following StringEnd/LineEnd does not rematch at the same spot
        if loc == len(instring):
            return loc+1, []
        if loc < len(instring):
            if instring[loc] == "\n":
                # consume the newline and return it as the matched token
                return loc+1, "\n"
            raise ParseException(instring, loc, self.errmsg, self)
        raise ParseException(instring, loc, self.errmsg, self)
class StringStart(_PositionToken):
    """
    Zero-width matcher that succeeds only at the beginning of the parse
    string (or after nothing but skippable whitespace/ignorables).
    """
    def __init__( self ):
        super(StringStart,self).__init__()
        self.errmsg = "Expected start of text"
    def parseImpl( self, instring, loc, doActions=True ):
        # position 0 passes outright; otherwise accept only if everything
        # before loc would have been skipped by preParse anyway
        if loc != 0 and loc != self.preParse( instring, 0 ):
            raise ParseException(instring, loc, self.errmsg, self)
        return loc, []
class StringEnd(_PositionToken):
    """
    Matches if current position is at the end of the parse string
    """
    def __init__( self ):
        super(StringEnd,self).__init__()
        self.errmsg = "Expected end of text"
    def parseImpl( self, instring, loc, doActions=True ):
        """Succeed only at (or beyond) the end of the input string.

        Returns len(instring)+1 when exactly at the end (so a repeated match
        cannot loop at the same position), and loc unchanged when already
        past the end (e.g. after a preceding LineEnd consumed past it).
        Raises ParseException when input remains.
        """
        if loc < len(instring):
            raise ParseException(instring, loc, self.errmsg, self)
        if loc == len(instring):
            return loc+1, []
        # loc > len(instring): already advanced past the end - succeed
        # without advancing further.  (A trailing 'else: raise' branch was
        # removed: <, ==, > exhaust all cases, so it was unreachable.)
        return loc, []
class WordStart(_PositionToken):
    """
    Zero-width matcher for a left word boundary: succeeds when the current
    character is in C{wordChars} (default=C{printables}) and the preceding
    character is not, and always at position 0 (start of the parse string).
    Use C{WordStart(alphanums)} to approximate the regex C{\\b} behavior.
    """
    def __init__(self, wordChars = printables):
        super(WordStart,self).__init__()
        self.wordChars = set(wordChars)
        self.errmsg = "Not at the start of a word"
    def parseImpl(self, instring, loc, doActions=True ):
        # position 0 always qualifies as a word start
        if loc == 0:
            return loc, []
        # the previous character must NOT be a word character...
        if instring[loc-1] in self.wordChars:
            raise ParseException(instring, loc, self.errmsg, self)
        # ...and the current character must be one
        if instring[loc] not in self.wordChars:
            raise ParseException(instring, loc, self.errmsg, self)
        return loc, []
class WordEnd(_PositionToken):
    """
    Zero-width matcher for a right word boundary: succeeds when the
    preceding character is in C{wordChars} (default=C{printables}) and the
    current character is not, and always at the end of the parse string.
    Use C{WordEnd(alphanums)} to approximate the regex C{\\b} behavior.
    """
    def __init__(self, wordChars = printables):
        super(WordEnd,self).__init__()
        self.wordChars = set(wordChars)
        # whitespace may be the very character that ends the word - keep it
        self.skipWhitespace = False
        self.errmsg = "Not at the end of a word"
    def parseImpl(self, instring, loc, doActions=True ):
        # at (or past) the end of the string always counts as a word end;
        # this also covers the empty-string case
        if loc >= len(instring):
            return loc, []
        # the current character must NOT be a word character...
        if instring[loc] in self.wordChars:
            raise ParseException(instring, loc, self.errmsg, self)
        # ...and the previous character must be one
        if instring[loc-1] not in self.wordChars:
            raise ParseException(instring, loc, self.errmsg, self)
        return loc, []
class ParseExpression(ParserElement):
    """
    Abstract subclass of ParserElement, for combining and post-processing parsed tokens.
    """
    def __init__( self, exprs, savelist = False ):
        """Build from a single expression, a string, or an iterable of either.

        Strings are promoted to the current literal class (normally Literal).
        """
        super(ParseExpression,self).__init__(savelist)
        # collections.Iterable (a deprecated alias) was removed in Python
        # 3.10; import the ABC from its canonical home, keeping a fallback
        # for Python 2 where collections.abc does not exist.
        try:
            from collections.abc import Iterable
        except ImportError:
            from collections import Iterable
        if isinstance( exprs, _generatorType ):
            exprs = list(exprs)
        if isinstance( exprs, basestring ):
            self.exprs = [ ParserElement._literalStringClass( exprs ) ]
        elif isinstance( exprs, Iterable ):
            exprs = list(exprs)
            # if sequence of strings provided, wrap with Literal
            if all(isinstance(expr, basestring) for expr in exprs):
                exprs = map(ParserElement._literalStringClass, exprs)
            self.exprs = list(exprs)
        else:
            try:
                self.exprs = list( exprs )
            except TypeError:
                self.exprs = [ exprs ]
        self.callPreparse = False
    def __getitem__( self, i ):
        return self.exprs[i]
    def append( self, other ):
        """Append another sub-expression, invalidating the cached repr."""
        self.exprs.append( other )
        self.strRepr = None
        return self
    def leaveWhitespace( self ):
        """Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on
           all contained expressions."""
        self.skipWhitespace = False
        # copy contained expressions first so shared sub-expressions used
        # elsewhere in the grammar are not mutated
        self.exprs = [ e.copy() for e in self.exprs ]
        for e in self.exprs:
            e.leaveWhitespace()
        return self
    def ignore( self, other ):
        """Propagate an ignorable expression to self and all contained expressions."""
        if isinstance( other, Suppress ):
            if other not in self.ignoreExprs:
                super( ParseExpression, self).ignore( other )
                for e in self.exprs:
                    e.ignore( self.ignoreExprs[-1] )
        else:
            super( ParseExpression, self).ignore( other )
            for e in self.exprs:
                e.ignore( self.ignoreExprs[-1] )
        return self
    def __str__( self ):
        try:
            return super(ParseExpression,self).__str__()
        except Exception:
            pass
        if self.strRepr is None:
            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) )
        return self.strRepr
    def streamline( self ):
        """Optimize the expression tree by flattening same-class nesting."""
        super(ParseExpression,self).streamline()
        for e in self.exprs:
            e.streamline()
        # collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
        # but only if there are no parse actions or resultsNames on the nested And's
        # (likewise for Or's and MatchFirst's)
        if ( len(self.exprs) == 2 ):
            other = self.exprs[0]
            if ( isinstance( other, self.__class__ ) and
                  not(other.parseAction) and
                  other.resultsName is None and
                  not other.debug ):
                self.exprs = other.exprs[:] + [ self.exprs[1] ]
                self.strRepr = None
                self.mayReturnEmpty |= other.mayReturnEmpty
                self.mayIndexError  |= other.mayIndexError
            other = self.exprs[-1]
            if ( isinstance( other, self.__class__ ) and
                  not(other.parseAction) and
                  other.resultsName is None and
                  not other.debug ):
                self.exprs = self.exprs[:-1] + other.exprs[:]
                self.strRepr = None
                self.mayReturnEmpty |= other.mayReturnEmpty
                self.mayIndexError  |= other.mayIndexError
        self.errmsg = "Expected " + _ustr(self)
        return self
    def setResultsName( self, name, listAllMatches=False ):
        ret = super(ParseExpression,self).setResultsName(name,listAllMatches)
        return ret
    def validate( self, validateTrace=[] ):
        """Recursively validate sub-expressions, then check for left recursion."""
        tmp = validateTrace[:]+[self]
        for e in self.exprs:
            e.validate(tmp)
        self.checkRecursion( [] )
    def copy(self):
        # deep-copy the contained expressions so the copy is independent
        ret = super(ParseExpression,self).copy()
        ret.exprs = [e.copy() for e in self.exprs]
        return ret
class And(ParseExpression):
    """
    Requires all given C{ParseExpression}s to be found in the given order.
    Expressions may be separated by whitespace.
    May be constructed using the C{'+'} operator.
    May also be constructed using the C{'-'} operator, which will suppress backtracking.
    Example::
        integer = Word(nums)
        name_expr = OneOrMore(Word(alphas))
        expr = And([integer("id"),name_expr("name"),integer("age")])
        # more easily written as:
        expr = integer("id") + name_expr("name") + integer("age")
    """
    class _ErrorStop(Empty):
        # marker element inserted by the '-' operator; once it is passed,
        # failures become non-backtrackable ParseSyntaxExceptions
        def __init__(self, *args, **kwargs):
            super(And._ErrorStop,self).__init__(*args, **kwargs)
            self.name = '-'
            self.leaveWhitespace()
    def __init__( self, exprs, savelist = True ):
        super(And,self).__init__(exprs, savelist)
        # the sequence can only match empty if every element can
        self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
        # adopt whitespace handling from the first element of the sequence
        self.setWhitespaceChars( self.exprs[0].whiteChars )
        self.skipWhitespace = self.exprs[0].skipWhitespace
        self.callPreparse = True
    def parseImpl( self, instring, loc, doActions=True ):
        # pass False as last arg to _parse for first element, since we already
        # pre-parsed the string as part of our And pre-parsing
        loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False )
        errorStop = False
        for e in self.exprs[1:]:
            if isinstance(e, And._ErrorStop):
                errorStop = True
                continue
            if errorStop:
                # past the '-' marker: wrap ordinary failures so enclosing
                # alternatives will not backtrack past this point
                try:
                    loc, exprtokens = e._parse( instring, loc, doActions )
                except ParseSyntaxException:
                    raise
                except ParseBaseException as pe:
                    pe.__traceback__ = None
                    raise ParseSyntaxException._from_exception(pe)
                except IndexError:
                    raise ParseSyntaxException(instring, len(instring), self.errmsg, self)
            else:
                loc, exprtokens = e._parse( instring, loc, doActions )
            if exprtokens or exprtokens.haskeys():
                resultlist += exprtokens
        return loc, resultlist
    def __iadd__(self, other ):
        # promote string operands to the current literal class
        if isinstance( other, basestring ):
            other = ParserElement._literalStringClass( other )
        return self.append( other ) #And( [ self, other ] )
    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
            # once an element must consume input, later elements cannot be
            # reached at the same position, so stop checking
            if not e.mayReturnEmpty:
                break
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}"
        return self.strRepr
class Or(ParseExpression):
    """
    Requires that at least one C{ParseExpression} is found.
    If two expressions match, the expression that matches the longest string will be used.
    May be constructed using the C{'^'} operator.
    Example::
        # construct Or using '^' operator
        number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums))
        print(number.searchString("123 3.1416 789"))
    prints::
        [['123'], ['3.1416'], ['789']]
    """
    def __init__( self, exprs, savelist = False ):
        super(Or,self).__init__(exprs, savelist)
        if self.exprs:
            # the alternation can match empty if any alternative can
            self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
        else:
            self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        # track the failure that reached furthest into the input, to report
        # the most informative error if nothing matches
        maxExcLoc = -1
        maxException = None
        matches = []
        for e in self.exprs:
            try:
                # cheap trial parse (no actions) just to measure match length
                loc2 = e.tryParse( instring, loc )
            except ParseException as err:
                err.__traceback__ = None
                if err.loc > maxExcLoc:
                    maxException = err
                    maxExcLoc = err.loc
            except IndexError:
                if len(instring) > maxExcLoc:
                    maxException = ParseException(instring,len(instring),e.errmsg,self)
                    maxExcLoc = len(instring)
            else:
                # save match among all matches, to retry longest to shortest
                matches.append((loc2, e))
        if matches:
            # sort descending by end location so the longest match wins
            matches.sort(key=lambda x: -x[0])
            for _,e in matches:
                try:
                    # re-parse for real (with parse actions) this time
                    return e._parse( instring, loc, doActions )
                except ParseException as err:
                    err.__traceback__ = None
                    if err.loc > maxExcLoc:
                        maxException = err
                        maxExcLoc = err.loc
        if maxException is not None:
            maxException.msg = self.errmsg
            raise maxException
        else:
            raise ParseException(instring, loc, "no defined alternatives to match", self)
    def __ixor__(self, other ):
        # promote string operands to the current literal class
        if isinstance( other, basestring ):
            other = ParserElement._literalStringClass( other )
        return self.append( other ) #Or( [ self, other ] )
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}"
        return self.strRepr
    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
class MatchFirst(ParseExpression):
    """
    Requires that at least one C{ParseExpression} is found.
    If two expressions match, the first one listed is the one that will match.
    May be constructed using the C{'|'} operator.
    Example::
        # construct MatchFirst using '|' operator
        # watch the order of expressions to match
        number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
        print(number.searchString("123 3.1416 789")) #  Fail! -> [['123'], ['3'], ['1416'], ['789']]
        # put more selective expression first
        number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
        print(number.searchString("123 3.1416 789")) #  Better -> [['123'], ['3.1416'], ['789']]
    """
    def __init__( self, exprs, savelist = False ):
        super(MatchFirst,self).__init__(exprs, savelist)
        if self.exprs:
            # the alternation can match empty if any alternative can
            self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
        else:
            self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        # track the failure that reached furthest, for the best error message
        maxExcLoc = -1
        maxException = None
        for e in self.exprs:
            try:
                # first alternative to succeed wins - return immediately
                ret = e._parse( instring, loc, doActions )
                return ret
            except ParseException as err:
                if err.loc > maxExcLoc:
                    maxException = err
                    maxExcLoc = err.loc
            except IndexError:
                if len(instring) > maxExcLoc:
                    maxException = ParseException(instring,len(instring),e.errmsg,self)
                    maxExcLoc = len(instring)
        # only got here if no expression matched, raise exception for match that made it the furthest
        else:
            if maxException is not None:
                maxException.msg = self.errmsg
                raise maxException
            else:
                raise ParseException(instring, loc, "no defined alternatives to match", self)
    def __ior__(self, other ):
        # promote string operands to the current literal class
        if isinstance( other, basestring ):
            other = ParserElement._literalStringClass( other )
        return self.append( other ) #MatchFirst( [ self, other ] )
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}"
        return self.strRepr
    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
class Each(ParseExpression):
    """
    Requires all given C{ParseExpression}s to be found, but in any order.
    Expressions may be separated by whitespace.
    May be constructed using the C{'&'} operator.
    Example::
        color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
        shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
        integer = Word(nums)
        shape_attr = "shape:" + shape_type("shape")
        posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
        color_attr = "color:" + color("color")
        size_attr = "size:" + integer("size")
        # use Each (using operator '&') to accept attributes in any order
        # (shape and posn are required, color and size are optional)
        shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr)
        shape_spec.runTests('''
            shape: SQUARE color: BLACK posn: 100, 120
            shape: CIRCLE size: 50 color: BLUE posn: 50,80
            color:GREEN size:20 shape:TRIANGLE posn:20,40
            '''
            )
    prints::
        shape: SQUARE color: BLACK posn: 100, 120
        ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
        - color: BLACK
        - posn: ['100', ',', '120']
          - x: 100
          - y: 120
        - shape: SQUARE
        shape: CIRCLE size: 50 color: BLUE posn: 50,80
        ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
        - color: BLUE
        - posn: ['50', ',', '80']
          - x: 50
          - y: 80
        - shape: CIRCLE
        - size: 50
        color: GREEN size: 20 shape: TRIANGLE posn: 20,40
        ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
        - color: GREEN
        - posn: ['20', ',', '40']
          - x: 20
          - y: 40
        - shape: TRIANGLE
        - size: 20
    """
    def __init__( self, exprs, savelist = True ):
        super(Each,self).__init__(exprs, savelist)
        self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
        self.skipWhitespace = True
        # defer classifying the sub-expressions until the first parse
        self.initExprGroups = True
    def parseImpl( self, instring, loc, doActions=True ):
        if self.initExprGroups:
            # classify sub-expressions once (on first use): optionals,
            # zero-or-more, one-or-more, and plain required elements
            self.opt1map = dict((id(e.expr),e) for e in self.exprs if isinstance(e,Optional))
            opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ]
            opt2 = [ e for e in self.exprs if e.mayReturnEmpty and not isinstance(e,Optional)]
            self.optionals = opt1 + opt2
            self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]
            self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]
            self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
            self.required += self.multirequired
            self.initExprGroups = False
        tmpLoc = loc
        tmpReqd = self.required[:]
        tmpOpt  = self.optionals[:]
        matchOrder = []
        # repeatedly sweep the remaining expressions, recording the order in
        # which they actually match, until a full sweep matches nothing
        keepMatching = True
        while keepMatching:
            tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
            failed = []
            for e in tmpExprs:
                try:
                    tmpLoc = e.tryParse( instring, tmpLoc )
                except ParseException:
                    failed.append(e)
                else:
                    # map bare sub-expressions back to their Optional wrapper
                    matchOrder.append(self.opt1map.get(id(e),e))
                    if e in tmpReqd:
                        tmpReqd.remove(e)
                    elif e in tmpOpt:
                        tmpOpt.remove(e)
            if len(failed) == len(tmpExprs):
                keepMatching = False
        if tmpReqd:
            missing = ", ".join(_ustr(e) for e in tmpReqd)
            raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )
        # add any unmatched Optionals, in case they have default values defined
        matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt]
        # now re-parse for real, in the discovered order, running actions
        resultlist = []
        for e in matchOrder:
            loc,results = e._parse(instring,loc,doActions)
            resultlist.append(results)
        finalResults = sum(resultlist, ParseResults([]))
        return loc, finalResults
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}"
        return self.strRepr
    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
class ParseElementEnhance(ParserElement):
    """
    Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens.
    """
    def __init__( self, expr, savelist=False ):
        super(ParseElementEnhance,self).__init__(savelist)
        if isinstance( expr, basestring ):
            # promote plain strings; if the configured literal class is not a
            # Token (e.g. a suppressing wrapper), wrap a Literal inside it
            if issubclass(ParserElement._literalStringClass, Token):
                expr = ParserElement._literalStringClass(expr)
            else:
                expr = ParserElement._literalStringClass(Literal(expr))
        self.expr = expr
        self.strRepr = None
        if expr is not None:
            # inherit parsing characteristics from the wrapped expression
            self.mayIndexError = expr.mayIndexError
            self.mayReturnEmpty = expr.mayReturnEmpty
            self.setWhitespaceChars( expr.whiteChars )
            self.skipWhitespace = expr.skipWhitespace
            self.saveAsList = expr.saveAsList
            self.callPreparse = expr.callPreparse
            self.ignoreExprs.extend(expr.ignoreExprs)
    def parseImpl( self, instring, loc, doActions=True ):
        # simply delegate to the wrapped expression
        if self.expr is not None:
            return self.expr._parse( instring, loc, doActions, callPreParse=False )
        else:
            raise ParseException("",loc,self.errmsg,self)
    def leaveWhitespace( self ):
        self.skipWhitespace = False
        # NOTE(review): expr is copied before the None-check on the next
        # line; a None expr would raise AttributeError here - confirm intended
        self.expr = self.expr.copy()
        if self.expr is not None:
            self.expr.leaveWhitespace()
        return self
    def ignore( self, other ):
        # propagate the ignorable expression into the wrapped expression
        if isinstance( other, Suppress ):
            if other not in self.ignoreExprs:
                super( ParseElementEnhance, self).ignore( other )
                if self.expr is not None:
                    self.expr.ignore( self.ignoreExprs[-1] )
        else:
            super( ParseElementEnhance, self).ignore( other )
            if self.expr is not None:
                self.expr.ignore( self.ignoreExprs[-1] )
        return self
    def streamline( self ):
        super(ParseElementEnhance,self).streamline()
        if self.expr is not None:
            self.expr.streamline()
        return self
    def checkRecursion( self, parseElementList ):
        # revisiting self in the same chain means the grammar is left-recursive
        if self in parseElementList:
            raise RecursiveGrammarException( parseElementList+[self] )
        subRecCheckList = parseElementList[:] + [ self ]
        if self.expr is not None:
            self.expr.checkRecursion( subRecCheckList )
    def validate( self, validateTrace=[] ):
        tmp = validateTrace[:]+[self]
        if self.expr is not None:
            self.expr.validate(tmp)
        self.checkRecursion( [] )
    def __str__( self ):
        try:
            return super(ParseElementEnhance,self).__str__()
        except Exception:
            pass
        if self.strRepr is None and self.expr is not None:
            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
        return self.strRepr
class FollowedBy(ParseElementEnhance):
    """
    Lookahead matching of the given parse expression.  C{FollowedBy}
    does I{not} advance the parsing position within the input string, it only
    verifies that the specified parse expression matches at the current
    position.  C{FollowedBy} always returns a null token list.
    Example::
        # use FollowedBy to match a label only if it is followed by a ':'
        data_word = Word(alphas)
        label = data_word + FollowedBy(':')
        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
        OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint()
    prints::
        [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
    """
    def __init__( self, expr ):
        super(FollowedBy,self).__init__(expr)
        # a pure lookahead never consumes input, so it always "returns empty"
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        # tryParse raises ParseException on failure; on success the result is
        # deliberately discarded and loc is left unchanged
        self.expr.tryParse( instring, loc )
        return loc, []
class NotAny(ParseElementEnhance):
    """
    Lookahead to disallow matching with the given parse expression.  C{NotAny}
    does I{not} advance the parsing position within the input string, it only
    verifies that the specified parse expression does I{not} match at the current
    position.  Also, C{NotAny} does I{not} skip over leading whitespace. C{NotAny}
    always returns a null token list.  May be constructed using the '~' operator.
    Example::
        # take care not to mistake keywords for identifiers
        ident = ~(CaselessKeyword("AND") | CaselessKeyword("OR")) + Word(alphas)
    """
    def __init__( self, expr ):
        super(NotAny,self).__init__(expr)
        #~ self.leaveWhitespace()
        self.skipWhitespace = False  # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
        # a negative lookahead never consumes input
        self.mayReturnEmpty = True
        self.errmsg = "Found unwanted token, "+_ustr(self.expr)
    def parseImpl( self, instring, loc, doActions=True ):
        # succeed only if the wrapped expression would NOT match here
        if self.expr.canParseNext(instring, loc):
            raise ParseException(instring, loc, self.errmsg, self)
        return loc, []
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "~{" + _ustr(self.expr) + "}"
        return self.strRepr
class _MultipleMatch(ParseElementEnhance):
    """Common implementation for OneOrMore/ZeroOrMore: repeat the wrapped
    expression until it fails or the optional stopOn sentinel matches."""
    def __init__( self, expr, stopOn=None):
        super(_MultipleMatch, self).__init__(expr)
        self.saveAsList = True
        ender = stopOn
        if isinstance(ender, basestring):
            ender = ParserElement._literalStringClass(ender)
        # a negative lookahead for the sentinel, checked before each repetition
        self.not_ender = ~ender if ender is not None else None
    def parseImpl( self, instring, loc, doActions=True ):
        # bind frequently-used attributes to locals for speed in the loop
        self_expr_parse = self.expr._parse
        self_skip_ignorables = self._skipIgnorables
        check_ender = self.not_ender is not None
        if check_ender:
            try_not_ender = self.not_ender.tryParse
        # must be at least one (but first see if we are the stopOn sentinel;
        # if so, fail)
        if check_ender:
            try_not_ender(instring, loc)
        loc, tokens = self_expr_parse( instring, loc, doActions, callPreParse=False )
        try:
            hasIgnoreExprs = (not not self.ignoreExprs)
            while 1:
                if check_ender:
                    try_not_ender(instring, loc)
                if hasIgnoreExprs:
                    preloc = self_skip_ignorables( instring, loc )
                else:
                    preloc = loc
                loc, tmptokens = self_expr_parse( instring, preloc, doActions )
                if tmptokens or tmptokens.haskeys():
                    tokens += tmptokens
        except (ParseException,IndexError):
            # normal loop exit: the repetition simply stopped matching
            pass
        return loc, tokens
class OneOrMore(_MultipleMatch):
    """
    Repetition of one or more of the given expression.
    Parameters:
     - expr - expression that must match one or more times
     - stopOn - (default=C{None}) - expression for a terminating sentinel
          (only required if the sentinel would ordinarily match the repetition
          expression)
    Example::
        data_word = Word(alphas)
        label = data_word + FollowedBy(':')
        text = "shape: SQUARE posn: upper left color: BLACK"
        # use stopOn=label so the repeated data words do not swallow the
        # following label; could also be written as (attr_expr * (1,))
        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
        OneOrMore(attr_expr).parseString(text).pprint()
        # -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]
    """
    def __str__( self ):
        """Render as '{expr}...' unless an explicit name was assigned."""
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{%s}..." % _ustr(self.expr)
        return self.strRepr
class ZeroOrMore(_MultipleMatch):
    """
    Optional repetition of zero or more of the given expression.
    Parameters:
     - expr - expression that must match zero or more times
     - stopOn - (default=C{None}) - expression for a terminating sentinel
          (only required if the sentinel would ordinarily match the repetition
          expression)
    Example: similar to L{OneOrMore}
    """
    def __init__( self, expr, stopOn=None):
        super(ZeroOrMore,self).__init__(expr, stopOn=stopOn)
        # zero repetitions is a legal (empty) match
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        try:
            return super(ZeroOrMore, self).parseImpl(instring, loc, doActions)
        except (ParseException,IndexError):
            # no matches at all - still a success, with an empty result
            return loc, []
    def __str__( self ):
        """Render as '[expr]...' unless an explicit name was assigned."""
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "[%s]..." % _ustr(self.expr)
        return self.strRepr
class _NullToken(object):
    """Falsy sentinel type used by Optional to mean "no default value given"."""
    def __bool__(self):
        return False
    __nonzero__ = __bool__  # Python 2 truth-testing protocol alias
    def __str__(self):
        return ""
# module-level sentinel instance shared by all Optional elements
_optionalNotMatched = _NullToken()
class Optional(ParseElementEnhance):
    """
    Optional matching of the given expression.
    Parameters:
     - expr - expression that must match zero or more times
     - default (optional) - value to be returned if the optional expression is not found.
    Example::
        # US postal code can be a 5-digit zip, plus optional 4-digit qualifier
        zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4)))
        zip.runTests('''
            # traditional ZIP code
            12345
            # ZIP+4 form
            12101-0001
            # invalid ZIP
            98765-
            ''')
    prints::
        # traditional ZIP code
        12345
        ['12345']
        # ZIP+4 form
        12101-0001
        ['12101-0001']
        # invalid ZIP
        98765-
             ^
        FAIL: Expected end of text (at char 5), (line:1, col:6)
    """
    def __init__( self, expr, default=_optionalNotMatched ):
        super(Optional,self).__init__( expr, savelist=False )
        self.saveAsList = self.expr.saveAsList
        # _optionalNotMatched sentinel distinguishes "no default" from a
        # caller-supplied default of None/''/0 etc.
        self.defaultValue = default
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        try:
            loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
        except (ParseException,IndexError):
            # expression absent: substitute the default (if one was given)
            if self.defaultValue is not _optionalNotMatched:
                if self.expr.resultsName:
                    # preserve the wrapped expression's results name on the default
                    tokens = ParseResults([ self.defaultValue ])
                    tokens[self.expr.resultsName] = self.defaultValue
                else:
                    tokens = [ self.defaultValue ]
            else:
                tokens = []
        return loc, tokens
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "[" + _ustr(self.expr) + "]"
        return self.strRepr
class SkipTo(ParseElementEnhance):
    """
    Token for skipping over all undefined text until the matched expression is found.
    Parameters:
     - expr - target expression marking the end of the data to be skipped
     - include - (default=C{False}) if True, the target expression is also parsed
          (the skipped text and target expression are returned as a 2-element list).
     - ignore - (default=C{None}) used to define grammars (typically quoted strings and
          comments) that might contain false matches to the target expression
     - failOn - (default=C{None}) define expressions that are not allowed to be
          included in the skipped test; if found before the target expression is found,
          the SkipTo is not a match
    Example::
        report = '''
            Outstanding Issues Report - 1 Jan 2000
               # | Severity | Description                               |  Days Open
            -----+----------+-------------------------------------------+-----------
             101 | Critical | Intermittent system crash                 |          6
              94 | Cosmetic | Spelling error on Login ('log|n')         |         14
              79 | Minor    | System slow when running too many reports |         47
            '''
        integer = Word(nums)
        SEP = Suppress('|')
        # use SkipTo to simply match everything up until the next SEP
        # - ignore quoted strings, so that a '|' character inside a quoted string does not match
        # - parse action will call token.strip() for each matched token, i.e., the description body
        string_data = SkipTo(SEP, ignore=quotedString)
        string_data.setParseAction(tokenMap(str.strip))
        ticket_expr = (integer("issue_num") + SEP
                      + string_data("sev") + SEP
                      + string_data("desc") + SEP
                      + integer("days_open"))
        for tkt in ticket_expr.searchString(report):
            print tkt.dump()
    prints::
        ['101', 'Critical', 'Intermittent system crash', '6']
        - days_open: 6
        - desc: Intermittent system crash
        - issue_num: 101
        - sev: Critical
        ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
        - days_open: 14
        - desc: Spelling error on Login ('log|n')
        - issue_num: 94
        - sev: Cosmetic
        ['79', 'Minor', 'System slow when running too many reports', '47']
        - days_open: 47
        - desc: System slow when running too many reports
        - issue_num: 79
        - sev: Minor
    """
    def __init__( self, other, include=False, ignore=None, failOn=None ):
        super( SkipTo, self ).__init__( other )
        self.ignoreExpr = ignore
        self.mayReturnEmpty = True
        self.mayIndexError = False
        self.includeMatch = include
        self.asList = False
        if isinstance(failOn, basestring):
            # promote a plain string failOn to the current literal class
            self.failOn = ParserElement._literalStringClass(failOn)
        else:
            self.failOn = failOn
        self.errmsg = "No match found for "+_ustr(self.expr)
    def parseImpl( self, instring, loc, doActions=True ):
        startloc = loc
        instrlen = len(instring)
        expr = self.expr
        # bind bound-methods to locals for speed in the scan loop
        expr_parse = self.expr._parse
        self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None
        self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None

        # scan forward one position at a time until the target matches
        tmploc = loc
        while tmploc <= instrlen:
            if self_failOn_canParseNext is not None:
                # break if failOn expression matches
                if self_failOn_canParseNext(instring, tmploc):
                    break

            if self_ignoreExpr_tryParse is not None:
                # advance past ignore expressions
                while 1:
                    try:
                        tmploc = self_ignoreExpr_tryParse(instring, tmploc)
                    except ParseBaseException:
                        break

            try:
                # trial-match the target here (no actions, no preparse)
                expr_parse(instring, tmploc, doActions=False, callPreParse=False)
            except (ParseException, IndexError):
                # no match, advance loc in string
                tmploc += 1
            else:
                # matched skipto expr, done
                break

        else:
            # ran off the end of the input string without matching skipto expr, fail
            raise ParseException(instring, loc, self.errmsg, self)

        # build up return values
        loc = tmploc
        skiptext = instring[startloc:loc]
        skipresult = ParseResults(skiptext)

        if self.includeMatch:
            # re-parse the target for real (with actions) and append its tokens
            loc, mat = expr_parse(instring,loc,doActions,callPreParse=False)
            skipresult += mat

        return loc, skipresult
class Forward(ParseElementEnhance):
    """
    Forward declaration of an expression to be defined later -
    used for recursive grammars, such as algebraic infix notation.
    When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.
    Note: take care when assigning to C{Forward} not to overlook precedence of operators.
    Specifically, '|' has a lower precedence than '<<', so that::
        fwdExpr << a | b | c
    will actually be evaluated as::
        (fwdExpr << a) | b | c
    thereby leaving b and c out as parseable alternatives.  It is recommended that you
    explicitly group the values inserted into the C{Forward}::
        fwdExpr << (a | b | c)
    Converting to use the '<<=' operator instead will avoid this problem.
    See L{ParseResults.pprint} for an example of a recursive parser created using
    C{Forward}.
    """
    def __init__( self, other=None ):
        # other may be None: the real expression is attached later via '<<'
        super(Forward,self).__init__( other, savelist=False )
    def __lshift__( self, other ):
        """Attach the deferred expression and adopt its parsing characteristics."""
        if isinstance( other, basestring ):
            other = ParserElement._literalStringClass(other)
        self.expr = other
        self.strRepr = None
        self.mayIndexError = self.expr.mayIndexError
        self.mayReturnEmpty = self.expr.mayReturnEmpty
        self.setWhitespaceChars( self.expr.whiteChars )
        self.skipWhitespace = self.expr.skipWhitespace
        self.saveAsList = self.expr.saveAsList
        self.ignoreExprs.extend(self.expr.ignoreExprs)
        return self
    def __ilshift__(self, other):
        # '<<=' is the recommended spelling; delegates to '<<'
        return self << other
    def leaveWhitespace( self ):
        # unlike the base class, do NOT propagate into self.expr - the
        # deferred expression may not be attached yet
        self.skipWhitespace = False
        return self
    def streamline( self ):
        # guard against infinite recursion on self-referential grammars
        if not self.streamlined:
            self.streamlined = True
            if self.expr is not None:
                self.expr.streamline()
        return self
    def validate( self, validateTrace=[] ):
        # only descend if this Forward has not been visited in this trace
        if self not in validateTrace:
            tmp = validateTrace[:]+[self]
            if self.expr is not None:
                self.expr.validate(tmp)
        self.checkRecursion([])
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        return self.__class__.__name__ + ": ..."

        # stubbed out for now - creates awful memory and perf issues
        # (everything below this point is deliberately unreachable; it is the
        # full-repr implementation retained for reference)
        self._revertClass = self.__class__
        self.__class__ = _ForwardNoRecurse
        try:
            if self.expr is not None:
                retString = _ustr(self.expr)
            else:
                retString = "None"
        finally:
            self.__class__ = self._revertClass
        return self.__class__.__name__ + ": " + retString
    def copy(self):
        if self.expr is not None:
            return super(Forward,self).copy()
        else:
            # not yet defined: return a new Forward that resolves through self
            ret = Forward()
            ret <<= self
            return ret
class _ForwardNoRecurse(Forward):
    """Internal stand-in used to short-circuit recursive C{Forward.__str__} calls."""
    def __str__(self):
        # Fixed placeholder; never expands the wrapped expression.
        return "..."
class TokenConverter(ParseElementEnhance):
    """
    Abstract subclass of C{ParseExpression}, for converting parsed results.
    """
    def __init__(self, expr, savelist=False):
        # savelist is accepted for signature compatibility but ignored;
        # converters always manage their own result-list behavior.
        super(TokenConverter, self).__init__(expr)
        self.saveAsList = False
class Combine(TokenConverter):
    """
    Converter to concatenate all matching tokens to a single string.
    By default, the matching patterns must also be contiguous in the input string;
    this can be disabled by specifying C{'adjacent=False'} in the constructor.
    Example::
        real = Word(nums) + '.' + Word(nums)
        print(real.parseString('3.1416')) # -> ['3', '.', '1416']
        # will also erroneously match the following
        print(real.parseString('3. 1416')) # -> ['3', '.', '1416']

        real = Combine(Word(nums) + '.' + Word(nums))
        print(real.parseString('3.1416')) # -> ['3.1416']
        # no match when there are internal spaces
        print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...)
    """
    def __init__(self, expr, joinString="", adjacent=True):
        super(Combine, self).__init__(expr)
        # When adjacent, disable whitespace skipping inside the contained
        # expression, but re-enable it on the Combine itself.
        if adjacent:
            self.leaveWhitespace()
        self.adjacent = adjacent
        self.skipWhitespace = True
        self.joinString = joinString
        self.callPreparse = True

    def ignore(self, other):
        # Adjacent mode must not let ignorables sneak between pieces, so the
        # ignore expression is attached at the ParserElement level only.
        if self.adjacent:
            ParserElement.ignore(self, other)
        else:
            super(Combine, self).ignore(other)
        return self

    def postParse(self, instring, loc, tokenlist):
        # Collapse all matched tokens into one joined string, keeping any
        # results names that were defined on the contained expression.
        combined = tokenlist.copy()
        del combined[:]
        joined = "".join(tokenlist._asStringList(self.joinString))
        combined += ParseResults([joined], modal=self.modalResults)
        if self.resultsName and combined.haskeys():
            return [combined]
        return combined
class Group(TokenConverter):
    """
    Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions.
    Example::
        ident = Word(alphas)
        num = Word(nums)
        term = ident | num
        func = ident + Optional(delimitedList(term))
        print(func.parseString("fn a,b,100"))  # -> ['fn', 'a', 'b', '100']

        func = ident + Group(Optional(delimitedList(term)))
        print(func.parseString("fn a,b,100"))  # -> ['fn', ['a', 'b', '100']]
    """
    def __init__(self, expr):
        super(Group, self).__init__(expr)
        # Preserve the grouped tokens as a nested sub-list in the results.
        self.saveAsList = True

    def postParse(self, instring, loc, tokenlist):
        # Nest the matched tokens one level deeper.
        return [tokenlist]
class Dict(TokenConverter):
    """
    Converter to return a repetitive expression as a list, but also as a dictionary.
    Each element can also be referenced using the first token in the expression as its key.
    Useful for tabular report scraping when the first column can be used as a item key.
    Example::
        data_word = Word(alphas)
        label = data_word + FollowedBy(':')
        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))

        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))

        # print attributes as plain groups
        print(OneOrMore(attr_expr).parseString(text).dump())

        # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
        result = Dict(OneOrMore(Group(attr_expr))).parseString(text)
        print(result.dump())

        # access named fields as dict entries, or output as dict
        print(result['shape'])
        print(result.asDict())
    prints::
        ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']

        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
        - color: light blue
        - posn: upper left
        - shape: SQUARE
        - texture: burlap
        SQUARE

        {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
    See more examples at L{ParseResults} of accessing fields by results name.
    """
    def __init__( self, expr ):
        super(Dict,self).__init__( expr )
        self.saveAsList = True

    def postParse( self, instring, loc, tokenlist ):
        # For each sub-token group, use its first token as a results-name key
        # on the enclosing tokenlist (mutated in place).
        for i,tok in enumerate(tokenlist):
            if len(tok) == 0:
                continue
            ikey = tok[0]
            if isinstance(ikey,int):
                # integer keys are converted to their string form
                ikey = _ustr(tok[0]).strip()
            if len(tok)==1:
                # key with no value -> empty-string placeholder
                tokenlist[ikey] = _ParseResultsWithOffset("",i)
            elif len(tok)==2 and not isinstance(tok[1],ParseResults):
                # simple [key, scalar] pair
                tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
            else:
                # multi-token value: strip the key and keep the remainder,
                # unwrapping a single unnamed value to a scalar
                dictvalue = tok.copy() #ParseResults(i)
                del dictvalue[0]
                if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.haskeys()):
                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
                else:
                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)
        if self.resultsName:
            return [ tokenlist ]
        else:
            return tokenlist
class Suppress(TokenConverter):
    """
    Converter for ignoring the results of a parsed expression.
    Example::
        source = "a, b, c,d"
        wd = Word(alphas)
        wd_list1 = wd + ZeroOrMore(',' + wd)
        print(wd_list1.parseString(source))

        # often, delimiters that are useful during parsing are just in the
        # way afterward - use Suppress to keep them out of the parsed output
        wd_list2 = wd + ZeroOrMore(Suppress(',') + wd)
        print(wd_list2.parseString(source))
    prints::
        ['a', ',', 'b', ',', 'c', ',', 'd']
        ['a', 'b', 'c', 'd']
    (See also L{delimitedList}.)
    """
    def postParse(self, instring, loc, tokenlist):
        # Discard every matched token.
        return []

    def suppress(self):
        # Already suppressing - calling suppress() again is a no-op.
        return self
class OnlyOnce(object):
    """
    Wrapper for parse actions, to ensure they are only called once.
    Raises ParseException on any call after the first successful one;
    call reset() to re-arm the wrapper.
    """
    def __init__(self, methodCall):
        self.callable = _trim_arity(methodCall)
        self.called = False

    def __call__(self, s, l, t):
        if self.called:
            raise ParseException(s, l, "")
        # Only mark as called once the wrapped action returns successfully.
        results = self.callable(s, l, t)
        self.called = True
        return results

    def reset(self):
        # Re-arm so the wrapped action may fire again.
        self.called = False
def traceParseAction(f):
    """
    Decorator for debugging parse actions.
    When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})".}
    When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised.
    Example::
        wd = Word(alphas)

        @traceParseAction
        def remove_duplicate_chars(tokens):
            return ''.join(sorted(set(''.join(tokens))))

        wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)
        print(wds.parseString("slkdjs sld sldd sdlf sdljf"))
    prints::
        >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
        <<leaving remove_duplicate_chars (ret: 'dfjkls')
        ['dfjkls']
    """
    f = _trim_arity(f)
    def z(*paArgs):
        # Parse actions receive (s, l, t) as their last three arguments;
        # a bound method additionally has the instance as paArgs[0].
        thisFunc = f.__name__
        s,l,t = paArgs[-3:]
        if len(paArgs)>3:
            thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
        sys.stderr.write( ">>entering %s(line: '%s', %d, %r)\n" % (thisFunc,line(l,s),l,t) )
        try:
            ret = f(*paArgs)
        except Exception as exc:
            # Log the exception, then re-raise so tracing never swallows it.
            sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
            raise
        sys.stderr.write( "<<leaving %s (ret: %r)\n" % (thisFunc,ret) )
        return ret
    try:
        z.__name__ = f.__name__
    except AttributeError:
        pass
    return z
#
# global helpers
#
def delimitedList(expr, delim=",", combine=False):
    """
    Helper to define a delimited list of expressions - the delimiter defaults to ','.
    By default, the list elements and delimiters can have intervening whitespace, and
    comments, but this can be overridden by passing C{combine=True} in the constructor.
    If C{combine} is set to C{True}, the matching tokens are returned as a single token
    string, with the delimiters included; otherwise, the matching tokens are returned
    as a list of tokens, with the delimiters suppressed.
    Example::
        delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc']
        delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
    """
    dlName = "%s [%s %s]..." % (_ustr(expr), _ustr(delim), _ustr(expr))
    if combine:
        # keep delimiters and fuse everything into one token
        return Combine(expr + ZeroOrMore(delim + expr)).setName(dlName)
    # drop delimiters from the results
    return (expr + ZeroOrMore(Suppress(delim) + expr)).setName(dlName)
def countedArray( expr, intExpr=None ):
    """
    Helper to define a counted list of expressions.
    This helper defines a pattern of the form::
        integer expr expr expr...
    where the leading integer tells how many expr expressions follow.
    The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed.
    If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value.
    Example::
        countedArray(Word(alphas)).parseString('2 ab cd ef')  # -> ['ab', 'cd']

        # in this parser, the leading integer value is given in binary,
        # '10' indicating that 2 values are in the array
        binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2))
        countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef')  # -> ['ab', 'cd']
    """
    arrayExpr = Forward()
    def countFieldParseAction(s,l,t):
        # Re-bind the Forward at parse time to exactly n copies of expr,
        # based on the count value that was just parsed (0 -> empty match).
        n = t[0]
        arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
        # suppress the count token from the results
        return []
    if intExpr is None:
        intExpr = Word(nums).setParseAction(lambda t:int(t[0]))
    else:
        # copy so the caller's expression is not mutated below
        intExpr = intExpr.copy()
    intExpr.setName("arrayLen")
    # callDuringTry=True: the Forward must be re-bound even in lookahead parses
    intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
    return ( intExpr + arrayExpr ).setName('(len) ' + _ustr(expr) + '...')
def _flatten(L):
ret = []
for i in L:
if isinstance(i,list):
ret.extend(_flatten(i))
else:
ret.append(i)
return ret
def matchPreviousLiteral(expr):
    """
    Helper to define an expression that is indirectly defined from
    the tokens matched in a previous expression, that is, it looks
    for a 'repeat' of a previous expression.  For example::
        first = Word(nums)
        second = matchPreviousLiteral(first)
        matchExpr = first + ":" + second
    will match C{"1:1"}, but not C{"1:2"}.  Because this matches a
    previous literal, will also match the leading C{"1:1"} in C{"1:10"}.
    If this is not desired, use C{matchPreviousExpr}.
    Do I{not} use with packrat parsing enabled.
    """
    rep = Forward()
    def copyTokenToRepeater(s,l,t):
        # Re-bind the Forward at parse time to literal(s) matching exactly
        # what the source expression just produced.
        if t:
            if len(t) == 1:
                rep << t[0]
            else:
                # flatten t tokens and require each as a Literal in sequence
                tflat = _flatten(t.asList())
                rep << And(Literal(tt) for tt in tflat)
        else:
            # source matched nothing -> repeat matches nothing too
            rep << Empty()
    # callDuringTry=True so the Forward is kept current even during lookaheads
    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
    rep.setName('(prev) ' + _ustr(expr))
    return rep
def matchPreviousExpr(expr):
    """
    Helper to define an expression that is indirectly defined from
    the tokens matched in a previous expression, that is, it looks
    for a 'repeat' of a previous expression.  For example::
        first = Word(nums)
        second = matchPreviousExpr(first)
        matchExpr = first + ":" + second
    will match C{"1:1"}, but not C{"1:2"}.  Because this matches by
    expressions, will I{not} match the leading C{"1:1"} in C{"1:10"};
    the expressions are evaluated first, and then compared, so
    C{"1"} is compared with C{"10"}.
    Do I{not} use with packrat parsing enabled.
    """
    rep = Forward()
    # Parse with a copy of the same expression, then verify the tokens match.
    e2 = expr.copy()
    rep <<= e2
    def copyTokenToRepeater(s,l,t):
        # Capture the tokens from the first occurrence...
        matchTokens = _flatten(t.asList())
        def mustMatchTheseTokens(s,l,t):
            # ...and reject the repeat unless it produced the same tokens.
            theseTokens = _flatten(t.asList())
            if  theseTokens != matchTokens:
                raise ParseException("",0,"")
        # setParseAction (not add) replaces any previous checker each time
        # the source expression matches anew.
        rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
    rep.setName('(prev) ' + _ustr(expr))
    return rep
def _escapeRegexRangeChars(s):
    """Backslash-escape characters that are special inside a regex character class."""
    # escape these chars: \ ^ - ]
    for special in r"\^-]":
        s = s.replace(special, _bslash + special)
    # spell newline/tab as escape sequences rather than raw characters
    s = s.replace("\n", r"\n")
    s = s.replace("\t", r"\t")
    return _ustr(s)
def oneOf( strs, caseless=False, useRegex=True ):
    """
    Helper to quickly define a set of alternative Literals, and makes sure to do
    longest-first testing when there is a conflict, regardless of the input order,
    but returns a C{L{MatchFirst}} for best performance.
    Parameters:
     - strs - a string of space-delimited literals, or a collection of string literals
     - caseless - (default=C{False}) - treat all literals as caseless
     - useRegex - (default=C{True}) - as an optimization, will generate a Regex
          object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or
          if creating a C{Regex} raises an exception)
    Example::
        comp_oper = oneOf("< = > <= >= !=")
        var = Word(alphas)
        number = Word(nums)
        term = var | number
        comparison_expr = term + comp_oper + term
        print(comparison_expr.searchString("B = 12  AA=23 B<=AA AA>12"))
    prints::
        [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
    """
    if caseless:
        isequal = ( lambda a,b: a.upper() == b.upper() )
        masks = ( lambda a,b: b.upper().startswith(a.upper()) )
        parseElementClass = CaselessLiteral
    else:
        isequal = ( lambda a,b: a == b )
        # masks(a, b) is true when symbol b starts with (is masked by) symbol a
        masks = ( lambda a,b: b.startswith(a) )
        parseElementClass = Literal

    symbols = []
    if isinstance(strs,basestring):
        symbols = strs.split()
    elif isinstance(strs, collections.Iterable):
        # NOTE(review): on Python 3.10+ this needs collections.abc.Iterable;
        # kept as-is here for py2/early-py3 compatibility.
        symbols = list(strs)
    else:
        warnings.warn("Invalid argument to oneOf, expected string or iterable",
                SyntaxWarning, stacklevel=2)
    if not symbols:
        return NoMatch()

    # Remove duplicates and reorder so that any symbol that is a prefix of a
    # later symbol comes AFTER it (longest-first matching).
    i = 0
    while i < len(symbols)-1:
        cur = symbols[i]
        for j,other in enumerate(symbols[i+1:]):
            if ( isequal(other, cur) ):
                # duplicate - drop the later occurrence
                del symbols[i+j+1]
                break
            elif ( masks(cur, other) ):
                # 'other' starts with 'cur': hoist the longer symbol ahead
                del symbols[i+j+1]
                symbols.insert(i,other)
                cur = other
                break
        else:
            # no changes at position i - advance
            i += 1

    if not caseless and useRegex:
        #~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
        try:
            if len(symbols)==len("".join(symbols)):
                # all symbols are single characters - a character class suffices
                return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ).setName(' | '.join(symbols))
            else:
                return Regex( "|".join(re.escape(sym) for sym in symbols) ).setName(' | '.join(symbols))
        except Exception:
            warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
                    SyntaxWarning, stacklevel=2)

    # last resort, just use MatchFirst
    return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols))
def dictOf(key, value):
    """
    Helper to easily and clearly define a dictionary by specifying the respective patterns
    for the key and value.  Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens
    in the proper order.  The key pattern can include delimiting markers or punctuation,
    as long as they are suppressed, thereby leaving the significant key text.  The value
    pattern can include named results, so that the C{Dict} results can include named token
    fields.
    Example::
        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
        print(OneOrMore(attr_expr).parseString(text).dump())

        attr_label = label
        attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)

        # similar to Dict, but simpler call format
        result = dictOf(attr_label, attr_value).parseString(text)
        print(result.dump())
        print(result['shape'])
        print(result.shape)  # object attribute access works too
        print(result.asDict())
    prints::
        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
        - color: light blue
        - posn: upper left
        - shape: SQUARE
        - texture: burlap
        SQUARE
        SQUARE
        {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
    """
    kv_pair = Group(key + value)
    return Dict(ZeroOrMore(kv_pair))
def originalTextFor(expr, asString=True):
    """
    Helper to return the original, untokenized text for a given expression.  Useful to
    restore the parsed fields of an HTML start tag into the raw tag text itself, or to
    revert separate tokens with intervening whitespace back to the original matching
    input text. By default, returns astring containing the original parsed text.

    If the optional C{asString} argument is passed as C{False}, then the return value is a
    C{L{ParseResults}} containing any results names that were originally matched, and a
    single token containing the original matched text from the input string.  So if
    the expression passed to C{L{originalTextFor}} contains expressions with defined
    results names, you must set C{asString} to C{False} if you want to preserve those
    results name values.
    Example::
        src = "this is test <b> bold <i>text</i> </b> normal text "
        for tag in ("b","i"):
            opener,closer = makeHTMLTags(tag)
            patt = originalTextFor(opener + SkipTo(closer) + closer)
            print(patt.searchString(src)[0])
    prints::
        ['<b> bold <i>text</i> </b>']
        ['<i>text</i>']
    """
    # Empty markers record the input locations just before and after expr.
    locMarker = Empty().setParseAction(lambda s,loc,t: loc)
    endlocMarker = locMarker.copy()
    # don't skip leading whitespace before recording the end location
    endlocMarker.callPreparse = False
    matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
    if asString:
        extractText = lambda s,l,t: s[t._original_start:t._original_end]
    else:
        def extractText(s,l,t):
            # pop the bookkeeping names so they don't leak into the results
            t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]]
    matchExpr.setParseAction(extractText)
    # share ignore expressions with the wrapped expression
    matchExpr.ignoreExprs = expr.ignoreExprs
    return matchExpr
def ungroup(expr):
    """
    Helper to undo pyparsing's default grouping of And expressions, even
    if all but one are non-empty.
    """
    # unwrap the single nested result produced by the grouped expression
    return TokenConverter(expr).setParseAction(lambda t: t[0])
def locatedExpr(expr):
    """
    Helper to decorate a returned token with its starting and ending locations in the input string.
    This helper adds the following results names:
     - locn_start = location where matched expression begins
     - locn_end = location where matched expression ends
     - value = the actual parsed results
    Be careful if the input text contains C{<TAB>} characters, you may want to call
    C{L{ParserElement.parseWithTabs}}
    Example::
        wd = Word(alphas)
        for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
            print(match)
    prints::
        [[0, 'ljsdf', 5]]
        [[8, 'lksdjjf', 15]]
        [[18, 'lkkjj', 23]]
    """
    # zero-width marker whose parse action reports the current location
    locator = Empty().setParseAction(lambda s, l, t: l)
    start = locator("locn_start")
    # end marker must not skip whitespace, or the reported end would overshoot
    end = locator.copy().leaveWhitespace()("locn_end")
    return Group(start + expr("value") + end)
# convenience constants for positional expressions
empty = Empty().setName("empty")
lineStart = LineStart().setName("lineStart")
lineEnd = LineEnd().setName("lineEnd")
stringStart = StringStart().setName("stringStart")
stringEnd = StringEnd().setName("stringEnd")

# internal expressions used by srange() to parse regex-style [...] bodies:
# backslash-escaped punctuation, e.g. r"\]" -> "]"
_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
# hex escapes: \x41 or legacy \0x41 -> the corresponding character
_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16)))
# octal escapes: \041 -> the corresponding character
_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8)))
# any single character, escaped or literal
_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | Word(printables, excludeChars=r'\]', exact=1) | Regex(r"\w", re.UNICODE)
# dash-separated range of two single characters, e.g. a-z
_charRange = Group(_singleChar + Suppress("-") + _singleChar)
# full bracket expression, with optional leading '^' captured as "negate"
_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"
def srange(s):
    r"""
    Helper to easily define string ranges for use in Word construction.  Borrows
    syntax from regexp '[]' string range definitions::
        srange("[0-9]")   -> "0123456789"
        srange("[a-z]")   -> "abcdefghijklmnopqrstuvwxyz"
        srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
    The input string must be enclosed in []'s, and the returned string is the expanded
    character set joined into a single string.
    The values enclosed in the []'s may be:
     - a single character
     - an escaped character with a leading backslash (such as C{\-} or C{\]})
     - an escaped hex character with a leading C{'\x'} (C{\x21}, which is a C{'!'} character)
         (C{\0x##} is also supported for backwards compatibility)
     - an escaped octal character with a leading C{'\0'} (C{\041}, which is a C{'!'} character)
     - a range of any of the above, separated by a dash (C{'a-z'}, etc.)
     - any combination of the above (C{'aeiouy'}, C{'a-zA-Z0-9_$'}, etc.)
    """
    # A parsed part is either a bare character (str) or a ParseResults pair
    # from _charRange; expand the latter into the full run of characters.
    _expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1))
    try:
        return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body)
    except Exception:
        # deliberately lenient: any malformed range yields an empty string
        return ""
def matchOnlyAtCol(n):
    """
    Helper method for defining parse actions that require matching at a specific
    column in the input text.
    """
    def verifyCol(strg, locn, toks):
        # col() reports the 1-based column of locn within strg
        if col(locn, strg) != n:
            raise ParseException(strg, locn, "matched token not at column %d" % n)
    return verifyCol
def replaceWith(replStr):
    """
    Helper method for common parse actions that simply return a literal value.  Especially
    useful when used with C{L{transformString<ParserElement.transformString>}()}.
    Example::
        num = Word(nums).setParseAction(lambda toks: int(toks[0]))
        na = oneOf("N/A NA").setParseAction(replaceWith(math.nan))
        term = na | num

        OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234]
    """
    def _replace(s, l, t):
        # ignore the matched tokens entirely; always yield the fixed value
        return [replStr]
    return _replace
def removeQuotes(s, l, t):
    """
    Helper parse action for removing quotation marks from parsed quoted strings.
    Example::
        # by default, quotation marks are included in parsed results
        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]

        # use removeQuotes to strip quotation marks from parsed results
        quotedString.setParseAction(removeQuotes)
        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
    """
    quoted = t[0]
    # strip exactly one character from each end (the quote marks)
    return quoted[1:-1]
def tokenMap(func, *args):
    """
    Helper to define a parse action by mapping a function to all elements of a ParseResults list.If any additional
    args are passed, they are forwarded to the given function as additional arguments after
    the token, as in C{hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))}, which will convert the
    parsed data to an integer using base 16.
    Example (compare the last to example in L{ParserElement.transformString}::
        hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16))
        hex_ints.runTests('''
            00 11 22 aa FF 0a 0d 1a
            ''')

        upperword = Word(alphas).setParseAction(tokenMap(str.upper))
        OneOrMore(upperword).runTests('''
            my kingdom for a horse
            ''')

        wd = Word(alphas).setParseAction(tokenMap(str.title))
        OneOrMore(wd).setParseAction(' '.join).runTests('''
            now is the winter of our discontent made glorious summer by this sun of york
            ''')
    prints::
        00 11 22 aa FF 0a 0d 1a
        [0, 17, 34, 170, 255, 10, 13, 26]

        my kingdom for a horse
        ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']

        now is the winter of our discontent made glorious summer by this sun of york
        ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
    """
    def pa(s, l, t):
        # apply func (with any extra args) to every matched token
        return [func(item, *args) for item in t]

    # Label the parse action with the mapped function's name, for debug output.
    try:
        mapped_name = getattr(func, '__name__',
                              getattr(func, '__class__').__name__)
    except Exception:
        mapped_name = str(func)
    pa.__name__ = mapped_name

    return pa
# Parse action converting every matched token to upper case.
upcaseTokens = tokenMap(lambda t: _ustr(t).upper())
"""(Deprecated) Helper parse action to convert tokens to upper case. Deprecated in favor of L{pyparsing_common.upcaseTokens}"""

# Parse action converting every matched token to lower case.
downcaseTokens = tokenMap(lambda t: _ustr(t).lower())
"""(Deprecated) Helper parse action to convert tokens to lower case. Deprecated in favor of L{pyparsing_common.downcaseTokens}"""
def _makeTags(tagStr, xml):
    """Internal helper to construct opening and closing tag expressions, given a tag name"""
    if isinstance(tagStr,basestring):
        resname = tagStr
        # HTML tags match caselessly; XML tags are case-sensitive
        tagStr = Keyword(tagStr, caseless=not xml)
    else:
        resname = tagStr.name

    tagAttrName = Word(alphas,alphanums+"_-:")
    if (xml):
        # XML: attribute values must be double-quoted and '=' is required
        tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
        openTag = Suppress("<") + tagStr("tag") + \
                Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
    else:
        # HTML: values may be quoted or bare words, '=' is optional, and
        # attribute names are lower-cased in the results
        printablesLessRAbrack = "".join(c for c in printables if c not in ">")
        tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
        openTag = Suppress("<") + tagStr("tag") + \
                Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
                Optional( Suppress("=") + tagAttrValue ) ))) + \
                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
    closeTag = Combine(_L("</") + tagStr + ">")

    # e.g. tag "a" -> results names "startA" / "endA"
    openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % resname)
    closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % resname)
    openTag.tag = resname
    closeTag.tag = resname
    return openTag, closeTag
def makeHTMLTags(tagStr):
    """
    Helper to construct opening and closing tag expressions for HTML, given a tag name. Matches
    tags in either upper or lower case, attributes with namespaces and with quoted or unquoted values.
    Example::
        text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
        # makeHTMLTags returns pyparsing expressions for the opening and closing tags as a 2-tuple
        a, a_end = makeHTMLTags("A")
        link_expr = a + SkipTo(a_end)("link_text") + a_end

        for link in link_expr.searchString(text):
            # attributes in the <A> tag (like "href" shown here) are also accessible as named results
            print(link.link_text, '->', link.href)
    prints::
        pyparsing -> http://pyparsing.wikispaces.com
    """
    # HTML mode: caseless tags, optional/unquoted attribute values
    return _makeTags(tagStr, xml=False)
def makeXMLTags(tagStr):
    """
    Helper to construct opening and closing tag expressions for XML, given a tag name. Matches
    tags only in the given upper/lower case.

    Example: similar to L{makeHTMLTags}
    """
    # XML mode: case-sensitive tags, double-quoted attribute values required
    return _makeTags(tagStr, xml=True)
def withAttribute(*args, **attrDict):
    """
    Helper to create a validating parse action to be used with start tags created
    with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag
    with a required attribute value, to avoid false matches on common tags such as
    C{<TD>} or C{<DIV>}.

    Call C{withAttribute} with a series of attribute names and values. Specify the list
    of filter attributes names and values as:
     - keyword arguments, as in C{(align="right")}, or
     - as an explicit dict with C{**} operator, when an attribute name is also a Python
       reserved word, as in C{**{"class":"Customer", "align":"right"}}
     - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") )
    For attribute names with a namespace prefix, you must use the second form.  Attribute
    names are matched insensitive to upper/lower case.

    If just testing for C{class} (with or without a namespace), use C{L{withClass}}.

    To verify that the attribute exists, but without specifying a value, pass
    C{withAttribute.ANY_VALUE} as the value.
    Example::
        html = '''
            <div>
            Some text
            <div type="grid">1 4 0 1 0</div>
            <div type="graph">1,3 2,3 1,1</div>
            <div>this has no type</div>
            </div>
        '''
        div,div_end = makeHTMLTags("div")

        # only match div tag having a type attribute with value "grid"
        div_grid = div().setParseAction(withAttribute(type="grid"))
        grid_expr = div_grid + SkipTo(div | div_end)("body")
        for grid_header in grid_expr.searchString(html):
            print(grid_header.body)

        # construct a match with any div tag having a type attribute, regardless of the value
        div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE))
        div_expr = div_any_type + SkipTo(div | div_end)("body")
        for div_header in div_expr.searchString(html):
            print(div_header.body)
    prints::
        1 4 0 1 0
        1 4 0 1 0
        1,3 2,3 1,1
    """
    # Normalize the requirements to a list of (name, value) pairs, whether
    # given as positional tuples or as keyword arguments.
    if args:
        required = [(name, value) for name, value in args]
    else:
        required = [(name, value) for name, value in attrDict.items()]

    def pa(s, l, tokens):
        for attrName, attrValue in required:
            if attrName not in tokens:
                raise ParseException(s, l, "no matching attribute " + attrName)
            # ANY_VALUE only requires presence, not a particular value
            if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
                raise ParseException(s, l, "attribute '%s' has value '%s', must be '%s'" %
                                     (attrName, tokens[attrName], attrValue))
    return pa
# Sentinel: match any attribute value, as long as the attribute is present.
withAttribute.ANY_VALUE = object()
def withClass(classname, namespace=''):
    """
    Simplified version of C{L{withAttribute}} when matching on a div class - made
    difficult because C{class} is a reserved word in Python.
    Example::
        html = '''
            <div>
            Some text
            <div class="grid">1 4 0 1 0</div>
            <div class="graph">1,3 2,3 1,1</div>
            <div>this &lt;div&gt; has no class</div>
            </div>
        '''
        div,div_end = makeHTMLTags("div")
        div_grid = div().setParseAction(withClass("grid"))

        grid_expr = div_grid + SkipTo(div | div_end)("body")
        for grid_header in grid_expr.searchString(html):
            print(grid_header.body)

        div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE))
        div_expr = div_any_type + SkipTo(div | div_end)("body")
        for div_header in div_expr.searchString(html):
            print(div_header.body)
    prints::
        1 4 0 1 0
        1 4 0 1 0
        1,3 2,3 1,1
    """
    # Build the attribute name, with an optional namespace prefix.
    if namespace:
        classattr = "%s:class" % namespace
    else:
        classattr = "class"
    return withAttribute(**{classattr: classname})
# Associativity constants for infixNotation operator definitions; the values
# are opaque sentinels compared by identity.
opAssoc = _Constants()
opAssoc.LEFT = object()
opAssoc.RIGHT = object()
def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ):
    """
    Helper method for constructing grammars of expressions made up of
    operators working in a precedence hierarchy.  Operators may be unary or
    binary, left- or right-associative.  Parse actions can also be attached
    to operator expressions. The generated parser will also recognize the use
    of parentheses to override operator precedences (see example below).

    Note: if you define a deep operator list, you may see performance issues
    when using infixNotation. See L{ParserElement.enablePackrat} for a
    mechanism to potentially improve your parser performance.

    Parameters:
     - baseExpr - expression representing the most basic element for the nested
     - opList - list of tuples, one for each operator precedence level in the
      expression grammar; each tuple is of the form
      (opExpr, numTerms, rightLeftAssoc, parseAction), where:
       - opExpr is the pyparsing expression for the operator;
          may also be a string, which will be converted to a Literal;
          if numTerms is 3, opExpr is a tuple of two expressions, for the
          two operators separating the 3 terms
       - numTerms is the number of terms for this operator (must
          be 1, 2, or 3)
       - rightLeftAssoc is the indicator whether the operator is
          right or left associative, using the pyparsing-defined
          constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}.
       - parseAction is the parse action to be associated with
          expressions matching this operator expression (the
          parse action tuple member may be omitted); if the parse action
          is passed a tuple or list of functions, this is equivalent to
          calling C{setParseAction(*fn)} (L{ParserElement.setParseAction})
     - lpar - expression for matching left-parentheses (default=C{Suppress('(')})
     - rpar - expression for matching right-parentheses (default=C{Suppress(')')})
    Example::
        # simple example of four-function arithmetic with ints and variable names
        integer = pyparsing_common.signed_integer
        varname = pyparsing_common.identifier

        arith_expr = infixNotation(integer | varname,
            [
            ('-', 1, opAssoc.RIGHT),
            (oneOf('* /'), 2, opAssoc.LEFT),
            (oneOf('+ -'), 2, opAssoc.LEFT),
            ])

        arith_expr.runTests('''
            5+3*6
            (5+3)*6
            -2--11
            ''', fullDump=False)
    prints::
        5+3*6
        [[5, '+', [3, '*', 6]]]

        (5+3)*6
        [[[5, '+', 3], '*', 6]]

        -2--11
        [[['-', 2], '-', ['-', 11]]]
    """
    ret = Forward()
    # Innermost level: the base expression, or a parenthesized full expression.
    lastExpr = baseExpr | ( lpar + ret + rpar )
    # Build one Forward per precedence level, each falling back to the level
    # below it; opList is ordered from highest to lowest precedence.
    for i,operDef in enumerate(opList):
        # pad with None so the 4th (parse action) member is optional
        opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]
        termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr
        if arity == 3:
            if opExpr is None or len(opExpr) != 2:
                raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions")
            opExpr1, opExpr2 = opExpr
        thisExpr = Forward().setName(termName)
        if rightLeftAssoc == opAssoc.LEFT:
            # Left-associative: match one operand then one-or-more operator
            # repetitions at the SAME level (lastExpr), grouped flat.
            if arity == 1:
                matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) )
            elif arity == 2:
                if opExpr is not None:
                    matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) )
                else:
                    # None opExpr means juxtaposition (implicit operator)
                    matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) )
            elif arity == 3:
                matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \
                            Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr )
            else:
                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
        elif rightLeftAssoc == opAssoc.RIGHT:
            # Right-associative: recurse into THIS level (thisExpr) on the right.
            if arity == 1:
                # try to avoid LR with this extra test
                if not isinstance(opExpr, Optional):
                    opExpr = Optional(opExpr)
                matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )
            elif arity == 2:
                if opExpr is not None:
                    matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) )
                else:
                    matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) )
            elif arity == 3:
                matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \
                            Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr )
            else:
                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
        else:
            raise ValueError("operator must indicate right or left associativity")
        if pa:
            # a tuple/list of parse actions is expanded into setParseAction(*pa)
            if isinstance(pa, (tuple, list)):
                matchExpr.setParseAction(*pa)
            else:
                matchExpr.setParseAction(pa)
        # this level matches its operator form, or falls through to the level below
        thisExpr <<= ( matchExpr.setName(termName) | lastExpr )
        lastExpr = thisExpr
    ret <<= lastExpr
    return ret
# Backward-compatible alias for infixNotation.
operatorPrecedence = infixNotation
"""(Deprecated) Former name of C{L{infixNotation}}, will be dropped in a future release."""

# Pre-built quoted-string expressions. Each regex matches the opening quote
# and body (allowing doubled quotes and backslash escapes); the closing quote
# is appended separately and the whole match is combined into one token.
dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"').setName("string enclosed in double quotes")
sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("string enclosed in single quotes")
# note: '+' binds tighter than '|', so this is (dbl-regex + '"') | (sgl-regex + "'")
quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"'|
                       Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("quotedString using single or double quotes")
# a quoted string with a leading 'u' prefix, as in u"..."
unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal")
def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
    """
    Helper method for defining nested lists enclosed in opening and closing
    delimiters ("(" and ")" are the default).
    Parameters:
     - opener - opening character for a nested list (default=C{"("}); can also be a pyparsing expression
     - closer - closing character for a nested list (default=C{")"}); can also be a pyparsing expression
     - content - expression for items within the nested lists (default=C{None})
     - ignoreExpr - expression for ignoring opening and closing delimiters (default=C{quotedString})
    If an expression is not provided for the content argument, the nested
    expression will capture all whitespace-delimited content between delimiters
    as a list of separate values.
    Use the C{ignoreExpr} argument to define expressions that may contain
    opening or closing characters that should not be treated as opening
    or closing characters for nesting, such as quotedString or a comment
    expression.  Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}.
    The default is L{quotedString}, but if no expressions are to be ignored,
    then pass C{None} for this argument.
    Example::
        data_type = oneOf("void int short long char float double")
        decl_data_type = Combine(data_type + Optional(Word('*')))
        ident = Word(alphas+'_', alphanums+'_')
        number = pyparsing_common.number
        arg = Group(decl_data_type + ident)
        LPAR,RPAR = map(Suppress, "()")
        code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment))
        c_function = (decl_data_type("type")
                      + ident("name")
                      + LPAR + Optional(delimitedList(arg), [])("args") + RPAR
                      + code_body("body"))
        c_function.ignore(cStyleComment)
        source_code = '''
            int is_odd(int x) {
                return (x%2);
            }
            int dec_to_hex(char hchar) {
                if (hchar >= '0' && hchar <= '9') {
                    return (ord(hchar)-ord('0'));
                } else {
                    return (10+ord(hchar)-ord('A'));
                }
            }
        '''
        for func in c_function.searchString(source_code):
            print("%(name)s (%(type)s) args: %(args)s" % func)
    prints::
        is_odd (int) args: [['int', 'x']]
        dec_to_hex (int) args: [['char', 'hchar']]
    """
    if opener == closer:
        raise ValueError("opening and closing strings cannot be the same")
    if content is None:
        # Build a default content expression; a suitable default can only be
        # synthesized when the delimiters are plain strings.
        if isinstance(opener,basestring) and isinstance(closer,basestring):
            if len(opener) == 1 and len(closer)==1:
                # single-char delimiters: exclude them (and whitespace) via CharsNotIn
                if ignoreExpr is not None:
                    content = (Combine(OneOrMore(~ignoreExpr +
                                    CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
                                ).setParseAction(lambda t:t[0].strip()))
                else:
                    content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS
                                ).setParseAction(lambda t:t[0].strip()))
            else:
                # multi-char delimiters: must use negative lookahead on the
                # full Literal, consuming one non-whitespace char at a time
                if ignoreExpr is not None:
                    content = (Combine(OneOrMore(~ignoreExpr +
                                    ~Literal(opener) + ~Literal(closer) +
                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
                                ).setParseAction(lambda t:t[0].strip()))
                else:
                    content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) +
                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
                                ).setParseAction(lambda t:t[0].strip()))
        else:
            raise ValueError("opening and closing arguments must be strings if no content expression is given")
    # Recursive grammar: a nested expression may itself contain nested
    # expressions, hence the Forward reference to ret inside its own body.
    ret = Forward()
    if ignoreExpr is not None:
        ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
    else:
        ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) )
    ret.setName('nested %s%s expression' % (opener,closer))
    return ret
def indentedBlock(blockStatementExpr, indentStack, indent=True):
    """
    Helper method for defining space-delimited indentation blocks, such as
    those used to define block statements in Python source code.
    Parameters:
     - blockStatementExpr - expression defining syntax of statement that
       is repeated within the indented block
     - indentStack - list created by caller to manage indentation stack
       (multiple statementWithIndentedBlock expressions within a single grammar
       should share a common indentStack)
     - indent - boolean indicating whether block must be indented beyond the
       the current level; set to False for block of left-most statements
       (default=C{True})
    A valid block must contain at least one C{blockStatement}.
    Example::
        data = '''
        def A(z):
          A1
          B = 100
          G = A2
          A2
          A3
        B
        def BB(a,b,c):
          BB1
          def BBA():
            bba1
            bba2
            bba3
        C
        D
        def spam(x,y):
             def eggs(z):
                 pass
        '''
        indentStack = [1]
        stmt = Forward()
        identifier = Word(alphas, alphanums)
        funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":")
        func_body = indentedBlock(stmt, indentStack)
        funcDef = Group( funcDecl + func_body )
        rvalue = Forward()
        funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")")
        rvalue << (funcCall | identifier | Word(nums))
        assignment = Group(identifier + "=" + rvalue)
        stmt << ( funcDef | assignment | identifier )
        module_body = OneOrMore(stmt)
        parseTree = module_body.parseString(data)
        parseTree.pprint()
    prints::
        [['def',
          'A',
          ['(', 'z', ')'],
          ':',
          [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
         'B',
         ['def',
          'BB',
          ['(', 'a', 'b', 'c', ')'],
          ':',
          [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
         'C',
         'D',
         ['def',
          'spam',
          ['(', 'x', 'y', ')'],
          ':',
          [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]]
    """
    # Parse actions below close over indentStack; they compare the current
    # parse column against the top of the stack to classify each line as a
    # peer statement, the start of a sub-block, or the end of a block.
    def checkPeerIndent(s,l,t):
        # nothing to check once we are at end-of-input
        if l >= len(s): return
        curCol = col(l,s)
        if curCol != indentStack[-1]:
            if curCol > indentStack[-1]:
                raise ParseFatalException(s,l,"illegal nesting")
            raise ParseException(s,l,"not a peer entry")
    def checkSubIndent(s,l,t):
        curCol = col(l,s)
        if curCol > indentStack[-1]:
            # deeper indent starts a new nested block
            indentStack.append( curCol )
        else:
            raise ParseException(s,l,"not a subentry")
    def checkUnindent(s,l,t):
        if l >= len(s): return
        curCol = col(l,s)
        # NOTE(review): indentStack[-2] assumes at least two levels are on the
        # stack; with indent=True that holds because checkSubIndent pushed one.
        if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]):
            raise ParseException(s,l,"not an unindent")
        indentStack.pop()
    # NL must consume only line-ends, not leading indentation, so its
    # whitespace chars are restricted to tab/space.
    NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())
    INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT')
    PEER = Empty().setParseAction(checkPeerIndent).setName('')
    UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT')
    if indent:
        smExpr = Group( Optional(NL) +
            #~ FollowedBy(blockStatementExpr) +
            INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT)
    else:
        smExpr = Group( Optional(NL) +
            (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) )
    # allow backslash-newline line continuations inside block statements
    blockStatementExpr.ignore(_bslash + LineEnd())
    return smExpr.setName('indented block')
# 8-bit (Latin-1 supplement) alphabetic and punctuation character classes;
# srange notation here uses \0xHH escapes for the byte values.
alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
# Expressions matching any well-formed HTML open/close tag.
anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:").setName('any tag'))
# Map of the most common HTML character entity names to their literal chars.
_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(),'><& "\''))
commonHTMLEntity = Regex('&(?P<entity>' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity")
def replaceHTMLEntity(t):
    """Helper parser action to replace common HTML entities with their special characters"""
    # t.entity is the named group captured by commonHTMLEntity above
    return _htmlEntityMap.get(t.entity)
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment")
"Comment of the form C{/* ... */}"
htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment")
"Comment of the form C{<!-- ... -->}"
restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line")
dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment")
"Comment of the form C{// ... (to end of line)}"
cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/'| dblSlashComment).setName("C++ style comment")
"Comment of either form C{L{cStyleComment}} or C{L{dblSlashComment}}"
javaStyleComment = cppStyleComment
"Same as C{L{cppStyleComment}}"
pythonStyleComment = Regex(r"#.*").setName("Python style comment")
"Comment of the form C{# ... (to end of line)}"
# One item of a comma-separated list: printable words (no commas), optionally
# joined by internal spaces/tabs as long as a comma or line end doesn't follow.
_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') +
                                  Optional( Word(" \t") +
                                            ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem")
commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList")
"""(Deprecated) Predefined expression of 1 or more printable words or quoted strings, separated by commas.
This expression is deprecated in favor of L{pyparsing_common.comma_separated_list}."""
# some other useful expressions - using lower-case class name since we are really using this as a namespace
class pyparsing_common:
    """
    Here are some common low-level expressions that may be useful in jump-starting parser development:
     - numeric forms (L{integers<integer>}, L{reals<real>}, L{scientific notation<sci_real>})
     - common L{programming identifiers<identifier>}
     - network addresses (L{MAC<mac_address>}, L{IPv4<ipv4_address>}, L{IPv6<ipv6_address>})
     - ISO8601 L{dates<iso8601_date>} and L{datetime<iso8601_datetime>}
     - L{UUID<uuid>}
     - L{comma-separated list<comma_separated_list>}
    Parse actions:
     - C{L{convertToInteger}}
     - C{L{convertToFloat}}
     - C{L{convertToDate}}
     - C{L{convertToDatetime}}
     - C{L{stripHTMLTags}}
     - C{L{upcaseTokens}}
     - C{L{downcaseTokens}}
    Example::
        pyparsing_common.number.runTests('''
            # any int or real number, returned as the appropriate type
            100
            -100
            +100
            3.14159
            6.02e23
            1e-12
            ''')
        pyparsing_common.fnumber.runTests('''
            # any int or real number, returned as float
            100
            -100
            +100
            3.14159
            6.02e23
            1e-12
            ''')
        pyparsing_common.hex_integer.runTests('''
            # hex numbers
            100
            FF
            ''')
        pyparsing_common.fraction.runTests('''
            # fractions
            1/2
            -3/4
            ''')
        pyparsing_common.mixed_integer.runTests('''
            # mixed fractions
            1
            1/2
            -3/4
            1-3/4
            ''')
        import uuid
        pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
        pyparsing_common.uuid.runTests('''
            # uuid
            12345678-1234-5678-1234-567812345678
            ''')
    prints::
        # any int or real number, returned as the appropriate type
        100
        [100]
        -100
        [-100]
        +100
        [100]
        3.14159
        [3.14159]
        6.02e23
        [6.02e+23]
        1e-12
        [1e-12]
        # any int or real number, returned as float
        100
        [100.0]
        -100
        [-100.0]
        +100
        [100.0]
        3.14159
        [3.14159]
        6.02e23
        [6.02e+23]
        1e-12
        [1e-12]
        # hex numbers
        100
        [256]
        FF
        [255]
        # fractions
        1/2
        [0.5]
        -3/4
        [-0.75]
        # mixed fractions
        1
        [1]
        1/2
        [0.5]
        -3/4
        [-0.75]
        1-3/4
        [1.75]
        # uuid
        12345678-1234-5678-1234-567812345678
        [UUID('12345678-1234-5678-1234-567812345678')]
    """
    convertToInteger = tokenMap(int)
    """
    Parse action for converting parsed integers to Python int
    """
    convertToFloat = tokenMap(float)
    """
    Parse action for converting parsed numbers to Python float
    """
    integer = Word(nums).setName("integer").setParseAction(convertToInteger)
    """expression that parses an unsigned integer, returns an int"""
    hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int,16))
    """expression that parses a hexadecimal integer, returns an int"""
    signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger)
    """expression that parses an integer with optional leading sign, returns an int"""
    # NOTE: calling a ParserElement with no args (signed_integer()) returns a
    # copy, so each operand below gets its own independent parse action.
    fraction = (signed_integer().setParseAction(convertToFloat) + '/' + signed_integer().setParseAction(convertToFloat)).setName("fraction")
    """fractional expression of an integer divided by an integer, returns a float"""
    fraction.addParseAction(lambda t: t[0]/t[-1])
    mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction")
    """mixed integer of the form 'integer - fraction', with optional leading integer, returns float"""
    mixed_integer.addParseAction(sum)
    real = Regex(r'[+-]?\d+\.\d*').setName("real number").setParseAction(convertToFloat)
    """expression that parses a floating point number and returns a float"""
    sci_real = Regex(r'[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat)
    """expression that parses a floating point number with optional scientific notation and returns a float"""
    # streamlining this expression makes the docs nicer-looking
    number = (sci_real | real | signed_integer).streamline()
    """any numeric expression, returns the corresponding Python type"""
    fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat)
    """any int or real number, returned as float"""
    identifier = Word(alphas+'_', alphanums+'_').setName("identifier")
    """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')"""
    ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address")
    "IPv4 address (C{0.0.0.0 - 255.255.255.255})"
    _ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer")
    _full_ipv6_address = (_ipv6_part + (':' + _ipv6_part)*7).setName("full IPv6 address")
    _short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part)*(0,6)) + "::" + Optional(_ipv6_part + (':' + _ipv6_part)*(0,6))).setName("short IPv6 address")
    # a short-form ("::") address is only valid if it abbreviates at least one
    # group, i.e. fewer than 8 explicit groups appear
    _short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8)
    _mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address")
    ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address")
    "IPv6 address (long, short, or mixed form)"
    # \1 backreference forces the same delimiter (:, ., or -) throughout
    mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address")
    "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)"
    @staticmethod
    def convertToDate(fmt="%Y-%m-%d"):
        """
        Helper to create a parse action for converting parsed date string to Python datetime.date
        Params -
         - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"})
        Example::
            date_expr = pyparsing_common.iso8601_date.copy()
            date_expr.setParseAction(pyparsing_common.convertToDate())
            print(date_expr.parseString("1999-12-31"))
        prints::
            [datetime.date(1999, 12, 31)]
        """
        def cvt_fn(s,l,t):
            try:
                return datetime.strptime(t[0], fmt).date()
            except ValueError as ve:
                # re-raise as a parse error so callers get location info
                raise ParseException(s, l, str(ve))
        return cvt_fn
    @staticmethod
    def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"):
        """
        Helper to create a parse action for converting parsed datetime string to Python datetime.datetime
        Params -
         - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"})
        Example::
            dt_expr = pyparsing_common.iso8601_datetime.copy()
            dt_expr.setParseAction(pyparsing_common.convertToDatetime())
            print(dt_expr.parseString("1999-12-31T23:59:59.999"))
        prints::
            [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
        """
        def cvt_fn(s,l,t):
            try:
                return datetime.strptime(t[0], fmt)
            except ValueError as ve:
                # re-raise as a parse error so callers get location info
                raise ParseException(s, l, str(ve))
        return cvt_fn
    iso8601_date = Regex(r'(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?').setName("ISO8601 date")
    "ISO8601 date (C{yyyy-mm-dd})"
    iso8601_datetime = Regex(r'(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime")
    "ISO8601 datetime (C{yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)}) - trailing seconds, milliseconds, and timezone optional; accepts separating C{'T'} or C{' '}"
    uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID")
    "UUID (C{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx})"
    # internal expression used by stripHTMLTags to delete any HTML tag
    _html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress()
    @staticmethod
    def stripHTMLTags(s, l, tokens):
        """
        Parse action to remove HTML tags from web page HTML source
        Example::
            # strip HTML links from normal text
            text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
            td,td_end = makeHTMLTags("TD")
            table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end
            print(table_text.parseString(text).body) # -> 'More info at the pyparsing wiki page'
        """
        return pyparsing_common._html_stripper.transformString(tokens[0])
    # like the module-level _commasepitem, but written with leading negative
    # lookaheads so trailing whitespace is kept out of the token
    _commasepitem = Combine(OneOrMore(~Literal(",") + ~LineEnd() + Word(printables, excludeChars=',')
                                        + Optional( White(" \t") ) ) ).streamline().setName("commaItem")
    comma_separated_list = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("comma separated list")
    """Predefined expression of 1 or more printable words or quoted strings, separated by commas."""
    upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper()))
    """Parse action to convert tokens to upper case."""
    downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower()))
    """Parse action to convert tokens to lower case."""
if __name__ == "__main__":
    # Self-test/demo: build a minimal "SELECT ... FROM ..." SQL grammar and
    # exercise it (and a few pyparsing_common expressions) via runTests.
    selectToken = CaselessLiteral("select")
    fromToken = CaselessLiteral("from")
    ident = Word(alphas, alphanums + "_$")
    # dotted names are combined into a single token and upper-cased
    columnName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
    columnNameList = Group(delimitedList(columnName)).setName("columns")
    columnSpec = ('*' | columnNameList)
    tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
    tableNameList = Group(delimitedList(tableName)).setName("tables")
    simpleSQL = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables")
    # demo runTests method, including embedded comments in test string
    simpleSQL.runTests("""
        # '*' as column list and dotted table name
        select * from SYS.XYZZY
        # caseless match on "SELECT", and casts back to "select"
        SELECT * from XYZZY, ABC
        # list of column names, and mixed case SELECT keyword
        Select AA,BB,CC from Sys.dual
        # multiple tables
        Select A, B, C from Sys.dual, Table2
        # invalid SELECT keyword - should fail
        Xelect A, B, C from Sys.dual
        # incomplete command - should fail
        Select
        # invalid column name - should fail
        Select ^^^ frox Sys.dual
        """)
    pyparsing_common.number.runTests("""
        100
        -100
        +100
        3.14159
        6.02e23
        1e-12
        """)
    # any int or real number, returned as float
    pyparsing_common.fnumber.runTests("""
        100
        -100
        +100
        3.14159
        6.02e23
        1e-12
        """)
    pyparsing_common.hex_integer.runTests("""
        100
        FF
        """)
    import uuid
    pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
    pyparsing_common.uuid.runTests("""
        12345678-1234-5678-1234-567812345678
        """)
| mit |
ff94315/hiwifi-openwrt-HC5661-HC5761 | staging_dir/host/lib/python2.7/distutils/command/register.py | 175 | 11839 | """distutils.command.register
Implements the Distutils 'register' command (register with the repository).
"""
# created 2002/10/21, Richard Jones
__revision__ = "$Id$"
import urllib2
import getpass
import urlparse
from warnings import warn
from distutils.core import PyPIRCCommand
from distutils import log
class register(PyPIRCCommand):
    # one-line description shown by `setup.py --help-commands`
    description = ("register the distribution with the Python package index")
    user_options = PyPIRCCommand.user_options + [
        ('list-classifiers', None,
         'list the valid Trove classifiers'),
        ('strict', None ,
         'Will stop the registering if the meta-data are not fully compliant')
        ]
    boolean_options = PyPIRCCommand.boolean_options + [
        'verify', 'list-classifiers', 'strict']
    # always run the `check` command first to validate the metadata
    sub_commands = [('check', lambda self: True)]
    def initialize_options(self):
        """Set default values for all options this command supports."""
        PyPIRCCommand.initialize_options(self)
        self.list_classifiers = 0
        self.strict = 0
    def finalize_options(self):
        """Finalize option values and forward them to the `check` subcommand."""
        PyPIRCCommand.finalize_options(self)
        # setting options for the `check` subcommand
        check_options = {'strict': ('register', self.strict),
                         'restructuredtext': ('register', 1)}
        self.distribution.command_options['check'] = check_options
    def run(self):
        """Run sub-commands, then perform the requested register action."""
        self.finalize_options()
        self._set_config()
        # Run sub commands
        for cmd_name in self.get_sub_commands():
            self.run_command(cmd_name)
        if self.dry_run:
            self.verify_metadata()
        elif self.list_classifiers:
            self.classifiers()
        else:
            self.send_metadata()
    def check_metadata(self):
        """Deprecated API."""
        warn("distutils.command.register.check_metadata is deprecated, \
              use the check command instead", PendingDeprecationWarning)
        check = self.distribution.get_command_obj('check')
        check.ensure_finalized()
        check.strict = self.strict
        check.restructuredtext = 1
        check.run()
    def _set_config(self):
        ''' Reads the configuration file and set attributes.
        '''
        config = self._read_pypirc()
        if config != {}:
            self.username = config['username']
            self.password = config['password']
            self.repository = config['repository']
            self.realm = config['realm']
            self.has_config = True
        else:
            # no .pypirc: only the default repository (or its alias) is allowed
            if self.repository not in ('pypi', self.DEFAULT_REPOSITORY):
                raise ValueError('%s not found in .pypirc' % self.repository)
            if self.repository == 'pypi':
                self.repository = self.DEFAULT_REPOSITORY
            self.has_config = False
    def classifiers(self):
        ''' Fetch the list of classifiers from the server.
        '''
        response = urllib2.urlopen(self.repository+'?:action=list_classifiers')
        log.info(response.read())
    def verify_metadata(self):
        ''' Send the metadata to the package index server to be checked.
        '''
        # send the info to the server and report the result
        (code, result) = self.post_to_server(self.build_post_data('verify'))
        log.info('Server response (%s): %s' % (code, result))
    def send_metadata(self):
        ''' Send the metadata to the package index server.
            Well, do the following:
            1. figure who the user is, and then
            2. send the data as a Basic auth'ed POST.
            First we try to read the username/password from $HOME/.pypirc,
            which is a ConfigParser-formatted file with a section
            [distutils] containing username and password entries (both
            in clear text). Eg:
                [distutils]
                index-servers =
                    pypi
                [pypi]
                username: fred
                password: sekrit
            Otherwise, to figure who the user is, we offer the user three
            choices:
             1. use existing login,
             2. register as a new user, or
             3. set the password to a random string and email the user.
        '''
        # see if we can short-cut and get the username/password from the
        # config
        if self.has_config:
            choice = '1'
            username = self.username
            password = self.password
        else:
            choice = 'x'
            username = password = ''
        # get the user's login info
        choices = '1 2 3 4'.split()
        while choice not in choices:
            self.announce('''\
We need to know who you are, so please choose either:
 1. use your existing login,
 2. register as a new user,
 3. have the server generate a new password for you (and email it to you), or
 4. quit
Your selection [default 1]: ''', log.INFO)
            choice = raw_input()
            if not choice:
                choice = '1'
            elif choice not in choices:
                print 'Please choose one of the four options!'
        if choice == '1':
            # get the username and password
            while not username:
                username = raw_input('Username: ')
            while not password:
                password = getpass.getpass('Password: ')
            # set up the authentication
            auth = urllib2.HTTPPasswordMgr()
            host = urlparse.urlparse(self.repository)[1]
            auth.add_password(self.realm, host, username, password)
            # send the info to the server and report the result
            code, result = self.post_to_server(self.build_post_data('submit'),
                auth)
            self.announce('Server response (%s): %s' % (code, result),
                          log.INFO)
            # possibly save the login
            if code == 200:
                if self.has_config:
                    # sharing the password in the distribution instance
                    # so the upload command can reuse it
                    self.distribution.password = password
                else:
                    self.announce(('I can store your PyPI login so future '
                                   'submissions will be faster.'), log.INFO)
                    self.announce('(the login will be stored in %s)' % \
                                  self._get_rc_file(), log.INFO)
                    choice = 'X'
                    while choice.lower() not in 'yn':
                        choice = raw_input('Save your login (y/N)?')
                        if not choice:
                            choice = 'n'
                    if choice.lower() == 'y':
                        self._store_pypirc(username, password)
        elif choice == '2':
            # register a brand-new user interactively
            data = {':action': 'user'}
            data['name'] = data['password'] = data['email'] = ''
            data['confirm'] = None
            while not data['name']:
                data['name'] = raw_input('Username: ')
            while data['password'] != data['confirm']:
                while not data['password']:
                    data['password'] = getpass.getpass('Password: ')
                while not data['confirm']:
                    data['confirm'] = getpass.getpass(' Confirm: ')
                if data['password'] != data['confirm']:
                    data['password'] = ''
                    data['confirm'] = None
                    print "Password and confirm don't match!"
            while not data['email']:
                data['email'] = raw_input('   EMail: ')
            code, result = self.post_to_server(data)
            if code != 200:
                log.info('Server response (%s): %s' % (code, result))
            else:
                log.info('You will receive an email shortly.')
                log.info(('Follow the instructions in it to '
                          'complete registration.'))
        elif choice == '3':
            # ask the server to mail the user a password reset
            data = {':action': 'password_reset'}
            data['email'] = ''
            while not data['email']:
                data['email'] = raw_input('Your email address: ')
            code, result = self.post_to_server(data)
            log.info('Server response (%s): %s' % (code, result))
    def build_post_data(self, action):
        """Build the POST dict for *action* from the distribution metadata."""
        # figure the data to send - the metadata plus some additional
        # information used by the package server
        meta = self.distribution.metadata
        data = {
            ':action': action,
            'metadata_version' : '1.0',
            'name': meta.get_name(),
            'version': meta.get_version(),
            'summary': meta.get_description(),
            'home_page': meta.get_url(),
            'author': meta.get_contact(),
            'author_email': meta.get_contact_email(),
            'license': meta.get_licence(),
            'description': meta.get_long_description(),
            'keywords': meta.get_keywords(),
            'platform': meta.get_platforms(),
            'classifiers': meta.get_classifiers(),
            'download_url': meta.get_download_url(),
            # PEP 314
            'provides': meta.get_provides(),
            'requires': meta.get_requires(),
            'obsoletes': meta.get_obsoletes(),
        }
        # PEP 314 fields bump the metadata version
        if data['provides'] or data['requires'] or data['obsoletes']:
            data['metadata_version'] = '1.1'
        return data
    def post_to_server(self, data, auth=None):
        ''' Post a query to the server, and return a string response.
        '''
        if 'name' in data:
            self.announce('Registering %s to %s' % (data['name'],
                                                    self.repository),
                                                    log.INFO)
        # Build up the MIME payload for the urllib2 POST data
        boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
        sep_boundary = '\n--' + boundary
        end_boundary = sep_boundary + '--'
        chunks = []
        for key, value in data.items():
            # handle multiple entries for the same name
            if type(value) not in (type([]), type( () )):
                value = [value]
            for value in value:
                chunks.append(sep_boundary)
                chunks.append('\nContent-Disposition: form-data; name="%s"'%key)
                chunks.append("\n\n")
                chunks.append(value)
                if value and value[-1] == '\r':
                    chunks.append('\n')  # write an extra newline (lurve Macs)
        chunks.append(end_boundary)
        chunks.append("\n")
        # chunks may be bytes (str) or unicode objects that we need to encode
        body = []
        for chunk in chunks:
            if isinstance(chunk, unicode):
                body.append(chunk.encode('utf-8'))
            else:
                body.append(chunk)
        body = ''.join(body)
        # build the Request
        headers = {
            'Content-type': 'multipart/form-data; boundary=%s; charset=utf-8'%boundary,
            'Content-length': str(len(body))
        }
        req = urllib2.Request(self.repository, body, headers)
        # handle HTTP and include the Basic Auth handler
        opener = urllib2.build_opener(
            urllib2.HTTPBasicAuthHandler(password_mgr=auth)
            )
        data = ''
        try:
            result = opener.open(req)
        except urllib2.HTTPError, e:
            if self.show_response:
                data = e.fp.read()
            result = e.code, e.msg
        except urllib2.URLError, e:
            result = 500, str(e)
        else:
            if self.show_response:
                data = result.read()
            result = 200, 'OK'
        if self.show_response:
            dashes = '-' * 75
            self.announce('%s%s%s' % (dashes, data, dashes))
        return result
| gpl-2.0 |
IndonesiaX/edx-platform | lms/djangoapps/ccx/tests/test_models.py | 27 | 8934 | """
tests for the models
"""
from datetime import datetime, timedelta
from django.utils.timezone import UTC
from mock import patch
from nose.plugins.attrib import attr
from student.roles import CourseCcxCoachRole
from student.tests.factories import (
AdminFactory,
)
from util.tests.test_date_utils import fake_ugettext
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import (
CourseFactory,
check_mongo_calls
)
from .factories import (
CcxFactory,
)
from ..overrides import override_field_for_ccx
@attr('shard_1')
class TestCCX(ModuleStoreTestCase):
"""Unit tests for the CustomCourseForEdX model
"""
def setUp(self):
"""common setup for all tests"""
super(TestCCX, self).setUp()
self.course = course = CourseFactory.create()
coach = AdminFactory.create()
role = CourseCcxCoachRole(course.id)
role.add_users(coach)
self.ccx = CcxFactory(course_id=course.id, coach=coach)
def set_ccx_override(self, field, value):
"""Create a field override for the test CCX on <field> with <value>"""
override_field_for_ccx(self.ccx, self.course, field, value)
def test_ccx_course_is_correct_course(self):
"""verify that the course property of a ccx returns the right course"""
expected = self.course
actual = self.ccx.course
self.assertEqual(expected, actual)
def test_ccx_course_caching(self):
"""verify that caching the propery works to limit queries"""
with check_mongo_calls(1):
# these statements are used entirely to demonstrate the
# instance-level caching of these values on CCX objects. The
# check_mongo_calls context is the point here.
self.ccx.course # pylint: disable=pointless-statement
with check_mongo_calls(0):
self.ccx.course # pylint: disable=pointless-statement
def test_ccx_start_is_correct(self):
"""verify that the start datetime for a ccx is correctly retrieved
Note that after setting the start field override microseconds are
truncated, so we can't do a direct comparison between before and after.
For this reason we test the difference between and make sure it is less
than one second.
"""
expected = datetime.now(UTC())
self.set_ccx_override('start', expected)
actual = self.ccx.start # pylint: disable=no-member
diff = expected - actual
self.assertLess(abs(diff.total_seconds()), 1)
def test_ccx_start_caching(self):
"""verify that caching the start property works to limit queries"""
now = datetime.now(UTC())
self.set_ccx_override('start', now)
with check_mongo_calls(1):
# these statements are used entirely to demonstrate the
# instance-level caching of these values on CCX objects. The
# check_mongo_calls context is the point here.
self.ccx.start # pylint: disable=pointless-statement, no-member
with check_mongo_calls(0):
self.ccx.start # pylint: disable=pointless-statement, no-member
def test_ccx_due_without_override(self):
"""verify that due returns None when the field has not been set"""
actual = self.ccx.due # pylint: disable=no-member
self.assertIsNone(actual)
def test_ccx_due_is_correct(self):
"""verify that the due datetime for a ccx is correctly retrieved"""
expected = datetime.now(UTC())
self.set_ccx_override('due', expected)
actual = self.ccx.due # pylint: disable=no-member
diff = expected - actual
self.assertLess(abs(diff.total_seconds()), 1)
def test_ccx_due_caching(self):
"""verify that caching the due property works to limit queries"""
expected = datetime.now(UTC())
self.set_ccx_override('due', expected)
with check_mongo_calls(1):
# these statements are used entirely to demonstrate the
# instance-level caching of these values on CCX objects. The
# check_mongo_calls context is the point here.
self.ccx.due # pylint: disable=pointless-statement, no-member
with check_mongo_calls(0):
self.ccx.due # pylint: disable=pointless-statement, no-member
def test_ccx_has_started(self):
"""verify that a ccx marked as starting yesterday has started"""
now = datetime.now(UTC())
delta = timedelta(1)
then = now - delta
self.set_ccx_override('start', then)
self.assertTrue(self.ccx.has_started()) # pylint: disable=no-member
def test_ccx_has_not_started(self):
"""verify that a ccx marked as starting tomorrow has not started"""
now = datetime.now(UTC())
delta = timedelta(1)
then = now + delta
self.set_ccx_override('start', then)
self.assertFalse(self.ccx.has_started()) # pylint: disable=no-member
def test_ccx_has_ended(self):
"""verify that a ccx that has a due date in the past has ended"""
now = datetime.now(UTC())
delta = timedelta(1)
then = now - delta
self.set_ccx_override('due', then)
self.assertTrue(self.ccx.has_ended()) # pylint: disable=no-member
def test_ccx_has_not_ended(self):
"""verify that a ccx that has a due date in the future has not eneded
"""
now = datetime.now(UTC())
delta = timedelta(1)
then = now + delta
self.set_ccx_override('due', then)
self.assertFalse(self.ccx.has_ended()) # pylint: disable=no-member
def test_ccx_without_due_date_has_not_ended(self):
"""verify that a ccx without a due date has not ended"""
self.assertFalse(self.ccx.has_ended()) # pylint: disable=no-member
    # ensure that the expected localized format will be found by the i18n
    # service
    @patch('util.date_utils.ugettext', fake_ugettext(translations={
        "SHORT_DATE_FORMAT": "%b %d, %Y",
    }))
    def test_start_datetime_short_date(self):
        """verify that the start date for a ccx formats properly by default"""
        start = datetime(2015, 1, 1, 12, 0, 0, tzinfo=UTC())
        expected = "Jan 01, 2015"
        self.set_ccx_override('start', start)
        # No format argument: SHORT_DATE_FORMAT (patched above) is the default.
        actual = self.ccx.start_datetime_text()  # pylint: disable=no-member
        self.assertEqual(expected, actual)
    @patch('util.date_utils.ugettext', fake_ugettext(translations={
        "DATE_TIME_FORMAT": "%b %d, %Y at %H:%M",
    }))
    def test_start_datetime_date_time_format(self):
        """verify that the DATE_TIME format also works as expected"""
        start = datetime(2015, 1, 1, 12, 0, 0, tzinfo=UTC())
        # Note the trailing timezone label appended beyond the patched format.
        expected = "Jan 01, 2015 at 12:00 UTC"
        self.set_ccx_override('start', start)
        actual = self.ccx.start_datetime_text('DATE_TIME')  # pylint: disable=no-member
        self.assertEqual(expected, actual)
    @patch('util.date_utils.ugettext', fake_ugettext(translations={
        "SHORT_DATE_FORMAT": "%b %d, %Y",
    }))
    def test_end_datetime_short_date(self):
        """verify that the end date for a ccx formats properly by default"""
        end = datetime(2015, 1, 1, 12, 0, 0, tzinfo=UTC())
        expected = "Jan 01, 2015"
        # The end date is driven by the 'due' override.
        self.set_ccx_override('due', end)
        actual = self.ccx.end_datetime_text()  # pylint: disable=no-member
        self.assertEqual(expected, actual)
    @patch('util.date_utils.ugettext', fake_ugettext(translations={
        "DATE_TIME_FORMAT": "%b %d, %Y at %H:%M",
    }))
    def test_end_datetime_date_time_format(self):
        """verify that the DATE_TIME format also works as expected"""
        end = datetime(2015, 1, 1, 12, 0, 0, tzinfo=UTC())
        # Note the trailing timezone label appended beyond the patched format.
        expected = "Jan 01, 2015 at 12:00 UTC"
        self.set_ccx_override('due', end)
        actual = self.ccx.end_datetime_text('DATE_TIME')  # pylint: disable=no-member
        self.assertEqual(expected, actual)
@patch('util.date_utils.ugettext', fake_ugettext(translations={
"DATE_TIME_FORMAT": "%b %d, %Y at %H:%M",
}))
def test_end_datetime_no_due_date(self):
"""verify that without a due date, the end date is an empty string"""
expected = ''
actual = self.ccx.end_datetime_text() # pylint: disable=no-member
self.assertEqual(expected, actual)
actual = self.ccx.end_datetime_text('DATE_TIME') # pylint: disable=no-member
self.assertEqual(expected, actual)
def test_ccx_max_student_enrollment_correct(self):
"""
Verify the override value for max_student_enrollments_allowed
"""
expected = 200
self.set_ccx_override('max_student_enrollments_allowed', expected)
actual = self.ccx.max_student_enrollments_allowed # pylint: disable=no-member
self.assertEqual(expected, actual)
| agpl-3.0 |
jooking/closure-library | closure/bin/labs/code/generate_jsdoc.py | 222 | 4318 | #!/usr/bin/env python
#
# Copyright 2013 The Closure Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool to insert JsDoc before a function.
This script attempts to find the first function passed in to stdin, generate
JSDoc for it (with argument names and possibly return value), and inject
it in the string. This is intended to be used as a subprocess by editors
such as emacs and vi.
"""
import re
import sys
# Matches a typical Closure-style function definition.
_FUNCTION_REGEX = re.compile(r"""
# Start of line
^
# Indentation
(?P<indentation>[ ]*)
# Identifier (handling split across line)
(?P<identifier>\w+(\s*\.\s*\w+)*)
# "= function"
\s* = \s* function \s*
# opening paren
\(
# Function arguments
(?P<arguments>(?:\s|\w+|,)*)
# closing paren
\)
# opening bracket
\s* {
""", re.MULTILINE | re.VERBOSE)
def _MatchFirstFunction(script):
    """Match the first function seen in the script.

    Returns the regex match object for the first Closure-style function
    definition in *script*, or None when nothing matches.
    """
    return _FUNCTION_REGEX.search(script)
def _ParseArgString(arg_string):
"""Parse an argument string (inside parens) into parameter names."""
for arg in arg_string.split(','):
arg = arg.strip()
if arg:
yield arg
def _ExtractFunctionBody(script, indentation=0):
"""Attempt to return the function body."""
# Real extraction would require a token parser and state machines.
# We look for first bracket at the same level of indentation.
regex_str = r'{(.*?)^[ ]{%d}}' % indentation
function_regex = re.compile(regex_str, re.MULTILINE | re.DOTALL)
match = function_regex.search(script)
if match:
return match.group(1)
def _ContainsReturnValue(function_body):
"""Attempt to determine if the function body returns a value."""
return_regex = re.compile(r'\breturn\b[^;]')
# If this matches, we assume they're returning something.
return bool(return_regex.search(function_body))
def _InsertString(original_string, inserted_string, index):
"""Insert a string into another string at a given index."""
return original_string[0:index] + inserted_string + original_string[index:]
def _GenerateJsDoc(args, return_val=False):
"""Generate JSDoc for a function.
Args:
args: A list of names of the argument.
return_val: Whether the function has a return value.
Returns:
The JSDoc as a string.
"""
lines = []
lines.append('/**')
lines += [' * @param {} %s' % arg for arg in args]
if return_val:
lines.append(' * @return')
lines.append(' */')
return '\n'.join(lines) + '\n'
def _IndentString(source_string, indentation):
"""Indent string some number of characters."""
lines = [(indentation * ' ') + line
for line in source_string.splitlines(True)]
return ''.join(lines)
def InsertJsDoc(script):
    """Attempt to insert JSDoc for the first seen function in the script.

    Args:
      script: The script, as a string.

    Returns:
      Returns the new string if function was found and JSDoc inserted. Otherwise
      returns None.
    """
    match = _MatchFirstFunction(script)
    if not match:
        return

    # Add argument flags.
    args_string = match.group('arguments')
    args = _ParseArgString(args_string)

    start_index = match.start(0)
    function_to_end = script[start_index:]
    lvalue_indentation = len(match.group('indentation'))

    # Only emit a @return tag when the body can be located and appears to
    # return a value.
    return_val = False
    function_body = _ExtractFunctionBody(function_to_end, lvalue_indentation)
    if function_body:
        return_val = _ContainsReturnValue(function_body)

    jsdoc = _GenerateJsDoc(args, return_val)
    # The comment must sit at the same indentation as the assignment it
    # documents.
    if lvalue_indentation:
        jsdoc = _IndentString(jsdoc, lvalue_indentation)

    return _InsertString(script, jsdoc, start_index)
if __name__ == '__main__':
    # Filter mode: read the whole script from stdin and write the result to
    # stdout.  If no function was found, echo the input back unchanged so the
    # tool is safe to use in an editor pipeline.
    stdin_script = sys.stdin.read()
    result = InsertJsDoc(stdin_script)
    if result:
        sys.stdout.write(result)
    else:
        sys.stdout.write(stdin_script)
| apache-2.0 |
shakamunyi/sahara | sahara/plugins/cdh/client/roles.py | 7 | 6525 | # Copyright (c) 2014 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# The contents of this file are mainly copied from cm_api sources,
# released by Cloudera. Codes not used by Sahara CDH plugin are removed.
# You can find the original codes at
#
# https://github.com/cloudera/cm_api/tree/master/python/src/cm_api
#
# To satisfy the pep8 and python3 tests, we did some changes to the codes.
# We also change some importings to use Sahara inherited classes.
from sahara.plugins.cdh.client import types
ROLES_PATH = "/clusters/%s/services/%s/roles"
CM_ROLES_PATH = "/cm/service/roles"
def _get_roles_path(cluster_name, service_name):
if cluster_name:
return ROLES_PATH % (cluster_name, service_name)
else:
return CM_ROLES_PATH
def _get_role_path(cluster_name, service_name, role_name):
    """Return the API path of a single role within its service."""
    collection = _get_roles_path(cluster_name, service_name)
    return "%s/%s" % (collection, role_name)
def create_role(resource_root,
                service_name,
                role_type,
                role_name,
                host_id,
                cluster_name="default"):
    """Create a role

    :param resource_root: The root Resource object.
    :param service_name: Service name
    :param role_type: Role type
    :param role_name: Role name
    :param host_id: Id of the host the new role is assigned to
    :param cluster_name: Cluster name
    :return: An ApiRole object
    """
    # POST a single-element list; the API returns the created roles and we
    # unwrap the first (only) one.
    apirole = ApiRole(resource_root, role_name, role_type,
                      types.ApiHostRef(resource_root, host_id))
    return types.call(resource_root.post,
                      _get_roles_path(cluster_name, service_name),
                      ApiRole, True, data=[apirole])[0]
def get_role(resource_root, service_name, name, cluster_name="default"):
    """Lookup a role by name

    :param resource_root: The root Resource object.
    :param service_name: Service name
    :param name: Role name
    :param cluster_name: Cluster name
    :return: An ApiRole object
    """
    path = _get_role_path(cluster_name, service_name, name)
    return _get_role(resource_root, path)
def _get_role(resource_root, path):
    # Fetch a single ApiRole resource from the given API path.
    return types.call(resource_root.get, path, ApiRole)
def get_all_roles(resource_root, service_name, cluster_name="default",
                  view=None):
    """Get all roles

    :param resource_root: The root Resource object.
    :param service_name: Service name
    :param cluster_name: Cluster name
    :param view: Optional view to materialize; omitted from the query when None
    :return: A list of ApiRole objects.
    """
    return types.call(resource_root.get,
                      _get_roles_path(cluster_name, service_name), ApiRole,
                      True, params=(dict(view=view) if view else None))
def get_roles_by_type(resource_root, service_name, role_type,
                      cluster_name="default", view=None):
    """Get all roles of a certain type in a service

    :param resource_root: The root Resource object.
    :param service_name: Service name
    :param role_type: Role type
    :param cluster_name: Cluster name
    :return: A list of ApiRole objects.
    """
    # The API has no type filter, so fetch everything and filter client-side.
    all_roles = get_all_roles(resource_root, service_name, cluster_name, view)
    return [role for role in all_roles if role.type == role_type]
def delete_role(resource_root, service_name, name, cluster_name="default"):
    """Delete a role by name

    :param resource_root: The root Resource object.
    :param service_name: Service name
    :param name: Role name
    :param cluster_name: Cluster name
    :return: The deleted ApiRole object
    """
    # DELETE on the role path; the server echoes the removed role back.
    return types.call(resource_root.delete,
                      _get_role_path(cluster_name, service_name, name),
                      ApiRole)
class ApiRole(types.BaseApiResource):
    """Model object for a role as exposed by the Cloudera Manager API."""

    # Maps each JSON field name to an attribute spec understood by
    # BaseApiObject: None is a plain writable attribute, Attr() a typed
    # writable one, and ROAttr() a read-only server-maintained one.
    _ATTRIBUTES = {
        'name': None,
        'type': None,
        'hostRef': types.Attr(types.ApiHostRef),
        'roleState': types.ROAttr(),
        'healthSummary': types.ROAttr(),
        'healthChecks': types.ROAttr(),
        'serviceRef': types.ROAttr(types.ApiServiceRef),
        'configStale': types.ROAttr(),
        'configStalenessStatus': types.ROAttr(),
        'haStatus': types.ROAttr(),
        'roleUrl': types.ROAttr(),
        'commissionState': types.ROAttr(),
        'maintenanceMode': types.ROAttr(),
        'maintenanceOwners': types.ROAttr(),
        'roleConfigGroupRef': types.ROAttr(types.ApiRoleConfigGroupRef),
        'zooKeeperServerMode': types.ROAttr(),
    }

    def __init__(self, resource_root, name=None, type=None, hostRef=None):
        # cm_api convention: BaseApiObject.init() pulls the constructor
        # arguments out of locals() and assigns them as attributes.
        types.BaseApiObject.init(self, resource_root, locals())

    def __str__(self):
        return ("<ApiRole>: %s (cluster: %s; service: %s)"
                % (self.name, self.serviceRef.clusterName,
                   self.serviceRef.serviceName))

    def _path(self):
        # API path of this specific role instance.
        return _get_role_path(self.serviceRef.clusterName,
                              self.serviceRef.serviceName,
                              self.name)

    def _get_log(self, log):
        # Fetch a log resource for this role; *log* selects which log file.
        path = "%s/logs/%s" % (self._path(), log)
        return self._get_resource_root().get(path)

    def get_commands(self, view=None):
        """Retrieve a list of running commands for this role

        :param view: View to materialize ('full' or 'summary')
        :return: A list of running commands.
        """
        return self._get("commands", types.ApiCommand, True,
                         params=(dict(view=view) if view else None))

    def get_config(self, view=None):
        """Retrieve the role's configuration

        The 'summary' view contains strings as the dictionary values. The full
        view contains types.ApiConfig instances as the values.

        :param view: View to materialize ('full' or 'summary')
        :return: Dictionary with configuration data.
        """
        return self._get_config("config", view)

    def update_config(self, config):
        """Update the role's configuration

        :param config: Dictionary with configuration to update.
        :return: Dictionary with updated configuration.
        """
        return self._update_config("config", config)
| apache-2.0 |
tboyce021/home-assistant | homeassistant/components/life360/config_flow.py | 10 | 4048 | """Config flow to configure Life360 integration."""
from collections import OrderedDict
import logging
from life360 import Life360Error, LoginError
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from .const import CONF_AUTHORIZATION, DOMAIN
from .helpers import get_api
_LOGGER = logging.getLogger(__name__)
DOCS_URL = "https://www.home-assistant.io/integrations/life360"
@config_entries.HANDLERS.register(DOMAIN)
class Life360ConfigFlow(config_entries.ConfigFlow):
    """Life360 integration config flow."""

    VERSION = 1
    CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL

    def __init__(self):
        """Initialize."""
        self._api = get_api()
        # vol.UNDEFINED marks "not entered yet" so the form renders empty
        # fields on the first pass.
        self._username = vol.UNDEFINED
        self._password = vol.UNDEFINED

    @property
    def configured_usernames(self):
        """Return tuple of configured usernames."""
        entries = self.hass.config_entries.async_entries(DOMAIN)
        if entries:
            # NOTE(review): this returns a generator, not a tuple as the
            # docstring says; it is only used for a single 'in' test below.
            return (entry.data[CONF_USERNAME] for entry in entries)
        return ()

    async def async_step_user(self, user_input=None):
        """Handle a user initiated config flow."""
        errors = {}

        if user_input is not None:
            self._username = user_input[CONF_USERNAME]
            self._password = user_input[CONF_PASSWORD]
            try:
                # pylint: disable=no-value-for-parameter
                # Validate the username looks like an email before hitting
                # the network; the blocking API call runs in the executor.
                vol.Email()(self._username)
                authorization = await self.hass.async_add_executor_job(
                    self._api.get_authorization, self._username, self._password
                )
            except vol.Invalid:
                errors[CONF_USERNAME] = "invalid_username"
            except LoginError:
                errors["base"] = "invalid_auth"
            except Life360Error as error:
                _LOGGER.error(
                    "Unexpected error communicating with Life360 server: %s", error
                )
                errors["base"] = "unknown"
            else:
                if self._username in self.configured_usernames:
                    errors["base"] = "already_configured"
                else:
                    return self.async_create_entry(
                        title=self._username,
                        data={
                            CONF_USERNAME: self._username,
                            CONF_PASSWORD: self._password,
                            CONF_AUTHORIZATION: authorization,
                        },
                        description_placeholders={"docs_url": DOCS_URL},
                    )

        # Re-show the form, pre-filled with what the user last entered.
        data_schema = OrderedDict()
        data_schema[vol.Required(CONF_USERNAME, default=self._username)] = str
        data_schema[vol.Required(CONF_PASSWORD, default=self._password)] = str
        return self.async_show_form(
            step_id="user",
            data_schema=vol.Schema(data_schema),
            errors=errors,
            description_placeholders={"docs_url": DOCS_URL},
        )

    async def async_step_import(self, user_input):
        """Import a config flow from configuration."""
        username = user_input[CONF_USERNAME]
        password = user_input[CONF_PASSWORD]
        try:
            authorization = await self.hass.async_add_executor_job(
                self._api.get_authorization, username, password
            )
        except LoginError:
            _LOGGER.error("Invalid credentials for %s", username)
            return self.async_abort(reason="invalid_auth")
        except Life360Error as error:
            _LOGGER.error(
                "Unexpected error communicating with Life360 server: %s", error
            )
            return self.async_abort(reason="unknown")
        return self.async_create_entry(
            title=f"{username} (from configuration)",
            data={
                CONF_USERNAME: username,
                CONF_PASSWORD: password,
                CONF_AUTHORIZATION: authorization,
            },
        )
| apache-2.0 |
roadmapper/ansible | lib/ansible/utils/path.py | 26 | 5225 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import shutil
from errno import EEXIST
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_bytes, to_native, to_text
__all__ = ['unfrackpath', 'makedirs_safe']
def unfrackpath(path, follow=True, basedir=None):
    '''
    Returns a path that is free of symlinks (if follow=True), environment variables, relative path traversals and symbols (~)

    :arg path: A byte or text string representing a path to be canonicalized
    :arg follow: A boolean to indicate if symlinks should be resolved or not
    :arg basedir: A byte or text string, or None; relative paths are resolved
        against it (or its containing directory if it is a file).  Defaults to
        the current working directory.
    :raises UnicodeDecodeError: If the canonicalized version of the path
        contains non-utf8 byte sequences.
    :rtype: A text string (unicode on python2, str on python3).
    :returns: An absolute path with symlinks, environment variables, and tilde
        expanded.  Note that this does not check whether a path exists.

    example::
        '$HOME/../../var/mail' becomes '/var/spool/mail'
    '''
    b_basedir = to_bytes(basedir, errors='surrogate_or_strict', nonstring='passthru')

    if b_basedir is None:
        b_basedir = to_bytes(os.getcwd(), errors='surrogate_or_strict')
    elif os.path.isfile(b_basedir):
        # A file was given as the base; resolve relative to its directory.
        b_basedir = os.path.dirname(b_basedir)

    b_final_path = os.path.expanduser(os.path.expandvars(to_bytes(path, errors='surrogate_or_strict')))

    if not os.path.isabs(b_final_path):
        b_final_path = os.path.join(b_basedir, b_final_path)

    if follow:
        b_final_path = os.path.realpath(b_final_path)

    return to_text(os.path.normpath(b_final_path), errors='surrogate_or_strict')
def makedirs_safe(path, mode=None):
    '''
    A *potentially insecure* way to ensure the existence of a directory chain. The "safe" in this function's name
    refers only to its ability to ignore `EEXIST` in the case of multiple callers operating on the same part of
    the directory chain. This function is not safe to use under world-writable locations when the first level of the
    path to be created contains a predictable component. Always create a randomly-named element first if there is any
    chance the parent directory might be world-writable (eg, /tmp) to prevent symlink hijacking and potential
    disclosure or modification of sensitive file contents.

    :arg path: A byte or text string representing a directory chain to be created
    :kwarg mode: If given, the mode to set the directory to
    :raises AnsibleError: If the directory cannot be created and does not already exist.
    :raises UnicodeDecodeError: if the path is not decodable in the utf-8 encoding.
    '''

    rpath = unfrackpath(path)
    b_rpath = to_bytes(rpath)
    if not os.path.exists(b_rpath):
        try:
            if mode:
                os.makedirs(b_rpath, mode)
            else:
                os.makedirs(b_rpath)
        except OSError as e:
            # Another caller may have created the path between the exists()
            # check and makedirs(); only EEXIST is tolerated.
            if e.errno != EEXIST:
                raise AnsibleError("Unable to create local directories(%s): %s" % (to_native(rpath), to_native(e)))
def basedir(source):
    """ returns directory for inventory or playbook """
    source = to_bytes(source, errors='surrogate_or_strict')
    if os.path.isdir(source):
        dname = source
    elif source in [None, '', '.']:
        dname = os.getcwd()
    elif os.path.isfile(source):
        dname = os.path.dirname(source)
    else:
        dname = None

    if dname:
        # don't follow symlinks for basedir, enables source re-use
        dname = os.path.abspath(dname)

    return to_text(dname, errors='surrogate_or_strict')
def cleanup_tmp_file(path, warn=False):
    """
    Removes temporary file or directory. Optionally display a warning if unable
    to remove the file or directory.

    :arg path: Path to file or directory to be removed
    :kwarg warn: Whether or not to display a warning when the file or directory
        cannot be removed
    """
    try:
        if not os.path.exists(path):
            return
        try:
            if os.path.isdir(path):
                shutil.rmtree(path)
            elif os.path.isfile(path):
                os.unlink(path)
        except Exception as e:
            if warn:
                # Importing here to avoid circular import
                from ansible.utils.display import Display
                display = Display()
                display.display(u'Unable to remove temporary file {0}'.format(to_text(e)))
    except Exception:
        # Best effort only: cleanup must never raise into the caller.
        pass
| gpl-3.0 |
plissonf/scikit-learn | examples/manifold/plot_compare_methods.py | 259 | 4031 | """
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`example_manifold_plot_manifold_sphere.py`
Note that the purpose of the MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space, unlike other
manifold-learning algorithms, it does not seeks an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <vanderplas@astro.washington.edu>
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
# Next line to silence pyflakes. This import is needed.
Axes3D
# Build the 3D S-curve dataset and the embedding parameters shared by all
# of the methods compared below.
n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2

fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
             % (1000, n_neighbors), fontsize=14)
try:
    # compatibility matplotlib < 1.0
    ax = fig.add_subplot(251, projection='3d')
    ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
    ax.view_init(4, -72)
except:
    # NOTE(review): the bare except is a deliberate fallback for very old
    # matplotlib without 3D axes, but it also hides unrelated errors.
    ax = fig.add_subplot(251, projection='3d')
    plt.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)

# Locally Linear Embedding variants run by the loop below.
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
# One subplot per LLE variant, timed.
for i, method in enumerate(methods):
    t0 = time()
    Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,
                                        eigen_solver='auto',
                                        method=method).fit_transform(X)
    t1 = time()
    print("%s: %.2g sec" % (methods[i], t1 - t0))

    # Subplots 2-5 of the 2x5 grid (251 is the 3D input plot).
    ax = fig.add_subplot(252 + i)
    plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
    plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
    ax.xaxis.set_major_formatter(NullFormatter())
    ax.yaxis.set_major_formatter(NullFormatter())
    plt.axis('tight')
# Isomap embedding, timed.
t0 = time()
Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)
t1 = time()
print("Isomap: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("Isomap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Multi-dimensional scaling, timed.
t0 = time()
mds = manifold.MDS(n_components, max_iter=100, n_init=1)
Y = mds.fit_transform(X)
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Spectral embedding (Laplacian eigenmaps), timed.
t0 = time()
se = manifold.SpectralEmbedding(n_components=n_components,
                                n_neighbors=n_neighbors)
Y = se.fit_transform(X)
t1 = time()
print("SpectralEmbedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("SpectralEmbedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# t-SNE embedding, timed.
t0 = time()
tsne = manifold.TSNE(n_components=n_components, init='pca', random_state=0)
Y = tsne.fit_transform(X)
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
# Bug fix: the shorthand add_subplot(250) means position 0 of a 2x5 grid,
# which is invalid (positions are 1-based); the last cell is (2, 5, 10).
ax = fig.add_subplot(2, 5, 10)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')

plt.show()
| bsd-3-clause |
entropy1337/infernal-twin | Modules/build/reportlab/src/reportlab/platypus/paraparser.py | 24 | 45904 | #Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/platypus/paraparser.py
__version__=''' $Id$ '''
__doc__='''The parser used to process markup within paragraphs'''
import string
import re
import sys
import os
import copy
import base64
from pprint import pprint as pp
import unicodedata
import reportlab.lib.sequencer
from reportlab.lib.abag import ABag
from reportlab.lib.utils import ImageReader, isPy3, annotateException, encode_label, asUnicode, asBytes, uniChr
from reportlab.lib.colors import toColor, white, black, red, Color
from reportlab.lib.fonts import tt2ps, ps2tt
from reportlab.lib.enums import TA_LEFT, TA_RIGHT, TA_CENTER, TA_JUSTIFY
from reportlab.lib.units import inch,mm,cm,pica
if isPy3:
from html.parser import HTMLParser
from html.entities import name2codepoint
else:
from HTMLParser import HTMLParser
from htmlentitydefs import name2codepoint
# Quick test for markup that opens with a "<para ...>" / "<para/>" tag.
_re_para = re.compile(r'^\s*<\s*para(?:\s+|>|/>)')

sizeDelta = 2       # amount to reduce font size by for super and sub script
subFraction = 0.5   # fraction of font size that a sub script should be lowered
superFraction = 0.5  # fraction of font size that a super script should be raised
DEFAULT_INDEX_NAME = '_indexAdd'
def _convnum(s, unit=1, allowRelative=True):
if s[0] in ('+','-') and allowRelative:
try:
return ('relative',int(s)*unit)
except ValueError:
return ('relative',float(s)*unit)
else:
try:
return int(s)*unit
except ValueError:
return float(s)*unit
def _num(s, unit=1, allowRelative=True):
    """Convert a string like '10cm' to an int or float (in points).

    The default unit is point, but optionally you can use other
    default units like mm.
    """
    # Strip recognized unit suffixes in the same fixed order as always;
    # deliberately no break, matching the historical sequential checks.
    for suffix, suffix_unit in (('cm', cm), ('in', inch), ('pt', 1),
                                ('i', inch), ('mm', mm), ('pica', pica)):
        if s.endswith(suffix):
            unit = suffix_unit
            s = s[:-len(suffix)]
    return _convnum(s, unit, allowRelative)
def _numpct(s, unit=1, allowRelative=False):
    """Like _num, but a trailing '%' yields a _PCT proportion object."""
    if not s.endswith('%'):
        return _num(s, unit, allowRelative)
    return _PCT(_convnum(s[:-1], allowRelative=allowRelative))
class _PCT:
def __init__(self,v):
self._value = v*0.01
def normalizedValue(self,normalizer):
normalizer = normalizer or getattr(self,'_normalizer')
return normalizer*self._value
def _valignpc(s):
    """Parse a vertical-alignment value: keyword, percentage, or number."""
    s = s.lower()
    if s in ('baseline', 'sub', 'super', 'top', 'text-top', 'middle',
             'bottom', 'text-bottom'):
        return s
    if s.endswith('%'):
        # A percentage becomes a deferred proportion; unwrap any
        # ('relative', value) tuple first.
        n = _convnum(s[:-1])
        return _PCT(n[1] if isinstance(n, tuple) else n)
    n = _num(s)
    return n[1] if isinstance(n, tuple) else n
def _autoLeading(x):
x = x.lower()
if x in ('','min','max','off'):
return x
raise ValueError('Invalid autoLeading=%r' % x )
def _align(s):
    """Map an alignment keyword to its TA_* constant."""
    mapping = {'left': TA_LEFT, 'right': TA_RIGHT, 'justify': TA_JUSTIFY,
               'centre': TA_CENTER, 'center': TA_CENTER}
    key = s.lower()
    if key not in mapping:
        raise ValueError('illegal alignment %r' % key)
    return mapping[key]
def _bAnchor(s):
s = s.lower()
if not s in ('start','middle','end','numeric'):
raise ValueError('illegal bullet anchor %r' % s)
return s
# Each map translates a markup attribute name to a pair of
# (paragraph/fragment attribute name, converter).  A converter of None
# means the string value is used as-is.
_paraAttrMap = {'font': ('fontName', None),
                'face': ('fontName', None),
                'fontsize': ('fontSize', _num),
                'size': ('fontSize', _num),
                'leading': ('leading', _num),
                'autoleading': ('autoLeading', _autoLeading),
                'lindent': ('leftIndent', _num),
                'rindent': ('rightIndent', _num),
                'findent': ('firstLineIndent', _num),
                'align': ('alignment', _align),
                'spaceb': ('spaceBefore', _num),
                'spacea': ('spaceAfter', _num),
                'bfont': ('bulletFontName', None),
                'bfontsize': ('bulletFontSize', _num),
                'boffsety': ('bulletOffsetY', _num),
                'bindent': ('bulletIndent', _num),
                'bcolor': ('bulletColor', toColor),
                'banchor': ('bulletAnchor', _bAnchor),
                'color': ('textColor', toColor),
                'backcolor': ('backColor', toColor),
                'bgcolor': ('backColor', toColor),
                'bg': ('backColor', toColor),
                'fg': ('textColor', toColor),
                }

_bulletAttrMap = {
    'font': ('bulletFontName', None),
    'face': ('bulletFontName', None),
    'size': ('bulletFontSize', _num),
    'fontsize': ('bulletFontSize', _num),
    'offsety': ('bulletOffsetY', _num),
    'indent': ('bulletIndent', _num),
    'color': ('bulletColor', toColor),
    'fg': ('bulletColor', toColor),
    'anchor': ('bulletAnchor', _bAnchor),
    }

#things which are valid font attributes
_fontAttrMap = {'size': ('fontSize', _num),
                'face': ('fontName', None),
                'name': ('fontName', None),
                'fg': ('textColor', toColor),
                'color': ('textColor', toColor),
                'backcolor': ('backColor', toColor),
                'bgcolor': ('backColor', toColor),
                }

#things which are valid span attributes
_spanAttrMap = {'size': ('fontSize', _num),
                'face': ('fontName', None),
                'name': ('fontName', None),
                'fg': ('textColor', toColor),
                'color': ('textColor', toColor),
                'backcolor': ('backColor', toColor),
                'bgcolor': ('backColor', toColor),
                'style': ('style', None),
                }

#things which are valid font attributes
_linkAttrMap = {'size': ('fontSize', _num),
                'face': ('fontName', None),
                'name': ('fontName', None),
                'fg': ('textColor', toColor),
                'color': ('textColor', toColor),
                'backcolor': ('backColor', toColor),
                'bgcolor': ('backColor', toColor),
                'dest': ('link', None),
                'destination': ('link', None),
                'target': ('link', None),
                'href': ('link', None),
                }

_anchorAttrMap = {'fontSize': ('fontSize', _num),
                  'fontName': ('fontName', None),
                  'name': ('name', None),
                  'fg': ('textColor', toColor),
                  'color': ('textColor', toColor),
                  'backcolor': ('backColor', toColor),
                  'bgcolor': ('backColor', toColor),
                  'href': ('href', None),
                  }

_imgAttrMap = {
    'src': ('src', None),
    'width': ('width', _numpct),
    'height': ('height', _numpct),
    'valign': ('valign', _valignpc),
    }

_indexAttrMap = {
    'name': ('name', None),
    'item': ('item', None),
    'offset': ('offset', None),
    'format': ('format', None),
    }
def _addAttributeNames(m):
K = list(m.keys())
for k in K:
n = m[k][0]
if n not in m: m[n] = m[k]
n = n.lower()
if n not in m: m[n] = m[k]
# Register canonical (and lowercased) attribute names as aliases in each
# map, so markup may use either the markup name or the attribute name.
_addAttributeNames(_paraAttrMap)
_addAttributeNames(_fontAttrMap)
_addAttributeNames(_spanAttrMap)
_addAttributeNames(_bulletAttrMap)
_addAttributeNames(_anchorAttrMap)
_addAttributeNames(_linkAttrMap)
def _applyAttributes(obj, attr):
for k, v in attr.items():
if isinstance(v,(list,tuple)) and v[0]=='relative':
if hasattr(obj, k):
v = v[1]+getattr(obj,k)
else:
v = v[1]
setattr(obj,k,v)
#Named character entities intended to be supported from the special font
#with additions suggested by Christoph Zwerschke who also suggested the
#numeric entity names that follow.
greeks = {
'Aacute': u'\xc1',
'aacute': u'\xe1',
'Acirc': u'\xc2',
'acirc': u'\xe2',
'acute': u'\xb4',
'AElig': u'\xc6',
'aelig': u'\xe6',
'Agrave': u'\xc0',
'agrave': u'\xe0',
'alefsym': u'\u2135',
'Alpha': u'\u0391',
'alpha': u'\u03b1',
'and': u'\u2227',
'ang': u'\u2220',
'Aring': u'\xc5',
'aring': u'\xe5',
'asymp': u'\u2248',
'Atilde': u'\xc3',
'atilde': u'\xe3',
'Auml': u'\xc4',
'auml': u'\xe4',
'bdquo': u'\u201e',
'Beta': u'\u0392',
'beta': u'\u03b2',
'brvbar': u'\xa6',
'bull': u'\u2022',
'cap': u'\u2229',
'Ccedil': u'\xc7',
'ccedil': u'\xe7',
'cedil': u'\xb8',
'cent': u'\xa2',
'Chi': u'\u03a7',
'chi': u'\u03c7',
'circ': u'\u02c6',
'clubs': u'\u2663',
'cong': u'\u2245',
'copy': u'\xa9',
'crarr': u'\u21b5',
'cup': u'\u222a',
'curren': u'\xa4',
'dagger': u'\u2020',
'Dagger': u'\u2021',
'darr': u'\u2193',
'dArr': u'\u21d3',
'deg': u'\xb0',
'delta': u'\u03b4',
'Delta': u'\u2206',
'diams': u'\u2666',
'divide': u'\xf7',
'Eacute': u'\xc9',
'eacute': u'\xe9',
'Ecirc': u'\xca',
'ecirc': u'\xea',
'Egrave': u'\xc8',
'egrave': u'\xe8',
'empty': u'\u2205',
'emsp': u'\u2003',
'ensp': u'\u2002',
'Epsilon': u'\u0395',
'epsilon': u'\u03b5',
'epsiv': u'\u03b5',
'equiv': u'\u2261',
'Eta': u'\u0397',
'eta': u'\u03b7',
'ETH': u'\xd0',
'eth': u'\xf0',
'Euml': u'\xcb',
'euml': u'\xeb',
'euro': u'\u20ac',
'exist': u'\u2203',
'fnof': u'\u0192',
'forall': u'\u2200',
'frac12': u'\xbd',
'frac14': u'\xbc',
'frac34': u'\xbe',
'frasl': u'\u2044',
'Gamma': u'\u0393',
'gamma': u'\u03b3',
'ge': u'\u2265',
'harr': u'\u2194',
'hArr': u'\u21d4',
'hearts': u'\u2665',
'hellip': u'\u2026',
'Iacute': u'\xcd',
'iacute': u'\xed',
'Icirc': u'\xce',
'icirc': u'\xee',
'iexcl': u'\xa1',
'Igrave': u'\xcc',
'igrave': u'\xec',
'image': u'\u2111',
'infin': u'\u221e',
'int': u'\u222b',
'Iota': u'\u0399',
'iota': u'\u03b9',
'iquest': u'\xbf',
'isin': u'\u2208',
'Iuml': u'\xcf',
'iuml': u'\xef',
'Kappa': u'\u039a',
'kappa': u'\u03ba',
'Lambda': u'\u039b',
'lambda': u'\u03bb',
'lang': u'\u2329',
'laquo': u'\xab',
'larr': u'\u2190',
'lArr': u'\u21d0',
'lceil': u'\uf8ee',
'ldquo': u'\u201c',
'le': u'\u2264',
'lfloor': u'\uf8f0',
'lowast': u'\u2217',
'loz': u'\u25ca',
'lrm': u'\u200e',
'lsaquo': u'\u2039',
'lsquo': u'\u2018',
'macr': u'\xaf',
'mdash': u'\u2014',
'micro': u'\xb5',
'middot': u'\xb7',
'minus': u'\u2212',
'mu': u'\xb5',
'Mu': u'\u039c',
'nabla': u'\u2207',
'nbsp': u'\xa0',
'ndash': u'\u2013',
'ne': u'\u2260',
'ni': u'\u220b',
'notin': u'\u2209',
'not': u'\xac',
'nsub': u'\u2284',
'Ntilde': u'\xd1',
'ntilde': u'\xf1',
'Nu': u'\u039d',
'nu': u'\u03bd',
'Oacute': u'\xd3',
'oacute': u'\xf3',
'Ocirc': u'\xd4',
'ocirc': u'\xf4',
'OElig': u'\u0152',
'oelig': u'\u0153',
'Ograve': u'\xd2',
'ograve': u'\xf2',
'oline': u'\uf8e5',
'omega': u'\u03c9',
'Omega': u'\u2126',
'Omicron': u'\u039f',
'omicron': u'\u03bf',
'oplus': u'\u2295',
'ordf': u'\xaa',
'ordm': u'\xba',
'or': u'\u2228',
'Oslash': u'\xd8',
'oslash': u'\xf8',
'Otilde': u'\xd5',
'otilde': u'\xf5',
'otimes': u'\u2297',
'Ouml': u'\xd6',
'ouml': u'\xf6',
'para': u'\xb6',
'part': u'\u2202',
'permil': u'\u2030',
'perp': u'\u22a5',
'phis': u'\u03c6',
'Phi': u'\u03a6',
'phi': u'\u03d5',
'piv': u'\u03d6',
'Pi': u'\u03a0',
'pi': u'\u03c0',
'plusmn': u'\xb1',
'pound': u'\xa3',
'prime': u'\u2032',
'Prime': u'\u2033',
'prod': u'\u220f',
'prop': u'\u221d',
'Psi': u'\u03a8',
'psi': u'\u03c8',
'radic': u'\u221a',
'rang': u'\u232a',
'raquo': u'\xbb',
'rarr': u'\u2192',
'rArr': u'\u21d2',
'rceil': u'\uf8f9',
'rdquo': u'\u201d',
'real': u'\u211c',
'reg': u'\xae',
'rfloor': u'\uf8fb',
'Rho': u'\u03a1',
'rho': u'\u03c1',
'rlm': u'\u200f',
'rsaquo': u'\u203a',
'rsquo': u'\u2019',
'sbquo': u'\u201a',
'Scaron': u'\u0160',
'scaron': u'\u0161',
'sdot': u'\u22c5',
'sect': u'\xa7',
'shy': u'\xad',
'sigmaf': u'\u03c2',
'sigmav': u'\u03c2',
'Sigma': u'\u03a3',
'sigma': u'\u03c3',
'sim': u'\u223c',
'spades': u'\u2660',
'sube': u'\u2286',
'sub': u'\u2282',
'sum': u'\u2211',
'sup1': u'\xb9',
'sup2': u'\xb2',
'sup3': u'\xb3',
'supe': u'\u2287',
'sup': u'\u2283',
'szlig': u'\xdf',
'Tau': u'\u03a4',
'tau': u'\u03c4',
'there4': u'\u2234',
'thetasym': u'\u03d1',
'thetav': u'\u03d1',
'Theta': u'\u0398',
'theta': u'\u03b8',
'thinsp': u'\u2009',
'THORN': u'\xde',
'thorn': u'\xfe',
'tilde': u'\u02dc',
'times': u'\xd7',
'trade': u'\uf8ea',
'Uacute': u'\xda',
'uacute': u'\xfa',
'uarr': u'\u2191',
'uArr': u'\u21d1',
'Ucirc': u'\xdb',
'ucirc': u'\xfb',
'Ugrave': u'\xd9',
'ugrave': u'\xf9',
'uml': u'\xa8',
'upsih': u'\u03d2',
'Upsilon': u'\u03a5',
'upsilon': u'\u03c5',
'Uuml': u'\xdc',
'uuml': u'\xfc',
'weierp': u'\u2118',
'Xi': u'\u039e',
'xi': u'\u03be',
'Yacute': u'\xdd',
'yacute': u'\xfd',
'yen': u'\xa5',
'yuml': u'\xff',
'Yuml': u'\u0178',
'Zeta': u'\u0396',
'zeta': u'\u03b6',
'zwj': u'\u200d',
'zwnj': u'\u200c',
}
known_entities = dict([(k,uniChr(v)) for k,v in name2codepoint.items()])
for k in greeks:
if k not in known_entities:
known_entities[k] = greeks[k]
f = isPy3 and asBytes or asUnicode
K = list(known_entities.keys())
for k in K:
known_entities[f(k)] = known_entities[k]
del k, f, K
#------------------------------------------------------------------------
class ParaFrag(ABag):
    """Intermediate representation of one string segment built by ParaParser.

    An attribute bag (see ABag): typical attributes are fontName, fontSize,
    rise, textColor and cbDefn, plus the style flags (bold, italic,
    underline, strike, greek, sub, super, link) set by the tag handlers.
    """
#Lazily-built map from a symbol-encoding byte (as a 1-char string) to the
#corresponding text; populated on first use by _greekConvert.
_greek2Utf8=None
def _greekConvert(data):
    """Translate *data* character by character through the 'symbol' codec
    data from RL_Codecs; unmapped characters become NUL.  Used by the
    <greek> tag handling in handle_data."""
    global _greek2Utf8
    if not _greek2Utf8:
        from reportlab.pdfbase.rl_codecs import RL_Codecs
        import codecs
        #our decoding map
        dm = codecs.make_identity_dict(range(32,256))
        for k in range(0,32):
            dm[k] = None
        dm.update(RL_Codecs._RL_Codecs__rl_codecs_data['symbol'][0])
        _greek2Utf8 = {}
        for k,v in dm.items():
            if not v:
                u = '\0'
            else:
                #py2 needs an explicit utf8 encode; py3 strings are unicode
                if isPy3:
                    u = chr(v)
                else:
                    u = unichr(v).encode('utf8')
            _greek2Utf8[chr(k)] = u
    return ''.join(map(_greek2Utf8.__getitem__,data))
#------------------------------------------------------------------
# !!! NOTE !!! THIS TEXT IS NOW REPLICATED IN PARAGRAPH.PY !!!
# The ParaFormatter will be able to format the following
# tags:
# < /b > - bold
# < /i > - italics
# < u > < /u > - underline
# < strike > < /strike > - strike through
# < super > < /super > - superscript
# < sup > < /sup > - superscript
# < sub > < /sub > - subscript
# <font name=fontfamily/fontname color=colorname size=float>
# <span name=fontfamily/fontname color=colorname backcolor=colorname size=float style=stylename>
# < bullet > </bullet> - bullet text (at head of para only)
# <onDraw name=callable label="a label"/>
# <index [name="callablecanvasattribute"] label="a label"/>
# <link>link text</link>
# attributes of links
# size/fontSize=num
# name/face/fontName=name
# fg/textColor/color=color
# backcolor/backColor/bgcolor=color
# dest/destination/target/href/link=target
# <a>anchor text</a>
# attributes of anchors
# fontSize=num
# fontName=name
# fg/textColor/color=color
# backcolor/backColor/bgcolor=color
# href=href
# <a name="anchorpoint"/>
# <unichar name="unicode character name"/>
# <unichar value="unicode code point"/>
# <img src="path" width="1in" height="1in" valign="bottom"/>
# width="w%" --> fontSize*w/100 idea from Roberto Alsina
# height="h%" --> linewidth*h/100 <ralsina@netmanagers.com.ar>
# <greek> - </greek>
#
# The whole may be surrounded by <para> </para> tags
#
# It will also be able to handle any MathML specified Greek characters.
#------------------------------------------------------------------
class ParaParser(HTMLParser):
#----------------------------------------------------------
# First we will define all of the xml tag handler functions.
#
# start_<tag>(attributes)
# end_<tag>()
#
# While parsing the xml ParaFormatter will call these
# functions to handle the string formatting tags.
# At the start of each tag the corresponding field will
# be set to 1 and at the end tag the corresponding field will
# be set to 0. Then when handle_data is called the options
# for that data will be aparent by the current settings.
#----------------------------------------------------------
def __getattr__( self, attrName ):
"""This way we can handle <TAG> the same way as <tag> (ignoring case)."""
if attrName!=attrName.lower() and attrName!="caseSensitive" and not self.caseSensitive and \
(attrName.startswith("start_") or attrName.startswith("end_")):
return getattr(self,attrName.lower())
raise AttributeError(attrName)
    #### bold
    #Each start_ handler pushes a cloned frag with the relevant style flag
    #set; the matching end_ handler pops it and checks proper nesting.
    def start_b( self, attributes ):
        self._push('b',bold=1)
    def end_b( self ):
        self._pop('b')
    #<strong> behaves exactly like <b>
    def start_strong( self, attributes ):
        self._push('strong',bold=1)
    def end_strong( self ):
        self._pop('strong')
    #### italics
    def start_i( self, attributes ):
        self._push('i',italic=1)
    def end_i( self ):
        self._pop('i')
    #<em> behaves exactly like <i>
    def start_em( self, attributes ):
        self._push('em', italic=1)
    def end_em( self ):
        self._pop('em')
    #### underline
    def start_u( self, attributes ):
        self._push('u',underline=1)
    def end_u( self ):
        self._pop('u')
    #### strike
    def start_strike( self, attributes ):
        self._push('strike',strike=1)
    def end_strike( self ):
        self._pop('strike')
    #### link
    def start_link(self, attributes):
        #push a frag carrying the link target attributes (see _linkAttrMap)
        self._push('link',**self.getAttributes(attributes,_linkAttrMap))
    def end_link(self):
        #a link without any target is meaningless -- fail loudly
        if self._pop('link').link is None:
            raise ValueError('<link> has no target or href')
    #### anchor
    def start_a(self, attributes):
        #<a> has two variants: <a name="..."/> defines an anchor point,
        #<a href="...">text</a> is a hyperlink
        A = self.getAttributes(attributes,_anchorAttrMap)
        name = A.get('name',None)
        if name is not None:
            #anchor variant: only a non-blank name attribute is allowed
            name = name.strip()
            if not name:
                self._syntax_error('<a name="..."/> anchor variant requires non-blank name')
            if len(A)>1:
                self._syntax_error('<a name="..."/> anchor variant only allows name attribute')
                A = dict(name=A['name'])
            A['_selfClosingTag'] = 'anchor'
        else:
            #hyperlink variant: normalise href into our internal 'link' form
            href = A.get('href','').strip()
            A['link'] = href    #convert to our link form
            A.pop('href',None)
        self._push('a',**A)
    def end_a(self):
        frag = self._stack[-1]
        sct = getattr(frag,'_selfClosingTag','')
        if sct:
            #anchor variant: emit an empty frag carrying an 'anchor' cbDefn
            if not (sct=='anchor' and frag.name):
                raise ValueError('Parser failure in <a/>')
            defn = frag.cbDefn = ABag()
            defn.label = defn.kind = 'anchor'
            defn.name = frag.name
            del frag.name, frag._selfClosingTag
            self.handle_data('')
            self._pop('a')
        else:
            #hyperlink variant: require a target
            #NOTE(review): message says <link> but this is the <a> tag -- confirm
            if self._pop('a').link is None:
                raise ValueError('<link> has no href')
    def start_img(self,attributes):
        #<img/> is an empty (self-closing) tag; src is mandatory
        A = self.getAttributes(attributes,_imgAttrMap)
        if not A.get('src'):
            self._syntax_error('<img> needs src attribute')
        A['_selfClosingTag'] = 'img'
        self._push('img',**A)
    def end_img(self):
        frag = self._stack[-1]
        if not getattr(frag,'_selfClosingTag',''):
            raise ValueError('Parser failure in <img/>')
        defn = frag.cbDefn = ABag()
        defn.kind = 'img'
        defn.src = getattr(frag,'src',None)
        #the image is opened at parse time so its natural size can supply
        #defaults for missing width/height attributes
        defn.image = ImageReader(defn.src)
        size = defn.image.getSize()
        defn.width = getattr(frag,'width',size[0])
        defn.height = getattr(frag,'height',size[1])
        defn.valign = getattr(frag,'valign','bottom')
        del frag._selfClosingTag
        self.handle_data('')
        self._pop('img')
#### super script
def start_super( self, attributes ):
self._push('super',super=1)
def end_super( self ):
self._pop('super')
def start_sup( self, attributes ):
self._push('sup',super=1)
def end_sup( self ):
self._pop('sup')
#### sub script
def start_sub( self, attributes ):
self._push('sub',sub=1)
def end_sub( self ):
self._pop('sub')
#### greek script
#### add symbol encoding
def handle_charref(self, name):
try:
if name[0]=='x':
n = int(name[1:],16)
else:
n = int(name)
except ValueError:
self.unknown_charref(name)
return
self.handle_data(uniChr(n)) #.encode('utf8'))
    def syntax_error(self,lineno,message):
        #SGMLParser-style entry point; the line number is ignored and errors
        #are collected (not raised) so parsing can continue
        self._syntax_error(message)
def _syntax_error(self,message):
if message[:10]=="attribute " and message[-17:]==" value not quoted": return
self.errors.append(message)
def start_greek(self, attr):
self._push('greek',greek=1)
def end_greek(self):
self._pop('greek')
def start_unichar(self, attr):
if 'name' in attr:
if 'code' in attr:
self._syntax_error('<unichar/> invalid with both name and code attributes')
try:
v = unicodedata.lookup(attr['name'])
except KeyError:
self._syntax_error('<unichar/> invalid name attribute\n"%s"' % ascii(attr['name']))
v = '\0'
elif 'code' in attr:
try:
v = int(eval(attr['code']))
v = chr(v) if isPy3 else unichr(v)
except:
self._syntax_error('<unichar/> invalid code attribute %s' % ascii(attr['code']))
v = '\0'
else:
v = None
if attr:
self._syntax_error('<unichar/> invalid attribute %s' % list(attr.keys())[0])
if v is not None:
self.handle_data(v)
self._push('unichar',_selfClosingTag='unichar')
def end_unichar(self):
self._pop('unichar')
    def start_font(self,attr):
        A = self.getAttributes(attr,_spanAttrMap)
        if 'fontName' in A:
            #split a family name like 'Helvetica-Bold' into base + flags
            A['fontName'], A['bold'], A['italic'] = ps2tt(A['fontName'])
        self._push('font',**A)
    def end_font(self):
        self._pop('font')
    def start_span(self,attr):
        A = self.getAttributes(attr,_spanAttrMap)
        if 'style' in A:
            #resolve the named style, take its font settings as defaults,
            #then let explicit span attributes override them
            style = self.findSpanStyle(A.pop('style'))
            D = {}
            for k in 'fontName fontSize textColor backColor'.split():
                v = getattr(style,k,self)
                if v is self: continue      #self doubles as a missing-value sentinel
                D[k] = v
            D.update(A)
            A = D
        if 'fontName' in A:
            A['fontName'], A['bold'], A['italic'] = ps2tt(A['fontName'])
        self._push('span',**A)
    def end_span(self):
        self._pop('span')
def start_br(self, attr):
self._push('br',_selfClosingTag='br',lineBreak=True,text='')
def end_br(self):
#print('\nend_br called, %d frags in list' % len(self.fragList))
frag = self._stack[-1]
if not (frag._selfClosingTag=='br' and frag.lineBreak):
raise ValueError('Parser failure in <br/>')
del frag._selfClosingTag
self.handle_data('')
self._pop('br')
def _initial_frag(self,attr,attrMap,bullet=0):
style = self._style
if attr!={}:
style = copy.deepcopy(style)
_applyAttributes(style,self.getAttributes(attr,attrMap))
self._style = style
# initialize semantic values
frag = ParaFrag()
frag.sub = 0
frag.super = 0
frag.rise = 0
frag.underline = 0
frag.strike = 0
frag.greek = 0
frag.link = None
if bullet:
frag.fontName, frag.bold, frag.italic = ps2tt(style.bulletFontName)
frag.fontSize = style.bulletFontSize
frag.textColor = hasattr(style,'bulletColor') and style.bulletColor or style.textColor
else:
frag.fontName, frag.bold, frag.italic = ps2tt(style.fontName)
frag.fontSize = style.fontSize
frag.textColor = style.textColor
return frag
def start_para(self,attr):
frag = self._initial_frag(attr,_paraAttrMap)
frag.__tag__ = 'para'
self._stack = [frag]
def end_para(self):
self._pop('para')
def start_bullet(self,attr):
if hasattr(self,'bFragList'):
self._syntax_error('only one <bullet> tag allowed')
self.bFragList = []
frag = self._initial_frag(attr,_bulletAttrMap,1)
frag.isBullet = 1
frag.__tag__ = 'bullet'
self._stack.append(frag)
def end_bullet(self):
self._pop('bullet')
#---------------------------------------------------------------
def start_seqdefault(self, attr):
try:
default = attr['id']
except KeyError:
default = None
self._seq.setDefaultCounter(default)
def end_seqdefault(self):
pass
def start_seqreset(self, attr):
try:
id = attr['id']
except KeyError:
id = None
try:
base = int(attr['base'])
except:
base=0
self._seq.reset(id, base)
def end_seqreset(self):
pass
def start_seqchain(self, attr):
try:
order = attr['order']
except KeyError:
order = ''
order = order.split()
seq = self._seq
for p,c in zip(order[:-1],order[1:]):
seq.chain(p, c)
end_seqchain = end_seqreset
def start_seqformat(self, attr):
try:
id = attr['id']
except KeyError:
id = None
try:
value = attr['value']
except KeyError:
value = '1'
self._seq.setFormat(id,value)
end_seqformat = end_seqreset
# AR hacking in aliases to allow the proper casing for RML.
# the above ones should be deprecated over time. 2001-03-22
start_seqDefault = start_seqdefault
end_seqDefault = end_seqdefault
start_seqReset = start_seqreset
end_seqReset = end_seqreset
start_seqChain = start_seqchain
end_seqChain = end_seqchain
start_seqFormat = start_seqformat
end_seqFormat = end_seqformat
def start_seq(self, attr):
#if it has a template, use that; otherwise try for id;
#otherwise take default sequence
if 'template' in attr:
templ = attr['template']
self.handle_data(templ % self._seq)
return
elif 'id' in attr:
id = attr['id']
else:
id = None
increment = attr.get('inc', None)
if not increment:
output = self._seq.nextf(id)
else:
#accepts "no" for do not increment, or an integer.
#thus, 0 and 1 increment by the right amounts.
if increment.lower() == 'no':
output = self._seq.thisf(id)
else:
incr = int(increment)
output = self._seq.thisf(id)
self._seq.reset(id, self._seq._this() + incr)
self.handle_data(output)
def end_seq(self):
pass
def start_ondraw(self,attr):
defn = ABag()
if 'name' in attr: defn.name = attr['name']
else: self._syntax_error('<onDraw> needs at least a name attribute')
if 'label' in attr: defn.label = attr['label']
defn.kind='onDraw'
self._push('ondraw',cbDefn=defn)
self.handle_data('')
self._pop('ondraw')
start_onDraw=start_ondraw
end_onDraw=end_ondraw=end_seq
def start_index(self,attr):
attr=self.getAttributes(attr,_indexAttrMap)
defn = ABag()
if 'item' in attr:
label = attr['item']
else:
self._syntax_error('<index> needs at least an item attribute')
if 'name' in attr:
name = attr['name']
else:
name = DEFAULT_INDEX_NAME
format = attr.get('format',None)
if format is not None and format not in ('123','I','i','ABC','abc'):
raise ValueError('index tag format is %r not valid 123 I i ABC or abc' % offset)
offset = attr.get('offset',None)
if offset is not None:
try:
offset = int(offset)
except:
raise ValueError('index tag offset is %r not an int' % offset)
defn.label = encode_label((label,format,offset))
defn.name = name
defn.kind='index'
self._push('index',cbDefn=defn)
self.handle_data('')
self._pop('index',)
end_index=end_seq
def start_unknown(self,attr):
pass
end_unknown=end_seq
#---------------------------------------------------------------
    def _push(self,tag,**attr):
        #clone the current top-of-stack frag so settings are inherited,
        #then overlay this tag's attributes
        frag = copy.copy(self._stack[-1])
        frag.__tag__ = tag
        _applyAttributes(frag,attr)
        self._stack.append(frag)
    def _pop(self,tag):
        #tags must nest properly; report the mismatch otherwise
        frag = self._stack.pop()
        if tag==frag.__tag__: return frag
        raise ValueError('Parse error: saw </%s> instead of expected </%s>' % (tag,frag.__tag__))
def getAttributes(self,attr,attrMap):
A = {}
for k, v in attr.items():
if not self.caseSensitive:
k = k.lower()
if k in list(attrMap.keys()):
j = attrMap[k]
func = j[1]
try:
A[j[0]] = v if func is None else func(v)
except:
self._syntax_error('%s: invalid value %s'%(k,v))
else:
self._syntax_error('invalid attribute name %s'%k)
return A
#----------------------------------------------------------------
    def __init__(self,verbose=0, caseSensitive=0, ignoreUnknownTags=1):
        #convert_charrefs=False keeps entity/charref handling in our own
        #hooks (handle_entityref/handle_charref); the kwarg exists from 3.4
        HTMLParser.__init__(self,
            **(dict(convert_charrefs=False) if sys.version_info>=(3,4) else {}))
        self.verbose = verbose
        #HTMLParser is case insensitive anyway, but the rml interface still needs this
        #all start/end_ methods should have a lower case version for HTMLParser
        self.caseSensitive = caseSensitive
        self.ignoreUnknownTags = ignoreUnknownTags
def _iReset(self):
self.fragList = []
if hasattr(self, 'bFragList'): delattr(self,'bFragList')
def _reset(self, style):
'''reset the parser'''
HTMLParser.reset(self)
# initialize list of string segments to empty
self.errors = []
self._style = style
self._iReset()
#----------------------------------------------------------------
    def handle_data(self,data):
        "Creates an intermediate representation of string segments."
        #The old parser would only 'see' a string after all entities had
        #been processed. Thus, 'Hello ™ World' would emerge as one
        #fragment. HTMLParser processes these separately. We want to ensure
        #that successive calls like this are concatenated, to prevent too many
        #fragments being created.
        frag = copy.copy(self._stack[-1])
        if hasattr(frag,'cbDefn'):
            #callback frags (onDraw/index/anchor/img) must carry no text
            kind = frag.cbDefn.kind
            if data: self._syntax_error('Only empty <%s> tag allowed' % kind)
        elif hasattr(frag,'_selfClosingTag'):
            #self-closing tags (br/unichar/...) also carry no text of their own
            if data!='': self._syntax_error('No content allowed in %s tag' % frag._selfClosingTag)
            return
        else:
            # if sub and super are both on they will cancel each other out
            if frag.sub == 1 and frag.super == 1:
                frag.sub = 0
                frag.super = 0
            #sub/superscript shift the baseline (rise) and shrink the font,
            #using the module-level subFraction/superFraction/sizeDelta
            if frag.sub:
                frag.rise = -frag.fontSize*subFraction
                frag.fontSize = max(frag.fontSize-sizeDelta,3)
            elif frag.super:
                frag.rise = frag.fontSize*superFraction
                frag.fontSize = max(frag.fontSize-sizeDelta,3)
            if frag.greek:
                frag.fontName = 'symbol'
                data = _greekConvert(data)
        # bold, italic, and underline
        frag.fontName = tt2ps(frag.fontName,frag.bold,frag.italic)
        #save our data
        frag.text = data
        if hasattr(frag,'isBullet'):
            #bullet text is collected separately from the body text
            delattr(frag,'isBullet')
            self.bFragList.append(frag)
        else:
            self.fragList.append(frag)
def handle_cdata(self,data):
self.handle_data(data)
def _setup_for_parse(self,style):
self._seq = reportlab.lib.sequencer.getSequencer()
self._reset(style) # reinitialise the parser
def _complete_parse(self):
"Reset after parsing, to be ready for next paragraph"
del self._seq
style = self._style
del self._style
if len(self.errors)==0:
fragList = self.fragList
bFragList = hasattr(self,'bFragList') and self.bFragList or None
self._iReset()
else:
fragList = bFragList = None
return style, fragList, bFragList
def _tt_handle(self,tt):
"Iterate through a pre-parsed tuple tree (e.g. from pyRXP)"
#import pprint
#pprint.pprint(tt)
#find the corresponding start_tagname and end_tagname methods.
#These must be defined.
tag = tt[0]
try:
start = getattr(self,'start_'+tag)
end = getattr(self,'end_'+tag)
except AttributeError:
if not self.ignoreUnknownTags:
raise ValueError('Invalid tag "%s"' % tag)
start = self.start_unknown
end = self.end_unknown
#call the start_tagname method
start(tt[1] or {})
#if tree node has any children, they will either be further nodes,
#or text. Accordingly, call either this function, or handle_data.
C = tt[2]
if C:
M = self._tt_handlers
for c in C:
M[isinstance(c,(list,tuple))](c)
#call the end_tagname method
end()
def _tt_start(self,tt):
self._tt_handlers = self.handle_data,self._tt_handle
self._tt_handle(tt)
def tt_parse(self,tt,style):
'''parse from tupletree form'''
self._setup_for_parse(style)
self._tt_start(tt)
return self._complete_parse()
def findSpanStyle(self,style):
raise ValueError('findSpanStyle not implemented in this parser')
#HTMLParser interface
def parse(self, text, style):
"attempt replacement for parse"
self._setup_for_parse(style)
text = asUnicode(text)
if not(len(text)>=6 and text[0]=='<' and _re_para.match(text)):
text = u"<para>"+text+u"</para>"
try:
self.feed(text)
except:
annotateException('\nparagraph text %s caused exception' % ascii(text))
return self._complete_parse()
def handle_starttag(self, tag, attrs):
"Called by HTMLParser when a tag starts"
#tuple tree parser used to expect a dict. HTML parser
#gives list of two-element tuples
if isinstance(attrs, list):
d = {}
for (k, v) in attrs:
d[k] = v
attrs = d
if not self.caseSensitive: tag = tag.lower()
try:
start = getattr(self,'start_'+tag)
except AttributeError:
if not self.ignoreUnknownTags:
raise ValueError('Invalid tag "%s"' % tag)
start = self.start_unknown
#call it
start(attrs or {})
def handle_endtag(self, tag):
"Called by HTMLParser when a tag ends"
#find the existing end_tagname method
if not self.caseSensitive: tag = tag.lower()
try:
end = getattr(self,'end_'+tag)
except AttributeError:
if not self.ignoreUnknownTags:
raise ValueError('Invalid tag "%s"' % tag)
end = self.end_unknown
#call it
end()
def handle_entityref(self, name):
"Handles a named entity. "
try:
v = known_entities[name]
except:
v = u'&%s;' % name
self.handle_data(v)
if __name__=='__main__':
from reportlab.platypus import cleanBlockQuotedText
from reportlab.lib.styles import _baseFontName
_parser=ParaParser()
def check_text(text,p=_parser):
print('##########')
text = cleanBlockQuotedText(text)
l,rv,bv = p.parse(text,style)
if rv is None:
for l in _parser.errors:
print(l)
else:
print('ParaStyle', l.fontName,l.fontSize,l.textColor)
for l in rv:
sys.stdout.write(l.fontName,l.fontSize,l.textColor,l.bold, l.rise, '|%s|'%l.text[:25])
if hasattr(l,'cbDefn'):
print('cbDefn',getattr(l.cbDefn,'name',''),getattr(l.cbDefn,'label',''),l.cbDefn.kind)
else: print()
style=ParaFrag()
style.fontName=_baseFontName
style.fontSize = 12
style.textColor = black
style.bulletFontName = black
style.bulletFontName=_baseFontName
style.bulletFontSize=12
text='''
<b><i><greek>a</greek>D</i></b>β<unichr value="0x394"/>
<font name="helvetica" size="15" color=green>
Tell me, O muse, of that ingenious hero who travelled far and wide
after</font> he had sacked the famous town of Troy. Many cities did he visit,
and many were the nations with whose manners and customs he was acquainted;
moreover he suffered much by sea while trying to save his own life
and bring his men safely home; but do what he might he could not save
his men, for they perished through their own sheer folly in eating
the cattle of the Sun-god Hyperion; so the god prevented them from
ever reaching home. Tell me, too, about all these things, O daughter
of Jove, from whatsoever source you<super>1</super> may know them.
'''
check_text(text)
check_text('<para> </para>')
check_text('<para font="%s" size=24 leading=28.8 spaceAfter=72>ReportLab -- Reporting for the Internet Age</para>'%_baseFontName)
check_text('''
<font color=red>τ</font>Tell me, O muse, of that ingenious hero who travelled far and wide
after he had sacked the famous town of Troy. Many cities did he visit,
and many were the nations with whose manners and customs he was acquainted;
moreover he suffered much by sea while trying to save his own life
and bring his men safely home; but do what he might he could not save
his men, for they perished through their own sheer folly in eating
the cattle of the Sun-god Hyperion; so the god prevented them from
ever reaching home. Tell me, too, about all these things, O daughter
of Jove, from whatsoever source you may know them.''')
check_text('''
Telemachus took this speech as of good omen and rose at once, for
he was bursting with what he had to say. He stood in the middle of
the assembly and the good herald Pisenor brought him his staff. Then,
turning to Aegyptius, "Sir," said he, "it is I, as you will shortly
learn, who have convened you, for it is I who am the most aggrieved.
I have not got wind of any host approaching about which I would warn
you, nor is there any matter of public moment on which I would speak.
My grieveance is purely personal, and turns on two great misfortunes
which have fallen upon my house. The first of these is the loss of
my excellent father, who was chief among all you here present, and
was like a father to every one of you; the second is much more serious,
and ere long will be the utter ruin of my estate. The sons of all
the chief men among you are pestering my mother to marry them against
her will. They are afraid to go to her father Icarius, asking him
to choose the one he likes best, and to provide marriage gifts for
his daughter, but day by day they keep hanging about my father's house,
sacrificing our oxen, sheep, and fat goats for their banquets, and
never giving so much as a thought to the quantity of wine they drink.
No estate can stand such recklessness; we have now no Ulysses to ward
off harm from our doors, and I cannot hold my own against them. I
shall never all my days be as good a man as he was, still I would
indeed defend myself if I had power to do so, for I cannot stand such
treatment any longer; my house is being disgraced and ruined. Have
respect, therefore, to your own consciences and to public opinion.
Fear, too, the wrath of heaven, lest the gods should be displeased
and turn upon you. I pray you by Jove and Themis, who is the beginning
and the end of councils, [do not] hold back, my friends, and leave
me singlehanded- unless it be that my brave father Ulysses did some
wrong to the Achaeans which you would now avenge on me, by aiding
and abetting these suitors. Moreover, if I am to be eaten out of house
and home at all, I had rather you did the eating yourselves, for I
could then take action against you to some purpose, and serve you
with notices from house to house till I got paid in full, whereas
now I have no remedy."''')
check_text('''
But as the sun was rising from the fair sea into the firmament of
heaven to shed light on mortals and immortals, they reached Pylos
the city of Neleus. Now the people of Pylos were gathered on the sea
shore to offer sacrifice of black bulls to Neptune lord of the Earthquake.
There were nine guilds with five hundred men in each, and there were
nine bulls to each guild. As they were eating the inward meats and
burning the thigh bones [on the embers] in the name of Neptune, Telemachus
and his crew arrived, furled their sails, brought their ship to anchor,
and went ashore. ''')
check_text('''
So the neighbours and kinsmen of Menelaus were feasting and making
merry in his house. There was a bard also to sing to them and play
his lyre, while two tumblers went about performing in the midst of
them when the man struck up with his tune.]''')
check_text('''
"When we had passed the [Wandering] rocks, with Scylla and terrible
Charybdis, we reached the noble island of the sun-god, where were
the goodly cattle and sheep belonging to the sun Hyperion. While still
at sea in my ship I could bear the cattle lowing as they came home
to the yards, and the sheep bleating. Then I remembered what the blind
Theban prophet Teiresias had told me, and how carefully Aeaean Circe
had warned me to shun the island of the blessed sun-god. So being
much troubled I said to the men, 'My men, I know you are hard pressed,
but listen while I <strike>tell you the prophecy that</strike> Teiresias made me, and
how carefully Aeaean Circe warned me to shun the island of the blessed
sun-god, for it was here, she said, that our worst danger would lie.
Head the ship, therefore, away from the island.''')
check_text('''A<B>C&D"E'F''')
check_text('''A< B> C& D" E' F''')
check_text('''<![CDATA[<>&'"]]>''')
check_text('''<bullet face=courier size=14 color=green>+</bullet>
There was a bard also to sing to them and play
his lyre, while two tumblers went about performing in the midst of
them when the man struck up with his tune.]''')
check_text('''<onDraw name="myFunc" label="aaa bbb">A paragraph''')
check_text('''<para><onDraw name="myFunc" label="aaa bbb">B paragraph</para>''')
# HVB, 30.05.2003: Test for new features
_parser.caseSensitive=0
check_text('''Here comes <FONT FACE="Helvetica" SIZE="14pt">Helvetica 14</FONT> with <STRONG>strong</STRONG> <EM>emphasis</EM>.''')
check_text('''Here comes <font face="Helvetica" size="14pt">Helvetica 14</font> with <Strong>strong</Strong> <em>emphasis</em>.''')
check_text('''Here comes <font face="Courier" size="3cm">Courier 3cm</font> and normal again.''')
check_text('''Before the break <br/>the middle line <br/> and the last line.''')
check_text('''This should be an inline image <img src='../../../docs/images/testimg.gif'/>!''')
check_text('''aaa bbbb <u>underline </u> cccc''')
| gpl-3.0 |
visionegg/visionegg | demo/mouse_gabor_2d.py | 1 | 6332 | #!/usr/bin/env python
"""sinusoidal grating in gaussian window"""
from VisionEgg import *
start_default_logging(); watch_exceptions()
from VisionEgg.Core import *
from VisionEgg.Gratings import *
from VisionEgg.SphereMap import *
from VisionEgg.Text import *
from VisionEgg.Textures import *
import VisionEgg.ParameterTypes as ve_types
import math, os
import pygame
import OpenGL.GL as gl
def get_mouse_position():
    """Return the mouse cursor position in OpenGL (bottom-left origin) coordinates."""
    # pygame reports the cursor relative to the top-left corner of the
    # screen, so the vertical axis must be flipped against the screen height.
    x, y = pygame.mouse.get_pos()
    return x, screen.size[1] - y
screen = get_default_screen()

# Gaussian window that multiplies the sinusoidal grating (grating * window
# makes the classic "gabor" patch).
mask = Mask2D(function='gaussian',   # also supports 'circle'
              radius_parameter=25,   # sigma for gaussian, radius for circle (units: num_samples)
              num_samples=(512,512)) # this many texture elements in mask (covers whole size specified below)

# The grating itself, centered in the window; spatial frequency is given in
# cycles/pixel so the on-screen pattern is resolution independent.
grating_stimulus = SinGrating2D(mask             = mask,
                                position         = ( screen.size[0]/2.0, screen.size[1]/2.0 ),
                                size             = ( 800 , 800 ),
                                spatial_freq     = 10.0 / screen.size[0], # units of cycles/pixel
                                temporal_freq_hz = 1.0,
                                num_samples      = 1024,
                                orientation      = 45.0 )

text_color = (0.0,0.0,1.0) # RGB ( blue)
xpos = 10.0
yspace = 5
text_params = {'anchor':'lowerleft','color':text_color,'font_size':20}

# On-screen help, built bottom-up: each Text's rendered height advances
# ypos for the line above it.
text_stimuli = []
ypos = 0
text_stimuli.append( Text( text = "Numeric keypad changes grating orientation.",
                           position=(xpos,ypos),**text_params))
ypos += text_stimuli[-1].parameters.size[1] + yspace
# tf_text / sf_text are kept as named references so the event loop can
# rewrite their contents when the frequencies change.
tf_text = Text(text = "'t/T' changes TF (now %.2f hz)"%(grating_stimulus.parameters.temporal_freq_hz),
               position=(xpos,ypos),**text_params)
text_stimuli.append( tf_text )
ypos += text_stimuli[-1].parameters.size[1] + yspace
text_stimuli.append( Text( text = "'-' shrinks window, '+' grows window (slow)",
                           position=(xpos,ypos),**text_params))
ypos += text_stimuli[-1].parameters.size[1] + yspace
sf_text = Text(text = "'s/S' changes SF (now %.3f cycles/pixel = %.1f pixels/cycle)"%(grating_stimulus.parameters.spatial_freq,1.0/grating_stimulus.parameters.spatial_freq),
               position=(xpos,ypos),**text_params)
text_stimuli.append( sf_text )
ypos += text_stimuli[-1].parameters.size[1] + yspace
text_stimuli.append( Text( text = "Mouse moves gabor, press Esc to quit",
                           position=(xpos,ypos),**text_params))
ypos += text_stimuli[-1].parameters.size[1] + yspace
text_stimuli.append( Text( text = "Demonstration of mouse controlled gabor.",
                           position=(xpos,ypos),**text_params))

viewport = Viewport(screen=screen,
                    stimuli=[grating_stimulus] + text_stimuli)
quit_now = False
shift_key = False

# Numeric keypad layout maps directly onto grating orientation in degrees
# (KP6 = rightward = 0, increasing counter-clockwise).  A dict replaces the
# original eight-way elif chain.
_KEYPAD_ORIENTATIONS = {
    pygame.locals.K_KP1: 225.0,
    pygame.locals.K_KP2: 270.0,
    pygame.locals.K_KP3: 315.0,
    pygame.locals.K_KP4: 180.0,
    pygame.locals.K_KP6:   0.0,
    pygame.locals.K_KP7: 135.0,
    pygame.locals.K_KP8:  90.0,
    pygame.locals.K_KP9:  45.0,
}

def scale_mask_radius(scale):
    """Install a copy of the current mask with its radius scaled by *scale*.

    Mask2D parameters are constant (the mask texture is precomputed), so
    changing the window size requires building a whole new Mask2D -- this is
    why the on-screen help calls resizing "slow".

    Fixes the original code, which assigned an unused ``new_radius`` local
    (computed with the wrong factor in the grow branch) and duplicated the
    Mask2D construction in both branches.
    """
    old_params = grating_stimulus.parameters.mask.constant_parameters
    grating_stimulus.parameters.mask = Mask2D(
        function=old_params.function,
        radius_parameter=old_params.radius_parameter * scale,
        num_samples=old_params.num_samples)

frame_timer = FrameTimer()
while not quit_now:
    for event in pygame.event.get():
        if event.type == pygame.locals.QUIT:
            quit_now = True
        elif event.type == pygame.locals.KEYUP:
            if event.key in (pygame.locals.K_LSHIFT, pygame.locals.K_RSHIFT):
                shift_key = False
        elif event.type == pygame.locals.KEYDOWN:
            if event.key == pygame.locals.K_ESCAPE:
                quit_now = True
            elif event.key in (pygame.locals.K_LSHIFT, pygame.locals.K_RSHIFT):
                shift_key = True
            elif event.key in _KEYPAD_ORIENTATIONS:
                grating_stimulus.parameters.orientation = \
                    _KEYPAD_ORIENTATIONS[event.key]
            elif event.key == pygame.locals.K_s:
                # 's' raises spatial frequency by 1.5x, 'S' (shift) lowers it.
                if shift_key:
                    grating_stimulus.parameters.spatial_freq *= (1.0/1.5)
                else:
                    grating_stimulus.parameters.spatial_freq *= 1.5
                sf_text.parameters.text = "'s/S' changes SF (now %.3f cycles per pixel = %.1f pixels per cycle)"%(grating_stimulus.parameters.spatial_freq,1.0/grating_stimulus.parameters.spatial_freq)
            elif event.key == pygame.locals.K_t:
                # 't' raises temporal frequency by 1.5x, 'T' (shift) lowers it.
                if shift_key:
                    grating_stimulus.parameters.temporal_freq_hz *= (1.0/1.5)
                else:
                    grating_stimulus.parameters.temporal_freq_hz *= 1.5
                tf_text.parameters.text = "'t/T' changes TF (now %.2f hz)"%(grating_stimulus.parameters.temporal_freq_hz)
            elif event.key == pygame.locals.K_MINUS:
                scale_mask_radius(0.8)        # shrink the gaussian window
            elif event.key == pygame.locals.K_EQUALS:
                scale_mask_radius(1.0 / 0.8)  # grow the gaussian window
    screen.clear()
    x, y = get_mouse_position()
    grating_stimulus.parameters.position = x, y
    viewport.draw()
    swap_buffers()
    frame_timer.tick()
frame_timer.log_histogram()
| lgpl-2.1 |
Teagan42/home-assistant | homeassistant/components/songpal/media_player.py | 3 | 10903 | """Support for Songpal-enabled (Sony) media devices."""
import asyncio
from collections import OrderedDict
import logging
from songpal import (
ConnectChange,
ContentChange,
Device,
PowerChange,
SongpalException,
VolumeChange,
)
import voluptuous as vol
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerDevice
from homeassistant.components.media_player.const import (
SUPPORT_SELECT_SOURCE,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_NAME,
EVENT_HOMEASSISTANT_STOP,
STATE_OFF,
STATE_ON,
)
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
from .const import DOMAIN, SET_SOUND_SETTING
_LOGGER = logging.getLogger(__name__)
CONF_ENDPOINT = "endpoint"
# Keys of the songpal.set_sound_setting service call payload.
PARAM_NAME = "name"
PARAM_VALUE = "value"
# Key under hass.data where the endpoint -> device registry lives.
PLATFORM = "songpal"
# Feature bitmask advertised to Home Assistant for every songpal entity.
SUPPORT_SONGPAL = (
    SUPPORT_VOLUME_SET
    | SUPPORT_VOLUME_STEP
    | SUPPORT_VOLUME_MUTE
    | SUPPORT_SELECT_SOURCE
    | SUPPORT_TURN_ON
    | SUPPORT_TURN_OFF
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {vol.Optional(CONF_NAME): cv.string, vol.Required(CONF_ENDPOINT): cv.string}
)
# Schema for the songpal.set_sound_setting service call.
SET_SOUND_SCHEMA = vol.Schema(
    {
        vol.Optional(ATTR_ENTITY_ID): cv.entity_id,
        vol.Required(PARAM_NAME): cv.string,
        vol.Required(PARAM_VALUE): cv.string,
    }
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the Songpal platform.

    Handles both discovery (`discovery_info` supplied) and YAML
    configuration, registers the device under hass.data[PLATFORM] keyed by
    endpoint to avoid duplicate setup, and registers the
    set_sound_setting service once per platform setup.
    """
    # Shared endpoint -> device registry, created on first setup.
    if PLATFORM not in hass.data:
        hass.data[PLATFORM] = {}
    if discovery_info is not None:
        name = discovery_info["name"]
        endpoint = discovery_info["properties"]["endpoint"]
        _LOGGER.debug("Got autodiscovered %s - endpoint: %s", name, endpoint)
        # NOTE(review): discovery path relies on SongpalDevice's default
        # poll=False, same as the explicit YAML path below -- confirm the
        # asymmetry is intentional.
        device = SongpalDevice(name, endpoint)
    else:
        name = config.get(CONF_NAME)
        endpoint = config.get(CONF_ENDPOINT)
        device = SongpalDevice(name, endpoint, poll=False)
    if endpoint in hass.data[PLATFORM]:
        _LOGGER.debug("The endpoint exists already, skipping setup.")
        return
    try:
        await device.initialize()
    except SongpalException as ex:
        # Raising PlatformNotReady makes HA retry the setup later.
        _LOGGER.error("Unable to get methods from songpal: %s", ex)
        raise PlatformNotReady
    hass.data[PLATFORM][endpoint] = device
    async_add_entities([device], True)
    async def async_service_handler(service):
        """Service handler."""
        # When no entity_id is given, the setting is applied to every
        # registered songpal device.
        entity_id = service.data.get("entity_id", None)
        params = {
            key: value for key, value in service.data.items() if key != ATTR_ENTITY_ID
        }
        for device in hass.data[PLATFORM].values():
            if device.entity_id == entity_id or entity_id is None:
                _LOGGER.debug(
                    "Calling %s (entity: %s) with params %s", service, entity_id, params
                )
                await device.async_set_sound_setting(
                    params[PARAM_NAME], params[PARAM_VALUE]
                )
    hass.services.async_register(
        DOMAIN, SET_SOUND_SETTING, async_service_handler, schema=SET_SOUND_SCHEMA
    )
class SongpalDevice(MediaPlayerDevice):
    """Class representing a Songpal device.

    State is kept up to date either by polling (poll=True) or, by default,
    via the device's websocket notification stream activated on the first
    successful async_update().
    """
    def __init__(self, name, endpoint, poll=False):
        """Init."""
        self._name = name
        self._endpoint = endpoint
        # poll=False means: rely on websocket push notifications instead of
        # periodic polling after the first update.
        self._poll = poll
        self.dev = Device(self._endpoint)
        self._sysinfo = None
        # Truthy power status as reported by the device.
        self._state = False
        self._available = False
        self._initialized = False
        self._volume_control = None
        self._volume_min = 0
        self._volume_max = 1
        self._volume = 0
        self._is_muted = False
        self._active_source = None
        # Mapping of input URI -> input object, filled by async_update().
        self._sources = {}
    @property
    def should_poll(self):
        """Return True if the device should be polled."""
        return self._poll
    async def initialize(self):
        """Initialize the device."""
        await self.dev.get_supported_methods()
        self._sysinfo = await self.dev.get_system_info()
    async def async_activate_websocket(self):
        """Activate websocket for listening if wanted."""
        _LOGGER.info("Activating websocket connection..")
        async def _volume_changed(volume: VolumeChange):
            _LOGGER.debug("Volume changed: %s", volume)
            self._volume = volume.volume
            self._is_muted = volume.mute
            await self.async_update_ha_state()
        async def _source_changed(content: ContentChange):
            _LOGGER.debug("Source changed: %s", content)
            if content.is_input:
                self._active_source = self._sources[content.source]
                _LOGGER.debug("New active source: %s", self._active_source)
                await self.async_update_ha_state()
            else:
                _LOGGER.debug("Got non-handled content change: %s", content)
        async def _power_changed(power: PowerChange):
            _LOGGER.debug("Power changed: %s", power)
            self._state = power.status
            await self.async_update_ha_state()
        async def _try_reconnect(connect: ConnectChange):
            _LOGGER.error(
                "Got disconnected with %s, trying to reconnect.", connect.exception
            )
            self._available = False
            self.dev.clear_notification_callbacks()
            await self.async_update_ha_state()
            # Try to reconnect forever, a successful reconnect will initialize
            # the websocket connection again.
            delay = 10
            while not self._available:
                _LOGGER.debug("Trying to reconnect in %s seconds", delay)
                await asyncio.sleep(delay)
                # We need to inform HA about the state in case we are coming
                # back from a disconnected state.
                await self.async_update_ha_state(force_refresh=True)
                # Exponential backoff, capped at 5 minutes between attempts.
                delay = min(2 * delay, 300)
            _LOGGER.info("Reconnected to %s", self.name)
        self.dev.on_notification(VolumeChange, _volume_changed)
        self.dev.on_notification(ContentChange, _source_changed)
        self.dev.on_notification(PowerChange, _power_changed)
        self.dev.on_notification(ConnectChange, _try_reconnect)
        async def listen_events():
            await self.dev.listen_notifications()
        async def handle_stop(event):
            await self.dev.stop_listen_notifications()
        # Shut the listener down cleanly when Home Assistant stops.
        self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, handle_stop)
        self.hass.loop.create_task(listen_events())
    @property
    def name(self):
        """Return name of the device."""
        return self._name
    @property
    def unique_id(self):
        """Return a unique ID."""
        # MAC address from get_system_info() -- stable across restarts.
        return self._sysinfo.macAddr
    @property
    def available(self):
        """Return availability of the device."""
        return self._available
    async def async_set_sound_setting(self, name, value):
        """Change a setting on the device."""
        await self.dev.set_sound_settings(name, value)
    async def async_update(self):
        """Fetch updates from the device."""
        try:
            volumes = await self.dev.get_volume_information()
            if not volumes:
                _LOGGER.error("Got no volume controls, bailing out")
                self._available = False
                return
            if len(volumes) > 1:
                _LOGGER.debug("Got %s volume controls, using the first one", volumes)
            volume = volumes[0]
            _LOGGER.debug("Current volume: %s", volume)
            self._volume_max = volume.maxVolume
            self._volume_min = volume.minVolume
            self._volume = volume.volume
            self._volume_control = volume
            self._is_muted = self._volume_control.is_muted
            status = await self.dev.get_power()
            self._state = status.status
            _LOGGER.debug("Got state: %s", status)
            inputs = await self.dev.get_inputs()
            _LOGGER.debug("Got ins: %s", inputs)
            self._sources = OrderedDict()
            for input_ in inputs:
                self._sources[input_.uri] = input_
                if input_.active:
                    self._active_source = input_
            _LOGGER.debug("Active source: %s", self._active_source)
            self._available = True
            # activate notifications if wanted
            # NOTE(review): awaiting async_create_task blocks this update
            # until websocket activation completes -- confirm this is the
            # intended behaviour rather than fire-and-forget.
            if not self._poll:
                await self.hass.async_create_task(self.async_activate_websocket())
        except SongpalException as ex:
            _LOGGER.error("Unable to update: %s", ex)
            self._available = False
    async def async_select_source(self, source):
        """Select source."""
        # Sources are matched by their human-readable title, as exposed in
        # source_list below.
        for out in self._sources.values():
            if out.title == source:
                await out.activate()
                return
        _LOGGER.error("Unable to find output: %s", source)
    @property
    def source_list(self):
        """Return list of available sources."""
        return [src.title for src in self._sources.values()]
    @property
    def state(self):
        """Return current state."""
        if self._state:
            return STATE_ON
        return STATE_OFF
    @property
    def source(self):
        """Return currently active source."""
        # Avoid a KeyError when _active_source is not (yet) populated
        return getattr(self._active_source, "title", None)
    @property
    def volume_level(self):
        """Return volume level."""
        # NOTE(review): assumes _volume_min is 0; a device with a non-zero
        # minimum would make this ratio wrong -- verify against real devices.
        volume = self._volume / self._volume_max
        return volume
    async def async_set_volume_level(self, volume):
        """Set volume level."""
        # HA passes a 0..1 float; scale it to the device's native range.
        volume = int(volume * self._volume_max)
        _LOGGER.debug("Setting volume to %s", volume)
        return await self._volume_control.set_volume(volume)
    async def async_volume_up(self):
        """Set volume up."""
        # The songpal API accepts relative steps as strings.
        return await self._volume_control.set_volume("+1")
    async def async_volume_down(self):
        """Set volume down."""
        return await self._volume_control.set_volume("-1")
    async def async_turn_on(self):
        """Turn the device on."""
        return await self.dev.set_power(True)
    async def async_turn_off(self):
        """Turn the device off."""
        return await self.dev.set_power(False)
    async def async_mute_volume(self, mute):
        """Mute or unmute the device."""
        _LOGGER.debug("Set mute: %s", mute)
        return await self._volume_control.set_mute(mute)
    @property
    def is_volume_muted(self):
        """Return whether the device is muted."""
        return self._is_muted
    @property
    def supported_features(self):
        """Return supported features."""
        return SUPPORT_SONGPAL
| apache-2.0 |
def parse_nick_modes(mode_string):
    """Parse a nick mode string.
    The function returns a list of lists with three members: sign,
    mode and argument. The sign is "+" or "-". The argument is
    always None.
    Example:
    >>> parse_nick_modes("+ab-c")
    [['+', 'a', None], ['+', 'b', None], ['-', 'c', None]]
    """
    # Nick (user) modes never take arguments, so no unary modes are passed.
    return _parse_modes(mode_string, unary_modes="")
def parse_channel_modes(mode_string):
    """Parse a channel mode string.
    The function returns a list of lists with three members: sign,
    mode and argument. The sign is "+" or "-". The argument is
    None if mode isn't one of "b", "k", "l", "v", "o", "h", or "q".
    Example:
    >>> parse_channel_modes("+ab-c foo")
    [['+', 'a', None], ['+', 'b', 'foo'], ['-', 'c', None]]
    """
    # ban, key, limit, voice, op, half-op and owner each consume one argument.
    return _parse_modes(mode_string, unary_modes="bklvohq")
def _parse_modes(mode_string, unary_modes=""):
"""
Parse the mode_string and return a list of triples.
If no string is supplied return an empty list.
>>> _parse_modes('')
[]
If no sign is supplied, return an empty list.
>>> _parse_modes('ab')
[]
Discard unused args.
>>> _parse_modes('+a foo bar baz')
[['+', 'a', None]]
Return none for unary args when not provided
>>> _parse_modes('+abc foo', unary_modes='abc')
[['+', 'a', 'foo'], ['+', 'b', None], ['+', 'c', None]]
This function never throws an error:
>>> import random
>>> import six
>>> unichr = chr if six.PY3 else unichr
>>> def random_text(min_len = 3, max_len = 80):
... len = random.randint(min_len, max_len)
... chars_to_choose = [unichr(x) for x in range(0,1024)]
... chars = (random.choice(chars_to_choose) for x in range(len))
... return ''.join(chars)
>>> def random_texts(min_len = 3, max_len = 80):
... while True:
... yield random_text(min_len, max_len)
>>> import itertools
>>> texts = itertools.islice(random_texts(), 1000)
>>> set(type(_parse_modes(text)) for text in texts) == set([list])
True
"""
# mode_string must be non-empty and begin with a sign
if not mode_string or not mode_string[0] in '+-':
return []
modes = []
parts = mode_string.split()
mode_part, args = parts[0], parts[1:]
for ch in mode_part:
if ch in "+-":
sign = ch
continue
arg = args.pop(0) if ch in unary_modes and args else None
modes.append([sign, ch, arg])
return modes
| agpl-3.0 |
marcosmodesto/django-testapp | django/views/i18n.py | 57 | 9796 | import os
import gettext as gettext_module
from django import http
from django.conf import settings
from django.utils import importlib
from django.utils.translation import check_for_language, activate, to_locale, get_language
from django.utils.text import javascript_quote
from django.utils.encoding import smart_unicode
from django.utils.formats import get_format_modules, get_format
from django.utils.http import is_safe_url
def set_language(request):
    """
    Redirect to a given url while setting the chosen language in the
    session or cookie. The url and the language code need to be
    specified in the request parameters.
    Since this view changes how the user will see the rest of the site, it must
    only be accessed as a POST request. If called as a GET request, it will
    redirect to the page in the request (the 'next' parameter) without changing
    any state.
    """
    next = request.REQUEST.get('next')
    # is_safe_url() guards against open-redirect attacks: fall back to the
    # referer, and finally to '/', if the requested target is off-host.
    if not is_safe_url(url=next, host=request.get_host()):
        next = request.META.get('HTTP_REFERER')
        if not is_safe_url(url=next, host=request.get_host()):
            next = '/'
    response = http.HttpResponseRedirect(next)
    if request.method == 'POST':
        lang_code = request.POST.get('language', None)
        if lang_code and check_for_language(lang_code):
            # Prefer the session when the middleware provides one; otherwise
            # persist the language choice in a cookie on the response.
            if hasattr(request, 'session'):
                request.session['django_language'] = lang_code
            else:
                response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code)
    return response
def get_formats():
    """
    Returns all formats strings required for i18n to work
    """
    FORMAT_SETTINGS = (
        'DATE_FORMAT', 'DATETIME_FORMAT', 'TIME_FORMAT',
        'YEAR_MONTH_FORMAT', 'MONTH_DAY_FORMAT', 'SHORT_DATE_FORMAT',
        'SHORT_DATETIME_FORMAT', 'FIRST_DAY_OF_WEEK', 'DECIMAL_SEPARATOR',
        'THOUSAND_SEPARATOR', 'NUMBER_GROUPING',
        'DATE_INPUT_FORMATS', 'TIME_INPUT_FORMATS', 'DATETIME_INPUT_FORMATS'
    )
    result = {}
    # NOTE(review): get_format() already consults the active format modules
    # internally, so the outer loop over modules assigns the same value on
    # every pass -- confirm before simplifying to a single loop.
    for module in [settings] + get_format_modules(reverse=True):
        for attr in FORMAT_SETTINGS:
            result[attr] = get_format(attr)
    src = []
    for k, v in result.items():
        # Scalars become JS string assignments; tuples/lists become JS
        # arrays of strings.
        if isinstance(v, (basestring, int)):
            src.append("formats['%s'] = '%s';\n" % (javascript_quote(k), javascript_quote(smart_unicode(v))))
        elif isinstance(v, (tuple, list)):
            v = [javascript_quote(smart_unicode(value)) for value in v]
            src.append("formats['%s'] = ['%s'];\n" % (javascript_quote(k), "', '".join(v)))
    return ''.join(src)
NullSource = """
/* gettext identity library */
function gettext(msgid) { return msgid; }
function ngettext(singular, plural, count) { return (count == 1) ? singular : plural; }
function gettext_noop(msgid) { return msgid; }
function pgettext(context, msgid) { return msgid; }
function npgettext(context, singular, plural, count) { return (count == 1) ? singular : plural; }
"""
LibHead = """
/* gettext library */
var catalog = new Array();
"""
LibFoot = """
function gettext(msgid) {
var value = catalog[msgid];
if (typeof(value) == 'undefined') {
return msgid;
} else {
return (typeof(value) == 'string') ? value : value[0];
}
}
function ngettext(singular, plural, count) {
value = catalog[singular];
if (typeof(value) == 'undefined') {
return (count == 1) ? singular : plural;
} else {
return value[pluralidx(count)];
}
}
function gettext_noop(msgid) { return msgid; }
function pgettext(context, msgid) {
var value = gettext(context + '\x04' + msgid);
if (value.indexOf('\x04') != -1) {
value = msgid;
}
return value;
}
function npgettext(context, singular, plural, count) {
var value = ngettext(context + '\x04' + singular, context + '\x04' + plural, count);
if (value.indexOf('\x04') != -1) {
value = ngettext(singular, plural, count);
}
return value;
}
"""
LibFormatHead = """
/* formatting library */
var formats = new Array();
"""
LibFormatFoot = """
function get_format(format_type) {
var value = formats[format_type];
if (typeof(value) == 'undefined') {
return msgid;
} else {
return value;
}
}
"""
SimplePlural = """
function pluralidx(count) { return (count == 1) ? 0 : 1; }
"""
InterPolate = r"""
function interpolate(fmt, obj, named) {
if (named) {
return fmt.replace(/%\(\w+\)s/g, function(match){return String(obj[match.slice(2,-2)])});
} else {
return fmt.replace(/%s/g, function(match){return String(obj.shift())});
}
}
"""
PluralIdx = r"""
function pluralidx(n) {
var v=%s;
if (typeof(v) == 'boolean') {
return v ? 1 : 0;
} else {
return v;
}
}
"""
def null_javascript_catalog(request, domain=None, packages=None):
    """
    Returns "identity" versions of the JavaScript i18n functions -- i.e.,
    versions that don't actually do anything.
    """
    # No catalog lookup at all: just the identity shims plus the
    # always-needed interpolate/format machinery.
    pieces = (NullSource, InterPolate, LibFormatHead, get_formats(), LibFormatFoot)
    return http.HttpResponse(''.join(pieces), 'text/javascript')
def javascript_catalog(request, domain='djangojs', packages=None):
    """
    Returns the selected language catalog as a javascript library.
    Receives the list of packages to check for translations in the
    packages parameter either from an infodict or as a +-delimited
    string from the request. Default is 'django.conf'.
    Additionally you can override the gettext domain for this view,
    but usually you don't want to do that, as JavaScript messages
    go to the djangojs domain. But this might be needed if you
    deliver your JavaScript source from Django templates.
    """
    if request.GET:
        if 'language' in request.GET:
            if check_for_language(request.GET['language']):
                activate(request.GET['language'])
    if packages is None:
        packages = ['django.conf']
    if isinstance(packages, basestring):
        packages = packages.split('+')
    # Only allow installed apps (and django.conf itself) -- the package list
    # may come straight from the URL conf.
    packages = [p for p in packages if p == 'django.conf' or p in settings.INSTALLED_APPS]
    default_locale = to_locale(settings.LANGUAGE_CODE)
    locale = to_locale(get_language())
    # Catalogs are merged in increasing priority: 'en' defaults, then
    # LANGUAGE_CODE, then the currently active locale.
    t = {}
    paths = []
    en_selected = locale.startswith('en')
    en_catalog_missing = True
    # paths of requested packages
    for package in packages:
        p = importlib.import_module(package)
        path = os.path.join(os.path.dirname(p.__file__), 'locale')
        paths.append(path)
    # add the filesystem paths listed in the LOCALE_PATHS setting
    paths.extend(list(reversed(settings.LOCALE_PATHS)))
    # first load all english languages files for defaults
    for path in paths:
        try:
            catalog = gettext_module.translation(domain, path, ['en'])
            t.update(catalog._catalog)
        except IOError:
            pass
        else:
            # 'en' is the selected language and at least one of the packages
            # listed in `packages` has an 'en' catalog
            if en_selected:
                en_catalog_missing = False
    # next load the settings.LANGUAGE_CODE translations if it isn't english
    if default_locale != 'en':
        for path in paths:
            try:
                catalog = gettext_module.translation(domain, path, [default_locale])
            except IOError:
                catalog = None
            if catalog is not None:
                t.update(catalog._catalog)
    # last load the currently selected language, if it isn't identical to the default.
    if locale != default_locale:
        # If the currently selected language is English but it doesn't have a
        # translation catalog (presumably due to being the language translated
        # from) then a wrong language catalog might have been loaded in the
        # previous step. It needs to be discarded.
        if en_selected and en_catalog_missing:
            t = {}
        else:
            locale_t = {}
            for path in paths:
                try:
                    catalog = gettext_module.translation(domain, path, [locale])
                except IOError:
                    catalog = None
                if catalog is not None:
                    locale_t.update(catalog._catalog)
            if locale_t:
                t = locale_t
    src = [LibHead]
    plural = None
    if '' in t:
        # The gettext metadata entry (msgid "") carries the Plural-Forms header.
        for l in t[''].split('\n'):
            if l.startswith('Plural-Forms:'):
                plural = l.split(':',1)[1].strip()
    if plural is not None:
        # this should actually be a compiled function of a typical plural-form:
        # Plural-Forms: nplurals=3; plural=n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2;
        plural = [el.strip() for el in plural.split(';') if el.strip().startswith('plural=')][0].split('=',1)[1]
        src.append(PluralIdx % plural)
    else:
        src.append(SimplePlural)
    csrc = []
    pdict = {}
    for k, v in t.items():
        if k == '':
            continue
        if isinstance(k, basestring):
            csrc.append("catalog['%s'] = '%s';\n" % (javascript_quote(k), javascript_quote(v)))
        elif isinstance(k, tuple):
            # Plural entries are keyed (msgid, index); remember the highest
            # index seen so the JS array can be pre-sized below.
            if k[0] not in pdict:
                pdict[k[0]] = k[1]
            else:
                pdict[k[0]] = max(k[1], pdict[k[0]])
            csrc.append("catalog['%s'][%d] = '%s';\n" % (javascript_quote(k[0]), k[1], javascript_quote(v)))
        else:
            raise TypeError(k)
    csrc.sort()
    # Emit the pre-sized plural arrays before the element assignments that
    # fill them in.
    for k, v in pdict.items():
        src.append("catalog['%s'] = [%s];\n" % (javascript_quote(k), ','.join(["''"]*(v+1))))
    src.extend(csrc)
    src.append(LibFoot)
    src.append(InterPolate)
    src.append(LibFormatHead)
    src.append(get_formats())
    src.append(LibFormatFoot)
    src = ''.join(src)
    return http.HttpResponse(src, 'text/javascript')
| bsd-3-clause |
Dino0631/RedRain-Bot | lib/youtube_dl/extractor/vidme.py | 26 | 9023 | from __future__ import unicode_literals
import itertools
from .common import InfoExtractor
from ..compat import compat_HTTPError
from ..utils import (
ExtractorError,
int_or_none,
float_or_none,
parse_iso8601,
)
class VidmeIE(InfoExtractor):
    """Extractor for individual vid.me videos."""
    IE_NAME = 'vidme'
    # Video IDs are at most 5 alphanumeric characters; longer IDs are user
    # pages handled by VidmeUserIE / VidmeUserLikesIE below.
    _VALID_URL = r'https?://vid\.me/(?:e/)?(?P<id>[\da-zA-Z]{,5})(?:[^\da-zA-Z]|$)'
    _TESTS = [{
        'url': 'https://vid.me/QNB',
        'md5': 'f42d05e7149aeaec5c037b17e5d3dc82',
        'info_dict': {
            'id': 'QNB',
            'ext': 'mp4',
            'title': 'Fishing for piranha - the easy way',
            'description': 'source: https://www.facebook.com/photo.php?v=312276045600871',
            'thumbnail': r're:^https?://.*\.jpg',
            'timestamp': 1406313244,
            'upload_date': '20140725',
            'age_limit': 0,
            'duration': 119.92,
            'view_count': int,
            'like_count': int,
            'comment_count': int,
        },
    }, {
        'url': 'https://vid.me/Gc6M',
        'md5': 'f42d05e7149aeaec5c037b17e5d3dc82',
        'info_dict': {
            'id': 'Gc6M',
            'ext': 'mp4',
            'title': 'O Mere Dil ke chain - Arnav and Khushi VM',
            'thumbnail': r're:^https?://.*\.jpg',
            'timestamp': 1441211642,
            'upload_date': '20150902',
            'uploader': 'SunshineM',
            'uploader_id': '3552827',
            'age_limit': 0,
            'duration': 223.72,
            'view_count': int,
            'like_count': int,
            'comment_count': int,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # tests uploader field
        'url': 'https://vid.me/4Iib',
        'info_dict': {
            'id': '4Iib',
            'ext': 'mp4',
            'title': 'The Carver',
            'description': 'md5:e9c24870018ae8113be936645b93ba3c',
            'thumbnail': r're:^https?://.*\.jpg',
            'timestamp': 1433203629,
            'upload_date': '20150602',
            'uploader': 'Thomas',
            'uploader_id': '109747',
            'age_limit': 0,
            'duration': 97.859999999999999,
            'view_count': int,
            'like_count': int,
            'comment_count': int,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # nsfw test from http://naked-yogi.tumblr.com/post/118312946248/naked-smoking-stretching
        'url': 'https://vid.me/e/Wmur',
        'info_dict': {
            'id': 'Wmur',
            'ext': 'mp4',
            'title': 'naked smoking & stretching',
            'thumbnail': r're:^https?://.*\.jpg',
            'timestamp': 1430931613,
            'upload_date': '20150506',
            'uploader': 'naked-yogi',
            'uploader_id': '1638622',
            'age_limit': 18,
            'duration': 653.26999999999998,
            'view_count': int,
            'like_count': int,
            'comment_count': int,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # nsfw, user-disabled
        'url': 'https://vid.me/dzGJ',
        'only_matching': True,
    }, {
        # suspended
        'url': 'https://vid.me/Ox3G',
        'only_matching': True,
    }, {
        # deleted
        'url': 'https://vid.me/KTPm',
        'only_matching': True,
    }, {
        # no formats in the API response
        'url': 'https://vid.me/e5g',
        'info_dict': {
            'id': 'e5g',
            'ext': 'mp4',
            'title': 'Video upload (e5g)',
            'thumbnail': r're:^https?://.*\.jpg',
            'timestamp': 1401480195,
            'upload_date': '20140530',
            'uploader': None,
            'uploader_id': None,
            'age_limit': 0,
            'duration': 483,
            'view_count': int,
            'like_count': int,
            'comment_count': int,
        },
        'params': {
            'skip_download': True,
        },
    }]
    def _real_extract(self, url):
        video_id = self._match_id(url)
        try:
            response = self._download_json(
                'https://api.vid.me/videoByUrl/%s' % video_id, video_id)
        except ExtractorError as e:
            # The API returns HTTP 400 with a JSON error body for bad IDs;
            # parse it so a meaningful message can be raised below.
            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 400:
                response = self._parse_json(e.cause.read(), video_id)
            else:
                raise
        error = response.get('error')
        if error:
            raise ExtractorError(
                '%s returned error: %s' % (self.IE_NAME, error), expected=True)
        video = response['video']
        if video.get('state') == 'deleted':
            raise ExtractorError(
                'Vidme said: Sorry, this video has been deleted.',
                expected=True)
        if video.get('state') in ('user-disabled', 'suspended'):
            raise ExtractorError(
                'Vidme said: This video has been suspended either due to a copyright claim, '
                'or for violating the terms of use.',
                expected=True)
        # Prefer full videos over "clip" formats via the preference field.
        formats = [{
            'format_id': f.get('type'),
            'url': f['uri'],
            'width': int_or_none(f.get('width')),
            'height': int_or_none(f.get('height')),
            'preference': 0 if f.get('type', '').endswith('clip') else 1,
        } for f in video.get('formats', []) if f.get('uri')]
        # Some API responses carry no formats list but do expose a direct
        # complete_url (see the 'e5g' test case above).
        if not formats and video.get('complete_url'):
            formats.append({
                'url': video.get('complete_url'),
                'width': int_or_none(video.get('width')),
                'height': int_or_none(video.get('height')),
            })
        self._sort_formats(formats)
        title = video['title']
        description = video.get('description')
        thumbnail = video.get('thumbnail_url')
        timestamp = parse_iso8601(video.get('date_created'), ' ')
        uploader = video.get('user', {}).get('username')
        uploader_id = video.get('user', {}).get('user_id')
        age_limit = 18 if video.get('nsfw') is True else 0
        duration = float_or_none(video.get('duration'))
        view_count = int_or_none(video.get('view_count'))
        like_count = int_or_none(video.get('likes_count'))
        comment_count = int_or_none(video.get('comment_count'))
        return {
            'id': video_id,
            # Untitled uploads get a synthetic title so the result is usable.
            'title': title or 'Video upload (%s)' % video_id,
            'description': description,
            'thumbnail': thumbnail,
            'uploader': uploader,
            'uploader_id': uploader_id,
            'age_limit': age_limit,
            'timestamp': timestamp,
            'duration': duration,
            'view_count': view_count,
            'like_count': like_count,
            'comment_count': comment_count,
            'formats': formats,
        }
class VidmeListBaseIE(InfoExtractor):
    """Shared pagination logic for user video/like listings.

    Subclasses must define _API_ITEM (API endpoint segment), _TITLE
    (playlist title suffix) and _VALID_URL.
    """
    # Max possible limit according to https://docs.vid.me/#api-Videos-List
    _LIMIT = 100
    def _entries(self, user_id, user_name):
        # Page through the API until an empty page or the reported total is
        # reached.
        for page_num in itertools.count(1):
            page = self._download_json(
                'https://api.vid.me/videos/%s?user=%s&limit=%d&offset=%d'
                % (self._API_ITEM, user_id, self._LIMIT, (page_num - 1) * self._LIMIT),
                user_name, 'Downloading user %s page %d' % (self._API_ITEM, page_num))
            videos = page.get('videos', [])
            if not videos:
                break
            for video in videos:
                video_url = video.get('full_url') or video.get('embed_url')
                if video_url:
                    yield self.url_result(video_url, VidmeIE.ie_key())
            total = int_or_none(page.get('page', {}).get('total'))
            if total and self._LIMIT * page_num >= total:
                break
    def _real_extract(self, url):
        user_name = self._match_id(url)
        # Resolve the user name to a numeric ID first; the listing API is
        # keyed by user_id.
        user_id = self._download_json(
            'https://api.vid.me/userByUsername?username=%s' % user_name,
            user_name)['user']['user_id']
        return self.playlist_result(
            self._entries(user_id, user_name), user_id,
            '%s - %s' % (user_name, self._TITLE))
class VidmeUserIE(VidmeListBaseIE):
    """All videos uploaded by a vid.me user."""
    IE_NAME = 'vidme:user'
    # 6+ character IDs are user names (video IDs are at most 5 characters);
    # the lookahead leaves .../likes pages to VidmeUserLikesIE.
    _VALID_URL = r'https?://vid\.me/(?:e/)?(?P<id>[\da-zA-Z]{6,})(?!/likes)(?:[^\da-zA-Z]|$)'
    _API_ITEM = 'list'
    _TITLE = 'Videos'
    _TEST = {
        'url': 'https://vid.me/EFARCHIVE',
        'info_dict': {
            'id': '3834632',
            'title': 'EFARCHIVE - %s' % _TITLE,
        },
        'playlist_mincount': 238,
    }
class VidmeUserLikesIE(VidmeListBaseIE):
    """All videos a vid.me user has liked."""
    IE_NAME = 'vidme:user:likes'
    _VALID_URL = r'https?://vid\.me/(?:e/)?(?P<id>[\da-zA-Z]{6,})/likes'
    _API_ITEM = 'likes'
    _TITLE = 'Likes'
    _TEST = {
        'url': 'https://vid.me/ErinAlexis/likes',
        'info_dict': {
            'id': '6483530',
            'title': 'ErinAlexis - %s' % _TITLE,
        },
        'playlist_mincount': 415,
    }
| gpl-3.0 |
kyvinh/home-assistant | homeassistant/components/sensor/dweet.py | 23 | 3540 | """
Support for showing values from Dweet.io.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.dweet/
"""
import json
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_NAME, CONF_VALUE_TEMPLATE, STATE_UNKNOWN, CONF_UNIT_OF_MEASUREMENT)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
REQUIREMENTS = ['dweepy==0.2.0']
_LOGGER = logging.getLogger(__name__)
CONF_DEVICE = 'device'
DEFAULT_NAME = 'Dweet.io Sensor'
# Rate-limit polling of the dweet.io API to once per minute.
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=60)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_DEVICE): cv.string,
    vol.Required(CONF_VALUE_TEMPLATE): cv.template,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
})
# pylint: disable=unused-variable, too-many-function-args
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Setup the Dweet sensor.

    Validates that the configured thing exists on Dweet.io and that the
    value template extracts something from its latest dweet before
    registering the entity.
    """
    import dweepy

    device = config.get(CONF_DEVICE)
    value_template = config.get(CONF_VALUE_TEMPLATE)
    value_template.hass = hass

    # Probe the thing once up front so misconfigurations fail fast.
    try:
        content = json.dumps(dweepy.get_latest_dweet_for(device)[0]['content'])
    except dweepy.DweepyError:
        _LOGGER.error("Device/thing '%s' could not be found", device)
        return False

    if value_template.render_with_possible_json_value(content) == '':
        _LOGGER.error("'%s' was not found", value_template)
        return False

    add_devices([DweetSensor(hass,
                             DweetData(device),
                             config.get(CONF_NAME),
                             value_template,
                             config.get(CONF_UNIT_OF_MEASUREMENT))])
class DweetSensor(Entity):
    """Representation of a Dweet sensor."""

    def __init__(self, hass, dweet, name, value_template, unit_of_measurement):
        """Initialize the sensor."""
        self.hass = hass
        self.dweet = dweet
        self._name = name
        self._value_template = value_template
        self._state = STATE_UNKNOWN
        self._unit_of_measurement = unit_of_measurement
        # Prime the state once at construction time.
        self.update()

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        return self._unit_of_measurement

    @property
    def state(self):
        """Return the state rendered from the latest dweet content."""
        data = self.dweet.data
        if data is None:
            return STATE_UNKNOWN
        raw = json.dumps(data[0]['content'])
        return self._value_template.render_with_possible_json_value(raw)

    def update(self):
        """Get the latest data from REST API."""
        self.dweet.update()
class DweetData(object):
    """The class for handling the data retrieval."""

    def __init__(self, device):
        """Remember the thing name; no network traffic happens here."""
        self._device = device
        self.data = None

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Fetch the latest dweet from Dweet.io (throttled)."""
        import dweepy

        try:
            latest = dweepy.get_latest_dweet_for(self._device)
        except dweepy.DweepyError:
            _LOGGER.error("Device '%s' could not be found", self._device)
            latest = None
        self.data = latest
| apache-2.0 |
tomprince/gemrb | gemrb/GUIScripts/CommonWindow.py | 3 | 9763 | # -*-python-*-
# GemRB - Infinity Engine Emulator
# Copyright (C) 2010 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
import GemRB
from GUIDefines import GS_DIALOGMASK, OP_SET
# message window expansion
def OnIncreaseSize():
	"""Grow the message window one step (dialog size 0 -> 2 -> 6)."""
	flags = GemRB.GetMessageWindowSize()
	expand = flags & GS_DIALOGMASK
	base = flags - expand
	# Already at maximum expansion.
	if expand > 2:
		return
	GemRB.GameSetScreenFlags(base + (expand + 1) * 2, OP_SET)
# message window contraction
def OnDecreaseSize():
	"""Shrink the message window one step (dialog size 6 -> 2 -> 0)."""
	flags = GemRB.GetMessageWindowSize()
	expand = flags & GS_DIALOGMASK
	base = flags - expand
	# Already at minimum size.
	if expand < 2:
		return
	GemRB.GameSetScreenFlags(base + (expand // 2 - 1), OP_SET)
##################################################################
# functions dealing with containers
##################################################################
import GUICommon
import GUIClasses
import GUIWORLD
from ie_stats import *
from GUIDefines import *
# Module-level singletons for the container (loot) window.
ContainerWindow = None  # currently open container window, if any
Container = None        # dict describing the container being looted
# IWD2 uses a larger ground-item grid than the other games.
if GUICommon.GameIsIWD2():
	leftdiv = 5       # ground items per scrollbar step
	ground_size = 10  # number of ground item buttons in the window
else:
	leftdiv = 3
	ground_size = 6
# PST draws item buttons through GUICommonWindows helpers.
if GUICommon.GameIsPST():
	import GUICommonWindows
def UpdateContainerWindow ():
	"""Refresh gold, encumbrance and scrollbar ranges, then redraw the item grids."""
	global Container

	Window = ContainerWindow
	pc = GemRB.GameGetFirstSelectedPC ()
	# Encumbrance labels live on a button in PST, on plain labels elsewhere.
	if GUICommon.GameIsPST():
		GUICommon.SetEncumbranceLabels (Window, 54, None, pc, True)
	else:
		GUICommon.SetEncumbranceLabels (Window, 0x10000043, 0x10000044, pc)
	party_gold = GemRB.GameGetPartyGold ()
	Text = Window.GetControl (0x10000036)
	Text.SetText (str (party_gold))
	Container = GemRB.GetContainer (0) #will use first selected pc anyway
	LeftCount = Container['ItemCount']
	# Left scrollbar: one step per row of ground items (Python 2 integer division).
	ScrollBar = Window.GetControl (52)
	Count = LeftCount / leftdiv
	if Count < 1:
		Count = 1
	ScrollBar.SetVarAssoc ("LeftTopIndex", Count)
	# Right scrollbar: one step per two inventory rows.
	inventory_slots = GemRB.GetSlots (pc, 0x8000)
	RightCount = len(inventory_slots)
	ScrollBar = Window.GetControl (53)
	Count = RightCount / 2
	if Count < 1:
		Count = 1
	ScrollBar.SetVarAssoc ("RightTopIndex", Count)
	RedrawContainerWindow ()
def RedrawContainerWindow ():
	"""Repaint the ground-item and inventory button grids from current scroll state."""
	Window = ContainerWindow

	# NOTE(review): the scroll step is hardcoded to 3 here while
	# UpdateContainerWindow divides by leftdiv (5 in IWD2) — presumably this
	# should be "* leftdiv"; confirm against IWD2 before changing.
	LeftTopIndex = GemRB.GetVar ("LeftTopIndex") * 3
	LeftIndex = GemRB.GetVar ("LeftIndex")
	RightTopIndex = GemRB.GetVar ("RightTopIndex") * 2
	RightIndex = GemRB.GetVar ("RightIndex")
	LeftCount = Container['ItemCount']
	pc = GemRB.GameGetFirstSelectedPC ()
	inventory_slots = GemRB.GetSlots (pc, 0x8000)
	RightCount = len(inventory_slots)

	# Ground (container) item buttons.
	for i in range (ground_size):
		#this is an autoselected container, but we could use PC too
		Slot = GemRB.GetContainerItem (0, i+LeftTopIndex)
		Button = Window.GetControl (i)
		if Slot:
			Button.SetVarAssoc ("LeftIndex", LeftTopIndex+i)
			function = TakeItemContainer
		else:
			Button.SetVarAssoc ("LeftIndex", -1)
			function = None
		if GUICommon.GameIsPST():
			GUICommonWindows.SetItemButton (Window, Button, Slot, function, None)
		else:
			GUICommon.UpdateInventorySlot (pc, Button, Slot, "container")

	# Personal inventory buttons (controls 10-13).
	for i in range (4):
		if i+RightTopIndex < RightCount:
			Slot = GemRB.GetSlotItem (pc, inventory_slots[i+RightTopIndex])
		else:
			Slot = None
		Button = Window.GetControl (i+10)
		#pst had a redundant call here, reenable if it turns out it isn't redundant:
		#GUICommonWindows.SetItemButton (Window, Button, Slot, None, None)
		if Slot:
			Button.SetVarAssoc ("RightIndex", RightTopIndex+i)
			function = DropItemContainer
		else:
			Button.SetVarAssoc ("RightIndex", -1)
			function = None
		if GUICommon.GameIsPST():
			GUICommonWindows.SetItemButton (Window, Button, Slot, function, None)
		else:
			GUICommon.UpdateInventorySlot (pc, Button, Slot, "inventory")

	# shade the inventory icon if it is full
	if Window.HasControl (54):
		Button = Window.GetControl (54)
		free_slots = GemRB.GetSlots (pc, 0x8000, -1)
		if free_slots == ():
			Button.SetState (IE_GUI_BUTTON_PRESSED)
		else:
			Button.SetState (IE_GUI_BUTTON_LOCKED)
def OpenContainerWindow ():
	"""Create and populate the container (loot) window, replacing the normal GUI panes."""
	global ContainerWindow, Container

	# Singleton: never open a second container window.
	if ContainerWindow:
		return

	hideflag = GemRB.HideGUI ()

	GemRB.LoadWindowPack (GUICommon.GetWindowPack())
	ContainerWindow = Window = GemRB.LoadWindow (8)

	#stop gears from interfering
	if GUICommon.GameIsPST():
		GUIWORLD.OldPortraitWindow = GUIClasses.GWindow( GemRB.GetVar ("PortraitWindow") )
		GUICommonWindows.DisableAnimatedWindows ()

	# Remember the windows we displace so CloseContainerWindow can restore them.
	if GUICommon.GameIsIWD2():
		GUIWORLD.OldMessageWindow = GUIClasses.GWindow( GemRB.GetVar ("MessageWindow") )
		GemRB.SetVar ("MessageWindow", Window.ID)
	else:
		GUIWORLD.OldActionsWindow = GUIClasses.GWindow( GemRB.GetVar ("ActionsWindow") )
		GUIWORLD.OldMessageWindow = GUIClasses.GWindow( GemRB.GetVar ("MessageWindow") )
		GemRB.SetVar ("MessageWindow", -1)
		GemRB.SetVar ("ActionsWindow", Window.ID)

	Container = GemRB.GetContainer(0)

	# Gears (time) when options pane is down
	if GUICommon.GameIsBG2():
		Button = Window.GetControl (62)
		Label = Button.CreateLabelOnButton (0x1000003e, "NORMAL", 0)
		Label.SetAnimation ("CPEN")
		Button.SetAnimation ("CGEAR")
		Button.SetBAM ("CDIAL", 0, 0)
		Button.SetState (IE_GUI_BUTTON_ENABLED)
		Button.SetFlags (IE_GUI_BUTTON_PICTURE|IE_GUI_BUTTON_ANIMATED|IE_GUI_BUTTON_NORMAL, OP_SET)
		Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, GUICommon.GearsClicked)
		GUICommon.SetGamedaysAndHourToken()
		Button.SetTooltip(16041)

	# 0-5 - Ground Item
	for i in range (ground_size):
		Button = Window.GetControl (i)
		Button.SetVarAssoc ("LeftIndex", i)
		Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, TakeItemContainer)
		if GUICommon.GameIsPST():
			Button.SetFlags (IE_GUI_BUTTON_ALIGN_RIGHT | IE_GUI_BUTTON_ALIGN_BOTTOM, OP_OR)

	# 10-13 - Personal Item
	for i in range (4):
		Button = Window.GetControl (i+10)
		Button.SetVarAssoc ("RightIndex", i)
		Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, DropItemContainer)
		if GUICommon.GameIsPST():
			Button.SetFlags (IE_GUI_BUTTON_ALIGN_RIGHT | IE_GUI_BUTTON_ALIGN_BOTTOM, OP_OR)

	# left scrollbar (container)
	ScrollBar = Window.GetControl (52)
	ScrollBar.SetEvent (IE_GUI_SCROLLBAR_ON_CHANGE, RedrawContainerWindow)

	# right scrollbar (inventory)
	ScrollBar = Window.GetControl (53)
	ScrollBar.SetEvent (IE_GUI_SCROLLBAR_ON_CHANGE, RedrawContainerWindow)

	# encumbrance and inventory icon
	# iwd has a handy button
	if Window.HasControl (54):
		Button = Window.GetControl (54)
		if GUICommon.GameIsPST():
			Button.SetFont ("NUMBER")
			Button.SetFlags (IE_GUI_BUTTON_NO_IMAGE, OP_SET)
			Button.CreateLabelOnButton (0x10000043, "NUMBER", IE_FONT_ALIGN_LEFT|IE_FONT_ALIGN_TOP)
			Button.CreateLabelOnButton (0x10000044, "NUMBER", IE_FONT_ALIGN_RIGHT|IE_FONT_ALIGN_BOTTOM)
		else:
			Label = Window.CreateLabel (0x10000043, 323,14,60,15,"NUMBER","0:",IE_FONT_ALIGN_LEFT|IE_FONT_ALIGN_TOP)
			Label = Window.CreateLabel (0x10000044, 323,20,80,15,"NUMBER","0:",IE_FONT_ALIGN_RIGHT|IE_FONT_ALIGN_TOP)

	# container icon
	Button = Window.GetControl (50)
	if GUICommon.GameIsPST():
		Button.SetFlags (IE_GUI_BUTTON_NO_IMAGE, OP_SET)
	Button.SetState (IE_GUI_BUTTON_LOCKED)

	# Opening sound and container sprite come from the containr 2DA table.
	if not GUICommon.GameIsPST():
		Table = GemRB.LoadTable ("containr")
		row = Container['Type']
		tmp = Table.GetValue (row, 0)
		if tmp!='*':
			GemRB.PlaySound (tmp)
		tmp = Table.GetValue (row, 1)
		if tmp!='*':
			Button.SetSprites (tmp, 0, 0, 0, 0, 0 )

	# Done
	Button = Window.GetControl (51)
	if GUICommon.GameIsPST():
		Button.SetText (1403)
	else:
		Button.SetText (11973)
	Button.SetFlags (IE_GUI_BUTTON_CANCEL, OP_OR)
	Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, LeaveContainer)

	GemRB.SetVar ("LeftTopIndex", 0)
	GemRB.SetVar ("RightTopIndex", 0)

	UpdateContainerWindow ()
	if hideflag:
		GemRB.UnhideGUI ()
def CloseContainerWindow ():
	"""Tear down the container window and restore the displaced GUI panes."""
	global ContainerWindow

	if not ContainerWindow:
		return

	hideflag = GemRB.HideGUI ()

	ContainerWindow.Unload ()
	# Clear the singleton so OpenContainerWindow() can run again; without
	# this, its "if ContainerWindow: return" guard would keep any further
	# container from ever opening.
	ContainerWindow = None
	if GUICommon.GameIsPST():
		GUICommonWindows.EnableAnimatedWindows ()
		GemRB.SetVar ("PortraitWindow", GUIWORLD.OldPortraitWindow.ID)

	# FIXME: iwd2 bug or just bad naming?
	if GUICommon.GameIsIWD2():
		GemRB.SetVar ("MessageWindow", GUIWORLD.OldMessageWindow.ID)
	else:
		GemRB.SetVar ("ActionsWindow", GUIWORLD.OldActionsWindow.ID)
		GemRB.SetVar ("MessageWindow", GUIWORLD.OldMessageWindow.ID)

	Table = GemRB.LoadTable ("containr")
	row = Container['Type']
	tmp = Table.GetValue (row, 2)
	#play closing sound if applicable
	if tmp!='*':
		GemRB.PlaySound (tmp)

	#it is enough to close here
	if hideflag:
		GemRB.UnhideGUI ()
#doing this way it will inform the core system too, which in turn will call
#CloseContainerWindow ()
def LeaveContainer ():
	"""Ask the core to leave the container; the engine then calls CloseContainerWindow()."""
	GemRB.LeaveContainer()
def DropItemContainer ():
	"""Move the currently selected inventory item into the open container."""
	index = GemRB.GetVar ("RightIndex")
	# -1 means the clicked button had no item on it.
	if index < 0:
		return
	# Translate the visible button index into an actual inventory slot id.
	pc = GemRB.GameGetFirstSelectedPC ()
	slots = GemRB.GetSlots (pc, 0x8000)
	if index < len(slots):
		GemRB.ChangeContainerItem (0, slots[index], 0)
		UpdateContainerWindow ()
def TakeItemContainer ():
	"""Move the currently selected ground/container item into the party inventory."""
	index = GemRB.GetVar ("LeftIndex")
	# Ignore empty buttons (-1) and stale indices past the item count.
	if 0 <= index < Container['ItemCount']:
		GemRB.ChangeContainerItem (0, index, 1)
		UpdateContainerWindow ()
| gpl-2.0 |
dracos/QGIS | python/ext-libs/pygments/lexers/hdl.py | 363 | 16209 | # -*- coding: utf-8 -*-
"""
pygments.lexers.hdl
~~~~~~~~~~~~~~~~~~~
Lexers for hardware descriptor languages.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, include, using, this
from pygments.token import \
Text, Comment, Operator, Keyword, Name, String, Number, Punctuation, \
Error
__all__ = ['VerilogLexer', 'SystemVerilogLexer', 'VhdlLexer']
class VerilogLexer(RegexLexer):
    """
    For verilog source code with preprocessor directives.

    *New in Pygments 1.4.*
    """
    name = 'verilog'
    aliases = ['verilog', 'v']
    filenames = ['*.v']
    mimetypes = ['text/x-verilog']

    #: optional Comment or Whitespace
    _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'

    tokens = {
        'root': [
            (r'^\s*`define', Comment.Preproc, 'macro'),
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text), # line continuation
            (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            (r'[{}#@]', Punctuation),
            (r'L?"', String, 'string'),
            (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
            # Sized/based literals such as 8'hFF or 4'b1010.  The size
            # prefix is optional and part of the same match: the former
            # "([0-9]+)|('h)..." alternation tagged every plain decimal
            # integer as Number.Hex and lexed "8'hFF" as two tokens.
            (r'([0-9]+)?(\'h)[0-9a-fA-F]+', Number.Hex),
            (r'([0-9]+)?(\'b)[01]+', Number.Bin),
            (r'([0-9]+)?(\'d)[0-9]+', Number.Integer),
            (r'([0-9]+)?(\'o)[0-7]+', Number.Oct),
            (r'\'[01xz]', Number),
            (r'\d+[Ll]?', Number.Integer),
            (r'\*/', Error),
            (r'[~!%^&*+=|?:<>/-]', Operator),
            (r'[()\[\],.;\']', Punctuation),
            (r'`[a-zA-Z_][a-zA-Z0-9_]*', Name.Constant),
            (r'^(\s*)(package)(\s+)', bygroups(Text, Keyword.Namespace, Text)),
            (r'^(\s*)(import)(\s+)', bygroups(Text, Keyword.Namespace, Text),
             'import'),
            (r'(always|always_comb|always_ff|always_latch|and|assign|automatic|'
             r'begin|break|buf|bufif0|bufif1|case|casex|casez|cmos|const|'
             r'continue|deassign|default|defparam|disable|do|edge|else|end|endcase|'
             r'endfunction|endgenerate|endmodule|endpackage|endprimitive|endspecify|'
             r'endtable|endtask|enum|event|final|for|force|forever|fork|function|'
             r'generate|genvar|highz0|highz1|if|initial|inout|input|'
             r'integer|join|large|localparam|macromodule|medium|module|'
             r'nand|negedge|nmos|nor|not|notif0|notif1|or|output|packed|'
             r'parameter|pmos|posedge|primitive|pull0|pull1|pulldown|pullup|rcmos|'
             r'ref|release|repeat|return|rnmos|rpmos|rtran|rtranif0|'
             r'rtranif1|scalared|signed|small|specify|specparam|strength|'
             r'string|strong0|strong1|struct|table|task|'
             r'tran|tranif0|tranif1|type|typedef|'
             r'unsigned|var|vectored|void|wait|weak0|weak1|while|'
             r'xnor|xor)\b', Keyword),
            (r'`(accelerate|autoexpand_vectornets|celldefine|default_nettype|'
             r'else|elsif|endcelldefine|endif|endprotect|endprotected|'
             r'expand_vectornets|ifdef|ifndef|include|noaccelerate|noexpand_vectornets|'
             r'noremove_gatenames|noremove_netnames|nounconnected_drive|'
             r'protect|protected|remove_gatenames|remove_netnames|resetall|'
             r'timescale|unconnected_drive|undef)\b', Comment.Preproc),
            (r'\$(bits|bitstoreal|bitstoshortreal|countdrivers|display|fclose|'
             r'fdisplay|finish|floor|fmonitor|fopen|fstrobe|fwrite|'
             r'getpattern|history|incsave|input|itor|key|list|log|'
             r'monitor|monitoroff|monitoron|nokey|nolog|printtimescale|'
             r'random|readmemb|readmemh|realtime|realtobits|reset|reset_count|'
             r'reset_value|restart|rtoi|save|scale|scope|shortrealtobits|'
             r'showscopes|showvariables|showvars|sreadmemb|sreadmemh|'
             r'stime|stop|strobe|time|timeformat|write)\b', Name.Builtin),
            # Note the "|" between wor and shortreal: the original string
            # concatenation produced the bogus keyword "worshortreal" and
            # left wor/shortreal unrecognized.
            (r'(byte|shortint|int|longint|integer|time|'
             r'bit|logic|reg|'
             r'supply0|supply1|tri|triand|trior|tri0|tri1|trireg|uwire|wire|wand|wor|'
             r'shortreal|real|realtime)\b', Keyword.Type),
            ('[a-zA-Z_][a-zA-Z0-9_]*:(?!:)', Name.Label),
            ('[a-zA-Z_][a-zA-Z0-9_]*', Name),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String), # all other characters
            (r'\\\n', String), # line continuation
            (r'\\', String), # stray backslash
        ],
        'macro': [
            (r'[^/\n]+', Comment.Preproc),
            (r'/[*](.|\n)*?[*]/', Comment.Multiline),
            (r'//.*?\n', Comment.Single, '#pop'),
            (r'/', Comment.Preproc),
            (r'(?<=\\)\n', Comment.Preproc),
            (r'\n', Comment.Preproc, '#pop'),
        ],
        'import': [
            (r'[a-zA-Z0-9_:]+\*?', Name.Namespace, '#pop')
        ]
    }

    def get_tokens_unprocessed(self, text):
        for index, token, value in \
            RegexLexer.get_tokens_unprocessed(self, text):
            # Convention: mark all upper case names as constants
            if token is Name:
                if value.isupper():
                    token = Name.Constant
            yield index, token, value
class SystemVerilogLexer(RegexLexer):
    """
    Extends verilog lexer to recognise all SystemVerilog keywords from IEEE
    1800-2009 standard.

    *New in Pygments 1.5.*
    """
    name = 'systemverilog'
    aliases = ['systemverilog', 'sv']
    filenames = ['*.sv', '*.svh']
    mimetypes = ['text/x-systemverilog']

    #: optional Comment or Whitespace
    _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'

    tokens = {
        'root': [
            (r'^\s*`define', Comment.Preproc, 'macro'),
            (r'^(\s*)(package)(\s+)', bygroups(Text, Keyword.Namespace, Text)),
            (r'^(\s*)(import)(\s+)', bygroups(Text, Keyword.Namespace, Text), 'import'),
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text), # line continuation
            (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            (r'[{}#@]', Punctuation),
            (r'L?"', String, 'string'),
            (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
            # Sized/based literals (see VerilogLexer): the size prefix is
            # optional and part of the same match, so plain decimal
            # integers are no longer mis-tagged as Number.Hex and "8'hFF"
            # lexes as one token.
            (r'([0-9]+)?(\'h)[0-9a-fA-F]+', Number.Hex),
            (r'([0-9]+)?(\'b)[01]+', Number.Bin),
            (r'([0-9]+)?(\'d)[0-9]+', Number.Integer),
            (r'([0-9]+)?(\'o)[0-7]+', Number.Oct),
            (r'\'[01xz]', Number),
            (r'\d+[Ll]?', Number.Integer),
            (r'\*/', Error),
            (r'[~!%^&*+=|?:<>/-]', Operator),
            (r'[()\[\],.;\']', Punctuation),
            (r'`[a-zA-Z_][a-zA-Z0-9_]*', Name.Constant),
            (r'(accept_on|alias|always|always_comb|always_ff|always_latch|'
             r'and|assert|assign|assume|automatic|before|begin|bind|bins|'
             r'binsof|bit|break|buf|bufif0|bufif1|byte|case|casex|casez|'
             r'cell|chandle|checker|class|clocking|cmos|config|const|constraint|'
             r'context|continue|cover|covergroup|coverpoint|cross|deassign|'
             r'default|defparam|design|disable|dist|do|edge|else|end|endcase|'
             r'endchecker|endclass|endclocking|endconfig|endfunction|endgenerate|'
             r'endgroup|endinterface|endmodule|endpackage|endprimitive|'
             r'endprogram|endproperty|endsequence|endspecify|endtable|'
             r'endtask|enum|event|eventually|expect|export|extends|extern|'
             r'final|first_match|for|force|foreach|forever|fork|forkjoin|'
             r'function|generate|genvar|global|highz0|highz1|if|iff|ifnone|'
             r'ignore_bins|illegal_bins|implies|import|incdir|include|'
             r'initial|inout|input|inside|instance|int|integer|interface|'
             r'intersect|join|join_any|join_none|large|let|liblist|library|'
             r'local|localparam|logic|longint|macromodule|matches|medium|'
             r'modport|module|nand|negedge|new|nexttime|nmos|nor|noshowcancelled|'
             r'not|notif0|notif1|null|or|output|package|packed|parameter|'
             r'pmos|posedge|primitive|priority|program|property|protected|'
             r'pull0|pull1|pulldown|pullup|pulsestyle_ondetect|pulsestyle_onevent|'
             r'pure|rand|randc|randcase|randsequence|rcmos|real|realtime|'
             r'ref|reg|reject_on|release|repeat|restrict|return|rnmos|'
             r'rpmos|rtran|rtranif0|rtranif1|s_always|s_eventually|s_nexttime|'
             r's_until|s_until_with|scalared|sequence|shortint|shortreal|'
             r'showcancelled|signed|small|solve|specify|specparam|static|'
             r'string|strong|strong0|strong1|struct|super|supply0|supply1|'
             r'sync_accept_on|sync_reject_on|table|tagged|task|this|throughout|'
             r'time|timeprecision|timeunit|tran|tranif0|tranif1|tri|tri0|'
             r'tri1|triand|trior|trireg|type|typedef|union|unique|unique0|'
             r'unsigned|until|until_with|untyped|use|uwire|var|vectored|'
             r'virtual|void|wait|wait_order|wand|weak|weak0|weak1|while|'
             r'wildcard|wire|with|within|wor|xnor|xor)\b', Keyword ),

            (r'(`__FILE__|`__LINE__|`begin_keywords|`celldefine|`default_nettype|'
             r'`define|`else|`elsif|`end_keywords|`endcelldefine|`endif|'
             r'`ifdef|`ifndef|`include|`line|`nounconnected_drive|`pragma|'
             r'`resetall|`timescale|`unconnected_drive|`undef|`undefineall)\b',
             Comment.Preproc ),

            (r'(\$display|\$displayb|\$displayh|\$displayo|\$dumpall|\$dumpfile|'
             r'\$dumpflush|\$dumplimit|\$dumpoff|\$dumpon|\$dumpports|'
             r'\$dumpportsall|\$dumpportsflush|\$dumpportslimit|\$dumpportsoff|'
             r'\$dumpportson|\$dumpvars|\$fclose|\$fdisplay|\$fdisplayb|'
             r'\$fdisplayh|\$fdisplayo|\$feof|\$ferror|\$fflush|\$fgetc|'
             r'\$fgets|\$fmonitor|\$fmonitorb|\$fmonitorh|\$fmonitoro|'
             r'\$fopen|\$fread|\$fscanf|\$fseek|\$fstrobe|\$fstrobeb|\$fstrobeh|'
             r'\$fstrobeo|\$ftell|\$fwrite|\$fwriteb|\$fwriteh|\$fwriteo|'
             r'\$monitor|\$monitorb|\$monitorh|\$monitoro|\$monitoroff|'
             r'\$monitoron|\$plusargs|\$readmemb|\$readmemh|\$rewind|\$sformat|'
             r'\$sformatf|\$sscanf|\$strobe|\$strobeb|\$strobeh|\$strobeo|'
             r'\$swrite|\$swriteb|\$swriteh|\$swriteo|\$test|\$ungetc|'
             r'\$value\$plusargs|\$write|\$writeb|\$writeh|\$writememb|'
             r'\$writememh|\$writeo)\b' , Name.Builtin ),

            (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
            # Note the "|" between wor and shortreal: the original string
            # concatenation produced the bogus keyword "worshortreal".
            (r'(byte|shortint|int|longint|integer|time|'
             r'bit|logic|reg|'
             r'supply0|supply1|tri|triand|trior|tri0|tri1|trireg|uwire|wire|wand|wor|'
             r'shortreal|real|realtime)\b', Keyword.Type),
            ('[a-zA-Z_][a-zA-Z0-9_]*:(?!:)', Name.Label),
            ('[a-zA-Z_][a-zA-Z0-9_]*', Name),
        ],
        'classname': [
            (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop'),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String), # all other characters
            (r'\\\n', String), # line continuation
            (r'\\', String), # stray backslash
        ],
        'macro': [
            (r'[^/\n]+', Comment.Preproc),
            (r'/[*](.|\n)*?[*]/', Comment.Multiline),
            (r'//.*?\n', Comment.Single, '#pop'),
            (r'/', Comment.Preproc),
            (r'(?<=\\)\n', Comment.Preproc),
            (r'\n', Comment.Preproc, '#pop'),
        ],
        'import': [
            (r'[a-zA-Z0-9_:]+\*?', Name.Namespace, '#pop')
        ]
    }

    def get_tokens_unprocessed(self, text):
        for index, token, value in \
            RegexLexer.get_tokens_unprocessed(self, text):
            # Convention: mark all upper case names as constants
            if token is Name:
                if value.isupper():
                    token = Name.Constant
            yield index, token, value

    # Implicitly static under pygments' LexerMeta; no self parameter.
    def analyse_text(text):
        if text.startswith('//') or text.startswith('/*'):
            return 0.5
class VhdlLexer(RegexLexer):
    """
    For VHDL source code.

    *New in Pygments 1.5.*
    """
    name = 'vhdl'
    aliases = ['vhdl']
    filenames = ['*.vhdl', '*.vhd']
    mimetypes = ['text/x-vhdl']
    # VHDL is case-insensitive; patterns may span lines.
    flags = re.MULTILINE | re.IGNORECASE

    tokens = {
        'root': [
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text), # line continuation
            (r'--(?![!#$%&*+./<=>?@\^|_~]).*?$', Comment.Single),
            (r"'(U|X|0|1|Z|W|L|H|-)'", String.Char),
            (r'[~!%^&*+=|?:<>/-]', Operator),
            (r"'[a-zA-Z_][a-zA-Z0-9_]*", Name.Attribute),
            (r'[()\[\],.;\']', Punctuation),
            (r'"[^\n\\]*"', String),

            (r'(library)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)',
             bygroups(Keyword, Text, Name.Namespace)),
            (r'(use)(\s+)(entity)', bygroups(Keyword, Text, Keyword)),
            (r'(use)(\s+)([a-zA-Z_][\.a-zA-Z0-9_]*)',
             bygroups(Keyword, Text, Name.Namespace)),
            (r'(entity|component)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)',
             bygroups(Keyword, Text, Name.Class)),
            (r'(architecture|configuration)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)(\s+)'
             r'(of)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)(\s+)(is)',
             bygroups(Keyword, Text, Name.Class, Text, Keyword, Text,
                      Name.Class, Text, Keyword)),

            (r'(end)(\s+)', bygroups(using(this), Text), 'endblock'),

            include('types'),
            include('keywords'),
            include('numbers'),

            (r'[a-zA-Z_][a-zA-Z0-9_]*', Name),
        ],
        'endblock': [
            include('keywords'),
            (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class),
            (r'(\s+)', Text),
            (r';', Punctuation, '#pop'),
        ],
        'types': [
            (r'(boolean|bit|character|severity_level|integer|time|delay_length|'
             r'natural|positive|string|bit_vector|file_open_kind|'
             r'file_open_status|std_ulogic|std_ulogic_vector|std_logic|'
             r'std_logic_vector)\b', Keyword.Type),
        ],
        'keywords': [
            (r'(abs|access|after|alias|all|and|'
             r'architecture|array|assert|attribute|begin|block|'
             r'body|buffer|bus|case|component|configuration|'
             r'constant|disconnect|downto|else|elsif|end|'
             r'entity|exit|file|for|function|generate|'
             r'generic|group|guarded|if|impure|in|'
             r'inertial|inout|is|label|library|linkage|'
             r'literal|loop|map|mod|nand|new|'
             r'next|nor|not|null|of|on|'
             r'open|or|others|out|package|port|'
             r'postponed|procedure|process|pure|range|record|'
             r'register|reject|return|rol|ror|select|'
             r'severity|signal|shared|sla|sli|sra|'
             r'srl|subtype|then|to|transport|type|'
             r'units|until|use|variable|wait|when|'
             r'while|with|xnor|xor)\b', Keyword),
        ],
        'numbers': [
            (r'\d{1,2}#[0-9a-fA-F_]+#?', Number.Integer),
            # NOTE(review): this looks intended for binary fractions but the
            # fractional part matches exactly one digit — confirm intent.
            (r'[0-1_]+(\.[0-1_])', Number.Integer),
            (r'\d+', Number.Integer),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float),
            # NOTE(review): H"..." and B"..." bit-string literals are tagged
            # Number.Oct — presumably Number.Hex / a binary token were meant;
            # verify against the pygments token set before changing.
            (r'H"[0-9a-fA-F_]+"', Number.Oct),
            (r'O"[0-7_]+"', Number.Oct),
            (r'B"[0-1_]+"', Number.Oct),
        ],
    }
| gpl-2.0 |
ArneBab/pypyjs | website/demo/home/rfk/repos/pypy/lib-python/2.7/pty.py | 173 | 5058 | """Pseudo terminal utilities."""
# Bugs: No signal handling. Doesn't set slave termios and window size.
# Only tested on Linux.
# See: W. Richard Stevens. 1992. Advanced Programming in the
# UNIX Environment. Chapter 19.
# Author: Steen Lumholt -- with additions by Guido.
from select import select
import os
import tty
# Public API; the remaining helpers are private.
__all__ = ["openpty","fork","spawn"]

# Standard file descriptor numbers.
STDIN_FILENO = 0
STDOUT_FILENO = 1
STDERR_FILENO = 2

# Value returned by fork()/os.fork() in the child process.
CHILD = 0
def openpty():
    """openpty() -> (master_fd, slave_fd)

    Open a pty master/slave pair, using os.openpty() if possible."""
    try:
        pair = os.openpty()
    except (AttributeError, OSError):
        # os.openpty() missing or failed: fall back to the manual
        # BSD/SGI-style emulation.
        master_fd, slave_name = _open_terminal()
        pair = (master_fd, slave_open(slave_name))
    return pair
def master_open():
    """master_open() -> (master_fd, slave_name)

    Open a pty master and return the fd, and the filename of the slave end.
    Deprecated, use openpty() instead."""
    try:
        master, slave = os.openpty()
    except (AttributeError, OSError):
        # No os.openpty(): use the manual fallback directly.
        return _open_terminal()
    # Only the slave's name is needed by this legacy interface.
    name = os.ttyname(slave)
    os.close(slave)
    return master, name
def _open_terminal():
    """Open pty master and return (master_fd, tty_name).
    SGI and generic BSD version, for when openpty() fails.

    Note: this file targets Python 2 (octal 0666, "except E, v" syntax).
    """
    # SGI IRIX provides a dedicated pty allocator.
    try:
        import sgi
    except ImportError:
        pass
    else:
        try:
            tty_name, master_fd = sgi._getpty(os.O_RDWR, 0666, 0)
        except IOError, msg:
            raise os.error, msg
        return master_fd, tty_name
    # Classic BSD: probe /dev/pty[p-S][0-f] until one opens.
    for x in 'pqrstuvwxyzPQRST':
        for y in '0123456789abcdef':
            pty_name = '/dev/pty' + x + y
            try:
                fd = os.open(pty_name, os.O_RDWR)
            except os.error:
                continue
            # Matching slave device is /dev/tty with the same suffix.
            return (fd, '/dev/tty' + x + y)
    raise os.error, 'out of pty devices'
def slave_open(tty_name):
    """slave_open(tty_name) -> slave_fd

    Open the pty slave and acquire the controlling terminal, returning
    opened filedescriptor.
    Deprecated, use openpty() instead."""
    fd = os.open(tty_name, os.O_RDWR)
    try:
        from fcntl import ioctl, I_PUSH
    except ImportError:
        # No STREAMS support (e.g. Linux/BSD): nothing more to do.
        return fd
    # Solaris-style STREAMS: push terminal emulation modules onto the slave.
    try:
        ioctl(fd, I_PUSH, "ptem")
        ioctl(fd, I_PUSH, "ldterm")
    except IOError:
        pass
    return fd
def fork():
    """fork() -> (pid, master_fd)
    Fork and make the child a session leader with a controlling terminal."""

    # Fast path: let the OS do everything at once.
    try:
        pid, fd = os.forkpty()
    except (AttributeError, OSError):
        pass
    else:
        if pid == CHILD:
            try:
                os.setsid()
            except OSError:
                # os.forkpty() already set us session leader
                pass
        return pid, fd

    # Manual fallback: open a pty pair, then fork and wire up the child.
    master_fd, slave_fd = openpty()
    pid = os.fork()
    if pid == CHILD:
        # Establish a new session.
        os.setsid()
        os.close(master_fd)
        # Slave becomes stdin/stdout/stderr of child.
        os.dup2(slave_fd, STDIN_FILENO)
        os.dup2(slave_fd, STDOUT_FILENO)
        os.dup2(slave_fd, STDERR_FILENO)
        if (slave_fd > STDERR_FILENO):
            os.close (slave_fd)
        # Explicitly open the tty to make it become a controlling tty.
        tmp_fd = os.open(os.ttyname(STDOUT_FILENO), os.O_RDWR)
        os.close(tmp_fd)
    else:
        # Parent keeps only the master end.
        os.close(slave_fd)

    # Parent and child process.
    return pid, master_fd
def _writen(fd, data):
    """Write all the data to a descriptor.

    Loops on partial writes, since os.write() may consume fewer bytes
    than requested.  Testing truthiness instead of "data != ''" keeps
    the loop terminating for bytes input as well (b'' != '' is True
    under Python 3, which made the old form spin forever); behavior is
    identical for str on Python 2.
    """
    while data:
        n = os.write(fd, data)
        data = data[n:]
def _read(fd):
    """Default read function: read and return up to 1024 bytes from fd."""
    return os.read(fd, 1024)
def _copy(master_fd, master_read=_read, stdin_read=_read):
    """Parent copy loop.
    Copies
            pty master -> standard output   (master_read)
            standard input -> pty master    (stdin_read)

    NOTE(review): there is no explicit exit; after both descriptors hit
    EOF select() is called with empty lists.  In practice callers exit via
    the OSError/EIO the pty master raises when the child dies (handled in
    spawn()) — confirm before relying on a clean return.
    """
    fds = [master_fd, STDIN_FILENO]
    while True:
        # Block until at least one side has data.
        rfds, wfds, xfds = select(fds, [], [])
        if master_fd in rfds:
            data = master_read(master_fd)
            if not data:  # Reached EOF.
                fds.remove(master_fd)
            else:
                os.write(STDOUT_FILENO, data)
        if STDIN_FILENO in rfds:
            data = stdin_read(STDIN_FILENO)
            if not data:
                fds.remove(STDIN_FILENO)
            else:
                _writen(master_fd, data)
def spawn(argv, master_read=_read, stdin_read=_read):
    """Create a spawned process.

    argv may be a program name or an argv sequence; master_read and
    stdin_read customize how data is pulled from the pty master and from
    our stdin (see _copy).
    """
    if isinstance(argv, str):
        argv = (argv,)
    pid, master_fd = fork()
    if pid == CHILD:
        os.execlp(argv[0], *argv)
    # Put the controlling terminal into raw mode so keystrokes pass
    # through to the child unmodified.
    try:
        mode = tty.tcgetattr(STDIN_FILENO)
        tty.setraw(STDIN_FILENO)
        restore = 1
    except tty.error:    # This is the same as termios.error
        restore = 0
    try:
        _copy(master_fd, master_read, stdin_read)
    except (IOError, OSError):
        # Normal exit path on Linux: the master raises EIO once the
        # child terminates.
        pass
    finally:
        # Restore the terminal mode even when _copy() returns normally;
        # previously it was only restored on the exception path.
        if restore:
            tty.tcsetattr(STDIN_FILENO, tty.TCSAFLUSH, mode)
    os.close(master_fd)
| mit |
chauhanmohit/phantomjs | src/qt/qtwebkit/Tools/jhbuild/jhbuildutils.py | 117 | 1843 | import glob
import os.path
import sys
import __builtin__
top_level_dir = None
def top_level_path(*args):
    """Join *args* onto the (lazily computed) top-level checkout directory."""
    global top_level_dir
    if not top_level_dir:
        # This module lives two levels below the checkout root.
        here = os.path.dirname(__file__)
        top_level_dir = os.path.join(here, '..', '..')
    return os.path.join(top_level_dir, *args)
def get_dependencies_path():
    """Return the absolute directory holding jhbuild-managed dependencies."""
    # WEBKIT_OUTPUTDIR overrides the default in-checkout build location.
    output_dir = os.environ.get('WEBKIT_OUTPUTDIR')
    if output_dir is not None:
        return os.path.abspath(os.path.join(output_dir, 'Dependencies'))
    return os.path.abspath(top_level_path('WebKitBuild', 'Dependencies'))
def get_config_file_for_platform(platform):
    """Return the jhbuildrc path for *platform* (e.g. 'gtk' -> Tools/gtk/jhbuildrc)."""
    rc_dir = os.path.join('Tools', platform)
    return top_level_path(rc_dir, 'jhbuildrc')
def enter_jhbuild_environment_if_available(platform):
    """Make the bundled jhbuild importable and load its config.

    Returns True on success, False when the dependencies checkout is
    missing or the jhbuild config cannot be loaded.  (Python 2 file:
    note the "except E, v" syntax below.)
    """
    if not os.path.exists(get_dependencies_path()):
        return False

    # Sometimes jhbuild chooses to install in a way that reads the library from the source directory, so fall
    # back to that method.
    source_path = os.path.join(get_dependencies_path(), "Source", "jhbuild")
    sys.path.insert(0, source_path)

    # When loading jhbuild from the source checkout it fails if the SRCDIR variable is not set.
    __builtin__.__dict__['SRCDIR'] = source_path

    # We don't know the Python version, so we just assume that we can safely take the first one in the list.
    site_packages_path = glob.glob(os.path.join(get_dependencies_path(), "Root", "lib", "*", "site-packages"))
    if len(site_packages_path):
        site_packages_path = site_packages_path[0]
        sys.path.insert(0, site_packages_path)

    try:
        import jhbuild.config
        from jhbuild.errors import FatalError
        config = jhbuild.config.Config(get_config_file_for_platform(platform))
    except FatalError, exception:
        sys.stderr.write('Could not load jhbuild config file: %s\n' % exception.args[0])
        return False

    return True
| bsd-3-clause |
HesselTjeerdsma/Cyber-Physical-Pacman-Game | Algor/flask/lib/python2.7/site-packages/numpy/doc/basics.py | 7 | 7905 | """
============
Array basics
============
Array types and conversions between types
=========================================
NumPy supports a much greater variety of numerical types than Python does.
This section shows which are available, and how to modify an array's data-type.
============ ==========================================================
Data type Description
============ ==========================================================
``bool_`` Boolean (True or False) stored as a byte
``int_`` Default integer type (same as C ``long``; normally either
``int64`` or ``int32``)
intc Identical to C ``int`` (normally ``int32`` or ``int64``)
intp Integer used for indexing (same as C ``ssize_t``; normally
either ``int32`` or ``int64``)
int8 Byte (-128 to 127)
int16 Integer (-32768 to 32767)
int32 Integer (-2147483648 to 2147483647)
int64 Integer (-9223372036854775808 to 9223372036854775807)
uint8 Unsigned integer (0 to 255)
uint16 Unsigned integer (0 to 65535)
uint32 Unsigned integer (0 to 4294967295)
uint64 Unsigned integer (0 to 18446744073709551615)
``float_`` Shorthand for ``float64``.
float16 Half precision float: sign bit, 5 bits exponent,
10 bits mantissa
float32 Single precision float: sign bit, 8 bits exponent,
23 bits mantissa
float64 Double precision float: sign bit, 11 bits exponent,
52 bits mantissa
``complex_`` Shorthand for ``complex128``.
complex64 Complex number, represented by two 32-bit floats (real
and imaginary components)
complex128 Complex number, represented by two 64-bit floats (real
and imaginary components)
============ ==========================================================
Additionally to ``intc`` the platform dependent C integer types ``short``,
``long``, ``longlong`` and their unsigned versions are defined.
NumPy numerical types are instances of ``dtype`` (data-type) objects, each
having unique characteristics. Once you have imported NumPy using
::
>>> import numpy as np
the dtypes are available as ``np.bool_``, ``np.float32``, etc.
Advanced types, not listed in the table above, are explored in
section :ref:`structured_arrays`.
There are 5 basic numerical types representing booleans (bool), integers (int),
unsigned integers (uint), floating point (float) and complex. Those with numbers
in their name indicate the bitsize of the type (i.e. how many bits are needed
to represent a single value in memory). Some types, such as ``int`` and
``intp``, have differing bitsizes, dependent on the platforms (e.g. 32-bit
vs. 64-bit machines). This should be taken into account when interfacing
with low-level code (such as C or Fortran) where the raw memory is addressed.
Data-types can be used as functions to convert python numbers to array scalars
(see the array scalar section for an explanation), python sequences of numbers
to arrays of that type, or as arguments to the dtype keyword that many numpy
functions or methods accept. Some examples::
>>> import numpy as np
>>> x = np.float32(1.0)
>>> x
1.0
>>> y = np.int_([1,2,4])
>>> y
array([1, 2, 4])
>>> z = np.arange(3, dtype=np.uint8)
>>> z
array([0, 1, 2], dtype=uint8)
Array types can also be referred to by character codes, mostly to retain
backward compatibility with older packages such as Numeric. Some
documentation may still refer to these, for example::
>>> np.array([1, 2, 3], dtype='f')
array([ 1., 2., 3.], dtype=float32)
We recommend using dtype objects instead.
To convert the type of an array, use the .astype() method (preferred) or
the type itself as a function. For example: ::
>>> z.astype(float) #doctest: +NORMALIZE_WHITESPACE
array([ 0., 1., 2.])
>>> np.int8(z)
array([0, 1, 2], dtype=int8)
Note that, above, we use the *Python* float object as a dtype. NumPy knows
that ``int`` refers to ``np.int_``, ``bool`` means ``np.bool_``,
that ``float`` is ``np.float_`` and ``complex`` is ``np.complex_``.
The other data-types do not have Python equivalents.
To determine the type of an array, look at the dtype attribute::
>>> z.dtype
dtype('uint8')
dtype objects also contain information about the type, such as its bit-width
and its byte-order. The data type can also be used indirectly to query
properties of the type, such as whether it is an integer::
>>> d = np.dtype(int)
>>> d
dtype('int32')
>>> np.issubdtype(d, int)
True
>>> np.issubdtype(d, float)
False
Array Scalars
=============
NumPy generally returns elements of arrays as array scalars (a scalar
with an associated dtype). Array scalars differ from Python scalars, but
for the most part they can be used interchangeably (the primary
exception is for versions of Python older than v2.x, where integer array
scalars cannot act as indices for lists and tuples). There are some
exceptions, such as when code requires very specific attributes of a scalar
or when it checks specifically whether a value is a Python scalar. Generally,
problems are easily fixed by explicitly converting array scalars
to Python scalars, using the corresponding Python type function
(e.g., ``int``, ``float``, ``complex``, ``str``, ``unicode``).
The primary advantage of using array scalars is that
they preserve the array type (Python may not have a matching scalar type
available, e.g. ``int16``). Therefore, the use of array scalars ensures
identical behaviour between arrays and scalars, irrespective of whether the
value is inside an array or not. NumPy scalars also have many of the same
methods arrays do.
Extended Precision
==================
Python's floating-point numbers are usually 64-bit floating-point numbers,
nearly equivalent to ``np.float64``. In some unusual situations it may be
useful to use floating-point numbers with more precision. Whether this
is possible in numpy depends on the hardware and on the development
environment: specifically, x86 machines provide hardware floating-point
with 80-bit precision, and while most C compilers provide this as their
``long double`` type, MSVC (standard for Windows builds) makes
``long double`` identical to ``double`` (64 bits). NumPy makes the
compiler's ``long double`` available as ``np.longdouble`` (and
``np.clongdouble`` for the complex numbers). You can find out what your
numpy provides with ``np.finfo(np.longdouble)``.
NumPy does not provide a dtype with more precision than C
``long double``\\s; in particular, the 128-bit IEEE quad precision
data type (FORTRAN's ``REAL*16``\\) is not available.
For efficient memory alignment, ``np.longdouble`` is usually stored
padded with zero bits, either to 96 or 128 bits. Which is more efficient
depends on hardware and development environment; typically on 32-bit
systems they are padded to 96 bits, while on 64-bit systems they are
typically padded to 128 bits. ``np.longdouble`` is padded to the system
default; ``np.float96`` and ``np.float128`` are provided for users who
want specific padding. In spite of the names, ``np.float96`` and
``np.float128`` provide only as much precision as ``np.longdouble``,
that is, 80 bits on most x86 machines and 64 bits in standard
Windows builds.
Be warned that even if ``np.longdouble`` offers more precision than
python ``float``, it is easy to lose that extra precision, since
python often forces values to pass through ``float``. For example,
the ``%`` formatting operator requires its arguments to be converted
to standard python types, and it is therefore impossible to preserve
extended precision even if many decimal places are requested. It can
be useful to test your code with the value
``1 + np.finfo(np.longdouble).eps``.
"""
from __future__ import division, absolute_import, print_function
| apache-2.0 |
isnnn/Sick-Beard-TPB | lib/html5lib/treewalkers/genshistream.py | 128 | 2431 | from genshi.core import START, END, XML_NAMESPACE, DOCTYPE, TEXT
from genshi.core import START_NS, END_NS, START_CDATA, END_CDATA, PI, COMMENT
from genshi.output import NamespaceFlattener
import _base
from html5lib.constants import voidElements
class TreeWalker(_base.TreeWalker):
    """Tree walker adapting a Genshi markup event stream to html5lib tokens.

    ``self.tree`` is a Genshi stream of ``(kind, data, pos)`` events. The
    walker buffers one event so that each event is tokenized with one event
    of lookahead; this lets a void element's START/END pair be collapsed
    into a single "EmptyTag" token.
    """

    def __iter__(self):
        # depth tracks element nesting. ignore_until records the depth at
        # which an EmptyTag was emitted, so the void element's redundant
        # child/END events are skipped until nesting returns to that depth.
        depth = 0
        ignore_until = None
        previous = None
        for event in self.tree:
            if previous is not None:
                if previous[0] == START:
                    depth += 1
                # NOTE(review): when ignore_until is None this comparison
                # relies on Python 2 ordering of None against integers
                # (None <= depth is True, so it just re-assigns None);
                # under Python 3 this line would raise TypeError.
                if ignore_until <= depth:
                    ignore_until = None
                if ignore_until is None:
                    # Tokenize the buffered event, using the current event
                    # as one-event lookahead.
                    for token in self.tokens(previous, event):
                        yield token
                        if token["type"] == "EmptyTag":
                            ignore_until = depth
                if previous[0] == END:
                    depth -= 1
            previous = event
        if previous is not None:
            # Flush the final buffered event (no lookahead available).
            # NOTE(review): the elif condition is always true when reached
            # (the first branch already failed), so it is redundant.
            if ignore_until is None or ignore_until <= depth:
                for token in self.tokens(previous, None):
                    yield token
            elif ignore_until is not None:
                raise ValueError("Illformed DOM event stream: void element without END_ELEMENT")

    def tokens(self, event, next):
        """Yield html5lib token(s) for a single Genshi *event*.

        *next* is the following event (or None at end of stream); for a void
        element it decides whether a matching END immediately follows and
        should be folded into the EmptyTag token.
        """
        kind, data, pos = event
        if kind == START:
            tag, attrib = data
            name = tag.localname
            namespace = tag.namespace
            if tag in voidElements:
                for token in self.emptyTag(namespace, name, list(attrib),
                                           not next or next[0] != END
                                           or next[1] != tag):
                    yield token
            else:
                yield self.startTag(namespace, name, list(attrib))
        elif kind == END:
            name = data.localname
            namespace = data.namespace
            # END of a void element is swallowed; it was already reported
            # as part of the EmptyTag token.
            if name not in voidElements:
                yield self.endTag(namespace, name)
        elif kind == COMMENT:
            yield self.comment(data)
        elif kind == TEXT:
            for token in self.text(data):
                yield token
        elif kind == DOCTYPE:
            yield self.doctype(*data)
        elif kind in (XML_NAMESPACE, DOCTYPE, START_NS, END_NS, \
          START_CDATA, END_CDATA, PI):
            # Structural/namespace events with no html5lib counterpart.
            # NOTE(review): DOCTYPE here is dead code -- it is consumed by
            # the branch above.
            pass
        else:
            yield self.unknown(kind)
| gpl-3.0 |
romain-dartigues/ansible | lib/ansible/modules/network/f5/bigip_device_auth_ldap.py | 7 | 25083 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_device_auth_ldap
short_description: Manage LDAP device authentication settings on BIG-IP
description:
- Manage LDAP device authentication settings on BIG-IP.
version_added: 2.8
options:
servers:
description:
- Specifies the LDAP servers that the system must use to obtain
authentication information. You must specify a server when you
create an LDAP configuration object.
port:
description:
- Specifies the port that the system uses for access to the remote host server.
- When configuring LDAP device authentication for the first time, if this parameter
is not specified, the default port is C(389).
remote_directory_tree:
description:
- Specifies the file location (tree) of the user authentication database on the
server.
scope:
description:
- Specifies the level of the remote Active Directory or LDAP directory that the
system should search for the user authentication.
choices:
- sub
- one
- base
bind_dn:
description:
- Specifies the distinguished name for the Active Directory or LDAP server user
ID.
- The BIG-IP client authentication module does not support Active Directory or
LDAP servers that do not perform bind referral when authenticating referred
accounts.
- Therefore, if you plan to use Active Directory or LDAP as your authentication
source and want to use referred accounts, make sure your servers perform bind
referral.
bind_password:
description:
- Specifies a password for the Active Directory or LDAP server user ID.
user_template:
description:
- Specifies the distinguished name of the user who is logging on.
- You specify the template as a variable that the system replaces with user-specific
information during the logon attempt.
- For example, you could specify a user template such as C(%s@siterequest.com) or
C(uxml:id=%s,ou=people,dc=siterequest,dc=com).
- When a user attempts to log on, the system replaces C(%s) with the name the user
specified in the Basic Authentication dialog box, and passes that as the
distinguished name for the bind operation.
- The system passes the associated password as the password for the bind operation.
- This field can contain only one C(%s) and cannot contain any other format
specifiers.
check_member_attr:
description:
- Checks the user's member attribute in the remote LDAP or AD group.
type: bool
ssl:
description:
- Specifies whether the system uses an SSL port to communicate with the LDAP server.
choices:
- "yes"
- "no"
- start-tls
ssl_ca_cert:
description:
- Specifies the name of an SSL certificate from a certificate authority (CA).
- To remove this value, use the reserved value C(none).
ssl_client_key:
description:
- Specifies the name of an SSL client key.
- To remove this value, use the reserved value C(none).
ssl_client_cert:
description:
- Specifies the name of an SSL client certificate.
- To remove this value, use the reserved value C(none).
ssl_check_peer:
description:
- Specifies whether the system checks an SSL peer, as a result of which the
system requires and verifies the server certificate.
type: bool
login_ldap_attr:
description:
- Specifies the LDAP directory attribute containing the local user name that is
associated with the selected directory entry.
- When configuring LDAP device authentication for the first time, if this parameter
is not specified, the default port is C(samaccountname).
fallback_to_local:
description:
- Specifies that the system uses the Local authentication method if the remote
authentication method is not available.
type: bool
state:
description:
- When C(present), ensures the device authentication method exists.
- When C(absent), ensures the device authentication method does not exist.
default: present
choices:
- present
- absent
update_password:
description:
- C(always) will always update the C(bind_password).
- C(on_create) will only set the C(bind_password) for newly created authentication
mechanisms.
default: always
choices:
- always
- on_create
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Create an LDAP authentication object
bigip_device_auth_ldap:
name: foo
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
param1:
description: The new param1 value of the resource.
returned: changed
type: bool
sample: true
param2:
description: The new param2 value of the resource.
returned: changed
type: string
sample: Foo is bar
'''
from ansible.module_utils.basic import AnsibleModule
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
from library.module_utils.network.f5.common import flatten_boolean
from library.module_utils.network.f5.compare import cmp_str_with_none
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
from ansible.module_utils.network.f5.common import flatten_boolean
from ansible.module_utils.network.f5.compare import cmp_str_with_none
class Parameters(AnsibleF5Parameters):
    """Mapping layer between the module's parameters and the BIG-IP REST API.

    ``api_map`` translates REST attribute names (camelCase) into module
    parameter names (snake_case). ``api_attributes`` lists what may be sent
    to the API, ``returnables`` what is reported back to the user, and
    ``updatables`` which parameters participate in diffing.
    """

    api_map = {
        'bindDn': 'bind_dn',
        'bindPw': 'bind_password',
        'userTemplate': 'user_template',
        'fallback': 'fallback_to_local',
        'loginAttribute': 'login_ldap_attr',
        'sslCheckPeer': 'ssl_check_peer',
        'sslClientCert': 'ssl_client_cert',
        'sslClientKey': 'ssl_client_key',
        'sslCaCertFile': 'ssl_ca_cert',
        'checkRolesGroup': 'check_member_attr',
        'searchBaseDn': 'remote_directory_tree',
    }

    api_attributes = [
        'bindDn',
        'bindPw',
        'checkRolesGroup',
        'loginAttribute',
        'port',
        'scope',
        'searchBaseDn',
        'servers',
        'ssl',
        'sslCaCertFile',
        'sslCheckPeer',
        'sslClientCert',
        'sslClientKey',
        'userTemplate',
    ]

    returnables = [
        'bind_dn',
        'bind_password',
        'check_member_attr',
        'fallback_to_local',
        'login_ldap_attr',
        'port',
        'remote_directory_tree',
        'scope',
        'servers',
        'ssl',
        'ssl_ca_cert',
        'ssl_check_peer',
        'ssl_client_cert',
        'ssl_client_key',
        'user_template',
    ]

    updatables = [
        'bind_dn',
        'bind_password',
        'check_member_attr',
        'fallback_to_local',
        'login_ldap_attr',
        'port',
        'remote_directory_tree',
        'scope',
        'servers',
        'ssl',
        'ssl_ca_cert',
        'ssl_check_peer',
        'ssl_client_cert',
        'ssl_client_key',
        'user_template',
    ]

    @property
    def ssl_ca_cert(self):
        # Reserved value 'none' (or empty string) clears the setting on the
        # device; otherwise the name is fully qualified with the partition.
        if self._values['ssl_ca_cert'] is None:
            return None
        elif self._values['ssl_ca_cert'] in ['none', '']:
            return ''
        return fq_name(self.partition, self._values['ssl_ca_cert'])

    @property
    def ssl_client_key(self):
        # Same reserved-value handling as ssl_ca_cert.
        if self._values['ssl_client_key'] is None:
            return None
        elif self._values['ssl_client_key'] in ['none', '']:
            return ''
        return fq_name(self.partition, self._values['ssl_client_key'])

    @property
    def ssl_client_cert(self):
        # Same reserved-value handling as ssl_ca_cert.
        if self._values['ssl_client_cert'] is None:
            return None
        elif self._values['ssl_client_cert'] in ['none', '']:
            return ''
        return fq_name(self.partition, self._values['ssl_client_cert'])

    @property
    def ssl_check_peer(self):
        # Normalized to 'yes'/'no'/None by flatten_boolean.
        return flatten_boolean(self._values['ssl_check_peer'])

    @property
    def fallback_to_local(self):
        return flatten_boolean(self._values['fallback_to_local'])

    @property
    def check_member_attr(self):
        return flatten_boolean(self._values['check_member_attr'])

    @property
    def login_ldap_attr(self):
        # 'none'/'' is a reserved value meaning "remove this setting".
        if self._values['login_ldap_attr'] is None:
            return None
        elif self._values['login_ldap_attr'] in ['none', '']:
            return ''
        return self._values['login_ldap_attr']

    @property
    def user_template(self):
        # 'none'/'' is a reserved value meaning "remove this setting".
        if self._values['user_template'] is None:
            return None
        elif self._values['user_template'] in ['none', '']:
            return ''
        return self._values['user_template']

    @property
    def ssl(self):
        # 'start-tls' passes through; 'yes'/'no' style values are flattened.
        if self._values['ssl'] is None:
            return None
        elif self._values['ssl'] == 'start-tls':
            return 'start-tls'
        return flatten_boolean(self._values['ssl'])
class ApiParameters(Parameters):
    """Parameters as read back from the BIG-IP REST API."""
    pass
class ModuleParameters(Parameters):
    """Parameters as supplied by the user to the Ansible module."""
    pass
class Changes(Parameters):
    """Container for a computed parameter diff, renderable as module output."""

    def to_return(self):
        """Return the returnable values as a plain dict for Ansible results."""
        result = {}
        try:
            for returnable in self.returnables:
                result[returnable] = getattr(self, returnable)
            result = self._filter_params(result)
        except Exception:
            # Deliberate best-effort (upstream F5 module pattern): if any
            # property raises, fall through and return whatever was
            # collected so far, possibly unfiltered.
            pass
        return result
class UsableChanges(Changes):
    """Changes normalized into the representation the F5 REST API expects.

    Boolean-ish module values ('yes'/'no') become the API's
    'enabled'/'disabled' or 'true'/'false' strings; 'start-tls' passes
    through untouched for ``ssl``.
    """

    @property
    def ssl_check_peer(self):
        flag = self._values['ssl_check_peer']
        if flag is None:
            return None
        return 'enabled' if flag == 'yes' else 'disabled'

    @property
    def fallback_to_local(self):
        flag = self._values['fallback_to_local']
        if flag is None:
            return None
        return 'true' if flag == 'yes' else 'false'

    @property
    def check_member_attr(self):
        flag = self._values['check_member_attr']
        if flag is None:
            return None
        return 'enabled' if flag == 'yes' else 'disabled'

    @property
    def ssl(self):
        value = self._values['ssl']
        if value is None or value == 'start-tls':
            return value
        return 'enabled' if value == 'yes' else 'disabled'
class ReportableChanges(Changes):
    """Changes normalized back into module-facing values for reporting."""

    @property
    def bind_password(self):
        # Never echo the password back in module results.
        return None

    @property
    def ssl_check_peer(self):
        return flatten_boolean(self._values['ssl_check_peer'])

    @property
    def check_member_attr(self):
        return flatten_boolean(self._values['check_member_attr'])

    @property
    def ssl(self):
        value = self._values['ssl']
        if value in (None, 'start-tls'):
            return value
        return flatten_boolean(value)
class Difference(object):
    """Decides, per parameter, whether the desired value differs from the
    device's current value.

    ``compare`` prefers a parameter-specific comparator (a property on this
    class) and falls back to plain inequality otherwise. A comparator
    returns the value to apply, or None when nothing needs to change.
    """

    def __init__(self, want, have=None):
        self.want = want
        self.have = have

    def compare(self, param):
        try:
            return getattr(self, param)
        except AttributeError:
            return self.__default(param)

    def __default(self, param):
        # Plain comparison: report the wanted value when it differs from the
        # current one, or when the device has no such attribute at all.
        wanted = getattr(self.want, param)
        try:
            current = getattr(self.have, param)
        except AttributeError:
            return wanted
        if wanted != current:
            return wanted

    @property
    def login_ldap_attr(self):
        # Special compare: empty string means "remove the setting".
        return cmp_str_with_none(self.want.login_ldap_attr, self.have.login_ldap_attr)

    @property
    def user_template(self):
        return cmp_str_with_none(self.want.user_template, self.have.user_template)

    @property
    def ssl_ca_cert(self):
        return cmp_str_with_none(self.want.ssl_ca_cert, self.have.ssl_ca_cert)

    @property
    def ssl_client_key(self):
        return cmp_str_with_none(self.want.ssl_client_key, self.have.ssl_client_key)

    @property
    def ssl_client_cert(self):
        return cmp_str_with_none(self.want.ssl_client_cert, self.have.ssl_client_cert)

    @property
    def bind_password(self):
        # Only push the password when the user asked for unconditional
        # updates; 'on_create' leaves an existing password alone.
        if self.want.update_password != 'always':
            return None
        if self.want.bind_password != self.have.bind_password:
            return self.want.bind_password
class ModuleManager(object):
    """Orchestrates create/update/delete of the LDAP auth config on a BIG-IP.

    The LDAP configuration lives at a fixed object name ('system-auth' in
    the Common partition) under /mgmt/tm/auth/ldap/; the active auth source
    and fallback flag live separately under /mgmt/tm/auth/source/.
    """

    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = kwargs.get('client', None)
        self.want = ModuleParameters(params=self.module.params)
        self.have = ApiParameters()
        self.changes = UsableChanges()

    def _set_changed_options(self):
        # On create, every explicitly supplied option counts as a change.
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = UsableChanges(params=changed)

    def _update_changed_options(self):
        # Diff wanted vs. current state; returns True when anything differs.
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                if isinstance(change, dict):
                    changed.update(change)
                else:
                    changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False

    def _announce_deprecations(self, result):
        # Forward any deprecation warnings collected in the result dict.
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            self.client.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )

    def update_auth_source_on_device(self, source):
        """Set the system authentication source.

        Configuring the LDAP settings is only one step of setting up an
        auth source; the system must also be told which source to use.
        Before the LDAP configuration can be deleted (``state=absent``),
        the auth source must first point at something else -- the built-in
        'local' source is used for that (see ``remove``).

        Arguments:
            source (string): The auth source to set on the device.
        """
        params = dict(
            type=source
        )
        uri = 'https://{0}:{1}/mgmt/tm/auth/source/'.format(
            self.client.provider['server'],
            self.client.provider['server_port']
        )
        resp = self.client.api.patch(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)

    def update_fallback_on_device(self, fallback):
        """PATCH the auth source's fallback flag ('true'/'false' string)."""
        params = dict(
            fallback=fallback
        )
        uri = 'https://{0}:{1}/mgmt/tm/auth/source/'.format(
            self.client.provider['server'],
            self.client.provider['server_port']
        )
        resp = self.client.api.patch(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)

    def exec_module(self):
        """Main entry: apply the desired state and return the result dict."""
        changed = False
        result = dict()
        state = self.want.state

        if state == "present":
            changed = self.present()
        elif state == "absent":
            changed = self.absent()

        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        return result

    def present(self):
        if self.exists():
            return self.update()
        else:
            return self.create()

    def absent(self):
        if self.exists():
            return self.remove()
        return False

    def should_update(self):
        result = self._update_changed_options()
        if result:
            return True
        return False

    def update(self):
        """Update the existing config; returns True when something changed."""
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        if self.module.check_mode:
            return True
        self.update_on_device()
        # The fallback flag lives on /auth/source, not on the LDAP object,
        # so it is pushed separately.
        if self.want.fallback_to_local == 'yes':
            self.update_fallback_on_device('true')
        elif self.want.fallback_to_local == 'no':
            self.update_fallback_on_device('false')
        return True

    def remove(self):
        if self.module.check_mode:
            return True
        # The auth source must be switched away from LDAP before the LDAP
        # configuration object can be deleted.
        self.update_auth_source_on_device('local')
        self.remove_from_device()
        if self.exists():
            raise F5ModuleError("Failed to delete the resource.")
        return True

    def create(self):
        self._set_changed_options()
        if self.module.check_mode:
            return True
        self.create_on_device()
        if self.want.fallback_to_local == 'yes':
            self.update_fallback_on_device('true')
        elif self.want.fallback_to_local == 'no':
            self.update_fallback_on_device('false')
        return True

    def exists(self):
        """Return True when the 'system-auth' LDAP object exists on device."""
        uri = "https://{0}:{1}/mgmt/tm/auth/ldap/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name('Common', 'system-auth')
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError:
            return False
        if resp.status == 404 or 'code' in response and response['code'] == 404:
            return False
        return True

    def create_on_device(self):
        params = self.changes.api_params()
        # The LDAP system-auth object has a fixed name and partition.
        params['name'] = 'system-auth'
        params['partition'] = 'Common'
        uri = "https://{0}:{1}/mgmt/tm/auth/ldap/".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 409]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return True

    def update_on_device(self):
        params = self.changes.api_params()
        if not params:
            return
        uri = "https://{0}:{1}/mgmt/tm/auth/ldap/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name('Common', 'system-auth')
        )
        resp = self.client.api.patch(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)

    def remove_from_device(self):
        uri = "https://{0}:{1}/mgmt/tm/auth/ldap/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name('Common', 'system-auth')
        )
        response = self.client.api.delete(uri)
        if response.status == 200:
            return True
        raise F5ModuleError(response.content)

    def read_current_from_device(self):
        """Read the LDAP object plus the auth source's fallback flag."""
        uri = "https://{0}:{1}/mgmt/tm/auth/ldap/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name('Common', 'system-auth')
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        result = ApiParameters(params=response)
        # Merge in the fallback flag, which lives on a different endpoint.
        uri = 'https://{0}:{1}/mgmt/tm/auth/source/'.format(
            self.client.provider['server'],
            self.client.provider['server_port']
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        result.update({'fallback': response['fallback']})
        return result
class ArgumentSpec(object):
    """Assembles the AnsibleModule argument specification for this module."""

    def __init__(self):
        self.supports_check_mode = True
        argument_spec = {
            'servers': {'type': 'list'},
            'port': {'type': 'int'},
            'remote_directory_tree': {},
            'scope': {
                'choices': ['sub', 'one', 'base'],
            },
            'bind_dn': {},
            'bind_password': {'no_log': True},
            'user_template': {},
            'check_member_attr': {'type': 'bool'},
            'ssl': {
                'choices': ['yes', 'no', 'start-tls'],
            },
            'ssl_ca_cert': {},
            'ssl_client_key': {},
            'ssl_client_cert': {},
            'ssl_check_peer': {'type': 'bool'},
            'login_ldap_attr': {},
            'fallback_to_local': {'type': 'bool'},
            'update_password': {
                'default': 'always',
                'choices': ['always', 'on_create'],
            },
            'state': {'default': 'present', 'choices': ['absent', 'present']},
        }
        # Start from the common F5 spec, then layer the module's own options.
        self.argument_spec = {}
        self.argument_spec.update(f5_argument_spec)
        self.argument_spec.update(argument_spec)
def main():
    """Module entry point: build the spec, run the manager, exit via Ansible."""
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
    )

    client = F5RestClient(**module.params)

    try:
        mm = ModuleManager(module=module, client=client)
        results = mm.exec_module()
        cleanup_tokens(client)
        exit_json(module, results, client)
    except F5ModuleError as ex:
        # Always release the REST auth token, even on failure.
        cleanup_tokens(client)
        fail_json(module, ex, client)


if __name__ == '__main__':
    main()
| gpl-3.0 |
vicnet/weboob | modules/audioaddict/test.py | 2 | 1771 | # -*- coding: utf-8 -*-
# Copyright(C) 2013 Pierre Mazière
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.test import BackendTest
from weboob.capabilities.radio import Radio
from weboob.tools.value import Value
class AudioAddictTest(BackendTest):
    """Integration tests for the audioaddict backend (requires network access)."""

    MODULE = 'audioaddict'

    def setUp(self):
        """Provide a default configuration when none was supplied by the tester."""
        if not self.is_backend_configured():
            self.backend.config['networks'] = Value(value='RockRadio RadioTunes JazzRadio DI ClassicalRadio')
            self.backend.config['quality'] = Value(value='l')

    def test_audioaddict(self):
        """Smoke-test radio listing, search, and fetching one radio's details."""
        # Listing all radios must yield at least one result.
        ls = list(self.backend.iter_resources((Radio, ), []))
        self.assertTrue(len(ls) > 0)

        # Searching must also return something for a common keyword.
        search = list(self.backend.iter_radios_search('classic'))
        self.assertTrue(len(search) > 0)

        # A known radio id must come back fully populated, including its
        # currently-playing metadata and at least one stream.
        radio = self.backend.get_radio('classicrock.RockRadio')
        self.assertTrue(radio.title)
        self.assertTrue(radio.description)
        self.assertTrue(radio.current.who)
        self.assertTrue(radio.current.what)
        self.assertTrue(radio.streams[0].url)
        self.assertTrue(radio.streams[0].title)
| lgpl-3.0 |
tareqalayan/ansible | lib/ansible/modules/cloud/ovirt/ovirt_affinity_label.py | 78 | 7067 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_affinity_label
short_description: Module to manage affinity labels in oVirt/RHV
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "This module manage affinity labels in oVirt/RHV. It can also manage assignments
of those labels to hosts and VMs."
options:
name:
description:
- "Name of the affinity label to manage."
required: true
state:
description:
- "Should the affinity label be present or absent."
choices: ['present', 'absent']
default: present
cluster:
description:
- "Name of the cluster where vms and hosts resides."
vms:
description:
- "List of the VMs names, which should have assigned this affinity label."
hosts:
description:
- "List of the hosts names, which should have assigned this affinity label."
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Create(if not exists) and assign affinity label to vms vm1 and vm2 and host host1
- ovirt_affinity_label:
name: mylabel
cluster: mycluster
vms:
- vm1
- vm2
hosts:
- host1
# To detach all VMs from label
- ovirt_affinity_label:
name: mylabel
cluster: mycluster
vms: []
# Remove affinity label
- ovirt_affinity_label:
state: absent
name: mylabel
'''
RETURN = '''
id:
description: ID of the affinity label which is managed
returned: On success if affinity label is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
affinity_label:
description: "Dictionary of all the affinity label attributes. Affinity label attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/affinity_label."
type: dict
returned: On success if affinity label is found.
'''
import traceback
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from collections import defaultdict
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
create_connection,
ovirt_full_argument_spec,
)
class AffinityLabelsModule(BaseModule):
    """oVirt module logic for affinity labels and their VM/host assignments."""

    def build_entity(self):
        """Build the SDK entity used to create/update the label."""
        return otypes.AffinityLabel(name=self._module.params['name'])

    def post_create(self, entity):
        # After creating the label, immediately sync its VM/host assignments.
        self.update_check(entity)

    def pre_remove(self, entity):
        # Detach the label from all VMs and hosts before it is removed.
        self._module.params['vms'] = []
        self._module.params['hosts'] = []
        self.update_check(entity)

    def _update_label_assignments(self, entity, name, label_obj_type):
        """Sync the label's assignments for one collection.

        ``name`` is 'vms' or 'hosts' (also the module parameter name), and
        ``label_obj_type`` the matching SDK type (otypes.Vm / otypes.Host).
        A ``None`` module parameter means "leave assignments untouched".
        """
        objs_service = getattr(self._connection.system_service(), '%s_service' % name)()
        if self._module.params[name] is not None:
            # Map currently-labeled entity names to their ids, optionally
            # restricted to the requested cluster.
            objs = self._connection.follow_link(getattr(entity, name))
            objs_names = defaultdict(list)
            for obj in objs:
                labeled_entity = objs_service.service(obj.id).get()
                if self._module.params['cluster'] is None:
                    objs_names[labeled_entity.name].append(obj.id)
                elif self._connection.follow_link(labeled_entity.cluster).name == self._module.params['cluster']:
                    objs_names[labeled_entity.name].append(obj.id)

            # Attach entities that are requested but not yet labeled.
            for obj in self._module.params[name]:
                if obj not in objs_names:
                    for obj_id in objs_service.list(
                        search='name=%s and cluster=%s' % (obj, self._module.params['cluster'])
                    ):
                        label_service = getattr(self._service.service(entity.id), '%s_service' % name)()
                        if not self._module.check_mode:
                            # name[:-1] strips the plural 's' to get the
                            # singular keyword ('vm'/'host') the SDK expects.
                            label_service.add(**{
                                name[:-1]: label_obj_type(id=obj_id.id)
                            })
                        # Set even in check mode so the predicted 'changed'
                        # status is reported.
                        self.changed = True

            # Detach entities that are labeled but no longer requested.
            for obj in objs_names:
                if obj not in self._module.params[name]:
                    label_service = getattr(self._service.service(entity.id), '%s_service' % name)()
                    if not self._module.check_mode:
                        for obj_id in objs_names[obj]:
                            label_service.service(obj_id).remove()
                    self.changed = True

    def update_check(self, entity):
        """Sync both VM and host assignments; always reports success."""
        self._update_label_assignments(entity, 'vms', otypes.Vm)
        self._update_label_assignments(entity, 'hosts', otypes.Host)
        return True
def main():
    """Module entry point: create/remove the affinity label and reconcile
    its VM/host assignments, then exit via the Ansible JSON protocol."""
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=['present', 'absent'],
            default='present',
        ),
        cluster=dict(default=None),
        name=dict(default=None, required=True),
        vms=dict(default=None, type='list'),
        hosts=dict(default=None, type='list'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=[
            # Assigning labels requires a cluster to search VMs/hosts in.
            ('state', 'present', ['cluster']),
        ],
    )
    if module._name == 'ovirt_affinity_labels':
        module.deprecate("The 'ovirt_affinity_labels' module is being renamed 'ovirt_affinity_label'", version=2.8)
    check_sdk(module)
    # Pre-bind so the finally clause cannot raise NameError when the auth
    # pop or create_connection() itself fails (the original code would
    # mask the real error with an UnboundLocalError in that case).
    auth = None
    connection = None
    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        affinity_labels_service = connection.system_service().affinity_labels_service()
        affinity_labels_module = AffinityLabelsModule(
            connection=connection,
            module=module,
            service=affinity_labels_service,
        )
        state = module.params['state']
        if state == 'present':
            ret = affinity_labels_module.create()
        elif state == 'absent':
            ret = affinity_labels_module.remove()
        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Only close a connection that was actually established; log out
        # unless a pre-existing SSO token was supplied by the caller.
        if connection is not None:
            connection.close(logout=auth.get('token') is None)
if __name__ == "__main__":
    main()
| gpl-3.0 |
MikeWinter/bio-data-repository | bdr/formats/simple/views.py | 1 | 12531 | """
This module defines classes for displaying and editing simple type formats.
"""
try:
# noinspection PyPep8Naming
import cPickle as pickle
except ImportError:
import pickle
import os.path
import unicodedata
from django.forms.formsets import formset_factory
from django.http import StreamingHttpResponse
from django.shortcuts import redirect
from ...models.simple import SimpleFormat, SimpleRevision
from ...views.formats import FormatDetailView, FormatCreateView, FormatEditView, FormatDeleteView
from ...views.revisions import RevisionExportView
from .forms import (SimpleFormatForm, SimpleFormatExportOptionsForm, SimpleFormatFieldForm, SimpleFormatFieldFormSet,
SimpleFormatFieldSelectionForm, SimpleFormatFieldSelectionFormSet)
# Public API of this views module.
# NOTE: the previous value, ["Record", "Reader", "Writer"], listed names that
# are not defined anywhere in this module (apparently copy-pasted from the
# format-parsing module); a star-import would raise AttributeError. List the
# view classes this module actually defines instead.
__all__ = ["SimpleFormatDetailView", "SimpleFormatCreateView", "SimpleFormatEditView",
           "SimpleFormatDeleteView", "SimpleRevisionExportView"]
__author__ = "Michael Winter (mail@michael-winter.me.uk)"
__license__ = """
Biological Dataset Repository: data archival and retrieval.
Copyright (C) 2015 Michael Winter
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
class SimpleFormatDetailView(FormatDetailView):
    """
    Display the details of a simple format, including its list of fields.

    Special characters used by the format (comment, escape, quote and
    separator) are rendered as human-readable descriptions.
    """
    # Friendly names for ASCII control characters that have no Unicode name.
    CONTROL_NAMES = {
        u"\a": "Bell (BEL)",
        u"\b": "Backspace (BS)",
        u"\f": "Form feed (FF)",
        u"\n": "Line feed (LF)",
        u"\r": "Carriage return (CR)",
        u"\t": "Horizontal tab (TAB)",
        u"\v": "Vertical tab (VT)",
    }
    model = SimpleFormat
    template_name = "bdr/formats/simple/detail.html"
    def get_context_data(self, **kwargs):
        """
        Return the template context for this view.

        In addition to the variables supplied by the parent class, the
        context contains:

        * ``fields`` - a list of fields used by this instance
        * ``comment``/``escape``/``quote``/``separator`` - human-readable
          descriptions of the corresponding special characters, added only
          when the character is a symbol rather than a letter or digit

        :param kwargs: A mapping of extra data available for use in templates.
        :type kwargs: dict of str
        :return: A dictionary of template variables and values.
        :rtype: dict of str
        """
        context = super(SimpleFormatDetailView, self).get_context_data(**kwargs)
        for attr in ["comment", "escape", "quote", "separator"]:
            special = getattr(self.object, attr, "")
            if special != "" and self.is_symbol(special):
                context[attr] = self.to_description(special)
        context["fields"] = self.object.fields
        return context
    @classmethod
    def is_control(cls, character):
        """
        Return ``True`` for characters in the Unicode control or separator
        categories (major category ``C`` or ``Z``).

        :param character: The character to test.
        :type character: unicode
        :return: ``True`` if the character is a control character.
        :rtype: bool
        """
        major = unicodedata.category(character)[0]
        return major in "ZC"
    @classmethod
    def is_symbol(cls, character):
        """
        Return ``True`` for any character that is neither a letter nor a
        number (i.e. not in the Unicode ``L`` or ``N`` major categories).

        :param character: The character to test.
        :type character: unicode
        :return: ``True`` if the character is a symbol.
        :rtype: bool
        """
        major = unicodedata.category(character)[0]
        return major not in ("L", "N")
    @classmethod
    def to_description(cls, character):
        """
        Return a string that describes the given symbol character.

        :param character: The symbol to describe.
        :type character: unicode
        :return: The description.
        :rtype: unicode
        """
        try:
            name = unicodedata.name(character).capitalize()
        except ValueError:
            # Unnamed character: fall back to a friendly control name or,
            # failing that, its raw code point.
            fallback = "U+{:04X}".format(ord(character))
            name = cls.CONTROL_NAMES.get(character, fallback)
        if cls.is_control(character):
            # Control characters cannot usefully be echoed back literally.
            return name
        return u"{:s} ({:s})".format(name, character)
class SimpleFormatCreateView(FormatCreateView):
    """Used to create a new instance of the simple format type."""
    # Two-step wizard: general options first, then the field definitions.
    form_list = [
        ("options", SimpleFormatForm),
        ("fields", formset_factory(SimpleFormatFieldForm, formset=SimpleFormatFieldFormSet, can_delete=True,
                                   can_order=True)),
    ]
    templates = {
        "options": "bdr/formats/simple/create_options.html",
        "fields": "bdr/formats/simple/create_fields.html",
    }
    def get_form(self, step=None, data=None, files=None):
        """
        Construct a form for a given ``step``. If no ``step`` is defined, the
        current step will be determined automatically.
        The form will be initialized using the ``data`` argument to pre-fill
        the new form. If needed, instance or queryset (for ``ModelForm`` or
        ``ModelFormSet``) will be added too.
        :param step: The name of the current step.
        :type step: str
        :param data: A dictionary containing request data received from the
                     user.
        :type data: dict of unicode
        :param files: A dictionary containing file data received from the user.
        :type files: dict of str
        :return: The ``Form`` instance for this step.
        :rtype: Form
        """
        form = super(SimpleFormatCreateView, self).get_form(step, data, files)
        # The "add" operation on the fields step appends an empty field row
        # without advancing the wizard.
        if step == "fields" and self.request.POST.get("operation") == "add":
            form.add_extra_form()
        return form
    def done(self, form_list, **kwargs):
        """
        Add the format definition to the repository and redirect to its new
        summary page.
        :param form_list: A list of the forms presented to the user.
        :type form_list: list of django.forms.Form
        :param kwargs: The keyword arguments extracted from the URL route.
        :type kwargs: dict of str
        :return: A redirect response to a view of the format.
        :rtype: HttpResponseRedirect
        """
        options_form, fields_form = form_list
        # Defer the initial save so metadata and fields can be attached to
        # the instance before it is written to the database.
        instance = options_form.save(commit=False)
        instance.entry_point_name = "simple"
        for field, value in options_form.cleaned_metadata.items():
            setattr(instance, field, value)
        # ordered_forms respects the user's drag-and-drop field ordering.
        instance.fields = [field.cleaned_definition for field in fields_form.ordered_forms]
        instance.save()
        # Many-to-many relations can only be saved once the instance has a
        # primary key.
        options_form.save_m2m()
        return redirect(instance)
class SimpleFormatEditView(FormatEditView):
    """Used to edit an existing instance of the simple format type."""
    # Same two-step wizard shape as the create view, but bound to an
    # existing SimpleFormat instance.
    form_list = [
        ("options", SimpleFormatForm),
        ("fields", formset_factory(SimpleFormatFieldForm, formset=SimpleFormatFieldFormSet, can_delete=True,
                                   can_order=True)),
    ]
    model = SimpleFormat
    templates = {
        "options": "bdr/formats/simple/edit_options.html",
        "fields": "bdr/formats/simple/edit_fields.html",
    }
    def get_form_initial(self, step):
        """
        Return a dictionary which will define the initial data for the form for
        ``step``. If no initial data was provided while initializing the form
        wizard, a empty dictionary will be returned.
        :param step: The name of the current step.
        :type step: str
        :return: The initial form data.
        :rtype: dict of unicode
        """
        # Pre-populate the field formset from the stored definition.
        if step == "fields":
            return self.object.fields
        return super(SimpleFormatEditView, self).get_form_initial(step)
    def get_form_instance(self, step):
        """
        Return a model instance which will be passed to the form for ``step``.
        If no instance object was provided while initializing the form wizard,
        None will be returned.
        :param step: The name of the current step.
        :type step: str
        :return: The model object.
        :rtype: Model
        """
        # The options ModelForm edits the format instance itself.
        if step == "options":
            return self.object
        return super(SimpleFormatEditView, self).get_form_instance(step)
    def done(self, form_list, **kwargs):
        """
        Modify the format definition and redirect to its summary page.
        :param form_list: A list of the forms presented to the user.
        :type form_list: list of django.forms.Form
        :param kwargs: The keyword arguments extracted from the URL route.
        :type kwargs: dict of str
        :return: A redirect response to a view of the format.
        :rtype: HttpResponseRedirect
        """
        options_form, fields_form = form_list
        # Skip the database round-trip entirely when nothing changed.
        if options_form.has_changed() or fields_form.has_changed():
            self.object = options_form.save(commit=False)
            for field, value in options_form.cleaned_metadata.items():
                setattr(self.object, field, value)
            self.object.fields = [field.cleaned_definition for field in fields_form.ordered_forms]
            self.object.save()
            options_form.save_m2m()
        return redirect(self.object)
class SimpleFormatDeleteView(FormatDeleteView):
    """
    Ask the user to confirm removal of an existing simple format.

    Once the format has been deleted, any files and revisions that used it
    are subsequently treated as raw, unformatted data.
    """
    template_name = "bdr/formats/confirm_delete.html"
class SimpleRevisionExportView(RevisionExportView):
    """
    This view streams a compressed file to the client.
    File compression is performed on the fly.
    """
    # Export wizard: conversion options first, then which fields to include.
    form_list = [
        ("options", SimpleFormatExportOptionsForm),
        ("fields", formset_factory(SimpleFormatFieldSelectionForm, formset=SimpleFormatFieldSelectionFormSet, extra=0)),
    ]
    model = SimpleRevision
    templates = {
        "options": "bdr/revisions/simple/export_options.html",
        "fields": "bdr/revisions/simple/export_fields.html",
    }
    def done(self, form_list, **kwargs):
        """
        Export this revision.
        :param form_list: A list of the forms presented to the user.
        :type form_list: list of django.forms.Form
        :param kwargs: The keyword arguments extracted from the URL route.
        :type kwargs: dict of str
        :return: The contents of this revision.
        :rtype: StreamingHttpResponse
        """
        options_form, fields_formset \
            = form_list  # type: SimpleFormatExportOptionsForm, SimpleFormatFieldSelectionFormSet
        field_names = [field["name"] for field in fields_formset.selected]
        options = options_form.cleaned_metadata
        # convert() yields the output incrementally so the response can be
        # streamed without materialising the whole export in memory.
        iterator = self.object.format.convert(self.object.data, field_names, **options)
        response = StreamingHttpResponse(iterator, content_type="application/octet-stream")
        response["Content-Disposition"] = "attachment; filename={:s}".format(os.path.basename(self.object.file.name))
        return response
    def get_form_initial(self, step):
        """
        Return a dictionary which will define the initial data for the form for
        ``step``. If no initial data was provided while initializing the form
        wizard, a empty dictionary will be returned.
        :param step: The name of the current step.
        :type step: str
        :return: The initial form data.
        :rtype: dict of unicode
        """
        # Offer every field defined by the revision's format for selection.
        if step == "fields":
            return self.object.format.fields
        return super(SimpleRevisionExportView, self).get_form_initial(step)
    def get_form_instance(self, step):
        """
        Return a model instance which will be passed to the form for ``step``.
        If no instance object was provided while initializing the form wizard,
        None will be returned.
        :param step: The name of the current step.
        :type step: str
        :return: The model object.
        :rtype: Model
        """
        # The options form is bound to the format, not the revision.
        if step == "options":
            return self.object.format
        return super(SimpleRevisionExportView, self).get_form_instance(step)
| gpl-2.0 |
compops/gpo-abc2015 | scripts-paper/example1-gposmc.py | 2 | 5416 | ##############################################################################
##############################################################################
# Estimating the volatility of synthetic data
# using a stochastic volatility (SV) model with Gaussian log-returns.
#
# The SV model is inferred using the GPO-SMC algorithm.
#
# For more details, see https://github.com/compops/gpo-abc2015
#
# (c) 2016 Johan Dahlin
# liu (at) johandahlin.com
#
##############################################################################
##############################################################################
import sys
# NOTE(review): hard-coded VirtualBox shared-folder path -- this script only
# runs unmodified on the author's machine; adjust for your checkout.
sys.path.insert(0, '/media/sf_home/src/gpo-abc2015')
# Setup files
output_file = 'results/example1/example1-gposmc'
# Load packages and helpers
import numpy as np
import pandas as pd
# NOTE(review): plt appears unused in this script -- presumably kept for
# interactive plotting; confirm before removing.
import matplotlib.pylab as plt
from state import smc
from para import gpo_gpy
from models import hwsv_4parameters
from misc.portfolio import ensure_dir
# Set the seed for re-producibility
np.random.seed(87655678)
##############################################################################
# Arrange the data structures
##############################################################################
sm = smc.smcSampler()
gpo = gpo_gpy.stGPO()
##############################################################################
# Setup the system
##############################################################################
# NOTE: this rebinds the name 'sys' from the stdlib module to the state-space
# model object; the sys module is not used again below.
sys = hwsv_4parameters.ssm()
sys.par = np.zeros((sys.nPar, 1))
sys.par[0] = 0.20
sys.par[1] = 0.96
sys.par[2] = 0.15
sys.par[3] = 0.00
sys.T = 500
sys.xo = 0.0
sys.version = "standard"
sys.transformY = "none"
##############################################################################
# Generate data
##############################################################################
sys.generateData(
    fileName='data/hwsv_4parameters_syntheticT500.csv', order="xy")
##############################################################################
# Setup the parameters
##############################################################################
# Inference model: same structure as the data-generating system, but only the
# first three parameters are estimated.
th = hwsv_4parameters.ssm()
th.nParInference = 3
th.copyData(sys)
th.version = "standard"
th.transformY = "none"
##############################################################################
# Setup the GPO algorithm
##############################################################################
# Bounds and initial values are given for all four parameters; they are
# sliced to th.nParInference below.
settings = {'gpo_initPar': np.array([0.00, 0.95, 0.50, 1.80]),
            'gpo_upperBounds': np.array([1.00, 1.00, 1.00, 2.00]),
            'gpo_lowerBounds': np.array([0.00, 0.00, 0.01, 1.20]),
            'gpo_estHypParInterval': 25,
            'gpo_preIter': 50,
            'gpo_maxIter': 450,
            'smc_weightdist': "gaussian",
            'smc_tolLevel': 0.10,
            'smc_nPart': 2000
            }
gpo.initPar = settings['gpo_initPar'][0:th.nParInference]
gpo.upperBounds = settings['gpo_upperBounds'][0:th.nParInference]
gpo.lowerBounds = settings['gpo_lowerBounds'][0:th.nParInference]
gpo.maxIter = settings['gpo_maxIter']
gpo.preIter = settings['gpo_preIter']
gpo.EstimateHyperparametersInterval = settings['gpo_estHypParInterval']
gpo.verbose = True
gpo.jitteringCovariance = 0.01 * np.diag(np.ones(th.nParInference))
gpo.preSamplingMethod = "latinHyperCube"
gpo.EstimateThHatEveryIteration = False
gpo.EstimateHessianEveryIteration = False
##############################################################################
# Setup the SMC algorithm
##############################################################################
# Bootstrap particle filter with the particle count from settings.
sm.filter = sm.bPF
sm.nPart = settings['smc_nPart']
sm.genInitialState = True
sm.xo = sys.xo
th.xo = sys.xo
##############################################################################
# GPO using the Particle filter
##############################################################################
# Run the GPO routine
gpo.bayes(sm, sys, th)
# Estimate inverse Hessian
gpo.estimateHessian()
##############################################################################
# Write results to file
##############################################################################
ensure_dir(output_file + '.csv')
# Model parameters
fileOut = pd.DataFrame(gpo.thhat)
fileOut.to_csv(output_file + '-model.csv')
# Inverse Hessian estimate
fileOut = pd.DataFrame(gpo.invHessianEstimate)
fileOut.to_csv(output_file + '-modelvar.csv')
##############################################################################
# GPO using the Particle filter (comparison with SPSA)
##############################################################################
# Set the seed for re-producibility
np.random.seed(87655678)
# Run the GPO routine: second pass with a larger iteration budget and
# per-iteration parameter estimates, for comparison against SPSA.
settings['gpo_maxIter'] = 700 - settings['gpo_preIter']
gpo.maxIter = settings['gpo_maxIter']
gpo.EstimateThHatEveryIteration = True
gpo.bayes(sm, sys, th)
# Write output
gpo.writeToFile(sm, fileOutName=output_file + '-run.csv')
##############################################################################
##############################################################################
# End of file
##############################################################################
##############################################################################
| mit |
igor-toga/local-snat | neutron/tests/unit/db/test_l3_dvr_db.py | 1 | 31460 | # Copyright (c) 2014 OpenStack Foundation, all rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from neutron_lib import constants as l3_const
from neutron_lib import exceptions
from oslo_utils import uuidutils
import testtools
from neutron.common import constants as n_const
from neutron import context
from neutron.db import agents_db
from neutron.db import common_db_mixin
from neutron.db import l3_agentschedulers_db
from neutron.db import l3_dvr_db
from neutron.extensions import portbindings
from neutron import manager
from neutron.plugins.common import constants as plugin_const
from neutron.tests.unit.db import test_db_base_plugin_v2
# Short alias used throughout the tests to generate fresh UUIDs.
_uuid = uuidutils.generate_uuid
class FakeL3Plugin(common_db_mixin.CommonDbMixin,
                   l3_dvr_db.L3_NAT_with_dvr_db_mixin,
                   l3_agentschedulers_db.L3AgentSchedulerDbMixin,
                   agents_db.AgentDbMixin):
    """Minimal plugin composed of the DVR L3 DB mixins under test."""
    pass
class L3DvrTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
    """Unit tests for the DVR (distributed virtual router) L3 DB mixin."""
    def setUp(self):
        # Use the ml2 core plugin; the L3 mixin under test is instantiated
        # directly rather than loaded as a service plugin.
        super(L3DvrTestCase, self).setUp(plugin='ml2')
        self.core_plugin = manager.NeutronManager.get_plugin()
        self.ctx = context.get_admin_context()
        self.mixin = FakeL3Plugin()
    def _create_router(self, router):
        """Persist *router* (a dict of attributes) and return the DB object."""
        with self.ctx.session.begin(subtransactions=True):
            return self.mixin._create_router_db(self.ctx, router, 'foo_tenant')
    def _test__create_router_db(self, expected=False, distributed=None):
        # Create a router with the given 'distributed' flag (or omit it when
        # None) and check the resulting extra-attribute value.
        router = {'name': 'foo_router', 'admin_state_up': True}
        if distributed is not None:
            router['distributed'] = distributed
        result = self._create_router(router)
        self.assertEqual(expected, result.extra_attributes['distributed'])
    def test_create_router_db_default(self):
        self._test__create_router_db(expected=False)
    def test_create_router_db_centralized(self):
        self._test__create_router_db(expected=False, distributed=False)
    def test_create_router_db_distributed(self):
        self._test__create_router_db(expected=True, distributed=True)
    def test__validate_router_migration_on_router_update(self):
        # A plain rename of a distributed router is not a migration and
        # must pass validation (returns None).
        router = {
            'name': 'foo_router',
            'admin_state_up': True,
            'distributed': True
        }
        router_db = self._create_router(router)
        self.assertIsNone(self.mixin._validate_router_migration(
            self.ctx, router_db, {'name': 'foo_router_2'}))
    def test__validate_router_migration_raise_error(self):
        # Downgrading distributed -> centralized is rejected.
        router = {
            'name': 'foo_router',
            'admin_state_up': True,
            'distributed': True
        }
        router_db = self._create_router(router)
        self.assertRaises(exceptions.BadRequest,
                          self.mixin._validate_router_migration,
                          self.ctx, router_db, {'distributed': False})
    def test_upgrade_active_router_to_distributed_validation_failure(self):
        # Migrating to distributed while the router is administratively up
        # must fail validation.
        router = {'name': 'foo_router', 'admin_state_up': True}
        router_db = self._create_router(router)
        update = {'distributed': True}
        self.assertRaises(exceptions.BadRequest,
                          self.mixin._validate_router_migration,
                          self.ctx, router_db, update)
    def test_update_router_db_centralized_to_distributed(self):
        # With validation and agent handling mocked out, updating the router
        # flips the 'distributed' extra attribute exactly once.
        router = {'name': 'foo_router', 'admin_state_up': True}
        agent = {'id': _uuid()}
        distributed = {'distributed': True}
        router_db = self._create_router(router)
        router_id = router_db['id']
        self.assertFalse(router_db.extra_attributes.distributed)
        self.mixin._get_router = mock.Mock(return_value=router_db)
        self.mixin._validate_router_migration = mock.Mock()
        self.mixin._update_distributed_attr = mock.Mock()
        self.mixin.list_l3_agents_hosting_router = mock.Mock(
            return_value={'agents': [agent]})
        self.mixin._unbind_router = mock.Mock()
        router_db = self.mixin._update_router_db(
            self.ctx, router_id, distributed)
        # Assert that the DB value has changed
        self.assertTrue(router_db.extra_attributes.distributed)
        self.assertEqual(1,
                         self.mixin._update_distributed_attr.call_count)
    def _test_get_device_owner(self, is_distributed=False,
                               expected=l3_const.DEVICE_OWNER_ROUTER_INTF,
                               pass_router_id=True):
        """Check the device owner derived for a router.

        The router may be passed either by id or as the DB object
        (both forms are accepted by _get_device_owner).
        """
        router = {
            'name': 'foo_router',
            'admin_state_up': True,
            'distributed': is_distributed
        }
        router_db = self._create_router(router)
        router_pass = router_db['id'] if pass_router_id else router_db
        with mock.patch.object(self.mixin, '_get_router') as f:
            f.return_value = router_db
            result = self.mixin._get_device_owner(self.ctx, router_pass)
            self.assertEqual(expected, result)
    def test_get_device_owner_by_router_id(self):
        self._test_get_device_owner()
    def test__get_device_owner_centralized(self):
        self._test_get_device_owner(pass_router_id=False)
    def test__get_device_owner_distributed(self):
        # Distributed routers use the DVR interface device owner.
        self._test_get_device_owner(
            is_distributed=True,
            expected=l3_const.DEVICE_OWNER_DVR_INTERFACE,
            pass_router_id=False)
    def _test__is_distributed_router(self, router, expected):
        result = l3_dvr_db.is_distributed_router(router)
        self.assertEqual(expected, result)
    def test__is_distributed_router_by_db_object(self):
        # NOTE(review): this test contains no assertion -- it only verifies
        # that _get_device_owner accepts a DB object without raising;
        # presumably it was meant to exercise is_distributed_router too.
        router = {'name': 'foo_router', 'admin_state_up': True}
        router_db = self._create_router(router)
        self.mixin._get_device_owner(mock.ANY, router_db)
    def test__is_distributed_router_default(self):
        # Routers without the flag default to centralized.
        router = {'id': 'foo_router_id'}
        self._test__is_distributed_router(router, False)
    def test__is_distributed_router_centralized(self):
        router = {'id': 'foo_router_id', 'distributed': False}
        self._test__is_distributed_router(router, False)
    def test__is_distributed_router_distributed(self):
        router = {'id': 'foo_router_id', 'distributed': True}
        self._test__is_distributed_router(router, True)
    def test__get_agent_gw_ports_exist_for_network(self):
        # The lookup must filter by network, owning agent and the
        # agent-gateway device owner.
        with mock.patch.object(manager.NeutronManager, 'get_plugin') as gp:
            plugin = mock.Mock()
            gp.return_value = plugin
            plugin.get_ports.return_value = []
            self.mixin._get_agent_gw_ports_exist_for_network(
                self.ctx, 'network_id', 'host', 'agent_id')
        plugin.get_ports.assert_called_with(self.ctx, {
            'network_id': ['network_id'],
            'device_id': ['agent_id'],
            'device_owner': [l3_const.DEVICE_OWNER_AGENT_GW]})
    def _test_prepare_direct_delete_dvr_internal_ports(self, port):
        """Deleting a DVR-internal service port must raise ServicePortInUse."""
        with mock.patch.object(manager.NeutronManager, 'get_plugin') as gp:
            plugin = mock.Mock()
            gp.return_value = plugin
            plugin.get_port.return_value = port
            self.mixin._router_exists = mock.Mock(return_value=True)
            self.assertRaises(exceptions.ServicePortInUse,
                              self.mixin.prevent_l3_port_deletion,
                              self.ctx,
                              port['id'])
    def test_prevent_delete_floatingip_agent_gateway_port(self):
        # Floating-IP agent gateway ports are protected from deletion.
        port = {
            'id': 'my_port_id',
            'fixed_ips': mock.ANY,
            'device_id': 'r_id',
            'device_owner': l3_const.DEVICE_OWNER_AGENT_GW
        }
        self._test_prepare_direct_delete_dvr_internal_ports(port)
    def test_prevent_delete_csnat_port(self):
        # Centralized-SNAT ports are protected from deletion.
        port = {
            'id': 'my_port_id',
            'fixed_ips': mock.ANY,
            'device_id': 'r_id',
            'device_owner': l3_const.DEVICE_OWNER_ROUTER_SNAT
        }
        self._test_prepare_direct_delete_dvr_internal_ports(port)
    def test__create_gw_port_with_no_gateway(self):
        # When no external gateway exists yet, SNAT interface ports must not
        # be created for a distributed router.
        router = {
            'name': 'foo_router',
            'admin_state_up': True,
            'distributed': True,
        }
        router_db = self._create_router(router)
        router_id = router_db['id']
        self.assertTrue(router_db.extra_attributes.distributed)
        with mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin,
                               '_create_gw_port'),\
                mock.patch.object(
                    self.mixin,
                    '_create_snat_intf_ports_if_not_exists') as cs:
            self.mixin._create_gw_port(
                self.ctx, router_id, router_db, mock.ANY,
                mock.ANY)
            self.assertFalse(cs.call_count)
    def test_build_routers_list_with_gw_port_mismatch(self):
        # A router whose gw_port_id has no matching port dict must end up
        # without a 'gw_port' entry rather than raising.
        routers = [{'gw_port_id': 'foo_gw_port_id', 'id': 'foo_router_id'}]
        gw_ports = {}
        routers = self.mixin._build_routers_list(self.ctx, routers, gw_ports)
        self.assertIsNone(routers[0].get('gw_port'))
    def setup_port_has_ipv6_address(self, port):
        """Call _port_has_ipv6_address with the base-class check mocked True.

        Returns the result and the mock, so callers can assert whether the
        base implementation was consulted at all.
        """
        with mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin,
                               '_port_has_ipv6_address') as pv6:
            pv6.return_value = True
            result = self.mixin._port_has_ipv6_address(port)
            return result, pv6
    def test__port_has_ipv6_address_for_dvr_snat_port(self):
        # SNAT ports are short-circuited to False without delegating.
        port = {
            'id': 'my_port_id',
            'device_owner': l3_const.DEVICE_OWNER_ROUTER_SNAT,
        }
        result, pv6 = self.setup_port_has_ipv6_address(port)
        self.assertFalse(result)
        self.assertFalse(pv6.called)
    def test__port_has_ipv6_address_for_non_snat_ports(self):
        # Non-SNAT ports delegate to the base-class implementation.
        port = {
            'id': 'my_port_id',
            'device_owner': l3_const.DEVICE_OWNER_DVR_INTERFACE,
        }
        result, pv6 = self.setup_port_has_ipv6_address(port)
        self.assertTrue(result)
        self.assertTrue(pv6.called)
    def _helper_delete_floatingip_agent_gateway_port(self, port_host):
        """Delete agent gateway ports, either for one host or for all hosts.

        With a host given only that host's port is removed; with None, every
        agent gateway port on the network is removed.
        """
        ports = [{
            'id': 'my_port_id',
            portbindings.HOST_ID: 'foo_host',
            'network_id': 'ext_network_id',
            'device_owner': l3_const.DEVICE_OWNER_ROUTER_GW
        },
            {
            'id': 'my_new_port_id',
            portbindings.HOST_ID: 'my_foo_host',
            'network_id': 'ext_network_id',
            'device_owner': l3_const.DEVICE_OWNER_ROUTER_GW
        }]
        with mock.patch.object(manager.NeutronManager, 'get_plugin') as gp:
            plugin = mock.Mock()
            gp.return_value = plugin
            plugin.get_ports.return_value = ports
            self.mixin.delete_floatingip_agent_gateway_port(
                self.ctx, port_host, 'ext_network_id')
        plugin.get_ports.assert_called_with(self.ctx, filters={
            'network_id': ['ext_network_id'],
            'device_owner': [l3_const.DEVICE_OWNER_AGENT_GW]})
        if port_host:
            # Host-scoped delete removes only the matching port.
            plugin.ipam.delete_port.assert_called_once_with(
                self.ctx, 'my_port_id')
        else:
            # No host: all agent gateway ports are removed.
            plugin.ipam.delete_port.assert_called_with(
                self.ctx, 'my_new_port_id')
    def test_delete_floatingip_agent_gateway_port_without_host_id(self):
        self._helper_delete_floatingip_agent_gateway_port(None)
    def test_delete_floatingip_agent_gateway_port_with_host_id(self):
        self._helper_delete_floatingip_agent_gateway_port(
            'foo_host')
    def _setup_delete_current_gw_port_deletes_fip_agent_gw_port(
            self, port=None, gw_port=True):
        """Drive _delete_current_gw_port on a mocked distributed router.

        :param port: value returned by plugin.get_ports (remaining router
            gateway ports on the external network)
        :param gw_port: whether the router currently has a gateway port
        :returns: the router mock plus the mocks for csnat-port deletion,
            agent-gateway-port deletion and FIP-namespace RPC, for callers
            to assert on.
        """
        router = mock.MagicMock()
        router.extra_attributes.distributed = True
        if gw_port:
            gw_port_db = {
                'id': 'my_gw_id',
                'network_id': 'ext_net_id',
                'device_owner': l3_const.DEVICE_OWNER_ROUTER_GW
            }
            router.gw_port = gw_port_db
        else:
            router.gw_port = None
        with mock.patch.object(manager.NeutronManager, 'get_plugin') as gp,\
                mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin,
                                  '_delete_current_gw_port'),\
                mock.patch.object(
                    self.mixin,
                    '_get_router') as grtr,\
                mock.patch.object(
                    self.mixin,
                    'delete_csnat_router_interface_ports') as del_csnat_port,\
                mock.patch.object(
                    self.mixin,
                    'delete_floatingip_agent_gateway_port') as del_agent_gw_port,\
                mock.patch.object(
                    self.mixin.l3_rpc_notifier,
                    'delete_fipnamespace_for_ext_net') as del_fip:
            plugin = mock.Mock()
            gp.return_value = plugin
            plugin.get_ports.return_value = port
            grtr.return_value = router
            self.mixin._delete_current_gw_port(
                self.ctx, router['id'], router, 'ext_network_id')
        return router, plugin, del_csnat_port, del_agent_gw_port, del_fip
    def test_delete_current_gw_port_deletes_fip_agent_gw_port_and_fipnamespace(
            self):
        # Last gateway on the network: csnat ports, agent gw ports and the
        # FIP namespace are all cleaned up.
        rtr, plugin, d_csnat_port, d_agent_gw_port, del_fip = (
            self._setup_delete_current_gw_port_deletes_fip_agent_gw_port())
        self.assertTrue(d_csnat_port.called)
        self.assertTrue(d_agent_gw_port.called)
        d_csnat_port.assert_called_once_with(
            mock.ANY, rtr)
        d_agent_gw_port.assert_called_once_with(mock.ANY, None, 'ext_net_id')
        del_fip.assert_called_once_with(mock.ANY, 'ext_net_id')
    def test_delete_current_gw_port_never_calls_delete_fip_agent_gw_port(self):
        # Other routers still have gateways on the network, so the shared
        # agent gateway ports and FIP namespace must be left alone.
        port = [{
            'id': 'my_port_id',
            'network_id': 'ext_net_id',
            'device_owner': l3_const.DEVICE_OWNER_ROUTER_GW
        },
            {
            'id': 'my_new_port_id',
            'network_id': 'ext_net_id',
            'device_owner': l3_const.DEVICE_OWNER_ROUTER_GW
        }]
        rtr, plugin, d_csnat_port, d_agent_gw_port, del_fip = (
            self._setup_delete_current_gw_port_deletes_fip_agent_gw_port(
                port=port))
        self.assertTrue(d_csnat_port.called)
        self.assertFalse(d_agent_gw_port.called)
        self.assertFalse(del_fip.called)
        d_csnat_port.assert_called_once_with(
            mock.ANY, rtr)
    def test_delete_current_gw_port_never_calls_delete_fipnamespace(self):
        # No gateway port at all: nothing to clean up.
        rtr, plugin, d_csnat_port, d_agent_gw_port, del_fip = (
            self._setup_delete_current_gw_port_deletes_fip_agent_gw_port(
                gw_port=False))
        self.assertFalse(d_csnat_port.called)
        self.assertFalse(d_agent_gw_port.called)
        self.assertFalse(del_fip.called)
    def _floatingip_on_port_test_setup(self, hostid):
        """Run _process_floating_ips_dvr for a FIP bound (or not) to *hostid*.

        When hostid is None the service port is reported on a different
        host, so the FIP should not be attached to the router dict.
        """
        router = {'id': 'foo_router_id', 'distributed': True}
        floatingip = {
            'id': _uuid(),
            'port_id': _uuid(),
            'router_id': 'foo_router_id',
            'host': hostid
        }
        if not hostid:
            hostid = 'not_my_host_id'
        routers = {
            'foo_router_id': router
        }
        fipagent = {
            'id': _uuid()
        }
        # NOTE: mock.patch is not needed here since self.mixin is created fresh
        # for each test. It doesn't work with some methods since the mixin is
        # tested in isolation (e.g. _get_agent_by_type_and_host).
        self.mixin._get_dvr_service_port_hostid = mock.Mock(
            return_value=hostid)
        self.mixin._get_agent_by_type_and_host = mock.Mock(
            return_value=fipagent)
        self.mixin._get_fip_sync_interfaces = mock.Mock(
            return_value='fip_interface')
        agent = mock.Mock()
        agent.id = fipagent['id']
        self.mixin._process_floating_ips_dvr(self.ctx, routers, [floatingip],
                                             hostid, agent)
        return (router, floatingip)
    def test_floatingip_on_port_not_host(self):
        # FIP hosted elsewhere: the router dict gains no FIP data.
        router, fip = self._floatingip_on_port_test_setup(None)
        self.assertNotIn(l3_const.FLOATINGIP_KEY, router)
        self.assertNotIn(n_const.FLOATINGIP_AGENT_INTF_KEY, router)
    def test_floatingip_on_port_with_host(self):
        # FIP hosted locally: both the FIP and the agent gateway interfaces
        # are attached to the router dict.
        router, fip = self._floatingip_on_port_test_setup(_uuid())
        self.assertTrue(self.mixin._get_fip_sync_interfaces.called)
        self.assertIn(l3_const.FLOATINGIP_KEY, router)
        self.assertIn(n_const.FLOATINGIP_AGENT_INTF_KEY, router)
        self.assertIn(fip, router[l3_const.FLOATINGIP_KEY])
        self.assertIn('fip_interface',
                      router[n_const.FLOATINGIP_AGENT_INTF_KEY])
    def _setup_test_create_floatingip(
            self, fip, floatingip_db, router_db):
        """Drive _update_fip_assoc with a fully stubbed environment.

        Patches out the router lookup, both DVR service-port host lookups
        and the base class association update, then returns the mock for
        create_fip_agent_gw_port_if_not_exists so callers can assert
        whether agent gateway port creation was triggered.
        """
        port = {
            'id': '1234',
            portbindings.HOST_ID: 'myhost',
            'network_id': 'external_net'
        }
        with mock.patch.object(self.mixin, 'get_router') as grtr,\
                mock.patch.object(self.mixin,
                                  '_get_dvr_service_port_hostid') as vmp,\
                mock.patch.object(
                    self.mixin,
                    '_get_dvr_migrating_service_port_hostid'
                ) as mvmp,\
                mock.patch.object(
                    self.mixin,
                    'create_fip_agent_gw_port_if_not_exists') as c_fip,\
                mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin,
                                  '_update_fip_assoc'):
            grtr.return_value = router_db
            vmp.return_value = 'my-host'
            mvmp.return_value = 'my-future-host'
            self.mixin._update_fip_assoc(
                self.ctx, fip, floatingip_db, port)
            return c_fip
def test_create_floatingip_agent_gw_port_with_dvr_router(self):
floatingip = {
'id': _uuid(),
'router_id': 'foo_router_id'
}
router = {'id': 'foo_router_id', 'distributed': True}
fip = {
'id': _uuid(),
'port_id': _uuid()
}
create_fip = (
self._setup_test_create_floatingip(
fip, floatingip, router))
self.assertTrue(create_fip.called)
def test_create_floatingip_agent_gw_port_with_non_dvr_router(self):
floatingip = {
'id': _uuid(),
'router_id': 'foo_router_id'
}
router = {'id': 'foo_router_id', 'distributed': False}
fip = {
'id': _uuid(),
'port_id': _uuid()
}
create_fip = (
self._setup_test_create_floatingip(
fip, floatingip, router))
self.assertFalse(create_fip.called)
    def test_remove_router_interface_csnat_ports_removal(self):
        """Removing a DVR interface also removes its matching csnat port."""
        router_dict = {'name': 'test_router', 'admin_state_up': True,
                       'distributed': True}
        router = self._create_router(router_dict)
        plugin = mock.MagicMock()
        with self.network() as net_ext,\
                self.subnet() as subnet1,\
                self.subnet(cidr='20.0.0.0/24') as subnet2:
            ext_net_id = net_ext['network']['id']
            self.core_plugin.update_network(
                self.ctx, ext_net_id,
                {'network': {'router:external': True}})
            self.mixin.update_router(
                self.ctx, router['id'],
                {'router': {'external_gateway_info':
                            {'network_id': ext_net_id}}})
            self.mixin.add_router_interface(self.ctx, router['id'],
                {'subnet_id': subnet1['subnet']['id']})
            self.mixin.add_router_interface(self.ctx, router['id'],
                {'subnet_id': subnet2['subnet']['id']})
            csnat_filters = {'device_owner':
                             [l3_const.DEVICE_OWNER_ROUTER_SNAT]}
            csnat_ports = self.core_plugin.get_ports(
                self.ctx, filters=csnat_filters)
            # One csnat port is expected per attached subnet.
            self.assertEqual(2, len(csnat_ports))
            dvr_filters = {'device_owner':
                           [l3_const.DEVICE_OWNER_DVR_INTERFACE]}
            dvr_ports = self.core_plugin.get_ports(
                self.ctx, filters=dvr_filters)
            self.assertEqual(2, len(dvr_ports))
            with mock.patch.object(manager.NeutronManager,
                                   'get_service_plugins') as get_svc_plugin:
                get_svc_plugin.return_value = {
                    plugin_const.L3_ROUTER_NAT: plugin}
                self.mixin.manager = manager
                self.mixin.remove_router_interface(
                    self.ctx, router['id'], {'port_id': dvr_ports[0]['id']})
            csnat_ports = self.core_plugin.get_ports(
                self.ctx, filters=csnat_filters)
            self.assertEqual(1, len(csnat_ports))
            # The surviving csnat port must belong to the still-attached
            # subnet (the one served by the remaining DVR interface).
            self.assertEqual(dvr_ports[1]['fixed_ips'][0]['subnet_id'],
                             csnat_ports[0]['fixed_ips'][0]['subnet_id'])
            dvr_ports = self.core_plugin.get_ports(
                self.ctx, filters=dvr_filters)
            self.assertEqual(1, len(dvr_ports))
    def _setup_router_with_v4_and_v6(self):
        """Create a DVR router with an external gateway plus v4/v6 interfaces.

        :returns: tuple of (router, subnet_v4, subnet_v6).
        """
        router_dict = {'name': 'test_router', 'admin_state_up': True,
                       'distributed': True}
        router = self._create_router(router_dict)
        plugin = mock.MagicMock()
        with self.network() as net_ext, self.network() as net_int:
            ext_net_id = net_ext['network']['id']
            self.core_plugin.update_network(
                self.ctx, ext_net_id,
                {'network': {'router:external': True}})
            self.mixin.update_router(
                self.ctx, router['id'],
                {'router': {'external_gateway_info':
                            {'network_id': ext_net_id}}})
            with self.subnet(
                network=net_int, cidr='20.0.0.0/24') as subnet_v4,\
                self.subnet(
                    network=net_int, cidr='fe80::/64',
                    gateway_ip='fe80::1', ip_version=6) as subnet_v6:
                self.mixin.add_router_interface(self.ctx, router['id'],
                    {'subnet_id': subnet_v4['subnet']['id']})
                self.mixin.add_router_interface(self.ctx, router['id'],
                    {'subnet_id': subnet_v6['subnet']['id']})
                # Patch started without a context manager: it stays in
                # effect for the caller (cleaned up by the test framework).
                get_svc_plugin = mock.patch.object(
                    manager.NeutronManager, 'get_service_plugins').start()
                get_svc_plugin.return_value = {
                    plugin_const.L3_ROUTER_NAT: plugin}
                self.mixin.manager = manager
                return router, subnet_v4, subnet_v6
    def test_undo_router_interface_change_on_csnat_error(self):
        # Revert succeeds: the interface can be re-added afterwards.
        self._test_undo_router_interface_change_on_csnat_error(False)
    def test_undo_router_interface_change_on_csnat_error_revert_failure(self):
        # Revert also fails: the original csnat error must still propagate.
        self._test_undo_router_interface_change_on_csnat_error(True)
    def _test_undo_router_interface_change_on_csnat_error(self, fail_revert):
        """Verify add_router_interface rolls back when the csnat update fails.

        :param fail_revert: when True, the rollback port update also raises,
            to check that the original RuntimeError still propagates.
        """
        router, subnet_v4, subnet_v6 = self._setup_router_with_v4_and_v6()
        net = {'network': {'id': subnet_v6['subnet']['network_id'],
                           'tenant_id': subnet_v6['subnet']['tenant_id']}}
        orig_update = self.mixin._core_plugin.update_port
        def update_port(*args, **kwargs):
            # 1st port update is the interface, 2nd is csnat, 3rd is revert
            # we want to simulate errors after the 1st
            update_port.calls += 1
            if update_port.calls == 2:
                raise RuntimeError('csnat update failure')
            if update_port.calls == 3 and fail_revert:
                # this is to ensure that if the revert fails, the original
                # exception is raised (not this ValueError)
                raise ValueError('failure from revert')
            return orig_update(*args, **kwargs)
        update_port.calls = 0
        self.mixin._core_plugin.update_port = update_port
        with self.subnet(network=net, cidr='fe81::/64',
                         gateway_ip='fe81::1', ip_version=6) as subnet2_v6:
            with testtools.ExpectedException(RuntimeError):
                self.mixin.add_router_interface(self.ctx, router['id'],
                    {'subnet_id': subnet2_v6['subnet']['id']})
            if fail_revert:
                # a revert failure will mean the interface is still added
                # so we can't re-add it
                return
            # starting over should work if first interface was cleaned up
            self.mixin.add_router_interface(self.ctx, router['id'],
                {'subnet_id': subnet2_v6['subnet']['id']})
    def test_remove_router_interface_csnat_ports_removal_with_ipv6(self):
        """Removing the v4 interface leaves only the v6 csnat port behind."""
        router, subnet_v4, subnet_v6 = self._setup_router_with_v4_and_v6()
        csnat_filters = {'device_owner':
                         [l3_const.DEVICE_OWNER_ROUTER_SNAT]}
        csnat_ports = self.core_plugin.get_ports(
            self.ctx, filters=csnat_filters)
        self.assertEqual(2, len(csnat_ports))
        dvr_filters = {'device_owner':
                       [l3_const.DEVICE_OWNER_DVR_INTERFACE]}
        dvr_ports = self.core_plugin.get_ports(
            self.ctx, filters=dvr_filters)
        self.assertEqual(2, len(dvr_ports))
        self.mixin.remove_router_interface(
            self.ctx, router['id'],
            {'subnet_id': subnet_v4['subnet']['id']})
        csnat_ports = self.core_plugin.get_ports(
            self.ctx, filters=csnat_filters)
        self.assertEqual(1, len(csnat_ports))
        # The surviving csnat port must be the one on the IPv6 subnet.
        self.assertEqual(
            subnet_v6['subnet']['id'],
            csnat_ports[0]['fixed_ips'][0]['subnet_id'])
        dvr_ports = self.core_plugin.get_ports(
            self.ctx, filters=dvr_filters)
        self.assertEqual(1, len(dvr_ports))
    def test_remove_router_interface_csnat_port_missing_ip(self):
        """Interface removal must tolerate a csnat port with no fixed IPs."""
        # NOTE(kevinbenton): this is a contrived scenario to reproduce
        # a condition observed in bug/1609540. Once we figure out why
        # these ports lose their IP we can remove this test.
        router, subnet_v4, subnet_v6 = self._setup_router_with_v4_and_v6()
        self.mixin.remove_router_interface(
            self.ctx, router['id'],
            {'subnet_id': subnet_v4['subnet']['id']})
        csnat_filters = {'device_owner':
                         [l3_const.DEVICE_OWNER_ROUTER_SNAT]}
        csnat_ports = self.core_plugin.get_ports(
            self.ctx, filters=csnat_filters)
        # Strip the IPs to mimic the buggy state; the second removal
        # below must not raise.
        self.core_plugin.update_port(self.ctx, csnat_ports[0]['id'],
                                     {'port': {'fixed_ips': []}})
        self.mixin.remove_router_interface(
            self.ctx, router['id'],
            {'subnet_id': subnet_v6['subnet']['id']})
def test__validate_router_migration_notify_advanced_services(self):
router = {'name': 'foo_router', 'admin_state_up': False}
router_db = self._create_router(router)
with mock.patch.object(l3_dvr_db.registry, 'notify') as mock_notify:
self.mixin._validate_router_migration(
self.ctx, router_db, {'distributed': True})
kwargs = {'context': self.ctx, 'router': router_db}
mock_notify.assert_called_once_with(
'router', 'before_update', self.mixin, **kwargs)
    def _test_update_arp_entry_for_dvr_service_port(
            self, device_owner, action):
        """Exercise ARP add/delete notifications for a DVR service port.

        The port carries three fixed IPs, so three add_arp_entry (or
        del_arp_entry) RPC notifications are expected.
        """
        router_dict = {'name': 'test_router', 'admin_state_up': True,
                       'distributed': True}
        router = self._create_router(router_dict)
        with mock.patch.object(manager.NeutronManager, 'get_plugin') as gp:
            plugin = mock.Mock()
            l3_notify = self.mixin.l3_rpc_notifier = mock.Mock()
            gp.return_value = plugin
            port = {
                'id': 'my_port_id',
                'fixed_ips': [
                    {'subnet_id': '51edc9e0-24f9-47f2-8e1e-2a41cb691323',
                     'ip_address': '10.0.0.11'},
                    {'subnet_id': '2b7c8a07-6f8e-4937-8701-f1d5da1a807c',
                     'ip_address': '10.0.0.21'},
                    {'subnet_id': '48534187-f077-4e81-93ff-81ec4cc0ad3b',
                     'ip_address': 'fd45:1515:7e0:0:f816:3eff:fe1a:1111'}],
                'mac_address': 'my_mac',
                'device_owner': device_owner
            }
            # The DVR interface port owned by the router; returned by the
            # stubbed core plugin so the ARP code finds a matching router.
            dvr_port = {
                'id': 'dvr_port_id',
                'fixed_ips': mock.ANY,
                'device_owner': l3_const.DEVICE_OWNER_DVR_INTERFACE,
                'device_id': router['id']
            }
            plugin.get_ports.return_value = [dvr_port]
            if action == 'add':
                self.mixin.update_arp_entry_for_dvr_service_port(
                    self.ctx, port)
                self.assertEqual(3, l3_notify.add_arp_entry.call_count)
            elif action == 'del':
                self.mixin.delete_arp_entry_for_dvr_service_port(
                    self.ctx, port)
                self.assertEqual(3, l3_notify.del_arp_entry.call_count)
def test_update_arp_entry_for_dvr_service_port_added(self):
action = 'add'
device_owner = l3_const.DEVICE_OWNER_LOADBALANCER
self._test_update_arp_entry_for_dvr_service_port(device_owner, action)
def test_update_arp_entry_for_dvr_service_port_deleted(self):
action = 'del'
device_owner = l3_const.DEVICE_OWNER_LOADBALANCER
self._test_update_arp_entry_for_dvr_service_port(device_owner, action)
    def test_add_router_interface_csnat_ports_failure(self):
        """A csnat port creation failure rolls back the interface port."""
        router_dict = {'name': 'test_router', 'admin_state_up': True,
                       'distributed': True}
        router = self._create_router(router_dict)
        with self.network() as net_ext,\
                self.subnet() as subnet:
            ext_net_id = net_ext['network']['id']
            self.core_plugin.update_network(
                self.ctx, ext_net_id,
                {'network': {'router:external': True}})
            self.mixin.update_router(
                self.ctx, router['id'],
                {'router': {'external_gateway_info':
                            {'network_id': ext_net_id}}})
            with mock.patch.object(
                    self.mixin, '_add_csnat_router_interface_port') as f:
                f.side_effect = RuntimeError()
                self.assertRaises(
                    RuntimeError,
                    self.mixin.add_router_interface,
                    self.ctx, router['id'],
                    {'subnet_id': subnet['subnet']['id']})
                filters = {
                    'device_id': [router['id']],
                }
                # Only the gateway port should remain after the rollback.
                router_ports = self.core_plugin.get_ports(self.ctx, filters)
                self.assertEqual(1, len(router_ports))
                self.assertEqual(l3_const.DEVICE_OWNER_ROUTER_GW,
                                 router_ports[0]['device_owner'])
| apache-2.0 |
annarev/tensorflow | tensorflow/python/keras/mixed_precision/device_compatibility_check_test.py | 5 | 5587 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the device compatibility check."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.python.keras import combinations
from tensorflow.python.keras.mixed_precision import device_compatibility_check
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def device_details(device_name, compute_capability=None):
  """Builds a device-details dict like tf.config's get_device_details.

  Args:
    device_name: name of the device, or a falsy value to omit the key.
    compute_capability: optional (major, minor) tuple; omitted when falsy.

  Returns:
    A dict with 'device_name' and/or 'compute_capability' keys, keeping
    only the truthy values.
  """
  candidates = (('device_name', device_name),
                ('compute_capability', compute_capability))
  return {key: value for key, value in candidates if value}
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class DeviceCompatibilityCheckTest(test.TestCase):
  """Tests the log output of the mixed-precision device compat check."""

  def _test_compat_check(self, device_attr_list, should_warn, expected_regex,
                         policy_name='mixed_float16'):
    """Runs the compatibility check and asserts on what gets logged.

    Args:
      device_attr_list: list of device-details dicts to check.
      should_warn: if True, expect a tf_logging.warn call (and no info);
        otherwise expect tf_logging.info (and no warn).
      expected_regex: regex the logged message must match.
      policy_name: dtype policy name passed to the check.
    """
    with test.mock.patch.object(tf_logging, 'warn') as mock_warn, \
        test.mock.patch.object(tf_logging, 'info') as mock_info:
      device_compatibility_check._log_device_compatibility_check(
          policy_name, device_attr_list)
      if should_warn:
        self.assertRegex(mock_warn.call_args[0][0], expected_regex)
        mock_info.assert_not_called()
      else:
        self.assertRegex(mock_info.call_args[0][0], expected_regex)
        mock_warn.assert_not_called()

  def test_supported(self):
    """All GPUs with compute capability >= 7.0 log an 'OK' info message."""
    details_list = [device_details('GPU 1', (7, 1))]
    regex = re.compile(
        r'.*compatibility check \(mixed_float16\): OK\n'
        r'Your GPU will likely run quickly with dtype policy mixed_float16 as '
        r'it has compute capability of at least 7.0. Your GPU: GPU 1, compute '
        r'capability 7.1', flags=re.MULTILINE)
    self._test_compat_check(details_list, False, regex)
    details_list = [
        device_details('GPU 1', (7, 0)),
        device_details('GPU 2', (7, 1)),
        device_details('GPU 3', (8, 0)),
    ]
    regex = re.compile(
        r'.*compatibility check \(mixed_float16\): OK\n'
        r'Your GPUs will likely run quickly with dtype policy mixed_float16 as '
        r'they all have compute capability of at least 7.0', flags=re.MULTILINE)
    self._test_compat_check(details_list, False, regex)

  def test_unsupported(self):
    """Old/unknown GPUs (or no GPU) produce a warning."""
    details_list = [
        device_details('GPU 1', (6, 0))
    ]
    regex = re.compile(
        r'.*compatibility check \(mixed_float16\): WARNING\n'
        r'Your GPU may run slowly with dtype policy mixed_float16.*\n'
        r'  GPU 1, compute capability 6.0\n'
        r'See.*', flags=re.MULTILINE)
    self._test_compat_check(details_list, True, regex)
    # A device with no name/capability (e.g. a non-Nvidia GPU).
    details_list = [
        device_details(None)
    ]
    regex = re.compile(
        r'.*compatibility check \(mixed_float16\): WARNING\n'
        r'Your GPU may run slowly with dtype policy mixed_float16.*\n'
        r'  Unknown GPU, no compute capability \(probably not an Nvidia GPU\)\n'
        r'See.*', flags=re.MULTILINE)
    self._test_compat_check(details_list, True, regex)
    details_list = [
        device_details('GPU 1', (6, 0)),
        device_details('GPU 2', (3, 10)),
    ]
    regex = re.compile(
        r'.*compatibility check \(mixed_float16\): WARNING\n'
        r'Your GPUs may run slowly with dtype policy mixed_float16.*\n'
        r'  GPU 1, compute capability 6.0\n'
        r'  GPU 2, compute capability 3.10\n'
        r'See.*', flags=re.MULTILINE)
    self._test_compat_check(details_list, True, regex)
    # Duplicate devices are expected to be aggregated with an "(xN)" count.
    details_list = [
        device_details('GPU 1', (6, 0)),
        device_details('GPU 1', (6, 0)),
        device_details('GPU 1', (6, 0)),
        device_details('GPU 2', (3, 10)),
    ]
    regex = re.compile(
        r'.*compatibility check \(mixed_float16\): WARNING\n'
        r'Your GPUs may run slowly with dtype policy mixed_float16.*\n'
        r'  GPU 1, compute capability 6.0 \(x3\)\n'
        r'  GPU 2, compute capability 3.10\n'
        r'See.*', flags=re.MULTILINE)
    self._test_compat_check(details_list, True, regex)
    # No GPUs at all also warns.
    details_list = []
    regex = re.compile(
        r'.*compatibility check \(mixed_float16\): WARNING\n'
        r'The dtype policy mixed_float16 may run slowly because this machine '
        r'does not have a GPU', flags=re.MULTILINE)
    self._test_compat_check(details_list, True, regex)

  def test_mix_of_supported_and_unsupported(self):
    """Mixing capable and incapable GPUs still warns, listing all of them."""
    details_list = [
        device_details('GPU 1', (7, 0)),
        device_details('GPU 1', (7, 0)),
        device_details('GPU 2', (6, 0))
    ]
    regex = re.compile(
        r'.*compatibility check \(mixed_float16\): WARNING\n'
        r'Some of your GPUs may run slowly with dtype policy mixed_float16.*\n'
        r'  GPU 1, compute capability 7.0 \(x2\)\n'
        r'  GPU 2, compute capability 6.0\n'
        r'See.*', flags=re.MULTILINE)
    self._test_compat_check(details_list, True, regex)
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
ifduyue/django | django/db/migrations/autodetector.py | 9 | 59878 | import functools
import re
from itertools import chain
from django.conf import settings
from django.db import models
from django.db.migrations import operations
from django.db.migrations.migration import Migration
from django.db.migrations.operations.models import AlterModelOptions
from django.db.migrations.optimizer import MigrationOptimizer
from django.db.migrations.questioner import MigrationQuestioner
from django.db.migrations.utils import (
COMPILED_REGEX_TYPE, RegexObject, get_migration_name_timestamp,
)
from .topological_sort import stable_topological_sort
class MigrationAutodetector:
"""
Take a pair of ProjectStates and compare them to see what the first would
need doing to make it match the second (the second usually being the
project's current state).
Note that this naturally operates on entire projects at a time,
as it's likely that changes interact (for example, you can't
add a ForeignKey without having a migration to add the table it
depends on first). A user interface may offer single-app usage
if it wishes, with the caveat that it may not always be possible.
"""
def __init__(self, from_state, to_state, questioner=None):
self.from_state = from_state
self.to_state = to_state
self.questioner = questioner or MigrationQuestioner()
self.existing_apps = {app for app, model in from_state.models}
def changes(self, graph, trim_to_apps=None, convert_apps=None, migration_name=None):
"""
Main entry point to produce a list of applicable changes.
Take a graph to base names on and an optional set of apps
to try and restrict to (restriction is not guaranteed)
"""
changes = self._detect_changes(convert_apps, graph)
changes = self.arrange_for_graph(changes, graph, migration_name)
if trim_to_apps:
changes = self._trim_to_apps(changes, trim_to_apps)
return changes
    def deep_deconstruct(self, obj):
        """
        Recursive deconstruction for a field and its arguments.
        Used for full comparison for rename/alter; sometimes a single-level
        deconstruction will not compare correctly.
        """
        # NOTE: the order of these isinstance checks matters; e.g. the
        # `type` check must run before the generic 'deconstruct' check.
        if isinstance(obj, list):
            return [self.deep_deconstruct(value) for value in obj]
        elif isinstance(obj, tuple):
            return tuple(self.deep_deconstruct(value) for value in obj)
        elif isinstance(obj, dict):
            return {
                key: self.deep_deconstruct(value)
                for key, value in obj.items()
            }
        elif isinstance(obj, functools.partial):
            return (obj.func, self.deep_deconstruct(obj.args), self.deep_deconstruct(obj.keywords))
        elif isinstance(obj, COMPILED_REGEX_TYPE):
            # Wrap compiled regexes so they compare by pattern/flags.
            return RegexObject(obj)
        elif isinstance(obj, type):
            # If this is a type that implements 'deconstruct' as an instance method,
            # avoid treating this as being deconstructible itself - see #22951
            return obj
        elif hasattr(obj, 'deconstruct'):
            deconstructed = obj.deconstruct()
            if isinstance(obj, models.Field):
                # we have a field which also returns a name
                deconstructed = deconstructed[1:]
            path, args, kwargs = deconstructed
            return (
                path,
                [self.deep_deconstruct(value) for value in args],
                {
                    key: self.deep_deconstruct(value)
                    for key, value in kwargs.items()
                },
            )
        else:
            # Plain value (int, str, ...): returned as-is.
            return obj
def only_relation_agnostic_fields(self, fields):
"""
Return a definition of the fields that ignores field names and
what related fields actually relate to. Used for detecting renames (as,
of course, the related fields change during renames).
"""
fields_def = []
for name, field in sorted(fields):
deconstruction = self.deep_deconstruct(field)
if field.remote_field and field.remote_field.model:
del deconstruction[2]['to']
fields_def.append(deconstruction)
return fields_def
    def _detect_changes(self, convert_apps=None, graph=None):
        """
        Return a dict of migration plans which will achieve the
        change from from_state to to_state. The dict has app labels
        as keys and a list of migrations as values.

        The resulting migrations aren't specially named, but the names
        do matter for dependencies inside the set.

        convert_apps is the list of apps to convert to use migrations
        (i.e. to make initial migrations for, in the usual case)

        graph is an optional argument that, if provided, can help improve
        dependency generation and avoid potential circular dependencies.
        """
        # The first phase is generating all the operations for each app
        # and gathering them into a big per-app list.
        # Then go through that list, order it, and split into migrations to
        # resolve dependencies caused by M2Ms and FKs.
        self.generated_operations = {}
        self.altered_indexes = {}
        # Prepare some old/new state and model lists, separating
        # proxy models and ignoring unmigrated apps.
        self.old_apps = self.from_state.concrete_apps
        self.new_apps = self.to_state.apps
        self.old_model_keys = set()
        self.old_proxy_keys = set()
        self.old_unmanaged_keys = set()
        self.new_model_keys = set()
        self.new_proxy_keys = set()
        self.new_unmanaged_keys = set()
        for al, mn in self.from_state.models:
            model = self.old_apps.get_model(al, mn)
            if not model._meta.managed:
                self.old_unmanaged_keys.add((al, mn))
            elif al not in self.from_state.real_apps:
                if model._meta.proxy:
                    self.old_proxy_keys.add((al, mn))
                else:
                    self.old_model_keys.add((al, mn))
        for al, mn in self.to_state.models:
            model = self.new_apps.get_model(al, mn)
            if not model._meta.managed:
                self.new_unmanaged_keys.add((al, mn))
            elif (
                al not in self.from_state.real_apps or
                (convert_apps and al in convert_apps)
            ):
                if model._meta.proxy:
                    self.new_proxy_keys.add((al, mn))
                else:
                    self.new_model_keys.add((al, mn))
        # Renames have to come first
        self.generate_renamed_models()
        # Prepare lists of fields and generate through model map
        self._prepare_field_lists()
        self._generate_through_model_map()
        # Generate non-rename model operations
        self.generate_deleted_models()
        self.generate_created_models()
        self.generate_deleted_proxies()
        self.generate_created_proxies()
        self.generate_altered_options()
        self.generate_altered_managers()
        # Create the altered indexes and store them in self.altered_indexes.
        # This avoids the same computation in generate_removed_indexes()
        # and generate_added_indexes().
        self.create_altered_indexes()
        # Generate index removal operations before field is removed
        self.generate_removed_indexes()
        # Generate field operations
        self.generate_renamed_fields()
        self.generate_removed_fields()
        self.generate_added_fields()
        self.generate_altered_fields()
        self.generate_altered_unique_together()
        self.generate_altered_index_together()
        self.generate_added_indexes()
        self.generate_altered_db_table()
        self.generate_altered_order_with_respect_to()
        # Finally split the per-app operation lists into ordered Migration
        # objects, then optimize each migration's operation list.
        self._sort_migrations()
        self._build_migration_list(graph)
        self._optimize_migrations()
        return self.migrations
    def _prepare_field_lists(self):
        """
        Prepare field lists and a list of the fields that used through models
        in the old state so dependencies can be made from the through model
        deletion to the field that uses it.
        """
        self.kept_model_keys = self.old_model_keys & self.new_model_keys
        self.kept_proxy_keys = self.old_proxy_keys & self.new_proxy_keys
        self.kept_unmanaged_keys = self.old_unmanaged_keys & self.new_unmanaged_keys
        self.through_users = {}
        # Old-state field keys; look the model up under its *old* name in
        # case it was renamed (renamed_models maps new key -> old name).
        self.old_field_keys = {
            (app_label, model_name, x)
            for app_label, model_name in self.kept_model_keys
            for x, y in self.from_state.models[
                app_label,
                self.renamed_models.get((app_label, model_name), model_name)
            ].fields
        }
        self.new_field_keys = {
            (app_label, model_name, x)
            for app_label, model_name in self.kept_model_keys
            for x, y in self.to_state.models[app_label, model_name].fields
        }
    def _generate_through_model_map(self):
        """Through model map generation."""
        # Maps (through_app_label, through_model_name) ->
        # (app_label, old_model_name, field_name) for every explicit
        # (non-auto-created) M2M through model in the old state.
        for app_label, model_name in sorted(self.old_model_keys):
            old_model_name = self.renamed_models.get((app_label, model_name), model_name)
            old_model_state = self.from_state.models[app_label, old_model_name]
            for field_name, field in old_model_state.fields:
                old_field = self.old_apps.get_model(app_label, old_model_name)._meta.get_field(field_name)
                if (hasattr(old_field, "remote_field") and getattr(old_field.remote_field, "through", None) and
                        not old_field.remote_field.through._meta.auto_created):
                    through_key = (
                        old_field.remote_field.through._meta.app_label,
                        old_field.remote_field.through._meta.model_name,
                    )
                    self.through_users[through_key] = (app_label, old_model_name, field_name)
    def _build_migration_list(self, graph=None):
        """
        Chop the lists of operations up into migrations with dependencies on
        each other. Do this by going through an app's list of operations until
        one is found that has an outgoing dependency that isn't in another
        app's migration yet (hasn't been chopped off its list). Then chop off
        the operations before it into a migration and move onto the next app.
        If the loops completes without doing anything, there's a circular
        dependency (which _should_ be impossible as the operations are
        all split at this point so they can't depend and be depended on).
        """
        self.migrations = {}
        num_ops = sum(len(x) for x in self.generated_operations.values())
        chop_mode = False
        while num_ops:
            # On every iteration, we step through all the apps and see if there
            # is a completed set of operations.
            # If we find that a subset of the operations are complete we can
            # try to chop it off from the rest and continue, but we only
            # do this if we've already been through the list once before
            # without any chopping and nothing has changed.
            for app_label in sorted(self.generated_operations):
                chopped = []
                dependencies = set()
                for operation in list(self.generated_operations[app_label]):
                    deps_satisfied = True
                    operation_dependencies = set()
                    for dep in operation._auto_deps:
                        is_swappable_dep = False
                        if dep[0] == "__setting__":
                            # We need to temporarily resolve the swappable dependency to prevent
                            # circular references. While keeping the dependency checks on the
                            # resolved model we still add the swappable dependencies.
                            # See #23322
                            resolved_app_label, resolved_object_name = getattr(settings, dep[1]).split('.')
                            original_dep = dep
                            dep = (resolved_app_label, resolved_object_name.lower(), dep[2], dep[3])
                            is_swappable_dep = True
                        if dep[0] != app_label and dep[0] != "__setting__":
                            # External app dependency. See if it's not yet
                            # satisfied.
                            for other_operation in self.generated_operations.get(dep[0], []):
                                if self.check_dependency(other_operation, dep):
                                    deps_satisfied = False
                                    break
                            if not deps_satisfied:
                                break
                            else:
                                if is_swappable_dep:
                                    operation_dependencies.add((original_dep[0], original_dep[1]))
                                elif dep[0] in self.migrations:
                                    operation_dependencies.add((dep[0], self.migrations[dep[0]][-1].name))
                                else:
                                    # If we can't find the other app, we add a first/last dependency,
                                    # but only if we've already been through once and checked everything
                                    if chop_mode:
                                        # If the app already exists, we add a dependency on the last migration,
                                        # as we don't know which migration contains the target field.
                                        # If it's not yet migrated or has no migrations, we use __first__
                                        if graph and graph.leaf_nodes(dep[0]):
                                            operation_dependencies.add(graph.leaf_nodes(dep[0])[0])
                                        else:
                                            operation_dependencies.add((dep[0], "__first__"))
                                    else:
                                        deps_satisfied = False
                    if deps_satisfied:
                        chopped.append(operation)
                        dependencies.update(operation_dependencies)
                        self.generated_operations[app_label] = self.generated_operations[app_label][1:]
                    else:
                        break
                # Make a migration! Well, only if there's stuff to put in it
                if dependencies or chopped:
                    if not self.generated_operations[app_label] or chop_mode:
                        subclass = type("Migration", (Migration,), {"operations": [], "dependencies": []})
                        instance = subclass("auto_%i" % (len(self.migrations.get(app_label, [])) + 1), app_label)
                        instance.dependencies = list(dependencies)
                        instance.operations = chopped
                        instance.initial = app_label not in self.existing_apps
                        self.migrations.setdefault(app_label, []).append(instance)
                        chop_mode = False
                    else:
                        self.generated_operations[app_label] = chopped + self.generated_operations[app_label]
            # If no operation was consumed this pass, switch chop_mode on;
            # if it was already on and still nothing moved, we're stuck.
            new_num_ops = sum(len(x) for x in self.generated_operations.values())
            if new_num_ops == num_ops:
                if not chop_mode:
                    chop_mode = True
                else:
                    raise ValueError("Cannot resolve operation dependencies: %r" % self.generated_operations)
            num_ops = new_num_ops
def _sort_migrations(self):
"""
Reorder to make things possible. Reordering may be needed so FKs work
nicely inside the same app.
"""
for app_label, ops in sorted(self.generated_operations.items()):
# construct a dependency graph for intra-app dependencies
dependency_graph = {op: set() for op in ops}
for op in ops:
for dep in op._auto_deps:
if dep[0] == app_label:
for op2 in ops:
if self.check_dependency(op2, dep):
dependency_graph[op].add(op2)
# we use a stable sort for deterministic tests & general behavior
self.generated_operations[app_label] = stable_topological_sort(ops, dependency_graph)
def _optimize_migrations(self):
# Add in internal dependencies among the migrations
for app_label, migrations in self.migrations.items():
for m1, m2 in zip(migrations, migrations[1:]):
m2.dependencies.append((app_label, m1.name))
# De-dupe dependencies
for app_label, migrations in self.migrations.items():
for migration in migrations:
migration.dependencies = list(set(migration.dependencies))
# Optimize migrations
for app_label, migrations in self.migrations.items():
for migration in migrations:
migration.operations = MigrationOptimizer().optimize(migration.operations, app_label=app_label)
    def check_dependency(self, operation, dependency):
        """
        Return True if the given operation depends on the given dependency,
        False otherwise.

        A dependency is a 4-tuple of
        (app_label, model_name, field_name_or_None, flag) where the
        flag is True (created), False (removed), "alter",
        "order_wrt_unset" or "foo_together_change".
        """
        # Created model
        if dependency[2] is None and dependency[3] is True:
            return (
                isinstance(operation, operations.CreateModel) and
                operation.name_lower == dependency[1].lower()
            )
        # Created field
        elif dependency[2] is not None and dependency[3] is True:
            return (
                (
                    isinstance(operation, operations.CreateModel) and
                    operation.name_lower == dependency[1].lower() and
                    any(dependency[2] == x for x, y in operation.fields)
                ) or
                (
                    isinstance(operation, operations.AddField) and
                    operation.model_name_lower == dependency[1].lower() and
                    operation.name_lower == dependency[2].lower()
                )
            )
        # Removed field
        elif dependency[2] is not None and dependency[3] is False:
            return (
                isinstance(operation, operations.RemoveField) and
                operation.model_name_lower == dependency[1].lower() and
                operation.name_lower == dependency[2].lower()
            )
        # Removed model
        elif dependency[2] is None and dependency[3] is False:
            return (
                isinstance(operation, operations.DeleteModel) and
                operation.name_lower == dependency[1].lower()
            )
        # Field being altered
        elif dependency[2] is not None and dependency[3] == "alter":
            return (
                isinstance(operation, operations.AlterField) and
                operation.model_name_lower == dependency[1].lower() and
                operation.name_lower == dependency[2].lower()
            )
        # order_with_respect_to being unset for a field
        elif dependency[2] is not None and dependency[3] == "order_wrt_unset":
            return (
                isinstance(operation, operations.AlterOrderWithRespectTo) and
                operation.name_lower == dependency[1].lower() and
                (operation.order_with_respect_to or "").lower() != dependency[2].lower()
            )
        # Field is removed and part of an index/unique_together
        elif dependency[2] is not None and dependency[3] == "foo_together_change":
            return (
                isinstance(operation, (operations.AlterUniqueTogether,
                                       operations.AlterIndexTogether)) and
                operation.name_lower == dependency[1].lower()
            )
        # Unknown dependency. Raise an error.
        else:
            raise ValueError("Can't handle dependency %r" % (dependency, ))
def add_operation(self, app_label, operation, dependencies=None, beginning=False):
# Dependencies are (app_label, model_name, field_name, create/delete as True/False)
operation._auto_deps = dependencies or []
if beginning:
self.generated_operations.setdefault(app_label, []).insert(0, operation)
else:
self.generated_operations.setdefault(app_label, []).append(operation)
    def swappable_first_key(self, item):
        """
        Place potential swappable models first in lists of created models (only
        real way to solve #22783).
        """
        try:
            model = self.new_apps.get_model(item[0], item[1])
            base_names = [base.__name__ for base in model.__bases__]
            string_version = "%s.%s" % (item[0], item[1])
            if (
                model._meta.swappable or
                "AbstractUser" in base_names or
                "AbstractBaseUser" in base_names or
                settings.AUTH_USER_MODEL.lower() == string_version.lower()
            ):
                # "___" sorts before any app/model name, so swappable
                # candidates come first when this is used as a sort key.
                return ("___" + item[0], "___" + item[1])
        except LookupError:
            # Model not in the new state; fall through to the plain key.
            pass
        return item
def generate_renamed_models(self):
        """
        Find any renamed models, generate the operations for them, and remove
        the old entry from the model lists. Must be run before other
        model-level generation.
        """
        self.renamed_models = {}
        self.renamed_models_rel = {}
        added_models = self.new_model_keys - self.old_model_keys
        for app_label, model_name in sorted(added_models):
            model_state = self.to_state.models[app_label, model_name]
            model_fields_def = self.only_relation_agnostic_fields(model_state.fields)
            removed_models = self.old_model_keys - self.new_model_keys
            for rem_app_label, rem_model_name in removed_models:
                if rem_app_label == app_label:
                    rem_model_state = self.from_state.models[rem_app_label, rem_model_name]
                    rem_model_fields_def = self.only_relation_agnostic_fields(rem_model_state.fields)
                    # An added model whose fields (ignoring relation targets)
                    # exactly match a removed model's is a rename candidate.
                    if model_fields_def == rem_model_fields_def:
                        if self.questioner.ask_rename_model(rem_model_state, model_state):
                            model_opts = self.new_apps.get_model(app_label, model_name)._meta
                            dependencies = []
                            for field in model_opts.get_fields():
                                if field.is_relation:
                                    dependencies.extend(self._get_dependencies_for_foreign_key(field))
                            self.add_operation(
                                app_label,
                                operations.RenameModel(
                                    old_name=rem_model_state.name,
                                    new_name=model_state.name,
                                ),
                                dependencies=dependencies,
                            )
                            self.renamed_models[app_label, model_name] = rem_model_name
                            renamed_models_rel_key = '%s.%s' % (rem_model_state.app_label, rem_model_state.name)
                            self.renamed_models_rel[renamed_models_rel_key] = '%s.%s' % (
                                model_state.app_label,
                                model_state.name,
                            )
                            # Move the key from "removed" back into the old set
                            # under the new name so later phases treat this as
                            # the same, kept model rather than add + delete.
                            self.old_model_keys.remove((rem_app_label, rem_model_name))
                            self.old_model_keys.add((app_label, model_name))
                            break
def generate_created_models(self):
        """
        Find all new models (both managed and unmanaged) and make create
        operations for them as well as separate operations to create any
        foreign key or M2M relationships (these are optimized later, if
        possible).
        Defer any model options that refer to collections of fields that might
        be deferred (e.g. unique_together, index_together).
        """
        old_keys = self.old_model_keys | self.old_unmanaged_keys
        added_models = self.new_model_keys - old_keys
        added_unmanaged_models = self.new_unmanaged_keys - old_keys
        # Sort with swappable_first_key so swappable-candidate models are
        # handled in a safe order relative to the rest (#22783).
        all_added_models = chain(
            sorted(added_models, key=self.swappable_first_key, reverse=True),
            sorted(added_unmanaged_models, key=self.swappable_first_key, reverse=True)
        )
        for app_label, model_name in all_added_models:
            model_state = self.to_state.models[app_label, model_name]
            model_opts = self.new_apps.get_model(app_label, model_name)._meta
            # Gather related fields
            related_fields = {}
            primary_key_rel = None
            for field in model_opts.local_fields:
                if field.remote_field:
                    if field.remote_field.model:
                        if field.primary_key:
                            primary_key_rel = field.remote_field.model
                        elif not field.remote_field.parent_link:
                            related_fields[field.name] = field
                    # through will be none on M2Ms on swapped-out models;
                    # we can treat lack of through as auto_created=True, though.
                    if (getattr(field.remote_field, "through", None) and
                            not field.remote_field.through._meta.auto_created):
                        related_fields[field.name] = field
            for field in model_opts.local_many_to_many:
                if field.remote_field.model:
                    related_fields[field.name] = field
                if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created:
                    related_fields[field.name] = field
            # Are there indexes/unique|index_together to defer?
            # Note: these pops mutate model_state.options in place, so the
            # CreateModel below is built without the deferred options.
            indexes = model_state.options.pop('indexes')
            unique_together = model_state.options.pop('unique_together', None)
            index_together = model_state.options.pop('index_together', None)
            order_with_respect_to = model_state.options.pop('order_with_respect_to', None)
            # Depend on the deletion of any possible proxy version of us
            dependencies = [
                (app_label, model_name, None, False),
            ]
            # Depend on all bases
            for base in model_state.bases:
                if isinstance(base, str) and "." in base:
                    base_app_label, base_name = base.split(".", 1)
                    dependencies.append((base_app_label, base_name, None, True))
            # Depend on the other end of the primary key if it's a relation
            if primary_key_rel:
                dependencies.append((
                    primary_key_rel._meta.app_label,
                    primary_key_rel._meta.object_name,
                    None,
                    True
                ))
            # Generate creation operation; related fields are stripped here
            # and added back as separate AddField operations below.
            self.add_operation(
                app_label,
                operations.CreateModel(
                    name=model_state.name,
                    fields=[d for d in model_state.fields if d[0] not in related_fields],
                    options=model_state.options,
                    bases=model_state.bases,
                    managers=model_state.managers,
                ),
                dependencies=dependencies,
                beginning=True,
            )
            # Don't add operations which modify the database for unmanaged models
            if not model_opts.managed:
                continue
            # Generate operations for each related field
            for name, field in sorted(related_fields.items()):
                dependencies = self._get_dependencies_for_foreign_key(field)
                # Depend on our own model being created
                dependencies.append((app_label, model_name, None, True))
                # Make operation
                self.add_operation(
                    app_label,
                    operations.AddField(
                        model_name=model_name,
                        name=name,
                        field=field,
                    ),
                    dependencies=list(set(dependencies)),
                )
            # Generate other opns
            related_dependencies = [
                (app_label, model_name, name, True)
                for name, field in sorted(related_fields.items())
            ]
            related_dependencies.append((app_label, model_name, None, True))
            for index in indexes:
                self.add_operation(
                    app_label,
                    operations.AddIndex(
                        model_name=model_name,
                        index=index,
                    ),
                    dependencies=related_dependencies,
                )
            if unique_together:
                self.add_operation(
                    app_label,
                    operations.AlterUniqueTogether(
                        name=model_name,
                        unique_together=unique_together,
                    ),
                    dependencies=related_dependencies
                )
            if index_together:
                self.add_operation(
                    app_label,
                    operations.AlterIndexTogether(
                        name=model_name,
                        index_together=index_together,
                    ),
                    dependencies=related_dependencies
                )
            if order_with_respect_to:
                self.add_operation(
                    app_label,
                    operations.AlterOrderWithRespectTo(
                        name=model_name,
                        order_with_respect_to=order_with_respect_to,
                    ),
                    dependencies=[
                        (app_label, model_name, order_with_respect_to, True),
                        (app_label, model_name, None, True),
                    ]
                )
            # Fix relationships if the model changed from a proxy model to a
            # concrete model.
            if (app_label, model_name) in self.old_proxy_keys:
                for related_object in model_opts.related_objects:
                    self.add_operation(
                        related_object.related_model._meta.app_label,
                        operations.AlterField(
                            model_name=related_object.related_model._meta.object_name,
                            name=related_object.field.name,
                            field=related_object.field,
                        ),
                        dependencies=[(app_label, model_name, None, True)],
                    )
def generate_created_proxies(self):
        """
        Make CreateModel statements for proxy models. Use the same statements
        as that way there's less code duplication, but of course for proxy
        models it's safe to skip all the pointless field stuff and just chuck
        out an operation.
        """
        for app_label, model_name in sorted(self.new_proxy_keys - self.old_proxy_keys):
            model_state = self.to_state.models[app_label, model_name]
            assert model_state.options.get("proxy")
            # The new proxy must wait for the deletion of any non-proxy model
            # that previously used this name...
            dependencies = [(app_label, model_name, None, False)]
            # ...and for the creation of every base living in another app.
            for base in model_state.bases:
                if isinstance(base, str) and "." in base:
                    base_app_label, base_name = base.split(".", 1)
                    dependencies.append((base_app_label, base_name, None, True))
            self.add_operation(
                app_label,
                operations.CreateModel(
                    name=model_state.name,
                    fields=[],
                    options=model_state.options,
                    bases=model_state.bases,
                    managers=model_state.managers,
                ),
                dependencies=dependencies,
            )
def generate_deleted_models(self):
        """
        Find all deleted models (managed and unmanaged) and make delete
        operations for them as well as separate operations to delete any
        foreign key or M2M relationships (these are optimized later, if
        possible).
        Also bring forward removal of any model options that refer to
        collections of fields - the inverse of generate_created_models().
        """
        new_keys = self.new_model_keys | self.new_unmanaged_keys
        deleted_models = self.old_model_keys - new_keys
        deleted_unmanaged_models = self.old_unmanaged_keys - new_keys
        all_deleted_models = chain(sorted(deleted_models), sorted(deleted_unmanaged_models))
        for app_label, model_name in all_deleted_models:
            model_state = self.from_state.models[app_label, model_name]
            model = self.old_apps.get_model(app_label, model_name)
            # Gather related fields. Only managed models need the per-field
            # database operations below, but unmanaged models must still
            # fall through to the final DeleteModel so their removal is
            # recorded in migration state. (The previous early `continue`
            # skipped DeleteModel for unmanaged models entirely -- the bug
            # tracked as Django ticket #27738.)
            related_fields = {}
            if model._meta.managed:
                for field in model._meta.local_fields:
                    if field.remote_field:
                        if field.remote_field.model:
                            related_fields[field.name] = field
                        # through will be none on M2Ms on swapped-out models;
                        # we can treat lack of through as auto_created=True, though.
                        if (getattr(field.remote_field, "through", None) and
                                not field.remote_field.through._meta.auto_created):
                            related_fields[field.name] = field
                for field in model._meta.local_many_to_many:
                    if field.remote_field.model:
                        related_fields[field.name] = field
                    if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created:
                        related_fields[field.name] = field
            # Generate option removal first
            unique_together = model_state.options.pop('unique_together', None)
            index_together = model_state.options.pop('index_together', None)
            if unique_together:
                self.add_operation(
                    app_label,
                    operations.AlterUniqueTogether(
                        name=model_name,
                        unique_together=None,
                    )
                )
            if index_together:
                self.add_operation(
                    app_label,
                    operations.AlterIndexTogether(
                        name=model_name,
                        index_together=None,
                    )
                )
            # Then remove each related field
            for name, field in sorted(related_fields.items()):
                self.add_operation(
                    app_label,
                    operations.RemoveField(
                        model_name=model_name,
                        name=name,
                    )
                )
            # Finally, remove the model.
            # This depends on both the removal/alteration of all incoming fields
            # and the removal of all its own related fields, and if it's
            # a through model the field that references it.
            dependencies = []
            for related_object in model._meta.related_objects:
                related_object_app_label = related_object.related_model._meta.app_label
                object_name = related_object.related_model._meta.object_name
                field_name = related_object.field.name
                dependencies.append((related_object_app_label, object_name, field_name, False))
                if not related_object.many_to_many:
                    dependencies.append((related_object_app_label, object_name, field_name, "alter"))
            for name, field in sorted(related_fields.items()):
                dependencies.append((app_label, model_name, name, False))
            # We're referenced in another field's through=
            through_user = self.through_users.get((app_label, model_state.name_lower))
            if through_user:
                dependencies.append((through_user[0], through_user[1], through_user[2], False))
            # Finally, make the operation, deduping any dependencies
            self.add_operation(
                app_label,
                operations.DeleteModel(
                    name=model_state.name,
                ),
                dependencies=list(set(dependencies)),
            )
def generate_deleted_proxies(self):
        """Make DeleteModel options for proxy models."""
        removed_proxies = self.old_proxy_keys - self.new_proxy_keys
        for app_label, model_name in sorted(removed_proxies):
            old_state = self.from_state.models[app_label, model_name]
            # Only proxies are expected here; concrete model deletions are
            # handled by generate_deleted_models().
            assert old_state.options.get("proxy")
            self.add_operation(
                app_label,
                operations.DeleteModel(name=old_state.name),
            )
def generate_renamed_fields(self):
        """Work out renamed fields."""
        self.renamed_fields = {}
        for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys):
            old_model_name = self.renamed_models.get((app_label, model_name), model_name)
            old_model_state = self.from_state.models[app_label, old_model_name]
            field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
            # Scan to see if this is actually a rename!
            field_dec = self.deep_deconstruct(field)
            for rem_app_label, rem_model_name, rem_field_name in sorted(self.old_field_keys - self.new_field_keys):
                if rem_app_label == app_label and rem_model_name == model_name:
                    old_field_dec = self.deep_deconstruct(old_model_state.get_field_by_name(rem_field_name))
                    # Account for model renames: rewrite the old field's 'to'
                    # kwarg (mutated in place) before comparing deconstructions.
                    if field.remote_field and field.remote_field.model and 'to' in old_field_dec[2]:
                        old_rel_to = old_field_dec[2]['to']
                        if old_rel_to in self.renamed_models_rel:
                            old_field_dec[2]['to'] = self.renamed_models_rel[old_rel_to]
                    if old_field_dec == field_dec:
                        if self.questioner.ask_rename(model_name, rem_field_name, field_name, field):
                            self.add_operation(
                                app_label,
                                operations.RenameField(
                                    model_name=model_name,
                                    old_name=rem_field_name,
                                    new_name=field_name,
                                )
                            )
                            # Record the old field as kept under its new name so
                            # later phases don't emit a remove + add pair for it.
                            self.old_field_keys.remove((rem_app_label, rem_model_name, rem_field_name))
                            self.old_field_keys.add((app_label, model_name, field_name))
                            self.renamed_fields[app_label, model_name, field_name] = rem_field_name
                            break
def generate_added_fields(self):
        """Make AddField operations."""
        added = self.new_field_keys - self.old_field_keys
        for app_label, model_name, field_name in sorted(added):
            self._generate_added_field(app_label, model_name, field_name)
def _generate_added_field(self, app_label, model_name, field_name):
        """Emit the AddField operation for a single newly added field."""
        field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
        # Fields that are foreignkeys/m2ms depend on stuff
        dependencies = []
        if field.remote_field and field.remote_field.model:
            dependencies.extend(self._get_dependencies_for_foreign_key(field))
        # You can't just add NOT NULL fields with no default or fields
        # which don't allow empty strings as default.
        preserve_default = True
        time_fields = (models.DateField, models.DateTimeField, models.TimeField)
        if (not field.null and not field.has_default() and
                not field.many_to_many and
                not (field.blank and field.empty_strings_allowed) and
                not (isinstance(field, time_fields) and field.auto_now)):
            # Clone first so the interim default asked from the user below is
            # set on a copy, not on the project state's field instance.
            field = field.clone()
            if isinstance(field, time_fields) and field.auto_now_add:
                field.default = self.questioner.ask_auto_now_add_addition(field_name, model_name)
            else:
                field.default = self.questioner.ask_not_null_addition(field_name, model_name)
            # preserve_default=False marks the default as one-off for this
            # migration (supplied via the questioner, not the model).
            preserve_default = False
        self.add_operation(
            app_label,
            operations.AddField(
                model_name=model_name,
                name=field_name,
                field=field,
                preserve_default=preserve_default,
            ),
            dependencies=dependencies,
        )
def generate_removed_fields(self):
        """Make RemoveField operations."""
        removed = self.old_field_keys - self.new_field_keys
        for app_label, model_name, field_name in sorted(removed):
            self._generate_removed_field(app_label, model_name, field_name)
def _generate_removed_field(self, app_label, model_name, field_name):
        """Emit the RemoveField operation for a single dropped field."""
        # The removal may need to wait for an order_with_respect_to unset or
        # a unique/index_together change that touches this field; both
        # dependency kinds are safely ignored when no such operation exists.
        dependencies = [
            (app_label, model_name, field_name, "order_wrt_unset"),
            (app_label, model_name, field_name, "foo_together_change"),
        ]
        self.add_operation(
            app_label,
            operations.RemoveField(model_name=model_name, name=field_name),
            dependencies=dependencies,
        )
def generate_altered_fields(self):
        """
        Make AlterField operations, or possibly RemovedField/AddField if alter
        isn't possible.
        """
        for app_label, model_name, field_name in sorted(self.old_field_keys & self.new_field_keys):
            # Did the field change?
            old_model_name = self.renamed_models.get((app_label, model_name), model_name)
            old_field_name = self.renamed_fields.get((app_label, model_name, field_name), field_name)
            old_field = self.old_apps.get_model(app_label, old_model_name)._meta.get_field(old_field_name)
            new_field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
            # Implement any model renames on relations; these are handled by RenameModel
            # so we need to exclude them from the comparison
            if hasattr(new_field, "remote_field") and getattr(new_field.remote_field, "model", None):
                rename_key = (
                    new_field.remote_field.model._meta.app_label,
                    new_field.remote_field.model._meta.model_name,
                )
                if rename_key in self.renamed_models:
                    new_field.remote_field.model = old_field.remote_field.model
            if hasattr(new_field, "remote_field") and getattr(new_field.remote_field, "through", None):
                rename_key = (
                    new_field.remote_field.through._meta.app_label,
                    new_field.remote_field.through._meta.model_name,
                )
                if rename_key in self.renamed_models:
                    new_field.remote_field.through = old_field.remote_field.through
            old_field_dec = self.deep_deconstruct(old_field)
            new_field_dec = self.deep_deconstruct(new_field)
            if old_field_dec != new_field_dec:
                both_m2m = old_field.many_to_many and new_field.many_to_many
                neither_m2m = not old_field.many_to_many and not new_field.many_to_many
                if both_m2m or neither_m2m:
                    # Either both fields are m2m or neither is
                    preserve_default = True
                    # Going from nullable to NOT NULL without a default needs a
                    # value for existing rows; ask the questioner for one.
                    if (old_field.null and not new_field.null and not new_field.has_default() and
                            not new_field.many_to_many):
                        field = new_field.clone()
                        new_default = self.questioner.ask_not_null_alteration(field_name, model_name)
                        if new_default is not models.NOT_PROVIDED:
                            field.default = new_default
                            preserve_default = False
                    else:
                        field = new_field
                    self.add_operation(
                        app_label,
                        operations.AlterField(
                            model_name=model_name,
                            name=field_name,
                            field=field,
                            preserve_default=preserve_default,
                        )
                    )
                else:
                    # We cannot alter between m2m and concrete fields
                    self._generate_removed_field(app_label, model_name, field_name)
                    self._generate_added_field(app_label, model_name, field_name)
def create_altered_indexes(self):
        """
        Record, for every kept model, which Meta indexes were added and which
        were removed between the old and new state. The result is consumed by
        generate_added_indexes() / generate_removed_indexes().
        """
        option_name = operations.AddIndex.option_name
        for app_label, model_name in sorted(self.kept_model_keys):
            old_model_name = self.renamed_models.get((app_label, model_name), model_name)
            previous = self.from_state.models[app_label, old_model_name].options[option_name]
            current = self.to_state.models[app_label, model_name].options[option_name]
            self.altered_indexes[app_label, model_name] = {
                'added_indexes': [idx for idx in current if idx not in previous],
                'removed_indexes': [idx for idx in previous if idx not in current],
            }
def generate_added_indexes(self):
        """Emit an AddIndex operation for every newly declared Meta index."""
        for (app_label, model_name), altered in self.altered_indexes.items():
            for new_index in altered['added_indexes']:
                self.add_operation(
                    app_label,
                    operations.AddIndex(model_name=model_name, index=new_index),
                )
def generate_removed_indexes(self):
        """Emit a RemoveIndex operation for every dropped Meta index."""
        for (app_label, model_name), altered in self.altered_indexes.items():
            for old_index in altered['removed_indexes']:
                self.add_operation(
                    app_label,
                    operations.RemoveIndex(model_name=model_name, name=old_index.name),
                )
def _get_dependencies_for_foreign_key(self, field):
        """
        Return the dependency tuples an operation involving this relational
        field needs: the remote model (or the swappable setting standing in
        for it) plus, for a manually defined through model, that model too.
        """
        swappable_setting = getattr(field, 'swappable_setting', None)
        if swappable_setting is None:
            remote_meta = field.remote_field.model._meta
            dep_app_label = remote_meta.app_label
            dep_object_name = remote_meta.object_name
        else:
            # The FK targets a swappable model: depend on the setting name
            # instead of a concrete model so a swap doesn't break the graph.
            dep_app_label = "__setting__"
            dep_object_name = swappable_setting
        dependencies = [(dep_app_label, dep_object_name, None, True)]
        through = getattr(field.remote_field, "through", None)
        if through and not through._meta.auto_created:
            dependencies.append((
                through._meta.app_label,
                through._meta.object_name,
                None,
                True,
            ))
        return dependencies
def _generate_altered_foo_together(self, operation):
        """
        Shared implementation for generate_altered_unique_together() and
        generate_altered_index_together(); *operation* is the operation class
        (AlterUniqueTogether / AlterIndexTogether) whose option_name selects
        which Meta option to diff.
        """
        option_name = operation.option_name
        for app_label, model_name in sorted(self.kept_model_keys):
            old_model_name = self.renamed_models.get((app_label, model_name), model_name)
            old_model_state = self.from_state.models[app_label, old_model_name]
            new_model_state = self.to_state.models[app_label, model_name]
            # We run the old version through the field renames to account for those
            old_value = old_model_state.options.get(option_name)
            old_value = {
                tuple(
                    self.renamed_fields.get((app_label, model_name, n), n)
                    for n in unique
                )
                for unique in old_value
            } if old_value else set()
            new_value = new_model_state.options.get(option_name)
            new_value = set(new_value) if new_value else set()
            if old_value != new_value:
                dependencies = []
                for foo_togethers in new_value:
                    for field_name in foo_togethers:
                        field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
                        if field.remote_field and field.remote_field.model:
                            # Relational fields named in the new value must
                            # exist before the option can reference them.
                            dependencies.extend(self._get_dependencies_for_foreign_key(field))
                self.add_operation(
                    app_label,
                    operation(
                        name=model_name,
                        **{option_name: new_value}
                    ),
                    dependencies=dependencies,
                )
def generate_altered_unique_together(self):
        # Thin wrapper: diff the unique_together option via the shared
        # _generate_altered_foo_together() implementation.
        self._generate_altered_foo_together(operations.AlterUniqueTogether)
def generate_altered_index_together(self):
        # Thin wrapper: diff the index_together option via the shared
        # _generate_altered_foo_together() implementation.
        self._generate_altered_foo_together(operations.AlterIndexTogether)
def generate_altered_db_table(self):
        """Emit AlterModelTable for models whose db_table option changed."""
        models_to_check = self.kept_model_keys.union(self.kept_proxy_keys, self.kept_unmanaged_keys)
        for app_label, model_name in sorted(models_to_check):
            old_model_name = self.renamed_models.get((app_label, model_name), model_name)
            old_table = self.from_state.models[app_label, old_model_name].options.get('db_table')
            new_table = self.to_state.models[app_label, model_name].options.get('db_table')
            if old_table == new_table:
                continue
            self.add_operation(
                app_label,
                operations.AlterModelTable(name=model_name, table=new_table),
            )
def generate_altered_options(self):
        """
        Work out if any non-schema-affecting options have changed and make an
        operation to represent them in state changes (in case Python code in
        migrations needs them).
        """
        def alterable(options):
            # Restrict to the options AlterModelOptions knows how to carry.
            return {
                key: value for key, value in options.items()
                if key in AlterModelOptions.ALTER_OPTION_KEYS
            }

        models_to_check = self.kept_model_keys.union(
            self.kept_proxy_keys,
            self.kept_unmanaged_keys,
            # unmanaged converted to managed
            self.old_unmanaged_keys & self.new_model_keys,
            # managed converted to unmanaged
            self.old_model_keys & self.new_unmanaged_keys,
        )
        for app_label, model_name in sorted(models_to_check):
            old_model_name = self.renamed_models.get((app_label, model_name), model_name)
            old_options = alterable(self.from_state.models[app_label, old_model_name].options)
            new_options = alterable(self.to_state.models[app_label, model_name].options)
            if old_options != new_options:
                self.add_operation(
                    app_label,
                    operations.AlterModelOptions(name=model_name, options=new_options),
                )
def generate_altered_order_with_respect_to(self):
        """
        Emit AlterOrderWithRespectTo for kept models whose
        order_with_respect_to option changed between states.
        """
        for app_label, model_name in sorted(self.kept_model_keys):
            old_model_name = self.renamed_models.get((app_label, model_name), model_name)
            old_model_state = self.from_state.models[app_label, old_model_name]
            new_model_state = self.to_state.models[app_label, model_name]
            if (old_model_state.options.get("order_with_respect_to") !=
                    new_model_state.options.get("order_with_respect_to")):
                # Make sure it comes second if we're adding
                # (removal dependency is part of RemoveField)
                dependencies = []
                if new_model_state.options.get("order_with_respect_to"):
                    dependencies.append((
                        app_label,
                        model_name,
                        new_model_state.options["order_with_respect_to"],
                        True,
                    ))
                # Actually generate the operation
                self.add_operation(
                    app_label,
                    operations.AlterOrderWithRespectTo(
                        name=model_name,
                        order_with_respect_to=new_model_state.options.get('order_with_respect_to'),
                    ),
                    dependencies=dependencies,
                )
def generate_altered_managers(self):
        """Emit AlterModelManagers for kept models whose managers changed."""
        for app_label, model_name in sorted(self.kept_model_keys):
            old_model_name = self.renamed_models.get((app_label, model_name), model_name)
            old_managers = self.from_state.models[app_label, old_model_name].managers
            new_managers = self.to_state.models[app_label, model_name].managers
            if old_managers != new_managers:
                self.add_operation(
                    app_label,
                    operations.AlterModelManagers(name=model_name, managers=new_managers),
                )
def arrange_for_graph(self, changes, graph, migration_name=None):
        """
        Take a result from changes() and a MigrationGraph, and fix the names
        and dependencies of the changes so they extend the graph from the leaf
        nodes for each app.

        If migration_name is given it is used (after the sequence number) for
        every migration instead of an autogenerated suggestion.
        """
        leaves = graph.leaf_nodes()
        name_map = {}
        # Iterate over a copy: apps may be deleted from `changes` below.
        for app_label, migrations in list(changes.items()):
            if not migrations:
                continue
            # Find the app label's current leaf node
            app_leaf = None
            for leaf in leaves:
                if leaf[0] == app_label:
                    app_leaf = leaf
                    break
            # Do they want an initial migration for this app?
            if app_leaf is None and not self.questioner.ask_initial(app_label):
                # They don't.
                for migration in migrations:
                    name_map[(app_label, migration.name)] = (app_label, "__first__")
                del changes[app_label]
                continue
            # Work out the next number in the sequence
            if app_leaf is None:
                next_number = 1
            else:
                next_number = (self.parse_number(app_leaf[1]) or 0) + 1
            # Name each migration
            for i, migration in enumerate(migrations):
                if i == 0 and app_leaf:
                    migration.dependencies.append(app_leaf)
                if i == 0 and not app_leaf:
                    new_name = "0001_%s" % migration_name if migration_name else "0001_initial"
                else:
                    new_name = "%04i_%s" % (
                        next_number,
                        migration_name or self.suggest_name(migration.operations)[:100],
                    )
                name_map[(app_label, migration.name)] = (app_label, new_name)
                next_number += 1
                migration.name = new_name
        # Now fix dependencies
        for app_label, migrations in changes.items():
            for migration in migrations:
                migration.dependencies = [name_map.get(d, d) for d in migration.dependencies]
        return changes
def _trim_to_apps(self, changes, app_labels):
        """
        Take changes from arrange_for_graph() and set of app labels, and return
        a modified set of changes which trims out as many migrations that are
        not in app_labels as possible. Note that some other migrations may
        still be present as they may be required dependencies.
        """
        # First pass: which other apps does each app's migrations depend on?
        app_dependencies = {}
        for app_label, migrations in changes.items():
            for migration in migrations:
                for dep_app_label, _ in migration.dependencies:
                    app_dependencies.setdefault(app_label, set()).add(dep_app_label)
        # Transitively close the requested app set over those dependencies.
        required_apps = set(app_labels)
        previous = None
        while previous != required_apps:
            previous = set(required_apps)
            for app_label in previous:
                required_apps.update(app_dependencies.get(app_label, ()))
        # Drop every app whose migrations turned out not to be required.
        for app_label in list(changes):
            if app_label not in required_apps:
                del changes[app_label]
        return changes
@classmethod
def suggest_name(cls, ops):
        """
        Given a set of operations, suggest a name for the migration they might
        represent. Names are not guaranteed to be unique, but put some effort
        into the fallback name to avoid VCS conflicts if possible.
        """
        if len(ops) == 1:
            only_op = ops[0]
            if isinstance(only_op, operations.CreateModel):
                return only_op.name_lower
            if isinstance(only_op, operations.DeleteModel):
                return "delete_%s" % only_op.name_lower
            if isinstance(only_op, operations.AddField):
                return "%s_%s" % (only_op.model_name_lower, only_op.name_lower)
            if isinstance(only_op, operations.RemoveField):
                return "remove_%s_%s" % (only_op.model_name_lower, only_op.name_lower)
        elif len(ops) > 1 and all(isinstance(op, operations.CreateModel) for op in ops):
            # Several models created in one migration: join their names.
            return "_".join(sorted(op.name_lower for op in ops))
        # Fallback: a timestamped name that is unlikely to collide in VCS.
        return "auto_%s" % get_migration_name_timestamp()
@classmethod
def parse_number(cls, name):
        """
        Given a migration name, try to extract a number from the beginning of
        it. If no number is found, return None.
        """
        digits = re.match(r"^\d+", name)
        return int(digits.group()) if digits else None
| bsd-3-clause |
paulorauber/pgm | examples/die.py | 1 | 3385 | import numpy as np
import contextlib
from model.factor import RandomVar
from model.factor import Factor
from model.factor import CPD
from model.gd import BayesianNetwork
from inference.approximate import ForwardSampler
from learning.parameter import ExpectationMaximization
from learning.structure import LikelihoodScore
def die():
    """
    Toy EM demo: two dice observed through a hidden coin flip.

    A hidden binary variable ``h`` selects which die produced each of the
    two observations ``o1``/``o2``. The script samples from the true model,
    hides ``h``, fits the parameters with expectation-maximization, and
    prints the recovered die distributions next to the true ones.
    """
    # Parameters
    # d1_ = [0.2, 0.0, 0.5, 0.1, 0.1, 0.1]
    # d2_ = [0.2, 0.3, 0.1, 0.05, 0.05, 0.3]
    d1_ = [0.1, 0.9]
    d2_ = [0.6, 0.4]
    n_samples = 5000
    n_iterations = 10
    n_restarts = 2
    verbose = 2
    # Model creation
    if len(d1_) != len(d2_):
        raise Exception('The die should have the same cardinality')
    h = RandomVar('h', 2)
    o1 = RandomVar('o1', len(d1_))
    o2 = RandomVar('o2', len(d2_))
    f_h = CPD([h], [0.5, 0.5])
    f_o1_h = Factor([o1, h])
    f_o2_h = Factor([o2, h])
    # h == 0 means o1 came from die 1 and o2 from die 2; h == 1 swaps them.
    for i in range(len(f_o1_h.values)):
        o_, h_ = f_o1_h.itoa(i)
        f_o1_h.values[i] = d1_[o_] if h_ == 0 else d2_[o_]
        f_o2_h.values[i] = d2_[o_] if h_ == 0 else d1_[o_]
    f_o1_h = CPD(f_o1_h.scope, f_o1_h.values)
    f_o2_h = CPD(f_o2_h.scope, f_o2_h.values)
    bn = BayesianNetwork([f_h, f_o1_h, f_o2_h])
    # Sampling from true model
    fs = ForwardSampler(bn)
    fs.sample(n_samples)
    scope, X = fs.samples_to_matrix()
    em = ExpectationMaximization(scope, known_cpds=[f_h],
                                 n_iterations=n_iterations,
                                 n_restarts=n_restarts, alpha=10.0,
                                 verbose=verbose)
    print('True log-likelihood (no missing variables):')
    print(em.log_likelihood(X, bn))
    print('Maximum log-likelihood (no missing variables):')
    ls = LikelihoodScore(scope)
    ls.fit(X, bn.graph())
    print(ls.score)
    # Hiding variable
    # NOTE(review): -1 appears to be the missing-value marker expected by
    # ExpectationMaximization -- confirm in learning.parameter.
    X[:, scope.index(h)] = -1
    print('True log-likelihood (missing variables):')
    print(em.log_likelihood(X, bn))
    bn_pred = em.fit_predict(X, bn.graph())
    print('Best log-likelihood (missing variables)')
    print(em.log_likelihood(X, bn_pred))
    # Estimation results
    print('Results:')
    f_o1_h = [f for f in bn_pred.factors if f.scope[0] == o1][0]
    f_o2_h = [f for f in bn_pred.factors if f.scope[0] == o2][0]
    d = np.zeros(o1.k)
    d1 = np.zeros(o1.k)
    d2 = np.zeros(o1.k)
    # Each die is estimated twice (once per observed variable, using the
    # matching value of h) and the two estimates are averaged below.
    with printoptions(precision=3):
        print('d1: {0}'.format(d1_))
        for i in range(o1.k):
            d[i] = f_o1_h.values[f_o1_h.atoi([i, 0])]
        print('d1 according to o1: {0}'.format(d))
        d1 += d
        for i in range(o2.k):
            d[i] = f_o2_h.values[f_o2_h.atoi([i, 1])]
        print('d1 according to o2: {0}'.format(d))
        d1 += d
        print('d2: {0}'.format(d2_))
        for i in range(o1.k):
            d[i] = f_o1_h.values[f_o1_h.atoi([i, 1])]
        print('d2 according to o1: {0}'.format(d))
        d2 += d
        for i in range(o2.k):
            d[i] = f_o2_h.values[f_o2_h.atoi([i, 0])]
        print('d2 according to o2: {0}'.format(d))
        d2 += d
    print('Average estimate:')
    print('d1: {0}'.format(d1/2.))
    print('d2: {0}'.format(d2/2.))
@contextlib.contextmanager
def printoptions(*args, **kwargs):
    """
    Temporarily apply numpy print options.

    Forwards *args/**kwargs to ``np.set_printoptions`` and restores the
    previous options on exit.

    Bug fix: the original version only restored the options after a normal
    exit, so an exception raised inside the ``with`` block permanently
    leaked the modified settings; the restore now runs in ``finally``.
    """
    original = np.get_printoptions()
    np.set_printoptions(*args, **kwargs)
    try:
        yield
    finally:
        np.set_printoptions(**original)
# Run the demo when this file is executed as a script.
if __name__ == "__main__":
    die()
| mit |
zerc/django | django/db/backends/sqlite3/base.py | 323 | 18115 | """
SQLite3 backend for django.
Works with either the pysqlite2 module or the sqlite3 module in the
standard library.
"""
from __future__ import unicode_literals
import datetime
import decimal
import re
import warnings
from django.conf import settings
from django.db import utils
from django.db.backends import utils as backend_utils
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.backends.base.validation import BaseDatabaseValidation
from django.utils import six, timezone
from django.utils.dateparse import (
parse_date, parse_datetime, parse_duration, parse_time,
)
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text
from django.utils.safestring import SafeBytes
try:
import pytz
except ImportError:
pytz = None
try:
try:
from pysqlite2 import dbapi2 as Database
except ImportError:
from sqlite3 import dbapi2 as Database
except ImportError as exc:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading either pysqlite2 or sqlite3 modules (tried in that order): %s" % exc)
# Some of these import sqlite3, so import them after checking if it's installed.
from .client import DatabaseClient # isort:skip
from .creation import DatabaseCreation # isort:skip
from .features import DatabaseFeatures # isort:skip
from .introspection import DatabaseIntrospection # isort:skip
from .operations import DatabaseOperations # isort:skip
from .schema import DatabaseSchemaEditor # isort:skip
# Re-export the DB-API exception classes so callers can catch them without
# knowing which sqlite3 module (pysqlite2 or stdlib) was actually loaded.
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
def adapt_datetime_warn_on_aware_datetime(value):
    """
    Adapt a datetime for SQLite storage, warning when an aware datetime is
    received while USE_TZ is enabled.

    Remove this function and rely on the default adapter in Django 2.0.
    """
    if settings.USE_TZ and timezone.is_aware(value):
        # Interpolate the offending value into the message; warnings.warn()
        # does not %-format its argument, so the original code emitted a
        # literal "(%s)" instead of the datetime.
        warnings.warn(
            "The SQLite database adapter received an aware datetime (%s), "
            "probably from cursor.execute(). Update your code to pass a "
            "naive datetime in the database connection's time zone (UTC by "
            "default)." % value, RemovedInDjango20Warning)
        # This doesn't account for the database connection's timezone,
        # which isn't known. (That's why this adapter is deprecated.)
        value = value.astimezone(timezone.utc).replace(tzinfo=None)
    return value.isoformat(str(" "))
def decoder(conv_func):
    """ The Python sqlite3 interface returns always byte strings.
    This function converts the received value to a regular string before
    passing it to the receiver function.
    """
    def convert(raw_value):
        # Metadata coming back from sqlite3 converters is UTF-8 bytes.
        text = raw_value.decode('utf-8')
        return conv_func(text)
    return convert
# Register converters so sqlite3 turns declared column types back into the
# Python objects Django expects (requires detect_types on the connection).
Database.register_converter(str("bool"), decoder(lambda s: s == '1'))
Database.register_converter(str("time"), decoder(parse_time))
Database.register_converter(str("date"), decoder(parse_date))
Database.register_converter(str("datetime"), decoder(parse_datetime))
Database.register_converter(str("timestamp"), decoder(parse_datetime))
Database.register_converter(str("TIMESTAMP"), decoder(parse_datetime))
Database.register_converter(str("decimal"), decoder(backend_utils.typecast_decimal))
# Adapters run in the opposite direction: Python values -> SQLite storage.
Database.register_adapter(datetime.datetime, adapt_datetime_warn_on_aware_datetime)
Database.register_adapter(decimal.Decimal, backend_utils.rev_typecast_decimal)
if six.PY2:
    # On Python 2, str is bytes; decode so SQLite stores proper text.
    Database.register_adapter(str, lambda s: s.decode('utf-8'))
    Database.register_adapter(SafeBytes, lambda s: s.decode('utf-8'))
class DatabaseWrapper(BaseDatabaseWrapper):
    """SQLite implementation of Django's database backend wrapper.

    Wires together the feature/ops/client/creation/introspection helper
    classes and adapts connection handling to sqlite3/pysqlite quirks
    (thread sharing, in-memory databases, isolation levels).
    """
    vendor = 'sqlite'
    # SQLite doesn't actually support most of these types, but it "does the right
    # thing" given more verbose field definitions, so leave them as is so that
    # schema inspection is more useful.
    data_types = {
        'AutoField': 'integer',
        'BinaryField': 'BLOB',
        'BooleanField': 'bool',
        'CharField': 'varchar(%(max_length)s)',
        'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
        'DateField': 'date',
        'DateTimeField': 'datetime',
        'DecimalField': 'decimal',
        'DurationField': 'bigint',
        'FileField': 'varchar(%(max_length)s)',
        'FilePathField': 'varchar(%(max_length)s)',
        'FloatField': 'real',
        'IntegerField': 'integer',
        'BigIntegerField': 'bigint',
        'IPAddressField': 'char(15)',
        'GenericIPAddressField': 'char(39)',
        'NullBooleanField': 'bool',
        'OneToOneField': 'integer',
        'PositiveIntegerField': 'integer unsigned',
        'PositiveSmallIntegerField': 'smallint unsigned',
        'SlugField': 'varchar(%(max_length)s)',
        'SmallIntegerField': 'smallint',
        'TextField': 'text',
        'TimeField': 'time',
        'UUIDField': 'char(32)',
    }
    data_types_suffix = {
        'AutoField': 'AUTOINCREMENT',
    }
    # SQLite requires LIKE statements to include an ESCAPE clause if the value
    # being escaped has a percent or underscore in it.
    # See http://www.sqlite.org/lang_expr.html for an explanation.
    operators = {
        'exact': '= %s',
        'iexact': "LIKE %s ESCAPE '\\'",
        'contains': "LIKE %s ESCAPE '\\'",
        'icontains': "LIKE %s ESCAPE '\\'",
        'regex': 'REGEXP %s',
        'iregex': "REGEXP '(?i)' || %s",
        'gt': '> %s',
        'gte': '>= %s',
        'lt': '< %s',
        'lte': '<= %s',
        'startswith': "LIKE %s ESCAPE '\\'",
        'endswith': "LIKE %s ESCAPE '\\'",
        'istartswith': "LIKE %s ESCAPE '\\'",
        'iendswith': "LIKE %s ESCAPE '\\'",
    }
    # The patterns below are used to generate SQL pattern lookup clauses when
    # the right-hand side of the lookup isn't a raw string (it might be an expression
    # or the result of a bilateral transformation).
    # In those cases, special characters for LIKE operators (e.g. \, *, _) should be
    # escaped on database side.
    #
    # Note: we use str.format() here for readability as '%' is used as a wildcard for
    # the LIKE operator.
    pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')"
    pattern_ops = {
        'contains': r"LIKE '%%' || {} || '%%' ESCAPE '\'",
        'icontains': r"LIKE '%%' || UPPER({}) || '%%' ESCAPE '\'",
        'startswith': r"LIKE {} || '%%' ESCAPE '\'",
        'istartswith': r"LIKE UPPER({}) || '%%' ESCAPE '\'",
        'endswith': r"LIKE '%%' || {} ESCAPE '\'",
        'iendswith': r"LIKE '%%' || UPPER({}) ESCAPE '\'",
    }
    # Expose the DB-API module and the schema editor class on the wrapper.
    Database = Database
    SchemaEditorClass = DatabaseSchemaEditor
    def __init__(self, *args, **kwargs):
        # Instantiate the per-connection helper objects.
        super(DatabaseWrapper, self).__init__(*args, **kwargs)
        self.features = DatabaseFeatures(self)
        self.ops = DatabaseOperations(self)
        self.client = DatabaseClient(self)
        self.creation = DatabaseCreation(self)
        self.introspection = DatabaseIntrospection(self)
        self.validation = BaseDatabaseValidation(self)
    def get_connection_params(self):
        """Build the kwargs passed to Database.connect() from settings."""
        settings_dict = self.settings_dict
        if not settings_dict['NAME']:
            from django.core.exceptions import ImproperlyConfigured
            raise ImproperlyConfigured(
                "settings.DATABASES is improperly configured. "
                "Please supply the NAME value.")
        kwargs = {
            'database': settings_dict['NAME'],
            # Apply the converters registered above, both for declared column
            # types and for "colname [type]" column-name aliases.
            'detect_types': Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES,
        }
        kwargs.update(settings_dict['OPTIONS'])
        # Always allow the underlying SQLite connection to be shareable
        # between multiple threads. The safe-guarding will be handled at a
        # higher level by the `BaseDatabaseWrapper.allow_thread_sharing`
        # property. This is necessary as the shareability is disabled by
        # default in pysqlite and it cannot be changed once a connection is
        # opened.
        if 'check_same_thread' in kwargs and kwargs['check_same_thread']:
            warnings.warn(
                'The `check_same_thread` option was provided and set to '
                'True. It will be overridden with False. Use the '
                '`DatabaseWrapper.allow_thread_sharing` property instead '
                'for controlling thread shareability.',
                RuntimeWarning
            )
        kwargs.update({'check_same_thread': False})
        if self.features.can_share_in_memory_db:
            kwargs.update({'uri': True})
        return kwargs
    def get_new_connection(self, conn_params):
        """Open a connection and register the custom SQL functions that
        Django's generated SQL relies on (date/time handling, REGEXP, etc.).
        """
        conn = Database.connect(**conn_params)
        conn.create_function("django_date_extract", 2, _sqlite_date_extract)
        conn.create_function("django_date_trunc", 2, _sqlite_date_trunc)
        conn.create_function("django_datetime_cast_date", 2, _sqlite_datetime_cast_date)
        conn.create_function("django_datetime_extract", 3, _sqlite_datetime_extract)
        conn.create_function("django_datetime_trunc", 3, _sqlite_datetime_trunc)
        conn.create_function("django_time_extract", 2, _sqlite_time_extract)
        conn.create_function("regexp", 2, _sqlite_regexp)
        conn.create_function("django_format_dtdelta", 3, _sqlite_format_dtdelta)
        conn.create_function("django_power", 2, _sqlite_power)
        return conn
    def init_connection_state(self):
        # SQLite needs no per-connection state initialization.
        pass
    def create_cursor(self):
        # Use the wrapper that rewrites %s placeholders to qmark style.
        return self.connection.cursor(factory=SQLiteCursorWrapper)
    def close(self):
        """Close the connection, except for in-memory databases."""
        self.validate_thread_sharing()
        # If database is in memory, closing the connection destroys the
        # database. To prevent accidental data loss, ignore close requests on
        # an in-memory db.
        if not self.is_in_memory_db(self.settings_dict['NAME']):
            BaseDatabaseWrapper.close(self)
    def _savepoint_allowed(self):
        # Two conditions are required here:
        # - A sufficiently recent version of SQLite to support savepoints,
        # - Being in a transaction, which can only happen inside 'atomic'.
        # When 'isolation_level' is not None, sqlite3 commits before each
        # savepoint; it's a bug. When it is None, savepoints don't make sense
        # because autocommit is enabled. The only exception is inside 'atomic'
        # blocks. To work around that bug, on SQLite, 'atomic' starts a
        # transaction explicitly rather than simply disable autocommit.
        return self.features.uses_savepoints and self.in_atomic_block
    def _set_autocommit(self, autocommit):
        if autocommit:
            level = None
        else:
            # sqlite3's internal default is ''. It's different from None.
            # See Modules/_sqlite/connection.c.
            level = ''
        # 'isolation_level' is a misleading API.
        # SQLite always runs at the SERIALIZABLE isolation level.
        with self.wrap_database_errors:
            self.connection.isolation_level = level
    def check_constraints(self, table_names=None):
        """
        Checks each table name in `table_names` for rows with invalid foreign
        key references. This method is intended to be used in conjunction with
        `disable_constraint_checking()` and `enable_constraint_checking()`, to
        determine if rows with invalid references were entered while constraint
        checks were off.
        Raises an IntegrityError on the first invalid foreign key reference
        encountered (if any) and provides detailed information about the
        invalid reference in the error message.
        Backends can override this method if they can more directly apply
        constraint checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE")
        """
        cursor = self.cursor()
        if table_names is None:
            table_names = self.introspection.table_names(cursor)
        for table_name in table_names:
            # Tables without a single-column primary key can't be checked here.
            primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
            if not primary_key_column_name:
                continue
            key_columns = self.introspection.get_key_columns(cursor, table_name)
            for column_name, referenced_table_name, referenced_column_name in key_columns:
                # LEFT JOIN finds referring rows whose target row is missing.
                cursor.execute("""
                    SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
                    LEFT JOIN `%s` as REFERRED
                    ON (REFERRING.`%s` = REFERRED.`%s`)
                    WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL"""
                    % (primary_key_column_name, column_name, table_name, referenced_table_name,
                       column_name, referenced_column_name, column_name, referenced_column_name))
                for bad_row in cursor.fetchall():
                    raise utils.IntegrityError("The row in table '%s' with primary key '%s' has an invalid "
                        "foreign key: %s.%s contains a value '%s' that does not have a corresponding value in %s.%s."
                        % (table_name, bad_row[0], table_name, column_name, bad_row[1],
                           referenced_table_name, referenced_column_name))
    def is_usable(self):
        # SQLite connections don't time out or drop; always usable.
        return True
    def _start_transaction_under_autocommit(self):
        """
        Start a transaction explicitly in autocommit mode.
        Staying in autocommit mode works around a bug of sqlite3 that breaks
        savepoints when autocommit is disabled.
        """
        self.cursor().execute("BEGIN")
    def is_in_memory_db(self, name):
        # ":memory:" and shared-cache "mode=memory" URIs are both in-memory.
        return name == ":memory:" or "mode=memory" in force_text(name)
# Matches a "%s" placeholder not preceded by a literal '%' (i.e. not "%%s").
FORMAT_QMARK_REGEX = re.compile(r'(?<!%)%s')
class SQLiteCursorWrapper(Database.Cursor):
    """
    Django uses "format" style placeholders, but pysqlite2 uses "qmark" style.
    This fixes it -- but note that if you want to use a literal "%s" in a query,
    you'll need to use "%%s".
    """
    def execute(self, query, params=None):
        # Only rewrite placeholders when params are actually supplied; a
        # parameterless query may legitimately contain literal '%'.
        if params is None:
            return Database.Cursor.execute(self, query)
        query = self.convert_query(query)
        return Database.Cursor.execute(self, query, params)
    def executemany(self, query, param_list):
        query = self.convert_query(query)
        return Database.Cursor.executemany(self, query, param_list)
    def convert_query(self, query):
        # "%s" -> "?", then unescape "%%" back to a literal "%".
        return FORMAT_QMARK_REGEX.sub('?', query).replace('%%', '%')
def _sqlite_date_extract(lookup_type, dt):
    """Custom SQL function: extract one component of a stored date string.

    Returns None for NULL or unparseable input.
    """
    if dt is None:
        return None
    try:
        parsed = backend_utils.typecast_timestamp(dt)
    except (ValueError, TypeError):
        return None
    if lookup_type == 'week_day':
        # SQL convention: Sunday == 1 .. Saturday == 7.
        return (parsed.isoweekday() % 7) + 1
    return getattr(parsed, lookup_type)
def _sqlite_date_trunc(lookup_type, dt):
    """Custom SQL function: truncate a stored date to the given precision.

    Returns an ISO-like date string, or None when the input can't be parsed
    (implicitly None for unknown lookup types, matching the original).
    """
    try:
        parsed = backend_utils.typecast_timestamp(dt)
    except (ValueError, TypeError):
        return None
    if lookup_type == 'year':
        return "%i-01-01" % parsed.year
    if lookup_type == 'month':
        return "%i-%02i-01" % (parsed.year, parsed.month)
    if lookup_type == 'day':
        return "%i-%02i-%02i" % (parsed.year, parsed.month, parsed.day)
def _sqlite_datetime_parse(dt, tzname):
    """Parse a stored datetime string; localize it when *tzname* is given.

    Returns None for NULL or unparseable input.
    """
    if dt is None:
        return None
    try:
        parsed = backend_utils.typecast_timestamp(dt)
    except (ValueError, TypeError):
        return None
    if tzname is None:
        return parsed
    return timezone.localtime(parsed, pytz.timezone(tzname))
def _sqlite_datetime_cast_date(dt, tzname):
    """Custom SQL function: cast a stored datetime to an ISO date string."""
    parsed = _sqlite_datetime_parse(dt, tzname)
    return None if parsed is None else parsed.date().isoformat()
def _sqlite_datetime_extract(lookup_type, dt, tzname):
    """Custom SQL function: extract one component of a stored datetime,
    optionally localized to *tzname* first.
    """
    parsed = _sqlite_datetime_parse(dt, tzname)
    if parsed is None:
        return None
    if lookup_type == 'week_day':
        # SQL convention: Sunday == 1 .. Saturday == 7.
        return (parsed.isoweekday() % 7) + 1
    return getattr(parsed, lookup_type)
def _sqlite_datetime_trunc(lookup_type, dt, tzname):
    """Custom SQL function: truncate a stored datetime to the given precision,
    optionally localized to *tzname* first. Returns a datetime string, or
    None when the input can't be parsed.
    """
    value = _sqlite_datetime_parse(dt, tzname)
    if value is None:
        return None
    if lookup_type == 'year':
        return "%i-01-01 00:00:00" % value.year
    if lookup_type == 'month':
        return "%i-%02i-01 00:00:00" % (value.year, value.month)
    if lookup_type == 'day':
        return "%i-%02i-%02i 00:00:00" % (value.year, value.month, value.day)
    if lookup_type == 'hour':
        return "%i-%02i-%02i %02i:00:00" % (value.year, value.month, value.day, value.hour)
    if lookup_type == 'minute':
        return "%i-%02i-%02i %02i:%02i:00" % (value.year, value.month, value.day, value.hour, value.minute)
    if lookup_type == 'second':
        return "%i-%02i-%02i %02i:%02i:%02i" % (value.year, value.month, value.day, value.hour, value.minute, value.second)
def _sqlite_time_extract(lookup_type, dt):
    """Custom SQL function: extract one component of a stored time string."""
    if dt is None:
        return None
    try:
        parsed = backend_utils.typecast_time(dt)
    except (ValueError, TypeError):
        return None
    return getattr(parsed, lookup_type)
def _sqlite_format_dtdelta(conn, lhs, rhs):
    """
    LHS and RHS can be either:
    - An integer number of microseconds
    - A string representing a timedelta object
    - A string representing a datetime
    """
    # `conn` is the operator ('+' or '-'), not a connection object, despite
    # the parameter name; it comes from the generated SQL.
    try:
        if isinstance(lhs, six.integer_types):
            # Microseconds -> seconds as a Decimal string, so parse_duration
            # can interpret it.
            lhs = str(decimal.Decimal(lhs) / decimal.Decimal(1000000))
        real_lhs = parse_duration(lhs)
        if real_lhs is None:
            # Not a duration: fall back to parsing as a datetime/date.
            real_lhs = backend_utils.typecast_timestamp(lhs)
        if isinstance(rhs, six.integer_types):
            rhs = str(decimal.Decimal(rhs) / decimal.Decimal(1000000))
        real_rhs = parse_duration(rhs)
        if real_rhs is None:
            real_rhs = backend_utils.typecast_timestamp(rhs)
        if conn.strip() == '+':
            out = real_lhs + real_rhs
        else:
            out = real_lhs - real_rhs
    except (ValueError, TypeError):
        # Unparseable operand: behave like SQL NULL.
        return None
    # typecast_timestamp returns a date or a datetime without timezone.
    # It will be formatted as "%Y-%m-%d" or "%Y-%m-%d %H:%M:%S[.%f]"
    return str(out)
def _sqlite_regexp(re_pattern, re_string):
    """Custom SQL function backing the REGEXP operator.

    NULL input never matches (returns False), mirroring SQL semantics.
    """
    if re_string is None:
        return False
    return bool(re.search(re_pattern, force_text(re_string)))
def _sqlite_power(x, y):
return x ** y
| bsd-3-clause |
whix/tablib | tablib/packages/openpyxl3/writer/drawings.py | 116 | 8268 | # coding=UTF-8
'''
Copyright (c) 2010 openpyxl
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
@license: http://www.opensource.org/licenses/mit-license.php
@author: Eric Gazoni
'''
from ..shared.xmltools import Element, SubElement, get_document_content
class DrawingWriter(object):
    """ one main drawing file per sheet """
    def __init__(self, sheet):
        # Worksheet whose charts are serialized into this drawing part.
        self._sheet = sheet
    def write(self):
        """ write drawings for one sheet in one file """
        # Root of the SpreadsheetDrawing XML part.
        root = Element('xdr:wsDr',
            {'xmlns:xdr' : "http://schemas.openxmlformats.org/drawingml/2006/spreadsheetDrawing",
             'xmlns:a' : "http://schemas.openxmlformats.org/drawingml/2006/main"})
        for i, chart in enumerate(self._sheet._charts):
            drawing = chart.drawing
#            anchor = SubElement(root, 'xdr:twoCellAnchor')
#            (start_row, start_col), (end_row, end_col) = drawing.coordinates
#            # anchor coordinates
#            _from = SubElement(anchor, 'xdr:from')
#            x = SubElement(_from, 'xdr:col').text = str(start_col)
#            x = SubElement(_from, 'xdr:colOff').text = '0'
#            x = SubElement(_from, 'xdr:row').text = str(start_row)
#            x = SubElement(_from, 'xdr:rowOff').text = '0'
#            _to = SubElement(anchor, 'xdr:to')
#            x = SubElement(_to, 'xdr:col').text = str(end_col)
#            x = SubElement(_to, 'xdr:colOff').text = '0'
#            x = SubElement(_to, 'xdr:row').text = str(end_row)
#            x = SubElement(_to, 'xdr:rowOff').text = '0'
            # we only support absolute anchor atm (TODO: oneCellAnchor, twoCellAnchor
            x, y, w, h = drawing.get_emu_dimensions()
            anchor = SubElement(root, 'xdr:absoluteAnchor')
            SubElement(anchor, 'xdr:pos', {'x':str(x), 'y':str(y)})
            SubElement(anchor, 'xdr:ext', {'cx':str(w), 'cy':str(h)})
            # graph frame
            frame = SubElement(anchor, 'xdr:graphicFrame', {'macro':''})
            name = SubElement(frame, 'xdr:nvGraphicFramePr')
            SubElement(name, 'xdr:cNvPr', {'id':'%s' % i, 'name':'Graphique %s' % i})
            SubElement(name, 'xdr:cNvGraphicFramePr')
            frm = SubElement(frame, 'xdr:xfrm')
            # no transformation
            SubElement(frm, 'a:off', {'x':'0', 'y':'0'})
            SubElement(frm, 'a:ext', {'cx':'0', 'cy':'0'})
            graph = SubElement(frame, 'a:graphic')
            data = SubElement(graph, 'a:graphicData',
                {'uri':'http://schemas.openxmlformats.org/drawingml/2006/chart'})
            # r:id must line up with the relationship written by write_rels().
            SubElement(data, 'c:chart',
                { 'xmlns:c':'http://schemas.openxmlformats.org/drawingml/2006/chart',
                  'xmlns:r':'http://schemas.openxmlformats.org/officeDocument/2006/relationships',
                  'r:id':'rId%s' % (i + 1)})
            SubElement(anchor, 'xdr:clientData')
        return get_document_content(root)
    def write_rels(self, chart_id):
        # One relationship entry per chart; rIds are 1-based and chart part
        # names continue from *chart_id*.
        root = Element('Relationships',
            {'xmlns' : 'http://schemas.openxmlformats.org/package/2006/relationships'})
        for i, chart in enumerate(self._sheet._charts):
            attrs = {'Id' : 'rId%s' % (i + 1),
                     'Type' : 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/chart',
                     'Target' : '../charts/chart%s.xml' % (chart_id + i) }
            SubElement(root, 'Relationship', attrs)
        return get_document_content(root)
class ShapeWriter(object):
    """ one file per shape """
    # DrawingML main namespace, reused on every element that needs xmlns:a.
    schema = "http://schemas.openxmlformats.org/drawingml/2006/main"
    def __init__(self, shapes):
        self._shapes = shapes
    def write(self, shape_id):
        # Serialize all shapes into a single c:userShapes part; *shape_id*
        # seeds the per-shape numbering and is incremented for each shape.
        root = Element('c:userShapes', {'xmlns:c' : 'http://schemas.openxmlformats.org/drawingml/2006/chart'})
        for shape in self._shapes:
            anchor = SubElement(root, 'cdr:relSizeAnchor',
                {'xmlns:cdr' : "http://schemas.openxmlformats.org/drawingml/2006/chartDrawing"})
            xstart, ystart, xend, yend = shape.get_coordinates()
            _from = SubElement(anchor, 'cdr:from')
            SubElement(_from, 'cdr:x').text = str(xstart)
            SubElement(_from, 'cdr:y').text = str(ystart)
            _to = SubElement(anchor, 'cdr:to')
            SubElement(_to, 'cdr:x').text = str(xend)
            SubElement(_to, 'cdr:y').text = str(yend)
            sp = SubElement(anchor, 'cdr:sp', {'macro':'', 'textlink':''})
            nvspr = SubElement(sp, 'cdr:nvSpPr')
            SubElement(nvspr, 'cdr:cNvPr', {'id':str(shape_id), 'name':'shape %s' % shape_id})
            SubElement(nvspr, 'cdr:cNvSpPr')
            sppr = SubElement(sp, 'cdr:spPr')
            frm = SubElement(sppr, 'a:xfrm', {'xmlns:a':self.schema})
            # no transformation
            SubElement(frm, 'a:off', {'x':'0', 'y':'0'})
            SubElement(frm, 'a:ext', {'cx':'0', 'cy':'0'})
            prstgeom = SubElement(sppr, 'a:prstGeom', {'xmlns:a':self.schema, 'prst':str(shape.style)})
            SubElement(prstgeom, 'a:avLst')
            # Fill and border colors come from the shape object.
            fill = SubElement(sppr, 'a:solidFill', {'xmlns:a':self.schema})
            SubElement(fill, 'a:srgbClr', {'val':shape.color})
            border = SubElement(sppr, 'a:ln', {'xmlns:a':self.schema, 'w':str(shape._border_width)})
            sf = SubElement(border, 'a:solidFill')
            SubElement(sf, 'a:srgbClr', {'val':shape.border_color})
            self._write_style(sp)
            self._write_text(sp, shape)
            shape_id += 1
        return get_document_content(root)
    def _write_text(self, node, shape):
        """ write text in the shape """
        tx_body = SubElement(node, 'cdr:txBody')
        SubElement(tx_body, 'a:bodyPr', {'xmlns:a':self.schema, 'vertOverflow':'clip'})
        SubElement(tx_body, 'a:lstStyle',
            {'xmlns:a':self.schema})
        p = SubElement(tx_body, 'a:p', {'xmlns:a':self.schema})
        if shape.text:
            r = SubElement(p, 'a:r')
            rpr = SubElement(r, 'a:rPr', {'lang':'en-US'})
            fill = SubElement(rpr, 'a:solidFill')
            SubElement(fill, 'a:srgbClr', {'val':shape.text_color})
            SubElement(r, 'a:t').text = shape.text
        else:
            # An empty paragraph still needs run properties to be valid.
            SubElement(p, 'a:endParaRPr', {'lang':'en-US'})
    def _write_style(self, node):
        """ write style theme """
        style = SubElement(node, 'cdr:style')
        ln_ref = SubElement(style, 'a:lnRef', {'xmlns:a':self.schema, 'idx':'2'})
        scheme_clr = SubElement(ln_ref, 'a:schemeClr', {'val':'accent1'})
        SubElement(scheme_clr, 'a:shade', {'val':'50000'})
        fill_ref = SubElement(style, 'a:fillRef', {'xmlns:a':self.schema, 'idx':'1'})
        SubElement(fill_ref, 'a:schemeClr', {'val':'accent1'})
        effect_ref = SubElement(style, 'a:effectRef', {'xmlns:a':self.schema, 'idx':'0'})
        SubElement(effect_ref, 'a:schemeClr', {'val':'accent1'})
        font_ref = SubElement(style, 'a:fontRef', {'xmlns:a':self.schema, 'idx':'minor'})
        SubElement(font_ref, 'a:schemeClr', {'val':'lt1'})
| mit |
brennie/reviewboard | reviewboard/reviews/tests/test_default_reviewer.py | 3 | 3641 | from __future__ import unicode_literals
from django.contrib.auth.models import User
from reviewboard.reviews.models import DefaultReviewer
from reviewboard.scmtools.models import Repository, Tool
from reviewboard.site.models import LocalSite
from reviewboard.testing import TestCase
class DefaultReviewerTests(TestCase):
    """Unit tests for DefaultReviewer."""
    fixtures = ['test_scmtools']
    def test_for_repository(self):
        """Testing DefaultReviewer.objects.for_repository"""
        tool = Tool.objects.get(name='CVS')
        # default_reviewer1 is bound to repo1 only; default_reviewer2 has no
        # repository restriction, so it should match every repository.
        default_reviewer1 = DefaultReviewer.objects.create(name='Test',
                                                           file_regex='.*')
        default_reviewer2 = DefaultReviewer.objects.create(name='Bar',
                                                           file_regex='.*')
        repo1 = Repository.objects.create(name='Test1', path='path1',
                                          tool=tool)
        default_reviewer1.repository.add(repo1)
        repo2 = Repository.objects.create(name='Test2', path='path2',
                                          tool=tool)
        default_reviewers = DefaultReviewer.objects.for_repository(repo1, None)
        self.assertEqual(len(default_reviewers), 2)
        self.assertIn(default_reviewer1, default_reviewers)
        self.assertIn(default_reviewer2, default_reviewers)
        # repo2 only matches the unrestricted reviewer.
        default_reviewers = DefaultReviewer.objects.for_repository(repo2, None)
        self.assertEqual(len(default_reviewers), 1)
        self.assertIn(default_reviewer2, default_reviewers)
    def test_for_repository_with_localsite(self):
        """Testing DefaultReviewer.objects.for_repository with a LocalSite."""
        test_site = LocalSite.objects.create(name='test')
        # Reviewers are partitioned by LocalSite: one site-bound, one global.
        default_reviewer1 = DefaultReviewer.objects.create(
            name='Test 1',
            file_regex='.*',
            local_site=test_site)
        default_reviewer2 = DefaultReviewer.objects.create(
            name='Test 2',
            file_regex='.*')
        default_reviewers = DefaultReviewer.objects.for_repository(
            None, test_site)
        self.assertEqual(len(default_reviewers), 1)
        self.assertIn(default_reviewer1, default_reviewers)
        default_reviewers = DefaultReviewer.objects.for_repository(None, None)
        self.assertEqual(len(default_reviewers), 1)
        self.assertIn(default_reviewer2, default_reviewers)
    def test_review_request_add_default_reviewer_with_inactive_user(self):
        """Testing adding default reviewer with inactive user to review request
        """
        tool = Tool.objects.get(name='CVS')
        default_reviewer1 = DefaultReviewer.objects.create(name='Test',
                                                           file_regex='.*')
        repo1 = Repository.objects.create(name='Test1',
                                          path='path1',
                                          tool=tool)
        default_reviewer1.repository.add(repo1)
        user1 = User.objects.create(username='User1')
        default_reviewer1.people.add(user1)
        # Inactive users must be skipped when default reviewers are applied.
        user2 = User.objects.create(username='User2', is_active=False)
        default_reviewer1.people.add(user2)
        review_request = self.create_review_request(repository=repo1,
                                                    submitter=user1)
        diffset = self.create_diffset(review_request)
        self.create_filediff(diffset)
        review_request.add_default_reviewers()
        self.assertIn(user1, review_request.target_people.all())
        self.assertNotIn(user2, review_request.target_people.all())
| mit |
meredith-digops/ansible | lib/ansible/modules/cloud/cloudstack/cs_facts.py | 39 | 7253 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_facts
short_description: Gather facts on instances of Apache CloudStack based clouds.
description:
- This module fetches data from the metadata API in CloudStack. The module must be called from within the instance itself.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
filter:
description:
- Filter for a specific fact.
required: false
default: null
choices:
- cloudstack_service_offering
- cloudstack_availability_zone
- cloudstack_public_hostname
- cloudstack_public_ipv4
- cloudstack_local_hostname
- cloudstack_local_ipv4
- cloudstack_instance_id
- cloudstack_user_data
requirements: [ 'yaml' ]
'''
EXAMPLES = '''
# Gather all facts on instances
- name: Gather cloudstack facts
cs_facts:
# Gather specific fact on instances
- name: Gather cloudstack facts
cs_facts: filter=cloudstack_instance_id
'''
RETURN = '''
---
cloudstack_availability_zone:
description: zone the instance is deployed in.
returned: success
type: string
sample: ch-gva-2
cloudstack_instance_id:
description: UUID of the instance.
returned: success
type: string
sample: ab4e80b0-3e7e-4936-bdc5-e334ba5b0139
cloudstack_local_hostname:
description: local hostname of the instance.
returned: success
type: string
sample: VM-ab4e80b0-3e7e-4936-bdc5-e334ba5b0139
cloudstack_local_ipv4:
description: local IPv4 of the instance.
returned: success
type: string
sample: 185.19.28.35
cloudstack_public_hostname:
description: public IPv4 of the router. Same as C(cloudstack_public_ipv4).
returned: success
type: string
sample: VM-ab4e80b0-3e7e-4936-bdc5-e334ba5b0139
cloudstack_public_ipv4:
description: public IPv4 of the router.
returned: success
type: string
sample: 185.19.28.35
cloudstack_service_offering:
description: service offering of the instance.
returned: success
type: string
sample: Micro 512mb 1cpu
cloudstack_user_data:
description: data of the instance provided by users.
returned: success
type: dict
sample: { "bla": "foo" }
'''
import os
try:
import yaml
has_lib_yaml = True
except ImportError:
has_lib_yaml = False
CS_METADATA_BASE_URL = "http://%s/latest/meta-data"
CS_USERDATA_BASE_URL = "http://%s/latest/user-data"
class CloudStackFacts(object):
    """Gather facts about a CloudStack instance from its metadata API.

    Must run on the instance itself: the metadata service is reached via
    the DHCP server address, which is discovered by parsing the local
    dhclient leases file.
    """

    def __init__(self):
        self.facts = ansible_facts(module)
        # Lazily-discovered DHCP server IP that serves the metadata API.
        self.api_ip = None
        # Mapping of returned fact names to metadata API sub-paths.
        self.fact_paths = {
            'cloudstack_service_offering': 'service-offering',
            'cloudstack_availability_zone': 'availability-zone',
            'cloudstack_public_hostname': 'public-hostname',
            'cloudstack_public_ipv4': 'public-ipv4',
            'cloudstack_local_hostname': 'local-hostname',
            'cloudstack_local_ipv4': 'local-ipv4',
            'cloudstack_instance_id': 'instance-id'
        }

    def run(self):
        """Return all known facts, or only the one named by the 'filter' arg."""
        result = {}
        filter = module.params.get('filter')
        if not filter:
            for key, path in self.fact_paths.items():
                result[key] = self._fetch(CS_METADATA_BASE_URL + "/" + path)
            result['cloudstack_user_data'] = self._get_user_data_json()
        else:
            if filter == 'cloudstack_user_data':
                result['cloudstack_user_data'] = self._get_user_data_json()
            elif filter in self.fact_paths:
                result[filter] = self._fetch(CS_METADATA_BASE_URL + "/" + self.fact_paths[filter])
        return result

    def _get_user_data_json(self):
        """Return user data parsed as YAML/JSON, or None if unparseable."""
        try:
            # this data come form users, we try what we can to parse it...
            return yaml.safe_load(self._fetch(CS_USERDATA_BASE_URL))
        except Exception:
            # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
            # are no longer swallowed.
            return None

    def _fetch(self, path):
        """GET *path* (a format string expecting the API IP); return the body
        as bytes, or None when the API IP or the response is unavailable.
        """
        api_ip = self._get_api_ip()
        if not api_ip:
            return None
        api_url = path % api_ip
        (response, info) = fetch_url(module, api_url, force=True)
        if response:
            data = response.read()
        else:
            data = None
        return data

    def _get_dhcp_lease_file(self):
        """Return the path of the lease file."""
        default_iface = self.facts['default_ipv4']['interface']
        dhcp_lease_file_locations = [
            '/var/lib/dhcp/dhclient.%s.leases' % default_iface,      # debian / ubuntu
            '/var/lib/dhclient/dhclient-%s.leases' % default_iface,  # centos 6
            '/var/lib/dhclient/dhclient--%s.lease' % default_iface,  # centos 7
            '/var/db/dhclient.leases.%s' % default_iface,            # openbsd
        ]
        for file_path in dhcp_lease_file_locations:
            if os.path.exists(file_path):
                return file_path
        module.fail_json(msg="Could not find dhclient leases file.")

    def _get_api_ip(self):
        """Return the IP of the DHCP server."""
        if not self.api_ip:
            dhcp_lease_file = self._get_dhcp_lease_file()
            # Close the leases file deterministically (the original leaked
            # the open file handle).
            with open(dhcp_lease_file) as leases:
                for line in leases:
                    if 'dhcp-server-identifier' in line:
                        # get IP of string "option dhcp-server-identifier 185.19.28.176;"
                        # str.translate(None, ';') is Python 2 only and raises
                        # TypeError on Python 3; replace() works on both.
                        line = line.replace(';', '')
                        self.api_ip = line.split()[2]
                        break
            if not self.api_ip:
                module.fail_json(msg="No dhcp-server-identifier found in leases file.")
        return self.api_ip
def main():
    """Entry point: parse module arguments, gather facts, exit via module."""
    # Stored globally because CloudStackFacts and the helpers above use it.
    global module
    module = AnsibleModule(
        argument_spec = dict(
            filter = dict(default=None, choices=[
                'cloudstack_service_offering',
                'cloudstack_availability_zone',
                'cloudstack_public_hostname',
                'cloudstack_public_ipv4',
                'cloudstack_local_hostname',
                'cloudstack_local_ipv4',
                'cloudstack_instance_id',
                'cloudstack_user_data',
            ]),
        ),
        supports_check_mode=False
    )
    if not has_lib_yaml:
        module.fail_json(msg="missing python library: yaml")
    cs_facts = CloudStackFacts().run()
    cs_facts_result = dict(changed=False, ansible_facts=cs_facts)
    module.exit_json(**cs_facts_result)
# Legacy (pre-Ansible 2.1) module boilerplate: these wildcard imports inject
# AnsibleModule, fetch_url and ansible_facts used above.
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
from ansible.module_utils.facts import *
if __name__ == '__main__':
    main()
| gpl-3.0 |
AnthonyBroadCrawford/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/example/hsts_wsh.py | 486 | 1784 | # Copyright 2013, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def web_socket_do_extra_handshake(request):
    """Append an HTTP Strict-Transport-Security header (24h max-age) to the
    WebSocket opening-handshake response.
    """
    hsts_header = ('Strict-Transport-Security', 'max-age=86400')
    request.extra_headers.append(hsts_header)
def web_socket_transfer_data(request):
    """Send a single text frame, then return (which closes the connection)."""
    greeting = 'Hello'
    request.ws_stream.send_message(greeting, binary=False)
# vi:sts=4 sw=4 et
| mpl-2.0 |
dmsuehir/spark-tk | regression-tests/sparktkregtests/testcases/scoretests/lda.py | 12 | 2094 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" test cases for LDA implementation """
import unittest
import os
from sparktkregtests.lib import sparktk_test
from sparktkregtests.lib import scoring_utils
class LDAModelTest(sparktk_test.SparkTKTestCase):
    """Exercise LDA model training and scoring-server round trips."""

    def setUp(self):
        """Set LDA frame."""
        super(LDAModelTest, self).setUp()
        schema = [('paper', str),
                  ('word', str),
                  ('count', int),
                  ('topic', str)]
        self.lda_frame = self.context.frame.import_csv(
            self.get_file("lda8.csv"), schema=schema)

    def test_model_scoring(self):
        """Test lda model scoring"""
        model = self.context.models.clustering.lda.train(
            self.lda_frame, 'paper', 'word', 'count',
            num_topics=5, max_iterations=10, seed=0)
        test_phrase = ["word-0-0", "word-1-0",
                       "word-2-0", "word-3-0", "word-4-0"]
        file_name = self.get_name("lda")
        model_path = model.export_to_mar(self.get_export_file(file_name))
        # Bug fix: the original referenced an undefined name `lda_model`
        # (NameError); the trained model is bound to `model`.
        res = model.predict(test_phrase)["topics_given_doc"]
        with scoring_utils.scorer(
                model_path, self.id()) as scorer:
            result = scorer.score([{"paper": test_phrase}]).json()
        # The scoring server must reproduce the local prediction.
        for i, j in zip(res, result[u"data"][0]["topics_given_doc"]):
            self.assertAlmostEqual(i, j)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
Edu-Glez/Bank_sentiment_analysis | env/lib/python3.6/site-packages/pygments/styles/vim.py | 31 | 1976 | # -*- coding: utf-8 -*-
"""
pygments.styles.vim
~~~~~~~~~~~~~~~~~~~
A highlighting style for Pygments, inspired by vim.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace, Token
class VimStyle(Style):
    """
    Styles somewhat like vim 7.0
    """

    # Overall canvas colours: white-ish text on a black background,
    # mimicking vim in a dark terminal.
    background_color = "#000000"
    highlight_color = "#222222"
    default_style = "#cccccc"

    # Token -> style-string mapping; an empty string means "inherit the
    # default style" for that token type.
    styles = {
        Token:                     "#cccccc",
        Whitespace:                "",
        Comment:                   "#000080",
        Comment.Preproc:           "",
        Comment.Special:           "bold #cd0000",

        Keyword:                   "#cdcd00",
        Keyword.Declaration:       "#00cd00",
        Keyword.Namespace:         "#cd00cd",
        Keyword.Pseudo:            "",
        Keyword.Type:              "#00cd00",

        Operator:                  "#3399cc",
        Operator.Word:             "#cdcd00",

        Name:                      "",
        Name.Class:                "#00cdcd",
        Name.Builtin:              "#cd00cd",
        Name.Exception:            "bold #666699",
        Name.Variable:             "#00cdcd",

        String:                    "#cd0000",
        Number:                    "#cd00cd",

        # Generic tokens are used e.g. for diffs and interpreter sessions.
        Generic.Heading:           "bold #000080",
        Generic.Subheading:        "bold #800080",
        Generic.Deleted:           "#cd0000",
        Generic.Inserted:          "#00cd00",
        Generic.Error:             "#FF0000",
        Generic.Emph:              "italic",
        Generic.Strong:            "bold",
        Generic.Prompt:            "bold #000080",
        Generic.Output:            "#888",
        Generic.Traceback:         "#04D",
        Error:                     "border:#FF0000"
    }
| apache-2.0 |
mmaker/bridgedb | lib/bridgedb/parse/__init__.py | 6 | 2621 | # -*- coding: utf-8 -*-
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: Isis Lovecruft 0xA3ADB67A2CDB8B35 <isis@torproject.org>
# please also see AUTHORS file
# :copyright: (c) 2013 Isis Lovecruft
# (c) 2007-2013, The Tor Project, Inc.
# (c) 2007-2013, all entities within the AUTHORS file
# :license: 3-clause BSD, see included LICENSE for information
'''Package containing modules for parsing data.
.. py:module:: bridgedb.parse
:synopsis: Package containing modules for parsing data.
'''
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import binascii
class InvalidBase64(ValueError):
    """Raised if parsing or decoding cannot continue due to invalid base64.

    Subclasses :exc:`ValueError` so that pre-existing ``except ValueError``
    handlers continue to catch it.
    """
def padBase64(b64string):
    """Re-add any stripped equals sign character padding to a b64 string.

    :param string b64string: A base64-encoded string which might have had its
        trailing equals sign (``=``) padding removed.
    :raises ValueError: if **b64string** is not a string (has no ``strip``
        method), or if its stripped length is congruent to 1 modulo 4 — no
        amount of padding can make such a string valid base64 (:rfc:`4648`).
    :returns: A properly-padded (according to the base64 spec: :rfc:`4648`)
        string.
    """
    try:
        b64string = b64string.strip()
    except AttributeError as error:
        # Non-string input (e.g. None); keep the historical ValueError.
        raise ValueError(error)

    remainder = len(b64string) % 4
    if remainder == 0:
        # Bug fix: the original raised ValueError here, but a length that is
        # already a multiple of four needs no padding and is valid base64
        # (e.g. the encoding of any 3n-byte blob has no '=' padding at all).
        return b64string
    if remainder == 1:
        # len % 4 == 1 can never occur in a base64 encoding.
        raise ValueError("Invalid base64-encoded string: %r" % b64string)
    return b64string + '=' * (4 - remainder)
def parseUnpaddedBase64(field):
    """Parse an unpadded, base64-encoded field.

    The **field** is re-padded, if need be, and then base64 decoded.

    :param str field: Should be some base64-encoded thing, with any trailing
        ``=``-characters removed.
    :raises InvalidBase64: if **field** still carries padding, cannot be
        re-padded, or decodes to nothing.
    :rtype: str
    :returns: The base64-decoded **field**.
    """
    # An unpadded field must not arrive with its padding still attached.
    if field.endswith('='):
        raise InvalidBase64("Unpadded, base64-encoded networkstatus field "\
                            "must not end with '=': %r" % field)

    try:
        padded = padBase64(field)  # restore the stripped '=' characters
    except ValueError as error:
        raise InvalidBase64(error)

    decoded = binascii.a2b_base64(padded)
    if not decoded:
        raise InvalidBase64("Base64-encoded networkstatus field %r is invalid!"
                            % field)
    return decoded
| bsd-3-clause |
47lining/ansible | plugins/callbacks/osx_say.py | 72 | 3203 |
# (C) 2012, Michael DeHaan, <michael.dehaan@gmail.com>
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import subprocess
import os
# Voice names handed to the `say` text-to-speech binary, one per event
# category (presumably the stock OS X voices -- confirm on target machines).
FAILED_VOICE="Zarvox"
REGULAR_VOICE="Trinoids"
HAPPY_VOICE="Cellos"
LASER_VOICE="Princess"
# Absolute path to the speech binary; __init__ disables the plugin if absent.
SAY_CMD="/usr/bin/say"
def say(msg, voice):
    """Speak *msg* aloud via the configured speech command using *voice*."""
    command = [SAY_CMD, msg, "--voice=%s" % (voice)]
    subprocess.call(command)
class CallbackModule(object):

    """
    makes Ansible much more exciting on OS X.

    Ansible callback plugin (Python 2 era: note the print statement below)
    that speaks playbook/runner events aloud via the ``say`` helper.
    """

    def __init__(self):
        # plugin disable itself if say is not present
        # ansible will not call any callback if disabled is set to True
        if not os.path.exists(SAY_CMD):
            self.disabled = True
            print "%s does not exist, plugin %s disabled" % \
                (SAY_CMD, os.path.basename(__file__))

    def on_any(self, *args, **kwargs):
        pass

    # Runner-level events: failures get the alarm voice, successes a blip.
    def runner_on_failed(self, host, res, ignore_errors=False):
        say("Failure on host %s" % host, FAILED_VOICE)

    def runner_on_ok(self, host, res):
        say("pew", LASER_VOICE)

    def runner_on_skipped(self, host, item=None):
        say("pew", LASER_VOICE)

    def runner_on_unreachable(self, host, res):
        say("Failure on host %s" % host, FAILED_VOICE)

    def runner_on_no_hosts(self):
        pass

    def runner_on_async_poll(self, host, res, jid, clock):
        pass

    def runner_on_async_ok(self, host, res, jid):
        say("pew", LASER_VOICE)

    def runner_on_async_failed(self, host, res, jid):
        say("Failure on host %s" % host, FAILED_VOICE)

    # Playbook-level events.
    def playbook_on_start(self):
        say("Running Playbook", REGULAR_VOICE)

    def playbook_on_notify(self, host, handler):
        say("pew", LASER_VOICE)

    def playbook_on_no_hosts_matched(self):
        pass

    def playbook_on_no_hosts_remaining(self):
        pass

    def playbook_on_task_start(self, name, is_conditional):
        if not is_conditional:
            say("Starting task: %s" % name, REGULAR_VOICE)
        else:
            say("Notifying task: %s" % name, REGULAR_VOICE)

    def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
        pass

    def playbook_on_setup(self):
        say("Gathering facts", REGULAR_VOICE)

    def playbook_on_import_for_host(self, host, imported_file):
        pass

    def playbook_on_not_import_for_host(self, host, missing_file):
        pass

    def playbook_on_play_start(self, name):
        say("Starting play: %s" % name, HAPPY_VOICE)

    def playbook_on_stats(self, stats):
        say("Play complete", HAPPY_VOICE)
| gpl-3.0 |
matthiasdiener/spack | var/spack/repos/builtin/packages/sailfish/package.py | 3 | 1627 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Sailfish(CMakePackage):
    """Sailfish is a tool for transcript quantification from RNA-seq data."""

    homepage = "http://www.cs.cmu.edu/~ckingsf/software/sailfish"
    url = "https://github.com/kingsfordgroup/sailfish/archive/v0.10.0.tar.gz"

    # md5 checksum of the release tarball above.
    version('0.10.1', 'e6dab4cf3a39f346df7c28f40eb58cad')

    depends_on('boost@1.55:')
    depends_on('tbb')
| lgpl-2.1 |
cpcloud/toolz | toolz/recipes.py | 19 | 1287 | import itertools
from .itertoolz import frequencies, pluck, getter
from .compatibility import map
__all__ = ('countby', 'partitionby')
def countby(key, seq):
    """ Count elements of a collection by a key function

    >>> countby(len, ['cat', 'mouse', 'dog'])
    {3: 2, 5: 1}

    >>> def iseven(x): return x % 2 == 0
    >>> countby(iseven, [1, 2, 3])  # doctest:+SKIP
    {True: 1, False: 2}

    See Also:
        groupby
    """
    # Non-callable keys (indices/attribute names) go through getter().
    key_func = key if callable(key) else getter(key)
    counts = {}
    for item in seq:
        k = key_func(item)
        counts[k] = counts.get(k, 0) + 1
    return counts
def partitionby(func, seq):
    """ Partition a sequence according to a function

    Partition `s` into a sequence of lists such that, when traversing
    `s`, every time the output of `func` changes a new list is started
    and that and subsequent items are collected into that list.

    >>> is_space = lambda c: c == " "
    >>> list(partitionby(is_space, "I have space"))
    [('I',), (' ',), ('h', 'a', 'v', 'e'), (' ',), ('s', 'p', 'a', 'c', 'e')]

    >>> is_large = lambda x: x > 10
    >>> list(partitionby(is_large, [1, 2, 1, 99, 88, 33, 99, -1, 5]))
    [(1, 2, 1), (99, 88, 33, 99), (-1, 5)]

    See also:
        partition
        groupby
        itertools.groupby
    """
    # groupby yields (key, run) pairs; only the runs matter here, and each
    # run must be materialized before the next one is requested.
    runs = itertools.groupby(seq, key=func)
    return (tuple(run) for _, run in runs)
| bsd-3-clause |
jhsenjaliya/incubator-airflow | tests/dags/test_impersonation.py | 45 | 1219 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow.models import DAG
from airflow.operators.bash_operator import BashOperator
from datetime import datetime
from textwrap import dedent
DEFAULT_DATE = datetime(2016, 1, 1)
args = {
'owner': 'airflow',
'start_date': DEFAULT_DATE,
}
dag = DAG(dag_id='test_impersonation', default_args=args)
run_as_user = 'airflow_test_user'
test_command = dedent(
"""\
if [ '{user}' != "$(whoami)" ]; then
echo current user is not {user}!
exit 1
fi
""".format(user=run_as_user))
task = BashOperator(
task_id='test_impersonated_user',
bash_command=test_command,
dag=dag,
run_as_user=run_as_user,
)
| apache-2.0 |
antoyo/qutebrowser | qutebrowser/misc/sessions.py | 4 | 17153 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2016 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Management of sessions - saved tabs/windows."""
import os
import sip
import os.path
from PyQt5.QtCore import pyqtSignal, QUrl, QObject, QPoint, QTimer
from PyQt5.QtWidgets import QApplication
import yaml
try:
from yaml import CSafeLoader as YamlLoader, CSafeDumper as YamlDumper
except ImportError: # pragma: no cover
from yaml import SafeLoader as YamlLoader, SafeDumper as YamlDumper
from qutebrowser.browser.webkit import tabhistory
from qutebrowser.utils import (standarddir, objreg, qtutils, log, usertypes,
message)
from qutebrowser.commands import cmdexc, cmdutils
from qutebrowser.mainwindow import mainwindow
from qutebrowser.config import config
default = object() # Sentinel value
def init(parent=None):
    """Initialize sessions.

    Args:
        parent: The parent to use for the SessionManager.
    """
    # Without a data directory there is nowhere to keep sessions; the
    # manager is still created, but with no base path.
    base_path = None
    if standarddir.data() is not None:
        base_path = os.path.join(standarddir.data(), 'sessions')
        try:
            os.mkdir(base_path)
        except FileExistsError:
            pass

    objreg.register('session-manager', SessionManager(base_path, parent))
class SessionError(Exception):

    """Exception raised when a session failed to load/save.

    Base class for session-related errors in this module.
    """
class SessionNotFoundError(SessionError):

    """Exception raised when a session to be loaded was not found."""
class SessionManager(QObject):

    """Manager for sessions.

    Sessions are stored as YAML files (one per session) under _base_path.

    Attributes:
        _base_path: The path to store sessions under, or None when no data
                    storage is configured.
        _last_window_session: The session data of the last window which was
                              closed.
        _current: The name of the currently loaded session, or None.
        did_load: Set when a session was loaded.

    Signals:
        update_completion: Emitted when the session completion should get
                           updated.
    """

    update_completion = pyqtSignal()

    def __init__(self, base_path, parent=None):
        super().__init__(parent)
        self._current = None
        self._base_path = base_path
        self._last_window_session = None
        self.did_load = False

    def _get_session_path(self, name, check_exists=False):
        """Get the session path based on a session name or absolute path.

        Args:
            name: The name of the session (or an absolute path to a session
                  file).
            check_exists: Whether it should also be checked if the session
                          exists.

        Raises:
            SessionNotFoundError: if check_exists is set and no such session
                                  exists.
        """
        path = os.path.expanduser(name)
        if os.path.isabs(path) and ((not check_exists) or
                                    os.path.exists(path)):
            # Absolute paths are used as-is, bypassing _base_path.
            return path
        elif self._base_path is None:
            if check_exists:
                raise SessionNotFoundError(name)
            else:
                return None
        else:
            path = os.path.join(self._base_path, name + '.yml')
            if check_exists and not os.path.exists(path):
                raise SessionNotFoundError(path)
            else:
                return path

    def exists(self, name):
        """Check if a named session exists."""
        try:
            self._get_session_path(name, check_exists=True)
        except SessionNotFoundError:
            return False
        else:
            return True

    def _save_tab_item(self, tab, idx, item):
        """Save a single history item in a tab.

        Args:
            tab: The tab to save.
            idx: The index of the current history item.
            item: The history item.

        Return:
            A dict with the saved data for this item.
        """
        data = {
            'url': bytes(item.url().toEncoded()).decode('ascii'),
        }

        if item.title():
            data['title'] = item.title()
        else:
            # https://github.com/The-Compiler/qutebrowser/issues/879
            if tab.history.current_idx() == idx:
                data['title'] = tab.title()
            else:
                data['title'] = data['url']

        if item.originalUrl() != item.url():
            encoded = item.originalUrl().toEncoded()
            data['original-url'] = bytes(encoded).decode('ascii')

        if tab.history.current_idx() == idx:
            data['active'] = True

        try:
            user_data = item.userData()
        except AttributeError:
            # QtWebEngine
            user_data = None

        # Zoom/scroll position: read live values for the current item,
        # otherwise fall back to what was stashed in user_data.
        if tab.history.current_idx() == idx:
            pos = tab.scroller.pos_px()
            data['zoom'] = tab.zoom.factor()
            data['scroll-pos'] = {'x': pos.x(), 'y': pos.y()}
        elif user_data is not None:
            if 'zoom' in user_data:
                data['zoom'] = user_data['zoom']
            if 'scroll-pos' in user_data:
                pos = user_data['scroll-pos']
                data['scroll-pos'] = {'x': pos.x(), 'y': pos.y()}
        return data

    def _save_tab(self, tab, active):
        """Get a dict with data for a single tab.

        Args:
            tab: The WebView to save.
            active: Whether the tab is currently active.
        """
        data = {'history': []}
        if active:
            data['active'] = True
        for idx, item in enumerate(tab.history):
            qtutils.ensure_valid(item)
            item_data = self._save_tab_item(tab, idx, item)
            data['history'].append(item_data)
        return data

    def _save_all(self):
        """Get a dict with data for all windows/tabs."""
        data = {'windows': []}
        for win_id in objreg.window_registry:
            tabbed_browser = objreg.get('tabbed-browser', scope='window',
                                        window=win_id)
            main_window = objreg.get('main-window', scope='window',
                                     window=win_id)

            # We could be in the middle of destroying a window here
            if sip.isdeleted(main_window):
                continue

            win_data = {}
            active_window = QApplication.instance().activeWindow()
            if getattr(active_window, 'win_id', None) == win_id:
                win_data['active'] = True
            win_data['geometry'] = bytes(main_window.saveGeometry())
            win_data['tabs'] = []
            for i, tab in enumerate(tabbed_browser.widgets()):
                active = i == tabbed_browser.currentIndex()
                win_data['tabs'].append(self._save_tab(tab, active))
            data['windows'].append(win_data)
        return data

    def _get_session_name(self, name):
        """Helper for save to get the name to save the session to.

        Args:
            name: The name of the session to save, or the 'default' sentinel
                  object.
        """
        if name is default:
            # Fall back: configured default -> current session -> 'default'.
            name = config.get('general', 'session-default-name')
            if name is None:
                if self._current is not None:
                    name = self._current
                else:
                    name = 'default'
        return name

    def save(self, name, last_window=False, load_next_time=False):
        """Save a named session.

        Args:
            name: The name of the session to save, or the 'default' sentinel
                  object.
            last_window: If set, saves the saved self._last_window_session
                         instead of the currently open state.
            load_next_time: If set, prepares this session to be load next time.

        Raises:
            SessionError: if no storage is configured or writing failed.

        Return:
            The name of the saved session.
        """
        name = self._get_session_name(name)
        path = self._get_session_path(name)
        if path is None:
            raise SessionError("No data storage configured.")

        log.sessions.debug("Saving session {} to {}...".format(name, path))
        if last_window:
            data = self._last_window_session
            if data is None:
                # NOTE(review): returns None here instead of a name -- callers
                # relying on the documented return value should be aware.
                log.sessions.error("last_window_session is None while saving!")
                return
        else:
            data = self._save_all()
        log.sessions.vdebug("Saving data: {}".format(data))
        try:
            with qtutils.savefile_open(path) as f:
                yaml.dump(data, f, Dumper=YamlDumper, default_flow_style=False,
                          encoding='utf-8', allow_unicode=True)
        except (OSError, UnicodeEncodeError, yaml.YAMLError) as e:
            raise SessionError(e)
        else:
            self.update_completion.emit()
        if load_next_time:
            state_config = objreg.get('state-config')
            state_config['general']['session'] = name
        return name

    def save_last_window_session(self):
        """Temporarily save the session for the last closed window."""
        self._last_window_session = self._save_all()

    def _load_tab(self, new_tab, data):
        """Load yaml data into a newly opened tab."""
        entries = []
        for histentry in data['history']:
            user_data = {}

            if 'zoom' in data:
                # The zoom was accidentally stored in 'data' instead of per-tab
                # earlier.
                # See https://github.com/The-Compiler/qutebrowser/issues/728
                user_data['zoom'] = data['zoom']
            elif 'zoom' in histentry:
                user_data['zoom'] = histentry['zoom']

            if 'scroll-pos' in data:
                # The scroll position was accidentally stored in 'data' instead
                # of per-tab earlier.
                # See https://github.com/The-Compiler/qutebrowser/issues/728
                pos = data['scroll-pos']
                user_data['scroll-pos'] = QPoint(pos['x'], pos['y'])
            elif 'scroll-pos' in histentry:
                pos = histentry['scroll-pos']
                user_data['scroll-pos'] = QPoint(pos['x'], pos['y'])

            active = histentry.get('active', False)
            url = QUrl.fromEncoded(histentry['url'].encode('ascii'))
            if 'original-url' in histentry:
                orig_url = QUrl.fromEncoded(
                    histentry['original-url'].encode('ascii'))
            else:
                orig_url = url
            entry = tabhistory.TabHistoryItem(
                url=url, original_url=orig_url, title=histentry['title'],
                active=active, user_data=user_data)
            entries.append(entry)
            if active:
                new_tab.title_changed.emit(histentry['title'])
        try:
            new_tab.history.load_items(entries)
        except ValueError as e:
            raise SessionError(e)

    def load(self, name, temp=False):
        """Load a named session.

        Args:
            name: The name of the session to load.
            temp: If given, don't set the current session.

        Raises:
            SessionNotFoundError / SessionError on lookup or parse failure.
        """
        path = self._get_session_path(name, check_exists=True)
        try:
            with open(path, encoding='utf-8') as f:
                data = yaml.load(f, Loader=YamlLoader)
        except (OSError, UnicodeDecodeError, yaml.YAMLError) as e:
            raise SessionError(e)

        log.sessions.debug("Loading session {} from {}...".format(name, path))
        for win in data['windows']:
            window = mainwindow.MainWindow(geometry=win['geometry'])
            window.show()
            tabbed_browser = objreg.get('tabbed-browser', scope='window',
                                        window=window.win_id)
            tab_to_focus = None
            for i, tab in enumerate(win['tabs']):
                new_tab = tabbed_browser.tabopen()
                self._load_tab(new_tab, tab)
                if tab.get('active', False):
                    tab_to_focus = i
            if tab_to_focus is not None:
                tabbed_browser.setCurrentIndex(tab_to_focus)
            if win.get('active', False):
                QTimer.singleShot(0, tabbed_browser.activateWindow)
        self.did_load = True
        # Sessions starting with '_' are internal and never become current.
        if not name.startswith('_') and not temp:
            self._current = name

    def delete(self, name):
        """Delete a session."""
        path = self._get_session_path(name, check_exists=True)
        os.remove(path)
        self.update_completion.emit()

    def list_sessions(self):
        """Get a list of all session names."""
        sessions = []
        if self._base_path is None:
            return sessions
        for filename in os.listdir(self._base_path):
            base, ext = os.path.splitext(filename)
            if ext == '.yml':
                sessions.append(base)
        return sessions

    @cmdutils.register(instance='session-manager')
    @cmdutils.argument('name', completion=usertypes.Completion.sessions)
    def session_load(self, name, clear=False, temp=False, force=False):
        """Load a session.

        Args:
            name: The name of the session.
            clear: Close all existing windows.
            temp: Don't set the current session for :session-save.
            force: Force loading internal sessions (starting with an
                   underline).
        """
        if name.startswith('_') and not force:
            raise cmdexc.CommandError("{} is an internal session, use --force "
                                      "to load anyways.".format(name))
        # Snapshot the windows that existed before loading, so --clear can
        # close exactly those afterwards.
        old_windows = list(objreg.window_registry.values())
        try:
            self.load(name, temp=temp)
        except SessionNotFoundError:
            raise cmdexc.CommandError("Session {} not found!".format(name))
        except SessionError as e:
            raise cmdexc.CommandError("Error while loading session: {}"
                                      .format(e))
        else:
            if clear:
                for win in old_windows:
                    win.close()

    @cmdutils.register(name=['session-save', 'w'], instance='session-manager')
    @cmdutils.argument('win_id', win_id=True)
    @cmdutils.argument('name', completion=usertypes.Completion.sessions)
    def session_save(self, win_id, name: str=default, current=False,
                     quiet=False, force=False):
        """Save a session.

        Args:
            win_id: The current window ID.
            name: The name of the session. If not given, the session configured
                  in general -> session-default-name is saved.
            current: Save the current session instead of the default.
            quiet: Don't show confirmation message.
            force: Force saving internal sessions (starting with an underline).
        """
        if (name is not default and
                name.startswith('_') and  # pylint: disable=no-member
                not force):
            raise cmdexc.CommandError("{} is an internal session, use --force "
                                      "to save anyways.".format(name))
        if current:
            if self._current is None:
                raise cmdexc.CommandError("No session loaded currently!")
            name = self._current
            assert not name.startswith('_')
        try:
            name = self.save(name)
        except SessionError as e:
            raise cmdexc.CommandError("Error while saving session: {}"
                                      .format(e))
        else:
            if not quiet:
                message.info(win_id, "Saved session {}.".format(name),
                             immediately=True)

    @cmdutils.register(instance='session-manager')
    @cmdutils.argument('name', completion=usertypes.Completion.sessions)
    def session_delete(self, name, force=False):
        """Delete a session.

        Args:
            name: The name of the session.
            force: Force deleting internal sessions (starting with an
                   underline).
        """
        if name.startswith('_') and not force:
            raise cmdexc.CommandError("{} is an internal session, use --force "
                                      "to delete anyways.".format(name))
        try:
            self.delete(name)
        except SessionNotFoundError:
            raise cmdexc.CommandError("Session {} not found!".format(name))
        except (OSError, SessionError) as e:
            log.sessions.exception("Error while deleting session!")
            raise cmdexc.CommandError("Error while deleting session: {}"
                                      .format(e))
| gpl-3.0 |
iansprice/wagtail | wagtail/wagtailadmin/tests/test_messages.py | 8 | 1326 | from __future__ import absolute_import, unicode_literals
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.test import TestCase, override_settings
class TestPageExplorer(TestCase):
    # Wagtail's admin requires its own message CSS classes; this test checks
    # that user-configured MESSAGE_TAGS do not leak into admin responses.
    @override_settings(MESSAGE_TAGS={
        messages.DEBUG: 'my-custom-tag',
        messages.INFO: 'my-custom-tag',
        messages.SUCCESS: 'my-custom-tag',
        messages.WARNING: 'my-custom-tag',
        messages.ERROR: 'my-custom-tag',
    })
    def test_message_tag_classes(self):
        """Test that the message levels are being mapped correctly."""
        url = reverse('testapp_message_test')
        response = self.client.post(url, {'level': 'success', 'message': 'A message'},
                                    follow=True)

        # Make sure the message appears
        self.assertContains(response, 'A message')

        # Make sure the Wagtail-require CSS tag appears
        self.assertContains(response, 'success')

        # Make sure the classes set in the settings do *not* appear
        self.assertNotContains(response, 'my-custom-tag')

        response = self.client.post(url, {'level': 'error', 'message': 'Danger danger!'},
                                    follow=True)
        self.assertContains(response, 'Danger danger!')
        self.assertContains(response, 'error')
        self.assertNotContains(response, 'my-custom-tag')
| bsd-3-clause |
Cuuuurzel/KiPyCalc | sympy/polys/distributedmodules.py | 23 | 21826 | r"""
Sparse distributed elements of free modules over multivariate (generalized)
polynomial rings.
This code and its data structures are very much like the distributed
polynomials, except that the first "exponent" of the monomial is
a module generator index. That is, the multi-exponent ``(i, e_1, ..., e_n)``
represents the "monomial" `x_1^{e_1} \dots x_n^{e_n} f_i` of the free module
`F` generated by `f_1, \dots, f_r` over (a localization of) the ring
`K[x_1, \dots, x_n]`. A module element is simply stored as a list of terms
ordered by the monomial order. Here a term is a pair of a multi-exponent and a
coefficient. In general, this coefficient should never be zero (since it can
then be omitted). The zero module element is stored as an empty list.
The main routines are ``sdm_nf_mora`` and ``sdm_groebner`` which can be used
to compute, respectively, weak normal forms and standard bases. They work with
arbitrary (not necessarily global) monomial orders.
In general, product orders have to be used to construct valid monomial orders
for modules. However, ``lex`` can be used as-is.
Note that the "level" (number of variables, i.e. parameter u+1 in
distributedpolys.py) is never needed in this code.
The main reference for this file is [SCA],
"A Singular Introduction to Commutative Algebra".
"""
from __future__ import print_function, division
from itertools import permutations
from sympy.polys.monomials import (
monomial_mul, monomial_lcm, monomial_div, monomial_deg, monomial_divides
)
from sympy.polys.polytools import Poly
from sympy.polys.polyutils import parallel_dict_from_expr
from sympy import S, sympify
# Additional monomial tools.
def sdm_monomial_mul(M, X):
    """
    Multiply tuple ``X`` representing a monomial of `K[X]` into the tuple
    ``M`` representing a monomial of `F`.

    ``M`` is ``(i, e_1, ..., e_n)``; the generator index ``i`` is kept and
    only the exponent part is multiplied.

    Examples
    ========

    Multiplying `xy^3` into `x f_1` yields `x^2 y^3 f_1`:

    >>> from sympy.polys.distributedmodules import sdm_monomial_mul
    >>> sdm_monomial_mul((1, 1, 0), (1, 3))
    (1, 2, 3)
    """
    gen_index, exponents = M[0], M[1:]
    return (gen_index,) + monomial_mul(X, exponents)
def sdm_monomial_deg(M):
    """
    Return the total degree of ``M`` (the generator index does not count).

    Examples
    ========

    For example, the total degree of `x^2 y f_5` is 3:

    >>> from sympy.polys.distributedmodules import sdm_monomial_deg
    >>> sdm_monomial_deg((5, 2, 1))
    3
    """
    exponents = M[1:]
    return monomial_deg(exponents)
def sdm_monomial_lcm(A, B):
    """
    Return the "least common multiple" of ``A`` and ``B``.

    If `A = M e_j` and `B = N e_j`, where `M` and `N` are polynomial
    monomials, this returns `\lcm(M, N) e_j`.  If ``A`` and ``B`` involve
    distinct generators, the result is undefined.

    >>> from sympy.polys.distributedmodules import sdm_monomial_lcm
    >>> sdm_monomial_lcm((1, 2, 3), (1, 0, 5))
    (1, 2, 5)
    """
    exp_a, exp_b = A[1:], B[1:]
    return (A[0],) + monomial_lcm(exp_a, exp_b)
def sdm_monomial_divides(A, B):
    """
    Does there exist a (polynomial) monomial X such that XA = B?

    This holds iff ``A`` and ``B`` involve the same generator and every
    exponent of ``A`` is at most the matching exponent of ``B``.

    Examples
    ========

    `A = f_1` divides `B = x^2 y f_1`:

    >>> from sympy.polys.distributedmodules import sdm_monomial_divides
    >>> sdm_monomial_divides((1, 0, 0), (1, 2, 1))
    True

    `A = f_1` does not divide `B = f_2` (distinct generators):

    >>> sdm_monomial_divides((1, 0, 0), (2, 0, 0))
    False

    `A = xy^2 f_5` does not divide `B = y f_5`:

    >>> sdm_monomial_divides((5, 1, 2), (5, 0, 1))
    False
    """
    if A[0] != B[0]:
        return False
    return all(ea <= eb for ea, eb in zip(A[1:], B[1:]))
# The actual distributed modules code.
def sdm_LC(f, K):
    """Return the leading coefficient of ``f``, or ``K.zero`` if ``f`` is zero."""
    if f:
        return f[0][1]
    return K.zero
def sdm_to_dict(f):
    """Make a dictionary from a distributed polynomial. """
    return {monom: coeff for monom, coeff in f}
def sdm_from_dict(d, O):
    """
    Create an sdm from a dictionary.

    Here ``O`` is the monomial order to use.

    >>> from sympy.polys.distributedmodules import sdm_from_dict
    >>> from sympy.polys import QQ, lex
    >>> dic = {(1, 1, 0): QQ(1), (1, 0, 0): QQ(2), (0, 1, 0): QQ(0)}
    >>> sdm_from_dict(dic, lex)
    [((1, 1, 0), 1), ((1, 0, 0), 2)]
    """
    # Sort the (monomial, coefficient) pairs, then drop zero coefficients.
    terms = list(d.items())
    return sdm_strip(sdm_sort(terms, O))
def sdm_sort(f, O):
    """Sort terms in ``f`` descending under the monomial order ``O``. """
    def order_key(term):
        return O(term[0])
    return sorted(f, key=order_key, reverse=True)
def sdm_strip(f):
    """Remove terms with zero coefficients from ``f`` in ``K[X]``. """
    return [term for term in f if term[1]]
def sdm_add(f, g, O, K):
    """
    Add two module elements ``f``, ``g``.

    Addition is done over the ground field ``K``, monomials are ordered
    according to ``O``.

    Examples
    ========

    All examples use lexicographic order.

    `(xy f_1) + (f_2) = f_2 + xy f_1`

    >>> from sympy.polys.distributedmodules import sdm_add
    >>> from sympy.polys import lex, QQ
    >>> sdm_add([((1, 1, 1), QQ(1))], [((2, 0, 0), QQ(1))], lex, QQ)
    [((2, 0, 0), 1), ((1, 1, 1), 1)]

    `(xy f_1) + (-xy f_1)` = 0`

    >>> sdm_add([((1, 1, 1), QQ(1))], [((1, 1, 1), QQ(-1))], lex, QQ)
    []

    `(f_1) + (2f_1) = 3f_1`

    >>> sdm_add([((1, 0, 0), QQ(1))], [((1, 0, 0), QQ(2))], lex, QQ)
    [((1, 0, 0), 3)]
    """
    # Merge g into a dict of f's terms, cancelling terms that sum to zero.
    merged = dict(f)
    for monom, coeff in g:
        if monom not in merged:
            merged[monom] = coeff
            continue
        total = merged[monom] + coeff
        if total:
            merged[monom] = total
        else:
            del merged[monom]
    return sdm_from_dict(merged, O)
def sdm_LM(f):
    r"""
    Returns the leading monomial of ``f``.

    Only valid if `f \ne 0`.

    Examples
    ========

    >>> from sympy.polys.distributedmodules import sdm_LM, sdm_from_dict
    >>> from sympy.polys import QQ, lex
    >>> dic = {(1, 2, 3): QQ(1), (4, 0, 0): QQ(1), (4, 0, 1): QQ(1)}
    >>> sdm_LM(sdm_from_dict(dic, lex))
    (4, 0, 1)
    """
    monom, _coeff = f[0]
    return monom
def sdm_LT(f):
    r"""
    Returns the leading term of ``f``.
    Only valid if `f \ne 0`.
    Examples
    ========
    >>> from sympy.polys.distributedmodules import sdm_LT, sdm_from_dict
    >>> from sympy.polys import QQ, lex
    >>> dic = {(1, 2, 3): QQ(1), (4, 0, 0): QQ(2), (4, 0, 1): QQ(3)}
    >>> sdm_LT(sdm_from_dict(dic, lex))
    ((4, 0, 1), 3)
    """
    # Terms are stored largest-first, so the head of the list is the
    # leading (monomial, coefficient) pair.
    leading_term = f[0]
    return leading_term
def sdm_mul_term(f, term, O, K):
    """
    Multiply a distributed module element ``f`` by a (polynomial) term ``term``.
    Multiplication of coefficients is done over the ground field ``K``, and
    monomials are ordered according to ``O``.
    Examples
    ========
    `0 f_1 = 0`
    >>> from sympy.polys.distributedmodules import sdm_mul_term
    >>> from sympy.polys import lex, QQ
    >>> sdm_mul_term([((1, 0, 0), QQ(1))], ((0, 0), QQ(0)), lex, QQ)
    []
    `x 0 = 0`
    >>> sdm_mul_term([], ((1, 0), QQ(1)), lex, QQ)
    []
    `(x) (f_1) = xf_1`
    >>> sdm_mul_term([((1, 0, 0), QQ(1))], ((1, 0), QQ(1)), lex, QQ)
    [((1, 1, 0), 1)]
    `(2xy) (3x f_1 + 4y f_2) = 8xy^2 f_2 + 6x^2y f_1`
    >>> f = [((2, 0, 1), QQ(4)), ((1, 1, 0), QQ(3))]
    >>> sdm_mul_term(f, ((1, 1), QQ(2)), lex, QQ)
    [((2, 1, 2), 8), ((1, 2, 1), 6)]
    """
    monom, coeff = term
    # Multiplying zero (either factor) gives the zero element.
    if not (f and coeff):
        return []
    if K.is_one(coeff):
        # Skip the coefficient multiplications when the scalar is one.
        return [(sdm_monomial_mul(f_monom, monom), f_coeff)
                for f_monom, f_coeff in f]
    return [(sdm_monomial_mul(f_monom, monom), f_coeff * coeff)
            for f_monom, f_coeff in f]
def sdm_zero():
    """Return the zero module element (represented as an empty term list)."""
    return list()
def sdm_deg(f):
    """
    Degree of ``f``.
    This is the maximum of the degrees of all its monomials.
    Invalid if ``f`` is zero.
    Examples
    ========
    >>> from sympy.polys.distributedmodules import sdm_deg
    >>> sdm_deg([((1, 2, 3), 1), ((10, 0, 1), 1), ((2, 3, 4), 4)])
    7
    """
    # Take the largest monomial degree over all terms; coefficients are
    # irrelevant here.
    return max(sdm_monomial_deg(monom) for monom, _coeff in f)
# Conversion
def sdm_from_vector(vec, O, K, **opts):
    """
    Create an sdm from an iterable of expressions.
    Coefficients are created in the ground field ``K``, and terms are ordered
    according to monomial order ``O``. Named arguments are passed on to the
    polys conversion code and can be used to specify for example generators.
    Examples
    ========
    >>> from sympy.polys.distributedmodules import sdm_from_vector
    >>> from sympy.abc import x, y, z
    >>> from sympy.polys import QQ, lex
    >>> sdm_from_vector([x**2+y**2, 2*z], lex, QQ)
    [((1, 0, 0, 1), 2), ((0, 2, 0, 0), 1), ((0, 0, 2, 0), 1)]
    """
    dics, gens = parallel_dict_from_expr(sympify(vec), **opts)
    # Prefix each polynomial monomial with the index of its module
    # component, producing module monomials.
    combined = {}
    for component, poly_dict in enumerate(dics):
        for monom, coeff in poly_dict.items():
            combined[(component,) + monom] = K.convert(coeff)
    return sdm_from_dict(combined, O)
def sdm_to_vector(f, gens, K, n=None):
    """
    Convert sdm ``f`` into a list of polynomial expressions.
    The generators for the polynomial ring are specified via ``gens``. The rank
    of the module is guessed, or passed via ``n``. The ground field is assumed
    to be ``K``.
    Examples
    ========
    >>> from sympy.polys.distributedmodules import sdm_to_vector
    >>> from sympy.abc import x, y, z
    >>> from sympy.polys import QQ, lex
    >>> f = [((1, 0, 0, 1), QQ(2)), ((0, 2, 0, 0), QQ(1)), ((0, 0, 2, 0), QQ(1))]
    >>> sdm_to_vector(f, [x, y, z], QQ)
    [x**2 + y**2, 2*z]
    """
    # Split the terms by module component (the first entry of the monomial).
    components = {}
    for monom, coeff in sdm_to_dict(f).items():
        components.setdefault(monom[0], []).append((monom[1:], coeff))
    rank = n or len(components)
    # Build one polynomial expression per component; missing components
    # become zero.
    vector = []
    for idx in range(rank):
        if idx in components:
            vector.append(
                Poly(dict(components[idx]), gens=gens, domain=K).as_expr())
        else:
            vector.append(S.Zero)
    return vector
# Algorithms.
def sdm_spoly(f, g, O, K, phantom=None):
    """
    Compute the generalized s-polynomial of ``f`` and ``g``.
    The ground field is assumed to be ``K``, and monomials ordered according to
    ``O``.
    This is invalid if either of ``f`` or ``g`` is zero.
    If the leading terms of `f` and `g` involve different basis elements of
    `F`, their s-poly is defined to be zero. Otherwise it is a certain linear
    combination of `f` and `g` in which the leading terms cancel.
    See [SCA, defn 2.3.6] for details.
    If ``phantom`` is not ``None``, it should be a pair of module elements on
    which to perform the same operation(s) as on ``f`` and ``g``. In this
    case both results are returned.
    Examples
    ========
    >>> from sympy.polys.distributedmodules import sdm_spoly
    >>> from sympy.polys import QQ, lex
    >>> f = [((2, 1, 1), QQ(1)), ((1, 0, 1), QQ(1))]
    >>> g = [((2, 3, 0), QQ(1))]
    >>> h = [((1, 2, 3), QQ(1))]
    >>> sdm_spoly(f, h, lex, QQ)
    []
    >>> sdm_spoly(f, g, lex, QQ)
    [((1, 2, 1), 1)]
    """
    # The s-poly of anything with zero is zero.
    if not (f and g):
        return sdm_zero()
    LMf = sdm_LM(f)
    LMg = sdm_LM(g)
    # Leading terms on different module components: s-poly is zero by
    # definition.
    if LMf[0] != LMg[0]:
        return sdm_zero()
    # Drop the component index, leaving the polynomial parts of the monomials.
    mf = LMf[1:]
    mg = LMg[1:]
    lcm = monomial_lcm(mf, mg)
    mult_f = monomial_div(lcm, mf)
    mult_g = monomial_div(lcm, mg)
    # Scale g so that the leading terms cancel on addition.
    c = K.quo(-sdm_LC(f, K), sdm_LC(g, K))
    result = sdm_add(sdm_mul_term(f, (mult_f, K.one), O, K),
                     sdm_mul_term(g, (mult_g, c), O, K), O, K)
    if phantom is None:
        return result
    # Mirror the exact same linear combination on the phantom pair.
    phantom_result = sdm_add(sdm_mul_term(phantom[0], (mult_f, K.one), O, K),
                             sdm_mul_term(phantom[1], (mult_g, c), O, K), O, K)
    return result, phantom_result
def sdm_ecart(f):
    """
    Compute the ecart of ``f``.
    This is defined to be the difference of the total degree of `f` and the
    total degree of the leading monomial of `f` [SCA, defn 2.3.7].
    Invalid if f is zero.
    Examples
    ========
    >>> from sympy.polys.distributedmodules import sdm_ecart
    >>> sdm_ecart([((1, 2, 3), 1), ((1, 0, 1), 1)])
    0
    >>> sdm_ecart([((2, 2, 1), 1), ((1, 5, 1), 1)])
    3
    """
    total_degree = sdm_deg(f)
    leading_degree = sdm_monomial_deg(sdm_LM(f))
    return total_degree - leading_degree
def sdm_nf_mora(f, G, O, K, phantom=None):
    r"""
    Compute a weak normal form of ``f`` with respect to ``G`` and order ``O``.
    The ground field is assumed to be ``K``, and monomials ordered according to
    ``O``.
    Weak normal forms are defined in [SCA, defn 2.3.3]. They are not unique.
    This function deterministically computes a weak normal form, depending on
    the order of `G`.
    The most important property of a weak normal form is the following: if
    `R` is the ring associated with the monomial ordering (if the ordering is
    global, we just have `R = K[x_1, \dots, x_n]`, otherwise it is a certain
    localization thereof), `I` any ideal of `R` and `G` a standard basis for
    `I`, then for any `f \in R`, we have `f \in I` if and only if
    `NF(f | G) = 0`.
    This is the generalized Mora algorithm for computing weak normal forms with
    respect to arbitrary monomial orders [SCA, algorithm 2.3.9].
    If ``phantom`` is not ``None``, it should be a pair of "phantom" arguments
    on which to perform the same computations as on ``f``, ``G``, both results
    are then returned.
    """
    from itertools import repeat
    h = f
    T = list(G)
    if phantom is not None:
        # "phantom" variables with suffix p
        hp = phantom[0]
        Tp = list(phantom[1])
        phantom = True
    else:
        # No phantoms: zip T against an endless stream of dummy values so
        # the loop below can unpack (g, gp) pairs uniformly.
        Tp = repeat([])
        phantom = False
    while h:
        # TODO better data structure!!!
        # Candidate reducers: elements of T whose leading monomial divides
        # the leading monomial of the current remainder h.
        Th = [(g, sdm_ecart(g), gp) for g, gp in zip(T, Tp)
              if sdm_monomial_divides(sdm_LM(g), sdm_LM(h))]
        if not Th:
            # No reducer applies: h is a weak normal form.
            break
        # Mora's selection rule: reduce by an element of minimal ecart.
        g, _, gp = min(Th, key=lambda x: x[1])
        if sdm_ecart(g) > sdm_ecart(h):
            # Key step of the Mora algorithm: when the chosen reducer has a
            # larger ecart, add the current remainder itself to the reducer
            # set before reducing, to guarantee termination for local orders.
            T.append(h)
            if phantom:
                Tp.append(hp)
        if phantom:
            h, hp = sdm_spoly(h, g, O, K, phantom=(hp, gp))
        else:
            h = sdm_spoly(h, g, O, K)
    if phantom:
        return h, hp
    return h
def sdm_nf_buchberger(f, G, O, K, phantom=None):
    r"""
    Compute a weak normal form of ``f`` with respect to ``G`` and order ``O``.
    The ground field is assumed to be ``K``, and monomials ordered according to
    ``O``.
    This is the standard Buchberger algorithm for computing weak normal forms with
    respect to *global* monomial orders [SCA, algorithm 1.6.10].
    If ``phantom`` is not ``None``, it should be a pair of "phantom" arguments
    on which to perform the same computations as on ``f``, ``G``, both results
    are then returned.
    """
    from itertools import repeat
    h = f
    T = list(G)
    if phantom is not None:
        # "phantom" variables with suffix p
        hp = phantom[0]
        Tp = list(phantom[1])
        phantom = True
    else:
        # No phantoms: pair each reducer with a dummy value so the loop can
        # unpack (g, gp) uniformly.
        Tp = repeat([])
        phantom = False
    while h:
        try:
            # Pick the FIRST element of T whose leading monomial divides
            # LM(h) (deterministic, depends on the order of G).
            g, gp = next((g, gp) for g, gp in zip(T, Tp)
                         if sdm_monomial_divides(sdm_LM(g), sdm_LM(h)))
        except StopIteration:
            # No reducer applies: h is a weak normal form.
            break
        if phantom:
            h, hp = sdm_spoly(h, g, O, K, phantom=(hp, gp))
        else:
            h = sdm_spoly(h, g, O, K)
    if phantom:
        return h, hp
    return h
def sdm_nf_buchberger_reduced(f, G, O, K):
    r"""
    Compute a reduced normal form of ``f`` with respect to ``G`` and order ``O``.
    The ground field is assumed to be ``K``, and monomials ordered according to
    ``O``.
    In contrast to weak normal forms, reduced normal forms *are* unique, but
    their computation is more expensive.
    This is the standard Buchberger algorithm for computing reduced normal forms
    with respect to *global* monomial orders [SCA, algorithm 1.6.11].
    The ``phantom`` option is not supported, so this normal form cannot be used
    as a normal form for the "extended" groebner algorithm.
    """
    result = sdm_zero()
    remainder = f
    # Repeatedly compute a weak normal form, peel its leading term into the
    # result, and continue reducing the tail.
    while remainder:
        remainder = sdm_nf_buchberger(remainder, G, O, K)
        if remainder:
            result = sdm_add(result, [sdm_LT(remainder)], O, K)
            remainder = remainder[1:]
    return result
def sdm_groebner(G, NF, O, K, extended=False):
    """
    Compute a minimal standard basis of ``G`` with respect to order ``O``.
    The algorithm uses a normal form ``NF``, for example ``sdm_nf_mora``.
    The ground field is assumed to be ``K``, and monomials ordered according
    to ``O``.
    Let `N` denote the submodule generated by elements of `G`. A standard
    basis for `N` is a subset `S` of `N`, such that `in(S) = in(N)`, where for
    any subset `X` of `F`, `in(X)` denotes the submodule generated by the
    initial forms of elements of `X`. [SCA, defn 2.3.2]
    A standard basis is called minimal if no subset of it is a standard basis.
    One may show that standard bases are always generating sets.
    Minimal standard bases are not unique. This algorithm computes a
    deterministic result, depending on the particular order of `G`.
    If ``extended=True``, also compute the transition matrix from the initial
    generators to the groebner basis. That is, return a list of coefficient
    vectors, expressing the elements of the groebner basis in terms of the
    elements of ``G``.
    This functions implements the "sugar" strategy, see
    Giovini et al: "One sugar cube, please" OR Selection strategies in
    Buchberger algorithm.
    """
    # The critical pair set.
    # A critical pair is stored as (i, j, s, t) where (i, j) defines the pair
    # (by indexing S), s is the sugar of the pair, and t is the lcm of their
    # leading monomials.
    P = []
    # The eventual standard basis.
    S = []
    # Sugars[k] is the sugar (phantom total degree) of S[k].
    Sugars = []
    def Ssugar(i, j):
        """Compute the sugar of the S-poly corresponding to (i, j)."""
        LMi = sdm_LM(S[i])
        LMj = sdm_LM(S[j])
        return max(Sugars[i] - sdm_monomial_deg(LMi),
                   Sugars[j] - sdm_monomial_deg(LMj)) \
            + sdm_monomial_deg(sdm_monomial_lcm(LMi, LMj))
    # Pair selection key: lowest sugar first, ties broken by the monomial
    # order of the lcm, then by the second index.
    ourkey = lambda p: (p[2], O(p[3]), p[1])
    def update(f, sugar, P):
        """Add f with sugar ``sugar`` to S, update P."""
        if not f:
            return P
        k = len(S)
        S.append(f)
        Sugars.append(sugar)
        LMf = sdm_LM(f)
        def removethis(pair):
            # Chain (Buchberger) criterion: pair (i, j) is redundant if the
            # new element k "divides between" them.
            i, j, s, t = pair
            if LMf[0] != t[0]:
                return False
            tik = sdm_monomial_lcm(LMf, sdm_LM(S[i]))
            tjk = sdm_monomial_lcm(LMf, sdm_LM(S[j]))
            return tik != t and tjk != t and sdm_monomial_divides(tik, t) and \
                sdm_monomial_divides(tjk, t)
        # apply the chain criterion
        P = [p for p in P if not removethis(p)]
        # new-pair set
        N = [(i, k, Ssugar(i, k), sdm_monomial_lcm(LMf, sdm_LM(S[i])))
             for i in range(k) if LMf[0] == sdm_LM(S[i])[0]]
        # TODO apply the product criterion?
        N.sort(key=ourkey)
        remove = set()
        # Discard new pairs whose lcm is a multiple of an earlier pair's lcm.
        for i, p in enumerate(N):
            for j in range(i + 1, len(N)):
                if sdm_monomial_divides(p[3], N[j][3]):
                    remove.add(j)
        # TODO mergesort?
        P.extend(reversed([p for i, p in enumerate(N) if not i in remove]))
        P.sort(key=ourkey, reverse=True)
        # NOTE reverse-sort, because we want to pop from the end
        return P
    # Figure out the number of generators in the ground ring.
    try:
        # NOTE: we look for the first non-zero vector, take its first monomial
        # the number of generators in the ring is one less than the length
        # (since the zeroth entry is for the module generators)
        numgens = len(next(x[0] for x in G if x)[0]) - 1
    except StopIteration:
        # No non-zero elements in G ...
        if extended:
            return [], []
        return []
    # This list will store expressions of the elements of S in terms of the
    # initial generators
    coefficients = []
    # First add all the elements of G to S
    for i, f in enumerate(G):
        P = update(f, sdm_deg(f), P)
        if extended and f:
            # Unit vector: G[i] expressed trivially in terms of G.
            coefficients.append(sdm_from_dict({(i,) + (0,)*numgens: K(1)}, O))
    # Now carry out the buchberger algorithm.
    while P:
        i, j, s, t = P.pop()
        f, sf, g, sg = S[i], Sugars[i], S[j], Sugars[j]
        if extended:
            sp, coeff = sdm_spoly(f, g, O, K,
                                  phantom=(coefficients[i], coefficients[j]))
            h, hcoeff = NF(sp, S, O, K, phantom=(coeff, coefficients))
            if h:
                coefficients.append(hcoeff)
        else:
            h = NF(sdm_spoly(f, g, O, K), S, O, K)
        P = update(h, Ssugar(i, j), P)
    # Finally interreduce the standard basis.
    # (TODO again, better data structures)
    # NOTE(review): permutations() snapshots S when called, so removing
    # elements from S inside the loop is safe; the membership checks below
    # guard against using already-removed elements.
    S = set((tuple(f), i) for i, f in enumerate(S))
    for (a, ai), (b, bi) in permutations(S, 2):
        A = sdm_LM(a)
        B = sdm_LM(b)
        if sdm_monomial_divides(A, B) and (b, bi) in S and (a, ai) in S:
            S.remove((b, bi))
    L = sorted(((list(f), i) for f, i in S), key=lambda p: O(sdm_LM(p[0])),
               reverse=True)
    res = [x[0] for x in L]
    if extended:
        return res, [coefficients[i] for _, i in L]
    return res
| mit |
obeattie/sqlalchemy | lib/sqlalchemy/engine/reflection.py | 1 | 13666 | """Provides an abstraction for obtaining database schema information.
Usage Notes:
Here are some general conventions when accessing the low level inspector
methods such as get_table_names, get_columns, etc.
1. Inspector methods return lists of dicts in most cases for the following
reasons:
* They're both standard types that can be serialized.
* Using a dict instead of a tuple allows easy expansion of attributes.
* Using a list for the outer structure maintains order and is easy to work
with (e.g. list comprehension [d['name'] for d in cols]).
2. Records that contain a name, such as the column name in a column record
use the key 'name'. So for most return values, each record will have a
   'name' attribute.
"""
import sqlalchemy
from sqlalchemy import exc, sql
from sqlalchemy import util
from sqlalchemy.types import TypeEngine
from sqlalchemy import schema as sa_schema
@util.decorator
def cache(fn, self, con, *args, **kw):
    """Memoize dialect reflection calls in a caller-supplied ``info_cache``
    dict (passed as a keyword argument); call through when none is given."""
    info_cache = kw.get('info_cache', None)
    if info_cache is None:
        # No cache dict supplied -- always invoke the wrapped function.
        return fn(self, con, *args, **kw)
    # Key on the function name plus any hashable string/scalar arguments.
    str_args = tuple(a for a in args if isinstance(a, basestring))
    scalar_kw = tuple((k, v) for k, v in kw.iteritems()
                      if isinstance(v, (basestring, int, float)))
    key = (fn.__name__, str_args, scalar_kw)
    ret = info_cache.get(key)
    if ret is None:
        ret = fn(self, con, *args, **kw)
        info_cache[key] = ret
    return ret
class Inspector(object):
"""Performs database schema inspection.
The Inspector acts as a proxy to the dialects' reflection methods and
provides higher level functions for accessing database schema information.
"""
def __init__(self, conn):
"""Initialize the instance.
:param conn: a :class:`~sqlalchemy.engine.base.Connectable`
"""
self.conn = conn
# set the engine
if hasattr(conn, 'engine'):
self.engine = conn.engine
else:
self.engine = conn
self.dialect = self.engine.dialect
self.info_cache = {}
@classmethod
def from_engine(cls, engine):
if hasattr(engine.dialect, 'inspector'):
return engine.dialect.inspector(engine)
return Inspector(engine)
@property
def default_schema_name(self):
return self.dialect.default_schema_name
def get_schema_names(self):
"""Return all schema names.
"""
if hasattr(self.dialect, 'get_schema_names'):
return self.dialect.get_schema_names(self.conn,
info_cache=self.info_cache)
return []
def get_table_names(self, schema=None, order_by=None):
"""Return all table names in `schema`.
:param schema: Optional, retrieve names from a non-default schema.
:param order_by: Optional, may be the string "foreign_key" to sort
the result on foreign key dependencies.
This should probably not return view names or maybe it should return
them with an indicator t or v.
"""
if hasattr(self.dialect, 'get_table_names'):
tnames = self.dialect.get_table_names(self.conn,
schema,
info_cache=self.info_cache)
else:
tnames = self.engine.table_names(schema)
if order_by == 'foreign_key':
ordered_tnames = tnames[:]
# Order based on foreign key dependencies.
for tname in tnames:
table_pos = tnames.index(tname)
fkeys = self.get_foreign_keys(tname, schema)
for fkey in fkeys:
rtable = fkey['referred_table']
if rtable in ordered_tnames:
ref_pos = ordered_tnames.index(rtable)
# Make sure it's lower in the list than anything it
# references.
if table_pos > ref_pos:
ordered_tnames.pop(table_pos) # rtable moves up 1
# insert just below rtable
ordered_tnames.index(ref_pos, tname)
tnames = ordered_tnames
return tnames
def get_table_options(self, table_name, schema=None, **kw):
if hasattr(self.dialect, 'get_table_options'):
return self.dialect.get_table_options(self.conn, table_name, schema,
info_cache=self.info_cache,
**kw)
return {}
def get_view_names(self, schema=None):
"""Return all view names in `schema`.
:param schema: Optional, retrieve names from a non-default schema.
"""
return self.dialect.get_view_names(self.conn, schema,
info_cache=self.info_cache)
def get_view_definition(self, view_name, schema=None):
"""Return definition for `view_name`.
:param schema: Optional, retrieve names from a non-default schema.
"""
return self.dialect.get_view_definition(
self.conn, view_name, schema, info_cache=self.info_cache)
def get_columns(self, table_name, schema=None, **kw):
"""Return information about columns in `table_name`.
Given a string `table_name` and an optional string `schema`, return
column information as a list of dicts with these keys:
name
the column's name
type
:class:`~sqlalchemy.types.TypeEngine`
nullable
boolean
default
the column's default value
attrs
dict containing optional column attributes
"""
col_defs = self.dialect.get_columns(self.conn, table_name, schema,
info_cache=self.info_cache,
**kw)
for col_def in col_defs:
# make this easy and only return instances for coltype
coltype = col_def['type']
if not isinstance(coltype, TypeEngine):
col_def['type'] = coltype()
return col_defs
def get_primary_keys(self, table_name, schema=None, **kw):
"""Return information about primary keys in `table_name`.
Given a string `table_name`, and an optional string `schema`, return
primary key information as a list of column names.
"""
pkeys = self.dialect.get_primary_keys(self.conn, table_name, schema,
info_cache=self.info_cache,
**kw)
return pkeys
def get_foreign_keys(self, table_name, schema=None, **kw):
"""Return information about foreign_keys in `table_name`.
Given a string `table_name`, and an optional string `schema`, return
foreign key information as a list of dicts with these keys:
constrained_columns
a list of column names that make up the foreign key
referred_schema
the name of the referred schema
referred_table
the name of the referred table
referred_columns
a list of column names in the referred table that correspond to
constrained_columns
\**kw
other options passed to the dialect's get_foreign_keys() method.
"""
fk_defs = self.dialect.get_foreign_keys(self.conn, table_name, schema,
info_cache=self.info_cache,
**kw)
return fk_defs
def get_indexes(self, table_name, schema=None, **kw):
"""Return information about indexes in `table_name`.
Given a string `table_name` and an optional string `schema`, return
index information as a list of dicts with these keys:
name
the index's name
column_names
list of column names in order
unique
boolean
\**kw
other options passed to the dialect's get_indexes() method.
"""
indexes = self.dialect.get_indexes(self.conn, table_name,
schema,
info_cache=self.info_cache, **kw)
return indexes
    def reflecttable(self, table, include_columns):
        """Populate the given :class:`Table` from database metadata.
        Loads columns, primary key, foreign key constraints and indexes via
        the dialect's reflection methods.  ``include_columns``, when truthy,
        restricts reflection to the named columns.
        Raises :class:`exc.NoSuchTableError` if no columns are found.
        """
        dialect = self.conn.dialect
        # MySQL dialect does this. Applicable with other dialects?
        if hasattr(dialect, '_connection_charset') \
                and hasattr(dialect, '_adjust_casing'):
            charset = dialect._connection_charset
            dialect._adjust_casing(table)
        # table attributes we might need.
        reflection_options = dict(
            (k, table.kwargs.get(k)) for k in dialect.reflection_options if k in table.kwargs)
        schema = table.schema
        table_name = table.name
        # apply table options
        tbl_opts = self.get_table_options(table_name, schema, **table.kwargs)
        if tbl_opts:
            table.kwargs.update(tbl_opts)
        # table.kwargs will need to be passed to each reflection method. Make
        # sure keywords are strings.
        # (Safe to delete while looping: Py2 items() returns a list snapshot.)
        tblkw = table.kwargs.copy()
        for (k, v) in tblkw.items():
            del tblkw[k]
            tblkw[str(k)] = v
        # Py2K
        if isinstance(schema, str):
            schema = schema.decode(dialect.encoding)
        if isinstance(table_name, str):
            table_name = table_name.decode(dialect.encoding)
        # end Py2K
        # columns
        found_table = False
        for col_d in self.get_columns(table_name, schema, **tblkw):
            found_table = True
            name = col_d['name']
            if include_columns and name not in include_columns:
                continue
            coltype = col_d['type']
            col_kw = {
                'nullable':col_d['nullable'],
            }
            # Optional per-column attributes only some dialects provide.
            if 'autoincrement' in col_d:
                col_kw['autoincrement'] = col_d['autoincrement']
            if 'quote' in col_d:
                col_kw['quote'] = col_d['quote']
            colargs = []
            if col_d.get('default') is not None:
                # the "default" value is assumed to be a literal SQL expression,
                # so is wrapped in text() so that no quoting occurs on re-issuance.
                colargs.append(sa_schema.DefaultClause(sql.text(col_d['default'])))
            if 'sequence' in col_d:
                # TODO: mssql, maxdb and sybase are using this.
                seq = col_d['sequence']
                sequence = sa_schema.Sequence(seq['name'], 1, 1)
                if 'start' in seq:
                    sequence.start = seq['start']
                if 'increment' in seq:
                    sequence.increment = seq['increment']
                colargs.append(sequence)
            col = sa_schema.Column(name, coltype, *colargs, **col_kw)
            table.append_column(col)
        if not found_table:
            raise exc.NoSuchTableError(table.name)
        # Primary keys
        # NOTE(review): PK columns excluded by include_columns are silently
        # dropped from the constraint here.
        primary_key_constraint = sa_schema.PrimaryKeyConstraint(*[
            table.c[pk] for pk in self.get_primary_keys(table_name, schema, **tblkw)
            if pk in table.c
        ])
        table.append_constraint(primary_key_constraint)
        # Foreign keys
        fkeys = self.get_foreign_keys(table_name, schema, **tblkw)
        for fkey_d in fkeys:
            conname = fkey_d['name']
            constrained_columns = fkey_d['constrained_columns']
            referred_schema = fkey_d['referred_schema']
            referred_table = fkey_d['referred_table']
            referred_columns = fkey_d['referred_columns']
            refspec = []
            # Reflect the referred table into the same MetaData so the
            # ForeignKeyConstraint below can resolve its target columns.
            if referred_schema is not None:
                sa_schema.Table(referred_table, table.metadata,
                                autoload=True, schema=referred_schema,
                                autoload_with=self.conn,
                                **reflection_options
                                )
                for column in referred_columns:
                    refspec.append(".".join(
                        [referred_schema, referred_table, column]))
            else:
                sa_schema.Table(referred_table, table.metadata, autoload=True,
                                autoload_with=self.conn,
                                **reflection_options
                                )
                for column in referred_columns:
                    refspec.append(".".join([referred_table, column]))
            table.append_constraint(
                sa_schema.ForeignKeyConstraint(constrained_columns, refspec,
                                               conname, link_to_name=True))
        # Indexes
        indexes = self.get_indexes(table_name, schema)
        for index_d in indexes:
            name = index_d['name']
            columns = index_d['column_names']
            unique = index_d['unique']
            flavor = index_d.get('type', 'unknown type')
            # Skip (and warn about) indexes that span columns the caller
            # chose not to reflect.
            if include_columns and \
                    not set(columns).issubset(include_columns):
                util.warn(
                    "Omitting %s KEY for (%s), key covers omitted columns." %
                    (flavor, ', '.join(columns)))
                continue
            sa_schema.Index(name, *[table.columns[c] for c in columns],
                            **dict(unique=unique))
| mit |
leihu0724/azure-sdk-for-python | setup.py | 1 | 2026 | #!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from distutils.core import setup
# To build:
#   python setup.py sdist
#
# To install:
#   python setup.py install
#
# To register (only needed once):
#   python setup.py register
#
# To upload:
#   python setup.py sdist upload

# Read the long description up front so the file handle is closed promptly.
# (The original passed open('README.rst', 'r').read() inline, leaking the
# handle for the life of the process.)
with open('README.rst', 'r') as readme_file:
    LONG_DESCRIPTION = readme_file.read()

setup(name='azure',
      version='0.10.0',
      description='Microsoft Azure client APIs',
      long_description=LONG_DESCRIPTION,
      license='Apache License 2.0',
      author='Microsoft Corporation',
      author_email='ptvshelp@microsoft.com',
      url='https://github.com/Azure/azure-sdk-for-python',
      classifiers=[
          'Development Status :: 5 - Production/Stable',
          'Programming Language :: Python',
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.3',
          'Programming Language :: Python :: 3.4',
          'License :: OSI Approved :: Apache Software License'],
      packages=['azure',
                'azure.http',
                'azure.servicebus',
                'azure.storage',
                'azure.servicemanagement'],
      install_requires=['python-dateutil',
                        'pyopenssl',
                        'futures']
      )
| apache-2.0 |
xuxiao19910803/edx | lms/djangoapps/instructor/tests/test_ecommerce.py | 41 | 14884 | """
Unit tests for Ecommerce feature flag in new instructor dashboard.
"""
import datetime
import pytz
from django.core.urlresolvers import reverse
from nose.plugins.attrib import attr
from course_modes.models import CourseMode
from student.roles import CourseFinanceAdminRole
from shoppingcart.models import Coupon, CourseRegistrationCode
from student.tests.factories import AdminFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@attr('shard_1')
class TestECommerceDashboardViews(ModuleStoreTestCase):
"""
Check for E-commerce view on the new instructor dashboard
"""
    def setUp(self):
        """Create a course, a priced honor mode, and a logged-in instructor
        with finance-admin rights."""
        super(TestECommerceDashboardViews, self).setUp()
        self.course = CourseFactory.create()
        # Create instructor account
        self.instructor = AdminFactory.create()
        self.client.login(username=self.instructor.username, password="test")
        # A priced 'honor' mode makes this behave like a White Label course.
        mode = CourseMode(
            course_id=self.course.id.to_deprecated_string(), mode_slug='honor',
            mode_display_name='honor', min_price=10, currency='usd'
        )
        mode.save()
        # URL for instructor dash
        self.url = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})
        # Exact HTML of the e-commerce tab link asserted against in tests.
        self.e_commerce_link = '<a href="" data-section="e-commerce">E-Commerce</a>'
        # Finance-admin rights are required for most e-commerce features.
        CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
    def test_pass_e_commerce_tab_in_instructor_dashboard(self):
        """
        Test Pass E-commerce Tab is in the Instructor Dashboard
        """
        response = self.client.get(self.url)
        # The dashboard HTML should contain the e-commerce tab link.
        self.assertTrue(self.e_commerce_link in response.content)
        # Coupons should show up for White Label sites with priced honor modes.
        self.assertTrue('Coupon Code List' in response.content)
    def test_user_has_finance_admin_rights_in_e_commerce_tab(self):
        """Sales/invoice download controls render only for finance admins."""
        response = self.client.get(self.url)
        self.assertTrue(self.e_commerce_link in response.content)
        # Order/Invoice sales csv button text should render in e-commerce page
        self.assertTrue('Total Credit Card Purchases' in response.content)
        self.assertTrue('Download All Credit Card Purchases' in response.content)
        self.assertTrue('Download All Invoices' in response.content)
        # removing the course finance_admin role of login user
        CourseFinanceAdminRole(self.course.id).remove_users(self.instructor)
        # Order/Invoice sales csv button text should not be visible in e-commerce page if the user is not finance admin
        url = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url)
        self.assertFalse('Download All Invoices' in response.content)
    def test_user_view_course_price(self):
        """
        test to check if the user views the set price button and price in
        the instructor dashboard
        """
        response = self.client.get(self.url)
        self.assertTrue(self.e_commerce_link in response.content)
        # Total amount html should render in e-commerce page, total amount will be 0
        course_honor_mode = CourseMode.mode_for_course(self.course.id, 'honor')
        price = course_honor_mode.min_price
        self.assertTrue('Course price per seat: <span>$' + str(price) + '</span>' in response.content)
        # The "Set Price" control is hidden while the price is already set.
        self.assertFalse('+ Set Price</a></span>' in response.content)
        # removing the course finance_admin role of login user
        CourseFinanceAdminRole(self.course.id).remove_users(self.instructor)
        # total amount should not be visible in e-commerce page if the user is not finance admin
        url = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url)
        self.assertFalse('+ Set Price</a></span>' in response.content)
    def test_update_course_price_check(self):
        """Updating a course price via the endpoint reflects on the dashboard."""
        price = 200
        # course B
        course2 = CourseFactory.create(org='EDX', display_name='test_course', number='100')
        mode = CourseMode(
            course_id=course2.id.to_deprecated_string(), mode_slug='honor',
            mode_display_name='honor', min_price=30, currency='usd'
        )
        mode.save()
        # course A update
        CourseMode.objects.filter(course_id=self.course.id).update(min_price=price)
        set_course_price_url = reverse('set_course_mode_price', kwargs={'course_id': self.course.id.to_deprecated_string()})
        data = {'course_price': price, 'currency': 'usd'}
        response = self.client.post(set_course_price_url, data)
        self.assertTrue('CourseMode price updated successfully' in response.content)
        # Course A updated total amount should be visible in e-commerce page if the user is finance admin
        url = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url)
        self.assertTrue('Course price per seat: <span>$' + str(price) + '</span>' in response.content)
    def test_user_admin_set_course_price(self):
        """
        test to set the course price related functionality.
        test al the scenarios for setting a new course price
        """
        set_course_price_url = reverse('set_course_mode_price', kwargs={'course_id': self.course.id.to_deprecated_string()})
        data = {'course_price': '12%', 'currency': 'usd'}
        # Value Error course price should be a numeric value
        response = self.client.post(set_course_price_url, data)
        self.assertTrue("Please Enter the numeric value for the course price" in response.content)
        # validation check passes and course price is successfully added
        data['course_price'] = 100
        response = self.client.post(set_course_price_url, data)
        self.assertTrue("CourseMode price updated successfully" in response.content)
        # Deleting the honor mode exercises the DoesNotExist error path below.
        course_honor_mode = CourseMode.objects.get(mode_slug='honor')
        course_honor_mode.delete()
        # Course Mode not exist with mode slug honor
        response = self.client.post(set_course_price_url, data)
        self.assertTrue("CourseMode with the mode slug({mode_slug}) DoesNotExist".format(mode_slug='honor') in response.content)
    def test_add_coupon(self):
        """
        Test Add Coupon Scenarios. Handle all the HttpResponses return by add_coupon view
        """
        # URL for add_coupon
        add_coupon_url = reverse('add_coupon', kwargs={'course_id': self.course.id.to_deprecated_string()})
        expiration_date = datetime.datetime.now(pytz.UTC) + datetime.timedelta(days=2)
        data = {
            'code': 'A2314', 'course_id': self.course.id.to_deprecated_string(),
            'description': 'ADSADASDSAD', 'created_by': self.instructor, 'discount': 5,
            'expiration_date': '{month}/{day}/{year}'.format(month=expiration_date.month, day=expiration_date.day, year=expiration_date.year)
        }
        response = self.client.post(add_coupon_url, data)
        self.assertTrue("coupon with the coupon code ({code}) added successfully".format(code=data['code']) in response.content)
        #now add the coupon with the wrong value in the expiration_date
        # server will through the ValueError Exception in the expiration_date field
        # NOTE: the stray quote in the format string below is deliberate --
        # it produces a malformed date to trigger the error path.
        data = {
            'code': '213454', 'course_id': self.course.id.to_deprecated_string(),
            'description': 'ADSADASDSAD', 'created_by': self.instructor, 'discount': 5,
            'expiration_date': expiration_date.strftime('"%d/%m/%Y')
        }
        response = self.client.post(add_coupon_url, data)
        self.assertTrue("Please enter the date in this format i-e month/day/year" in response.content)
        # Re-posting an existing coupon code must be rejected.
        data = {
            'code': 'A2314', 'course_id': self.course.id.to_deprecated_string(),
            'description': 'asdsasda', 'created_by': self.instructor, 'discount': 99
        }
        response = self.client.post(add_coupon_url, data)
        self.assertTrue("coupon with the coupon code ({code}) already exist".format(code='A2314') in response.content)
        response = self.client.post(self.url)
        self.assertTrue('<td>ADSADASDSAD</td>' in response.content)
        self.assertTrue('<td>A2314</td>' in response.content)
        self.assertFalse('<td>111</td>' in response.content)
        # Discount above 100 percent is rejected.
        data = {
            'code': 'A2345314', 'course_id': self.course.id.to_deprecated_string(),
            'description': 'asdsasda', 'created_by': self.instructor, 'discount': 199
        }
        response = self.client.post(add_coupon_url, data)
        self.assertTrue("Please Enter the Coupon Discount Value Less than or Equal to 100" in response.content)
        # Non-integer discount is rejected.
        data['discount'] = '25%'
        response = self.client.post(add_coupon_url, data=data)
        self.assertTrue('Please Enter the Integer Value for Coupon Discount' in response.content)
        # A coupon code colliding with an existing registration code is rejected.
        course_registration = CourseRegistrationCode(
            code='Vs23Ws4j', course_id=unicode(self.course.id), created_by=self.instructor,
            mode_slug='honor'
        )
        course_registration.save()
        data['code'] = 'Vs23Ws4j'
        response = self.client.post(add_coupon_url, data)
        self.assertTrue("The code ({code}) that you have tried to define is already in use as a registration code"
                        .format(code=data['code']) in response.content)
def test_delete_coupon(self):
    """
    Test Delete Coupon Scenarios. Handle all the HttpResponses return by remove_coupon view
    """
    # Create an active coupon to exercise the happy path first.
    coupon = Coupon(
        code='AS452', description='asdsadsa', course_id=self.course.id.to_deprecated_string(),
        percentage_discount=10, created_by=self.instructor
    )
    coupon.save()
    # Sanity check: the dashboard lists the freshly created coupon.
    response = self.client.post(self.url)
    self.assertTrue('<td>AS452</td>' in response.content)
    # URL for remove_coupon
    delete_coupon_url = reverse('remove_coupon', kwargs={'course_id': self.course.id.to_deprecated_string()})
    # Happy path: deactivating an active coupon succeeds.
    response = self.client.post(delete_coupon_url, {'id': coupon.id})
    self.assertTrue('coupon with the coupon id ({coupon_id}) updated successfully'.format(coupon_id=coupon.id) in response.content)
    # Deleting an already-inactive coupon is reported, not repeated.
    coupon.is_active = False
    coupon.save()
    response = self.client.post(delete_coupon_url, {'id': coupon.id})
    self.assertTrue('coupon with the coupon id ({coupon_id}) is already inactive'.format(coupon_id=coupon.id) in response.content)
    # Unknown id -> DoesNotExist message.
    response = self.client.post(delete_coupon_url, {'id': 24454})
    self.assertTrue('coupon with the coupon id ({coupon_id}) DoesNotExist'.format(coupon_id=24454) in response.content)
    # Missing id -> explicit error message.
    response = self.client.post(delete_coupon_url, {'id': ''})
    self.assertTrue('coupon id is None' in response.content)
def test_get_coupon_info(self):
    """
    Test Edit Coupon Info Scenarios. Handle all the HttpResponses return by edit_coupon_info view
    """
    # Coupon with a future expiration date so its expiry is displayable.
    coupon = Coupon(
        code='AS452', description='asdsadsa', course_id=self.course.id.to_deprecated_string(),
        percentage_discount=10, created_by=self.instructor,
        expiration_date=datetime.datetime.now(pytz.UTC) + datetime.timedelta(days=2)
    )
    coupon.save()
    # URL for edit_coupon_info
    edit_url = reverse('get_coupon_info', kwargs={'course_id': self.course.id.to_deprecated_string()})
    # Happy path: existing active coupon info is returned.
    response = self.client.post(edit_url, {'id': coupon.id})
    self.assertTrue('coupon with the coupon id ({coupon_id}) updated successfully'.format(coupon_id=coupon.id) in response.content)
    self.assertIn(coupon.display_expiry_date, response.content)
    # Unknown id -> DoesNotExist message.
    response = self.client.post(edit_url, {'id': 444444})
    self.assertTrue('coupon with the coupon id ({coupon_id}) DoesNotExist'.format(coupon_id=444444) in response.content)
    # Missing id -> error message.
    # NOTE(review): the trailing double quote inside the expected string
    # below looks accidental -- confirm against the actual response text of
    # the edit_coupon_info view before relying on this assertion.
    response = self.client.post(edit_url, {'id': ''})
    self.assertTrue('coupon id not found"' in response.content)
    # Inactive coupons cannot be fetched for editing.
    coupon.is_active = False
    coupon.save()
    response = self.client.post(edit_url, {'id': coupon.id})
    self.assertTrue("coupon with the coupon id ({coupon_id}) is already inactive".format(coupon_id=coupon.id) in response.content)
def test_update_coupon(self):
    """
    Test Update Coupon Info Scenarios. Handle all the HttpResponses return by update_coupon view
    """
    coupon = Coupon(
        code='AS452', description='asdsadsa', course_id=self.course.id.to_deprecated_string(),
        percentage_discount=10, created_by=self.instructor
    )
    coupon.save()
    # Sanity check: the new coupon is listed on the dashboard.
    response = self.client.post(self.url)
    self.assertTrue('<td>AS452</td>' in response.content)
    data = {
        'coupon_id': coupon.id, 'code': 'AS452', 'discount': '10', 'description': 'updated_description', # pylint: disable=no-member
        'course_id': coupon.course_id.to_deprecated_string()
    }
    # URL for update_coupon
    update_coupon_url = reverse('update_coupon', kwargs={'course_id': self.course.id.to_deprecated_string()})
    # Happy path: description update succeeds and is reflected on the page.
    response = self.client.post(update_coupon_url, data=data)
    self.assertTrue('coupon with the coupon id ({coupon_id}) updated Successfully'.format(coupon_id=coupon.id)in response.content)
    response = self.client.post(self.url)
    self.assertTrue('<td>updated_description</td>' in response.content)
    data['coupon_id'] = 1000  # Coupon Not Exist with this ID
    response = self.client.post(update_coupon_url, data=data)
    self.assertTrue('coupon with the coupon id ({coupon_id}) DoesNotExist'.format(coupon_id=1000) in response.content)
    data['coupon_id'] = ''  # Coupon id is not provided
    response = self.client.post(update_coupon_url, data=data)
    self.assertTrue('coupon id not found' in response.content)
def test_verified_course(self):
    """Verify the e-commerce panel shows up for verified courses as well, without Coupons """
    # Change honor mode to verified.
    original_mode = CourseMode.objects.get(course_id=self.course.id, mode_slug='honor')
    original_mode.delete()
    new_mode = CourseMode(
        course_id=unicode(self.course.id), mode_slug='verified',
        mode_display_name='verified', min_price=10, currency='usd'
    )
    new_mode.save()
    # Get the response value, ensure the Coupon section is not included.
    response = self.client.get(self.url)
    self.assertTrue(self.e_commerce_link in response.content)
    # Coupons should show up for White Label sites with priced honor modes.
    self.assertFalse('Coupons List' in response.content)
| agpl-3.0 |
daspots/dasapp | main/auth/auth.py | 1 | 12697 | # coding: utf-8
from __future__ import absolute_import
import functools
import re
from flask_oauthlib import client as oauth
from google.appengine.ext import ndb
import flask
import flask_login
import flask_wtf
import unidecode
import wtforms
import cache
import config
import model
import task
import util
from main import app
# Private blinker namespace for auth-related signals (see permission_registered).
_signals = flask.signals.Namespace()
###############################################################################
# Flask Login
###############################################################################
# Global Flask-Login manager; bound to ``app`` via init_app() further below.
login_manager = flask_login.LoginManager()
class AnonymousUser(flask_login.AnonymousUserMixin):
    """Fallback user object for requests without a signed-in session.

    Mirrors the ``FlaskUser`` interface below so calling code can treat
    ``current_user`` uniformly without ``None`` checks.
    """
    # Sentinel values; id == 0 marks "not signed in" (see is_logged_in()).
    id = 0
    admin = False
    name = 'Anonymous'
    user_db = None

    def key(self):
        # Anonymous users have no datastore entity, hence no key.
        return None

    def has_permission(self, permission):
        # Anonymous users hold no permissions.
        return False

# Register the fallback with Flask-Login.
login_manager.anonymous_user = AnonymousUser
class FlaskUser(AnonymousUser):
    """Flask-Login adapter wrapping a ``model.User`` datastore entity."""

    def __init__(self, user_db):
        # Cache the frequently read fields directly on the wrapper.
        self.user_db = user_db
        self.id = user_db.key.id()
        self.name = user_db.name
        self.admin = user_db.admin

    def key(self):
        # URL-safe NDB key string for the wrapped entity.
        return self.user_db.key.urlsafe()

    def get_id(self):
        # Flask-Login hook: the id stored in the session and passed back to
        # the user_loader (load_user below) on subsequent requests.
        return self.user_db.key.urlsafe()

    def is_authenticated(self):
        # Wrapped users always come from a completed sign-in.
        return True

    def is_active(self):
        # Mirrors the datastore ``active`` flag.
        return self.user_db.active

    def is_anonymous(self):
        return False

    def has_permission(self, permission):
        # Delegate to the datastore model, which owns the permission list.
        return self.user_db.has_permission(permission)
@login_manager.user_loader
def load_user(key):
    """Flask-Login hook: reload a user from the session-stored key.

    Args:
        key: URL-safe NDB key string previously returned by FlaskUser.get_id().

    Returns:
        FlaskUser wrapping the datastore entity, or None when the key is
        malformed or no longer resolves to an entity. Returning None makes
        Flask-Login treat the request as anonymous instead of erroring out.
    """
    try:
        user_db = ndb.Key(urlsafe=key).get()
    except Exception:  # a stale/corrupt session cookie must not 500 the request
        return None
    if user_db:
        return FlaskUser(user_db)
    return None

login_manager.init_app(app)
def current_user_id():
    """Return the id of the signed-in user (0 for anonymous)."""
    user = flask_login.current_user
    return user.id
def current_user_key():
    """Return the NDB key of the signed-in user's entity, or None."""
    user_db = flask_login.current_user.user_db
    if user_db:
        return user_db.key
    return None
def current_user_db():
    """Return the ``model.User`` entity behind the current user (or None)."""
    flask_user = flask_login.current_user
    return flask_user.user_db
def is_logged_in():
    """True when the request carries a real signed-in user (non-zero id)."""
    user_id = flask_login.current_user.id
    return user_id != 0
###############################################################################
# Decorators
###############################################################################
def login_required(f):
    """Decorator: require a signed-in user.

    API requests (path under /api/) get a 401; browser requests are
    redirected to the sign-in page with a return URL.
    """
    decorator_order_guard(f, 'auth.login_required')

    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        if not is_logged_in():
            if flask.request.path.startswith('/api/'):
                return flask.abort(401)
            return flask.redirect(flask.url_for('signin', next=flask.request.url))
        return f(*args, **kwargs)
    return wrapper
def admin_required(f):
    """Decorator: allow only signed-in admins.

    Anonymous API calls get 401, anonymous page loads redirect to sign-in,
    signed-in non-admins get 403.
    """
    decorator_order_guard(f, 'auth.admin_required')

    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        signed_in = is_logged_in()
        if signed_in and current_user_db().admin:
            return f(*args, **kwargs)
        if not signed_in:
            if flask.request.path.startswith('/api/'):
                return flask.abort(401)
            return flask.redirect(flask.url_for('signin', next=flask.request.url))
        return flask.abort(403)
    return wrapper
def cron_required(f):
    """Decorator: allow App Engine cron requests or signed-in admins.

    Cron requests are identified by the X-Appengine-Cron header, which App
    Engine strips from external traffic.
    """
    decorator_order_guard(f, 'auth.cron_required')

    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        from_cron = 'X-Appengine-Cron' in flask.request.headers
        if from_cron or (is_logged_in() and current_user_db().admin):
            return f(*args, **kwargs)
        if is_logged_in():
            return flask.abort(403)
        return flask.redirect(flask.url_for('signin', next=flask.request.url))
    return wrapper
# Signal emitted whenever a view registers a permission via permission_required.
permission_registered = _signals.signal('permission-registered')
def permission_required(permission=None, methods=None):
    """Decorator factory: require ``permission`` for the wrapped view.

    Args:
        permission: Permission name; defaults to the view function's name.
        methods: Optional list of HTTP method names to guard; requests using
            other methods pass through unchecked.
    """
    def permission_decorator(f):
        decorator_order_guard(f, 'auth.permission_required')
        # default to decorated function name as permission
        perm = permission or f.func_name
        # Normalize the guarded methods once, at decoration time.
        meths = [m.upper() for m in methods] if methods else None
        # Announce the permission so listeners can index all known permissions.
        permission_registered.send(f, permission=perm)
        @functools.wraps(f)
        def decorated_function(*args, **kwargs):
            # Methods outside ``methods`` skip the permission check entirely.
            if meths and flask.request.method.upper() not in meths:
                return f(*args, **kwargs)
            if is_logged_in() and current_user_db().has_permission(perm):
                return f(*args, **kwargs)
            if not is_logged_in():
                # 401 for API clients, redirect for browsers.
                if flask.request.path.startswith('/api/'):
                    return flask.abort(401)
                return flask.redirect(flask.url_for('signin', next=flask.request.url))
            return flask.abort(403)
        return decorated_function
    return permission_decorator
###############################################################################
# Sign in stuff
###############################################################################
class SignInForm(flask_wtf.FlaskForm):
    """Email/password sign-in form with conditional reCAPTCHA."""
    email = wtforms.StringField(
        'Email',
        [wtforms.validators.required()],
        filters=[util.email_filter],
    )
    password = wtforms.StringField(
        'Password',
        [wtforms.validators.required()],
    )
    remember = wtforms.BooleanField(
        'Keep me signed in',
        [wtforms.validators.optional()],
    )
    # Removed at runtime by form_with_recaptcha() until the failed-attempt
    # limit is exceeded.
    recaptcha = flask_wtf.RecaptchaField()
    next_url = wtforms.HiddenField()
@app.route('/signin/', methods=['GET', 'POST'])
def signin():
    """Render and process the email/password sign-in form."""
    next_url = util.get_next_url()
    # form = None
    # if config.CONFIG_DB.has_email_authentication:
    form = form_with_recaptcha(SignInForm())
    save_request_params()
    if form.validate_on_submit():
        # Result protocol: user entity on success, None on bad credentials,
        # False on a duplicate-account conflict (see get_user_db_from_email).
        result = get_user_db_from_email(form.email.data, form.password.data)
        if result:
            cache.reset_auth_attempt()
            return signin_user_db(result)
        if result is None:
            form.email.errors.append('Email or Password do not match')
        if result is False:
            return flask.redirect(flask.url_for('welcome'))
    if not form.errors:
        form.next_url.data = next_url
    if form and form.errors:
        # Count failed attempts so reCAPTCHA kicks in after the limit.
        cache.bump_auth_attempt()
    return flask.render_template(
        'auth/auth.html',
        title='Sign in',
        html_class='auth',
        next_url=next_url,
        form=form,
        form_type='signin',
        # form_type='signin' if config.CONFIG_DB.has_email_authentication else '',
        **urls_for_oauth(next_url)
    )
###############################################################################
# Sign up stuff
###############################################################################
class SignUpForm(flask_wtf.FlaskForm):
    """Email-only sign-up form with conditional reCAPTCHA."""
    email = wtforms.StringField(
        'Email',
        [wtforms.validators.required(), wtforms.validators.email()],
        filters=[util.email_filter],
    )
    # Removed at runtime by form_with_recaptcha() until the failed-attempt
    # limit is exceeded.
    recaptcha = flask_wtf.RecaptchaField()
@app.route('/signup/', methods=['GET', 'POST'])
def signup():
    """Render and process the email sign-up form.

    On success a user entity is created, an activation notification task is
    queued, and the visitor is redirected to the sign-in page.

    Note: the ``config.CONFIG_DB.has_email_authentication`` toggle was
    previously hard-coded to True here (``if True:``); the dead branch, the
    dead ``form = None`` initializer and the constant title ternary have
    been removed -- behavior is unchanged.
    """
    # next_url is intentionally disabled for sign-up (was util.get_next_url()).
    next_url = None
    form = form_with_recaptcha(SignUpForm())
    save_request_params()
    if form.validate_on_submit():
        user_db = model.User.get_by('email', form.email.data)
        if user_db:
            form.email.errors.append('This email is already taken.')
        if not form.errors:
            # No auth_id yet: the account is activated via the emailed link.
            user_db = create_user_db(
                None,
                util.create_name_from_email(form.email.data),
                form.email.data,
                form.email.data,
            )
            user_db.put()
            task.activate_user_notification(user_db)
            cache.bump_auth_attempt()
            return flask.redirect(flask.url_for('signin'))
    if form.errors:
        # Throttle repeated failures (drives the reCAPTCHA display).
        cache.bump_auth_attempt()
    return flask.render_template(
        'auth/auth.html',
        title='Sign up',
        html_class='auth',
        next_url=next_url,
        form=form,
        **urls_for_oauth(next_url)
    )
###############################################################################
# Sign out stuff
###############################################################################
@app.route('/signout/')
def signout():
    """Clear the session and send the visitor back (or to sign-in)."""
    flask_login.logout_user()
    destination = util.param('next')
    if not destination:
        destination = flask.url_for('signin')
    return flask.redirect(destination)
###############################################################################
# Helpers
###############################################################################
def url_for_signin(service_name, next_url):
    """Build the sign-in URL for one OAuth provider endpoint."""
    endpoint = 'signin_%s' % service_name
    return flask.url_for(endpoint, next=next_url)
def urls_for_oauth(next_url):
    """Map every supported OAuth provider to its sign-in URL.

    Returns a dict of ``<provider>_signin_url`` keys for the auth template.
    """
    providers = (
        'azure_ad', 'bitbucket', 'dropbox', 'facebook', 'github',
        'google', 'gae', 'instagram', 'linkedin', 'mailru',
        'microsoft', 'reddit', 'twitter', 'vk', 'yahoo',
    )
    return {
        '%s_signin_url' % name: url_for_signin(name, next_url)
        for name in providers
    }
def create_oauth_app(service_config, name):
    """Register an OAuth remote app for ``name`` and return it.

    The service's config is stored under the upper-cased name in the Flask
    app config, which is where flask-oauthlib looks it up via ``app_key``.
    """
    config_key = name.upper()
    app.config[config_key] = service_config
    provider = oauth.OAuth()
    remote = provider.remote_app(name, app_key=config_key)
    provider.init_app(app)
    return remote
def decorator_order_guard(f, decorator_name):
    """Fail fast when an auth decorator sits above ``app.route``.

    Once app.route has registered the raw function, wrapping it afterwards
    has no effect -- so that situation is a programming error.
    """
    already_routed = f in app.view_functions.values()
    if already_routed:
        raise SyntaxError(
            'Do not use %s above app.route decorators as it would not be checked. '
            'Instead move the line below the app.route lines.' % decorator_name
        )
def save_request_params():
    """Stash the next-URL and remember-me flag for after an OAuth round-trip."""
    params = {}
    params['next'] = util.get_next_url()
    params['remember'] = util.param('remember')
    flask.session['auth-params'] = params
def signin_oauth(oauth_app, scheme=None):
    """Kick off the OAuth dance for ``oauth_app``.

    Clears any stale token, saves the request params for the callback, and
    redirects to the provider's authorize URL. Provider errors send the
    visitor back to the sign-in page with a flash message.
    """
    try:
        # Drop any token left over from a previous, aborted attempt.
        flask.session.pop('oauth_token', None)
        save_request_params()
        return oauth_app.authorize(callback=flask.url_for(
            '%s_authorized' % oauth_app.name, _external=True, _scheme=scheme
        ))
    except oauth.OAuthException:
        flask.flash(
            'Something went wrong with sign in. Please try again.',
            category='danger',
        )
        return flask.redirect(flask.url_for('signin', next=util.get_next_url()))
def form_with_recaptcha(form):
    """Drop the form's reCAPTCHA field unless it is actually needed.

    The field stays only when the failed-attempt limit was exceeded AND
    reCAPTCHA is enabled in the app config.
    """
    over_limit = cache.get_auth_attempt() >= config.RECAPTCHA_LIMIT
    keep_recaptcha = over_limit and config.CONFIG_DB.has_recaptcha
    if not keep_recaptcha:
        del form.recaptcha
    return form
###############################################################################
# User related stuff
###############################################################################
def create_user_db(auth_id, name, username, email='', verified=False, **props):
    """Create a ``model.User`` entity, or merge into an existing account.

    When a single verified user already owns the same verified email, the
    new ``auth_id`` is attached to that account instead of creating a
    duplicate. Otherwise a unique username is derived and a new entity is
    stored. Returns the (new or merged) user entity.
    """
    email = email.lower() if email else ''
    if verified and email:
        # limit=2 so ambiguous duplicates are detectable: merge only when
        # exactly one verified account matches.
        user_dbs, cursors = model.User.get_dbs(email=email, verified=True, limit=2)
        if len(user_dbs) == 1:
            user_db = user_dbs[0]
            user_db.auth_ids.append(auth_id)
            user_db.put()
            task.new_user_notification(user_db)
            return user_db
    # Python 2: normalize byte strings to unicode before transliteration.
    if isinstance(username, str):
        username = username.decode('utf-8')
    # Derive an ASCII username from the local part (before any '@').
    username = unidecode.unidecode(username.split('@')[0].lower()).strip()
    username = re.sub(r'[\W_]+', '.', username)
    # Append an increasing numeric suffix until the username is free.
    new_username = username
    n = 1
    while not model.User.is_username_available(new_username):
        new_username = '%s%d' % (username, n)
        n += 1
    user_db = model.User(
        name=name,
        email=email,
        username=new_username,
        auth_ids=[auth_id] if auth_id else [],
        verified=verified,
        token=util.uuid(),
        **props
    )
    user_db.put()
    task.new_user_notification(user_db)
    return user_db
@ndb.toplevel
def signin_user_db(user_db):
    """Log ``user_db`` in via Flask-Login and redirect appropriately.

    Consumes the auth params saved before any OAuth round-trip (remember-me
    flag and the post-signin destination).
    """
    if not user_db:
        return flask.redirect(flask.url_for('signin'))
    flask_user_db = FlaskUser(user_db)
    auth_params = flask.session.get('auth-params', {
        'next': flask.url_for('welcome'),
        'remember': False,
    })
    # One-shot: the saved params must not leak into later sign-ins.
    flask.session.pop('auth-params', None)
    if flask_login.login_user(flask_user_db, remember=auth_params['remember']):
        # Async put; @ndb.toplevel waits for it before the response is sent.
        user_db.put_async()
        return flask.redirect(util.get_next_url(auth_params['next']))
    flask.flash('Sorry, but you could not sign in.', category='danger')
    return flask.redirect(flask.url_for('signin'))
def get_user_db_from_email(email, password):
    """Look up an active user by email and verify the password.

    Returns:
        The matching ``model.User`` on success, ``None`` on unknown email or
        wrong password, or ``False`` when multiple active accounts share the
        email (a conflict that is reported to support).
    """
    # limit=2 so a duplicate-email conflict is detectable.
    user_dbs, cursors = model.User.get_dbs(email=email, active=True, limit=2)
    if not user_dbs:
        return None
    if len(user_dbs) > 1:
        flask.flash('''We are sorry but it looks like there is a conflict with
        your account. Our support team has been informed and we will get
        back to you as soon as possible.''', category='danger')
        task.email_conflict_notification(email)
        return False
    user_db = user_dbs[0]
    # NOTE(review): plain equality on the salted hash; a constant-time
    # comparison would be stricter -- confirm the hash scheme before changing.
    if user_db.password_hash == util.password_hash(user_db, password):
        return user_db
    return None
| mit |
mach0/QGIS | tests/src/python/test_qgsrasterrange.py | 45 | 20098 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsRasterRange.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '07/06/2018'
__copyright__ = 'Copyright 2018, The QGIS Project'
import qgis # NOQA switch sip api
from qgis.core import QgsRasterRange
from qgis.testing import unittest
class TestQgsRasterRange(unittest.TestCase):
def testBasic(self):
    """Test min/max/bounds getters and setters."""
    # Named ``rng`` to avoid shadowing the ``range`` builtin.
    rng = QgsRasterRange(1, 5)
    self.assertEqual(rng.min(), 1)
    self.assertEqual(rng.max(), 5)
    rng.setMin(2.2)
    rng.setMax(10.4)
    self.assertEqual(rng.min(), 2.2)
    self.assertEqual(rng.max(), 10.4)
    # Default bounds include both endpoints.
    self.assertEqual(rng.bounds(), QgsRasterRange.IncludeMinAndMax)
    rng.setBounds(QgsRasterRange.IncludeMin)
    self.assertEqual(rng.bounds(), QgsRasterRange.IncludeMin)
def testEquality(self):
    """Test == / != across min, max, bounds and unset (NaN) limits."""
    # Named ``rng`` to avoid shadowing the ``range`` builtin.
    rng = QgsRasterRange(1, 5)
    rng2 = QgsRasterRange(1, 5)
    self.assertEqual(rng, rng2)
    rng2.setMin(2)
    self.assertNotEqual(rng, rng2)
    rng2.setMin(1)
    rng2.setMax(4)
    self.assertNotEqual(rng, rng2)
    rng2.setMax(5)
    self.assertEqual(rng, rng2)
    # Bounds mode participates in equality too.
    rng.setBounds(QgsRasterRange.IncludeMax)
    self.assertNotEqual(rng, rng2)
    rng2.setBounds(QgsRasterRange.IncludeMax)
    self.assertEqual(rng, rng2)
    # Default-constructed (unbounded) ranges compare equal.
    rng = QgsRasterRange()
    rng2 = QgsRasterRange()
    self.assertEqual(rng, rng2)
    rng.setMin(1)
    self.assertNotEqual(rng, rng2)
    rng2.setMin(1)
    self.assertEqual(rng, rng2)
    rng = QgsRasterRange()
    rng2 = QgsRasterRange()
    rng.setMax(5)
    self.assertNotEqual(rng, rng2)
    rng2.setMax(5)
    self.assertEqual(rng, rng2)
def testContains(self):
    """Test value containment for every bounds mode and unset limits."""
    # Named ``rng`` to avoid shadowing the ``range`` builtin.
    rng = QgsRasterRange(1, 5)
    self.assertTrue(rng.contains(1))
    self.assertTrue(rng.contains(5))
    self.assertTrue(rng.contains(4))
    self.assertTrue(rng.contains(1.00001))
    self.assertTrue(rng.contains(4.99999))
    self.assertFalse(rng.contains(0.99999))
    self.assertFalse(rng.contains(5.00001))
    # with nan min/maxs
    rng = QgsRasterRange()
    self.assertTrue(rng.contains(1))
    self.assertTrue(rng.contains(-909999999))
    self.assertTrue(rng.contains(999999999))
    rng.setMin(5)
    self.assertFalse(rng.contains(0))
    self.assertTrue(rng.contains(5))
    self.assertTrue(rng.contains(10000000))
    rng = QgsRasterRange()
    rng.setMax(5)
    self.assertFalse(rng.contains(6))
    self.assertTrue(rng.contains(5))
    self.assertTrue(rng.contains(-99999))
    # IncludeMax: min endpoint is excluded.
    rng = QgsRasterRange(1, 5, QgsRasterRange.IncludeMax)
    self.assertFalse(rng.contains(0))
    self.assertFalse(rng.contains(1))
    self.assertTrue(rng.contains(2))
    self.assertTrue(rng.contains(5))
    self.assertFalse(rng.contains(6))
    # IncludeMin: max endpoint is excluded.
    rng = QgsRasterRange(1, 5, QgsRasterRange.IncludeMin)
    self.assertFalse(rng.contains(0))
    self.assertTrue(rng.contains(1))
    self.assertTrue(rng.contains(2))
    self.assertFalse(rng.contains(5))
    self.assertFalse(rng.contains(6))
    # Exclusive: both endpoints are excluded.
    rng = QgsRasterRange(1, 5, QgsRasterRange.Exclusive)
    self.assertFalse(rng.contains(0))
    self.assertFalse(rng.contains(1))
    self.assertTrue(rng.contains(2))
    self.assertFalse(rng.contains(5))
    self.assertFalse(rng.contains(6))
def testContainsList(self):
    """Test the static list overload of QgsRasterRange.contains."""
    # An empty list contains nothing.
    self.assertFalse(QgsRasterRange.contains(1, []))
    range_list = [QgsRasterRange(1, 5)]
    self.assertTrue(QgsRasterRange.contains(3, range_list))
    self.assertFalse(QgsRasterRange.contains(13, range_list))
    # A value is contained if ANY range in the list contains it.
    range_list.append(QgsRasterRange(11, 15))
    self.assertTrue(QgsRasterRange.contains(3, range_list))
    self.assertTrue(QgsRasterRange.contains(13, range_list))
    self.assertFalse(QgsRasterRange.contains(16, range_list))
def testOverlaps(self):
    """Test range-vs-range overlap for each bounds mode.

    NaN limits mean "unbounded on that side"; each section re-runs the
    same probe set against a differently-bounded base range.
    """
    # includes both ends
    range = QgsRasterRange(0, 10, QgsRasterRange.IncludeMinAndMax)
    self.assertTrue(range.overlaps(QgsRasterRange(1, 9)))
    self.assertTrue(range.overlaps(QgsRasterRange(1, 10)))
    self.assertTrue(range.overlaps(QgsRasterRange(1, 11)))
    self.assertTrue(range.overlaps(QgsRasterRange(0, 9)))
    self.assertTrue(range.overlaps(QgsRasterRange(0, 10)))
    self.assertTrue(range.overlaps(QgsRasterRange(0, 11)))
    self.assertTrue(range.overlaps(QgsRasterRange(-1, 10)))
    self.assertTrue(range.overlaps(QgsRasterRange(-1, 9)))
    self.assertTrue(range.overlaps(QgsRasterRange(1, 11)))
    self.assertTrue(range.overlaps(QgsRasterRange(-1, 11)))
    self.assertTrue(range.overlaps(QgsRasterRange(10, 11)))
    self.assertTrue(range.overlaps(QgsRasterRange(-1, 0)))
    self.assertFalse(range.overlaps(QgsRasterRange(-10, -1)))
    self.assertFalse(range.overlaps(QgsRasterRange(11, 12)))
    # Probes that are unbounded on one side.
    self.assertTrue(range.overlaps(QgsRasterRange(-1, float('NaN'))))
    self.assertTrue(range.overlaps(QgsRasterRange(0, float('NaN'))))
    self.assertTrue(range.overlaps(QgsRasterRange(1, float('NaN'))))
    self.assertTrue(range.overlaps(QgsRasterRange(10, float('NaN'))))
    self.assertFalse(range.overlaps(QgsRasterRange(11, float('NaN'))))
    self.assertFalse(range.overlaps(QgsRasterRange(float('NaN'), -1)))
    self.assertTrue(range.overlaps(QgsRasterRange(float('NaN'), 0)))
    self.assertTrue(range.overlaps(QgsRasterRange(float('NaN'), 1)))
    self.assertTrue(range.overlaps(QgsRasterRange(float('NaN'), 10)))
    self.assertTrue(range.overlaps(QgsRasterRange(float('NaN'), 11)))
    self.assertTrue(range.overlaps(QgsRasterRange(float('NaN'), float('NaN'))))
    # Touching probes: only count when the shared endpoint is included.
    self.assertFalse(range.overlaps(QgsRasterRange(-1, 0, QgsRasterRange.Exclusive)))
    self.assertFalse(range.overlaps(QgsRasterRange(-1, 0, QgsRasterRange.IncludeMin)))
    self.assertTrue(range.overlaps(QgsRasterRange(-1, 0, QgsRasterRange.IncludeMax)))
    self.assertFalse(range.overlaps(QgsRasterRange(10, 11, QgsRasterRange.Exclusive)))
    self.assertTrue(range.overlaps(QgsRasterRange(10, 11, QgsRasterRange.IncludeMin)))
    self.assertFalse(range.overlaps(QgsRasterRange(10, 11, QgsRasterRange.IncludeMax)))
    # Base range unbounded below (NaN minimum).
    range = QgsRasterRange(float('NaN'), 10, QgsRasterRange.IncludeMinAndMax)
    self.assertTrue(range.overlaps(QgsRasterRange(1, 9)))
    self.assertTrue(range.overlaps(QgsRasterRange(1, 10)))
    self.assertTrue(range.overlaps(QgsRasterRange(1, 11)))
    self.assertTrue(range.overlaps(QgsRasterRange(0, 9)))
    self.assertTrue(range.overlaps(QgsRasterRange(0, 10)))
    self.assertTrue(range.overlaps(QgsRasterRange(0, 11)))
    self.assertTrue(range.overlaps(QgsRasterRange(-1, 10)))
    self.assertTrue(range.overlaps(QgsRasterRange(-1, 9)))
    self.assertTrue(range.overlaps(QgsRasterRange(1, 11)))
    self.assertTrue(range.overlaps(QgsRasterRange(-1, 11)))
    self.assertTrue(range.overlaps(QgsRasterRange(10, 11)))
    self.assertTrue(range.overlaps(QgsRasterRange(-1, 0)))
    self.assertTrue(range.overlaps(QgsRasterRange(-10, -1)))
    self.assertFalse(range.overlaps(QgsRasterRange(11, 12)))
    self.assertTrue(range.overlaps(QgsRasterRange(-1, float('NaN'))))
    self.assertTrue(range.overlaps(QgsRasterRange(0, float('NaN'))))
    self.assertTrue(range.overlaps(QgsRasterRange(1, float('NaN'))))
    self.assertTrue(range.overlaps(QgsRasterRange(10, float('NaN'))))
    self.assertFalse(range.overlaps(QgsRasterRange(11, float('NaN'))))
    self.assertTrue(range.overlaps(QgsRasterRange(float('NaN'), -1)))
    self.assertTrue(range.overlaps(QgsRasterRange(float('NaN'), 0)))
    self.assertTrue(range.overlaps(QgsRasterRange(float('NaN'), 1)))
    self.assertTrue(range.overlaps(QgsRasterRange(float('NaN'), 10)))
    self.assertTrue(range.overlaps(QgsRasterRange(float('NaN'), 11)))
    self.assertTrue(range.overlaps(QgsRasterRange(float('NaN'), float('NaN'))))
    self.assertTrue(range.overlaps(QgsRasterRange(-1, 0, QgsRasterRange.Exclusive)))
    self.assertTrue(range.overlaps(QgsRasterRange(-1, 0, QgsRasterRange.IncludeMin)))
    self.assertTrue(range.overlaps(QgsRasterRange(-1, 0, QgsRasterRange.IncludeMax)))
    self.assertFalse(range.overlaps(QgsRasterRange(10, 11, QgsRasterRange.Exclusive)))
    self.assertTrue(range.overlaps(QgsRasterRange(10, 11, QgsRasterRange.IncludeMin)))
    self.assertFalse(range.overlaps(QgsRasterRange(10, 11, QgsRasterRange.IncludeMax)))
    # Base range unbounded above (NaN maximum).
    range = QgsRasterRange(0, float('NaN'), QgsRasterRange.IncludeMinAndMax)
    self.assertTrue(range.overlaps(QgsRasterRange(1, 9)))
    self.assertTrue(range.overlaps(QgsRasterRange(1, 10)))
    self.assertTrue(range.overlaps(QgsRasterRange(1, 11)))
    self.assertTrue(range.overlaps(QgsRasterRange(0, 9)))
    self.assertTrue(range.overlaps(QgsRasterRange(0, 10)))
    self.assertTrue(range.overlaps(QgsRasterRange(0, 11)))
    self.assertTrue(range.overlaps(QgsRasterRange(-1, 10)))
    self.assertTrue(range.overlaps(QgsRasterRange(-1, 9)))
    self.assertTrue(range.overlaps(QgsRasterRange(1, 11)))
    self.assertTrue(range.overlaps(QgsRasterRange(-1, 11)))
    self.assertTrue(range.overlaps(QgsRasterRange(10, 11)))
    self.assertTrue(range.overlaps(QgsRasterRange(-1, 0)))
    self.assertFalse(range.overlaps(QgsRasterRange(-10, -1)))
    self.assertTrue(range.overlaps(QgsRasterRange(11, 12)))
    self.assertTrue(range.overlaps(QgsRasterRange(-1, float('NaN'))))
    self.assertTrue(range.overlaps(QgsRasterRange(0, float('NaN'))))
    self.assertTrue(range.overlaps(QgsRasterRange(1, float('NaN'))))
    self.assertTrue(range.overlaps(QgsRasterRange(10, float('NaN'))))
    self.assertTrue(range.overlaps(QgsRasterRange(11, float('NaN'))))
    self.assertFalse(range.overlaps(QgsRasterRange(float('NaN'), -1)))
    self.assertTrue(range.overlaps(QgsRasterRange(float('NaN'), 0)))
    self.assertTrue(range.overlaps(QgsRasterRange(float('NaN'), 1)))
    self.assertTrue(range.overlaps(QgsRasterRange(float('NaN'), 10)))
    self.assertTrue(range.overlaps(QgsRasterRange(float('NaN'), 11)))
    self.assertTrue(range.overlaps(QgsRasterRange(float('NaN'), float('NaN'))))
    self.assertFalse(range.overlaps(QgsRasterRange(-1, 0, QgsRasterRange.Exclusive)))
    self.assertFalse(range.overlaps(QgsRasterRange(-1, 0, QgsRasterRange.IncludeMin)))
    self.assertTrue(range.overlaps(QgsRasterRange(-1, 0, QgsRasterRange.IncludeMax)))
    self.assertTrue(range.overlaps(QgsRasterRange(10, 11, QgsRasterRange.Exclusive)))
    self.assertTrue(range.overlaps(QgsRasterRange(10, 11, QgsRasterRange.IncludeMin)))
    self.assertTrue(range.overlaps(QgsRasterRange(10, 11, QgsRasterRange.IncludeMax)))
    # includes left end
    range = QgsRasterRange(0, 10, QgsRasterRange.IncludeMin)
    self.assertTrue(range.overlaps(QgsRasterRange(1, 9)))
    self.assertTrue(range.overlaps(QgsRasterRange(1, 10)))
    self.assertTrue(range.overlaps(QgsRasterRange(1, 11)))
    self.assertTrue(range.overlaps(QgsRasterRange(0, 9)))
    self.assertTrue(range.overlaps(QgsRasterRange(0, 10)))
    self.assertTrue(range.overlaps(QgsRasterRange(0, 11)))
    self.assertTrue(range.overlaps(QgsRasterRange(-1, 10)))
    self.assertTrue(range.overlaps(QgsRasterRange(-1, 9)))
    self.assertTrue(range.overlaps(QgsRasterRange(1, 11)))
    self.assertTrue(range.overlaps(QgsRasterRange(-1, 11)))
    self.assertFalse(range.overlaps(QgsRasterRange(10, 11)))
    self.assertTrue(range.overlaps(QgsRasterRange(-1, 0)))
    self.assertFalse(range.overlaps(QgsRasterRange(-10, -1)))
    self.assertFalse(range.overlaps(QgsRasterRange(11, 12)))
    self.assertTrue(range.overlaps(QgsRasterRange(-1, float('NaN'))))
    self.assertTrue(range.overlaps(QgsRasterRange(0, float('NaN'))))
    self.assertTrue(range.overlaps(QgsRasterRange(1, float('NaN'))))
    self.assertFalse(range.overlaps(QgsRasterRange(10, float('NaN'))))
    self.assertFalse(range.overlaps(QgsRasterRange(11, float('NaN'))))
    self.assertFalse(range.overlaps(QgsRasterRange(float('NaN'), -1)))
    self.assertTrue(range.overlaps(QgsRasterRange(float('NaN'), 0)))
    self.assertTrue(range.overlaps(QgsRasterRange(float('NaN'), 1)))
    self.assertTrue(range.overlaps(QgsRasterRange(float('NaN'), 10)))
    self.assertTrue(range.overlaps(QgsRasterRange(float('NaN'), 11)))
    self.assertTrue(range.overlaps(QgsRasterRange(float('NaN'), float('NaN'))))
    self.assertFalse(range.overlaps(QgsRasterRange(-1, 0, QgsRasterRange.Exclusive)))
    self.assertFalse(range.overlaps(QgsRasterRange(-1, 0, QgsRasterRange.IncludeMin)))
    self.assertTrue(range.overlaps(QgsRasterRange(-1, 0, QgsRasterRange.IncludeMax)))
    self.assertFalse(range.overlaps(QgsRasterRange(10, 11, QgsRasterRange.Exclusive)))
    self.assertFalse(range.overlaps(QgsRasterRange(10, 11, QgsRasterRange.IncludeMin)))
    self.assertFalse(range.overlaps(QgsRasterRange(10, 11, QgsRasterRange.IncludeMax)))
    # includes right end
    range = QgsRasterRange(0, 10, QgsRasterRange.IncludeMax)
    self.assertTrue(range.overlaps(QgsRasterRange(1, 9)))
    self.assertTrue(range.overlaps(QgsRasterRange(1, 10)))
    self.assertTrue(range.overlaps(QgsRasterRange(1, 11)))
    self.assertTrue(range.overlaps(QgsRasterRange(0, 9)))
    self.assertTrue(range.overlaps(QgsRasterRange(0, 10)))
    self.assertTrue(range.overlaps(QgsRasterRange(0, 11)))
    self.assertTrue(range.overlaps(QgsRasterRange(-1, 10)))
    self.assertTrue(range.overlaps(QgsRasterRange(-1, 9)))
    self.assertTrue(range.overlaps(QgsRasterRange(1, 11)))
    self.assertTrue(range.overlaps(QgsRasterRange(-1, 11)))
    self.assertTrue(range.overlaps(QgsRasterRange(10, 11)))
    self.assertFalse(range.overlaps(QgsRasterRange(-1, 0)))
    self.assertFalse(range.overlaps(QgsRasterRange(-10, -1)))
    self.assertFalse(range.overlaps(QgsRasterRange(11, 12)))
    self.assertTrue(range.overlaps(QgsRasterRange(-1, float('NaN'))))
    self.assertTrue(range.overlaps(QgsRasterRange(0, 50)))
    self.assertTrue(range.overlaps(QgsRasterRange(1, float('NaN'))))
    self.assertTrue(range.overlaps(QgsRasterRange(10, float('NaN'))))
    self.assertFalse(range.overlaps(QgsRasterRange(11, float('NaN'))))
    self.assertFalse(range.overlaps(QgsRasterRange(float('NaN'), -1)))
    self.assertFalse(range.overlaps(QgsRasterRange(float('NaN'), 0)))
    self.assertTrue(range.overlaps(QgsRasterRange(float('NaN'), 1)))
    self.assertTrue(range.overlaps(QgsRasterRange(float('NaN'), 10)))
    self.assertTrue(range.overlaps(QgsRasterRange(float('NaN'), 11)))
    self.assertTrue(range.overlaps(QgsRasterRange(float('NaN'), float('NaN'))))
    self.assertFalse(range.overlaps(QgsRasterRange(-1, 0, QgsRasterRange.Exclusive)))
    self.assertFalse(range.overlaps(QgsRasterRange(-1, 0, QgsRasterRange.IncludeMin)))
    self.assertFalse(range.overlaps(QgsRasterRange(-1, 0, QgsRasterRange.IncludeMax)))
    self.assertFalse(range.overlaps(QgsRasterRange(10, 11, QgsRasterRange.Exclusive)))
    self.assertTrue(range.overlaps(QgsRasterRange(10, 11, QgsRasterRange.IncludeMin)))
    self.assertFalse(range.overlaps(QgsRasterRange(10, 11, QgsRasterRange.IncludeMax)))
    # includes neither end
    range = QgsRasterRange(0, 10, QgsRasterRange.Exclusive)
    self.assertTrue(range.overlaps(QgsRasterRange(1, 9)))
    self.assertTrue(range.overlaps(QgsRasterRange(1, 10)))
    self.assertTrue(range.overlaps(QgsRasterRange(1, 11)))
    self.assertTrue(range.overlaps(QgsRasterRange(0, 9)))
    self.assertTrue(range.overlaps(QgsRasterRange(0, 10)))
    self.assertTrue(range.overlaps(QgsRasterRange(-1, 10)))
    self.assertTrue(range.overlaps(QgsRasterRange(-1, 9)))
    self.assertTrue(range.overlaps(QgsRasterRange(1, 11)))
    self.assertTrue(range.overlaps(QgsRasterRange(-1, 11)))
    self.assertFalse(range.overlaps(QgsRasterRange(10, 11)))
    self.assertFalse(range.overlaps(QgsRasterRange(-1, 0)))
    self.assertFalse(range.overlaps(QgsRasterRange(-10, -1)))
    self.assertFalse(range.overlaps(QgsRasterRange(11, 12)))
    self.assertTrue(range.overlaps(QgsRasterRange(-1, float('NaN'))))
    self.assertTrue(range.overlaps(QgsRasterRange(1, float('NaN'))))
    self.assertFalse(range.overlaps(QgsRasterRange(10, float('NaN'))))
    self.assertFalse(range.overlaps(QgsRasterRange(11, float('NaN'))))
    self.assertFalse(range.overlaps(QgsRasterRange(float('NaN'), -1)))
    self.assertFalse(range.overlaps(QgsRasterRange(float('NaN'), 0)))
    self.assertTrue(range.overlaps(QgsRasterRange(float('NaN'), 1)))
    self.assertTrue(range.overlaps(QgsRasterRange(float('NaN'), 10)))
    self.assertTrue(range.overlaps(QgsRasterRange(float('NaN'), 11)))
    self.assertTrue(range.overlaps(QgsRasterRange(float('NaN'), float('NaN'))))
    self.assertFalse(range.overlaps(QgsRasterRange(-1, 0, QgsRasterRange.Exclusive)))
    self.assertFalse(range.overlaps(QgsRasterRange(-1, 0, QgsRasterRange.IncludeMin)))
    self.assertFalse(range.overlaps(QgsRasterRange(-1, 0, QgsRasterRange.IncludeMax)))
    self.assertFalse(range.overlaps(QgsRasterRange(10, 11, QgsRasterRange.Exclusive)))
    self.assertFalse(range.overlaps(QgsRasterRange(10, 11, QgsRasterRange.IncludeMin)))
    self.assertFalse(range.overlaps(QgsRasterRange(10, 11, QgsRasterRange.IncludeMax)))
def testAsText(self):
    """Check QgsRasterRange.asText() for every bounds/inclusivity combination.

    Behavior-identical restyle of the original assertion list: the same
    ranges are checked in the same order, driven by a table instead of
    sixteen literal assertEqual calls.
    """
    nan = float('NaN')
    # (lower, upper, lower-as-text, upper-as-text); NaN renders as +/- infinity.
    bounds = [(0, 10, '0', '10'),
              (-1, nan, '-1', '∞'),
              (nan, 5, '-∞', '5'),
              (nan, nan, '-∞', '∞')]

    # Default inclusivity (both ends closed) plus the explicit equivalent.
    self.assertEqual(
        QgsRasterRange(0, 10, QgsRasterRange.IncludeMinAndMax).asText(),
        '0 ≤ x ≤ 10')
    for lower, upper, lower_text, upper_text in bounds[1:]:
        self.assertEqual(QgsRasterRange(lower, upper).asText(),
                         '{} ≤ x ≤ {}'.format(lower_text, upper_text))

    # Explicit inclusivity flags and the comparison operators they render as.
    operators = [(QgsRasterRange.IncludeMin, '≤', '<'),
                 (QgsRasterRange.IncludeMax, '<', '≤'),
                 (QgsRasterRange.Exclusive, '<', '<')]
    for flag, low_op, high_op in operators:
        for lower, upper, lower_text, upper_text in bounds:
            self.assertEqual(
                QgsRasterRange(lower, upper, flag).asText(),
                '{} {} x {} {}'.format(lower_text, low_op,
                                       upper_text, high_op))
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
Nitaco/ansible | contrib/inventory/mdt_dynamic_inventory.py | 117 | 4538 | #!/usr/bin/env python
# (c) 2016, Julian Barnett <jbarnett@tableau.com>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
'''
MDT external inventory script
=================================
author: J Barnett 06/23/2016 01:15
maintainer: J Barnett (github @jbarnett1981)
'''
import argparse
import json
import pymssql
try:
import configparser
except ImportError:
import ConfigParser as configparser
class MDTInventory(object):
    """Build an Ansible dynamic inventory from an MDT SQL Server database.

    Reads connection settings from ``mdt.ini``, queries the MDT
    ``ComputerIdentity``/``Settings_Roles`` tables over pymssql, and prints
    the inventory as JSON (``--list``) or a single host's vars (``--host``).
    """

    def __init__(self):
        ''' Main execution path '''
        # Lazily-opened pymssql connection; created on demand in _connect().
        self.conn = None

        # Initialize empty inventory
        self.inventory = self._empty_inventory()

        # Read CLI arguments
        self.read_settings()
        self.parse_cli_args()

        # Get Hosts
        if self.args.list:
            self.get_hosts()

        # Get specific host vars
        if self.args.host:
            self.get_hosts(self.args.host)

    def _connect(self, query):
        '''
        Connect to MDT and dump contents of dbo.ComputerIdentity database

        The result rows are stored on ``self.mdt_dump``.
        '''
        if not self.conn:
            self.conn = pymssql.connect(
                server=self.mdt_server + "\\" + self.mdt_instance,
                user=self.mdt_user, password=self.mdt_password,
                database=self.mdt_database)
        cursor = self.conn.cursor()
        cursor.execute(query)
        self.mdt_dump = cursor.fetchall()
        self.conn.close()
        # BUG FIX: reset the handle so a subsequent call reconnects instead
        # of reusing the closed connection (e.g. when both --list and --host
        # are given, get_hosts() runs twice).
        self.conn = None

    def get_hosts(self, hostname=False):
        '''
        Gets host from MDT Database

        :param hostname: when set, restrict the query to that single host.
        '''
        if hostname:
            query = ("SELECT t1.ID, t1.Description, t1.MacAddress, t2.Role "
                     "FROM ComputerIdentity as t1 join Settings_Roles as t2 "
                     "on t1.ID = t2.ID where t1.Description = '%s'" % hostname)
        else:
            query = ('SELECT t1.ID, t1.Description, t1.MacAddress, t2.Role '
                     'FROM ComputerIdentity as t1 join Settings_Roles as t2 '
                     'on t1.ID = t2.ID')
        self._connect(query)

        # Configure to group name configured in Ansible Tower for this inventory
        groupname = self.mdt_groupname

        # Initialize empty host list
        hostlist = []

        # Parse through db dump and populate inventory
        # Row layout (from the SELECT above): (ID, Description, MacAddress, Role)
        for hosts in self.mdt_dump:
            self.inventory['_meta']['hostvars'][hosts[1]] = {
                'id': hosts[0], 'name': hosts[1],
                'mac': hosts[2], 'role': hosts[3]}
            hostlist.append(hosts[1])
        self.inventory[groupname] = hostlist

        # Print it all out
        print(json.dumps(self.inventory, indent=2))

    def _empty_inventory(self):
        '''
        Create empty inventory dictionary
        '''
        return {"_meta": {"hostvars": {}}}

    def read_settings(self):
        '''
        Reads the settings from the mdt.ini file
        '''
        # SafeConfigParser is deprecated (removed in Python 3.12);
        # ConfigParser is the drop-in supported equivalent on both Python 2
        # (via the ConfigParser-module alias imported above) and Python 3.
        config = configparser.ConfigParser()
        config.read('mdt.ini')

        # MDT Server and instance and database
        self.mdt_server = config.get('mdt', 'server')
        self.mdt_instance = config.get('mdt', 'instance')
        self.mdt_database = config.get('mdt', 'database')

        # MDT Login credentials
        if config.has_option('mdt', 'user'):
            self.mdt_user = config.get('mdt', 'user')
        if config.has_option('mdt', 'password'):
            self.mdt_password = config.get('mdt', 'password')

        # Group name in Tower
        if config.has_option('tower', 'groupname'):
            self.mdt_groupname = config.get('tower', 'groupname')

    def parse_cli_args(self):
        '''
        Command line argument processing
        '''
        parser = argparse.ArgumentParser(
            description='Produce an Ansible Inventory file based on MDT')
        parser.add_argument('--list', action='store_true', default=False,
                            help='List instances')
        parser.add_argument('--host', action='store',
                            help='Get all the variables about a specific instance')
        self.args = parser.parse_args()
if __name__ == "__main__":
# Run the script
MDTInventory()
| gpl-3.0 |
Hybrid-Cloud/conveyor | conveyor/conveyorheat/engine/resources/openstack/neutron/lbaas/pool_member.py | 1 | 6964 | #
# Copyright 2015 IBM Corp.
#
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from conveyor.conveyorheat.engine import attributes
from conveyor.conveyorheat.engine import constraints
from conveyor.conveyorheat.engine import properties
from conveyor.conveyorheat.engine.resources.openstack.neutron import neutron
from conveyor.conveyorheat.engine import support
from conveyor.i18n import _
class PoolMember(neutron.NeutronResource):
    """A resource for managing LBaaS v2 Pool Members.

    A pool member represents a single backend node.
    """

    support_status = support.SupportStatus(version='6.0.0')

    # Neutron must expose the LBaaS v2 API extension for this resource.
    required_service_extension = 'lbaasv2'

    PROPERTIES = (
        POOL, ADDRESS, PROTOCOL_PORT, WEIGHT, ADMIN_STATE_UP,
        SUBNET,
    ) = (
        'pool', 'address', 'protocol_port', 'weight', 'admin_state_up',
        'subnet'
    )

    ATTRIBUTES = (
        ADDRESS_ATTR, POOL_ID_ATTR
    ) = (
        'address', 'pool_id'
    )

    properties_schema = {
        POOL: properties.Schema(
            properties.Schema.STRING,
            _('Name or ID of the load balancing pool.'),
            required=True
        ),
        ADDRESS: properties.Schema(
            properties.Schema.STRING,
            _('IP address of the pool member on the pool network.'),
            required=True,
            constraints=[
                constraints.CustomConstraint('ip_addr')
            ]
        ),
        PROTOCOL_PORT: properties.Schema(
            properties.Schema.INTEGER,
            _('Port on which the pool member listens for requests or '
              'connections.'),
            required=True,
            constraints=[
                constraints.Range(1, 65535),
            ]
        ),
        WEIGHT: properties.Schema(
            properties.Schema.INTEGER,
            _('Weight of pool member in the pool (default to 1).'),
            default=1,
            constraints=[
                constraints.Range(0, 256),
            ],
            update_allowed=True
        ),
        ADMIN_STATE_UP: properties.Schema(
            properties.Schema.BOOLEAN,
            _('The administrative state of the pool member.'),
            default=True,
            update_allowed=True,
            # NOTE(review): only 'True' is accepted here, so the member can
            # never be administratively disabled through this property —
            # presumably intentional; confirm against upstream Heat.
            constraints=[constraints.AllowedValues(['True'])]
        ),
        SUBNET: properties.Schema(
            properties.Schema.STRING,
            _('Subnet name or ID of this member.'),
            constraints=[
                constraints.CustomConstraint('neutron.subnet')
            ]
        ),
    }

    attributes_schema = {
        ADDRESS_ATTR: attributes.Schema(
            _('The IP address of the pool member.'),
            type=attributes.Schema.STRING
        ),
        POOL_ID_ATTR: attributes.Schema(
            _('The ID of the pool to which the pool member belongs.'),
            type=attributes.Schema.STRING
        )
    }

    def __init__(self, name, definition, stack):
        """Initialize the member; pool/load balancer ids are resolved lazily."""
        super(PoolMember, self).__init__(name, definition, stack)
        self._pool_id = None
        self._lb_id = None

    @property
    def pool_id(self):
        """UUID of the parent pool, resolved from name-or-ID once and cached."""
        if self._pool_id is None:
            self._pool_id = self.client_plugin().find_resourceid_by_name_or_id(
                self.POOL,
                self.properties[self.POOL],
                cmd_resource='lbaas_pool')
        return self._pool_id

    @property
    def lb_id(self):
        """UUID of the owning load balancer.

        Discovered by walking pool -> first listener -> first load balancer,
        then cached.
        """
        if self._lb_id is None:
            pool = self.client().show_lbaas_pool(self.pool_id)['pool']

            listener_id = pool['listeners'][0]['id']
            listener = self.client().show_listener(listener_id)['listener']

            self._lb_id = listener['loadbalancers'][0]['id']
        return self._lb_id

    def _check_lb_status(self):
        # The load balancer serializes changes; poll until it leaves PENDING.
        return self.client_plugin().check_lb_status(self.lb_id)

    def handle_create(self):
        """Prepare the member body; the create call itself happens in
        check_create_complete so it can be retried while the LB is busy."""
        properties = self.prepare_properties(
            self.properties,
            self.physical_resource_name())
        self.client_plugin().resolve_pool(
            properties, self.POOL, 'pool_id')
        # pool_id is passed positionally to create_lbaas_member below, not
        # inside the request body.
        properties.pop('pool_id')

        if self.SUBNET in properties:
            self.client_plugin().resolve_subnet(
                properties, self.SUBNET, 'subnet_id')
        return properties

    def check_create_complete(self, properties):
        """Create the member (once) and wait for the LB to become ACTIVE."""
        if self.resource_id is None:
            try:
                member = self.client().create_lbaas_member(
                    self.pool_id, {'member': properties})['member']
                self.resource_id_set(member['id'])
            except Exception as ex:
                # "invalid" means the LB is still busy; retry on next poll.
                if self.client_plugin().is_invalid(ex):
                    return False
                raise

        return self._check_lb_status()

    def _show_resource(self):
        """Fetch the member's current representation from Neutron."""
        member = self.client().show_lbaas_member(self.resource_id,
                                                 self.pool_id)
        return member['member']

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        # Defer the API call to check_update_complete so it can be retried
        # while the load balancer is busy.
        self._update_called = False
        return prop_diff

    def check_update_complete(self, prop_diff):
        """Apply the property diff (once) and wait for the LB to settle."""
        if not prop_diff:
            return True

        if not self._update_called:
            try:
                self.client().update_lbaas_member(self.resource_id,
                                                  self.pool_id,
                                                  {'member': prop_diff})
                self._update_called = True
            except Exception as ex:
                # "invalid" means the LB is still busy; retry on next poll.
                if self.client_plugin().is_invalid(ex):
                    return False
                raise

        return self._check_lb_status()

    def handle_delete(self):
        # Deletion is performed (and retried) in check_delete_complete.
        self._delete_called = False

    def check_delete_complete(self, data):
        """Delete the member (once) and wait for the LB to settle."""
        if self.resource_id is None:
            return True

        if not self._delete_called:
            try:
                self.client().delete_lbaas_member(self.resource_id,
                                                  self.pool_id)
                self._delete_called = True
            except Exception as ex:
                if self.client_plugin().is_invalid(ex):
                    return False
                elif self.client_plugin().is_not_found(ex):
                    # Already gone: treat as successfully deleted.
                    return True
                raise

        return self._check_lb_status()
def resource_mapping():
    """Expose the Heat resource-type name handled by this module."""
    mapping = {'OS::Neutron::LBaaS::PoolMember': PoolMember}
    return mapping
| apache-2.0 |
smalls257/VRvisu | Library/External.LCA_RESTRICTED/Languages/CPython/27/Lib/encodings/cp1255.py | 593 | 12722 | """ Python Character Mapping Codec cp1255 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1255.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless cp1255 codec backed by the module-level charmap tables."""

    def encode(self, input, errors='strict'):
        """Encode a unicode string to cp1255 bytes."""
        return codecs.charmap_encode(input, errors, encoding_table)

    def decode(self, input, errors='strict'):
        """Decode cp1255 bytes to a unicode string."""
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental cp1255 encoder; the charmap codec keeps no state."""

    def encode(self, input, final=False):
        # charmap_encode returns (bytes, length-consumed); only the bytes
        # are part of the incremental-encoder contract.
        encoded, _consumed = codecs.charmap_encode(input, self.errors,
                                                   encoding_table)
        return encoded
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental cp1255 decoder; the charmap codec keeps no state."""

    def decode(self, input, final=False):
        # charmap_decode returns (text, length-consumed); only the text
        # is part of the incremental-decoder contract.
        decoded, _consumed = codecs.charmap_decode(input, self.errors,
                                                   decoding_table)
        return decoded
class StreamWriter(Codec,codecs.StreamWriter):
    # The stream API reuses Codec.encode unchanged; no extra state needed.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # The stream API reuses Codec.decode unchanged; no extra state needed.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo entry used to register the cp1255 codec."""
    # Codec is stateless, so one shared instance serves both directions.
    codec = Codec()
    return codecs.CodecInfo(
        name='cp1255',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u20ac' # 0x80 -> EURO SIGN
u'\ufffe' # 0x81 -> UNDEFINED
u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
u'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
u'\u2020' # 0x86 -> DAGGER
u'\u2021' # 0x87 -> DOUBLE DAGGER
u'\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u2030' # 0x89 -> PER MILLE SIGN
u'\ufffe' # 0x8A -> UNDEFINED
u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\ufffe' # 0x8C -> UNDEFINED
u'\ufffe' # 0x8D -> UNDEFINED
u'\ufffe' # 0x8E -> UNDEFINED
u'\ufffe' # 0x8F -> UNDEFINED
u'\ufffe' # 0x90 -> UNDEFINED
u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
u'\u2022' # 0x95 -> BULLET
u'\u2013' # 0x96 -> EN DASH
u'\u2014' # 0x97 -> EM DASH
u'\u02dc' # 0x98 -> SMALL TILDE
u'\u2122' # 0x99 -> TRADE MARK SIGN
u'\ufffe' # 0x9A -> UNDEFINED
u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\ufffe' # 0x9C -> UNDEFINED
u'\ufffe' # 0x9D -> UNDEFINED
u'\ufffe' # 0x9E -> UNDEFINED
u'\ufffe' # 0x9F -> UNDEFINED
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\u20aa' # 0xA4 -> NEW SHEQEL SIGN
u'\xa5' # 0xA5 -> YEN SIGN
u'\xa6' # 0xA6 -> BROKEN BAR
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\xd7' # 0xAA -> MULTIPLICATION SIGN
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xAC -> NOT SIGN
u'\xad' # 0xAD -> SOFT HYPHEN
u'\xae' # 0xAE -> REGISTERED SIGN
u'\xaf' # 0xAF -> MACRON
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
u'\xb4' # 0xB4 -> ACUTE ACCENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\xb8' # 0xB8 -> CEDILLA
u'\xb9' # 0xB9 -> SUPERSCRIPT ONE
u'\xf7' # 0xBA -> DIVISION SIGN
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
u'\xbf' # 0xBF -> INVERTED QUESTION MARK
u'\u05b0' # 0xC0 -> HEBREW POINT SHEVA
u'\u05b1' # 0xC1 -> HEBREW POINT HATAF SEGOL
u'\u05b2' # 0xC2 -> HEBREW POINT HATAF PATAH
u'\u05b3' # 0xC3 -> HEBREW POINT HATAF QAMATS
u'\u05b4' # 0xC4 -> HEBREW POINT HIRIQ
u'\u05b5' # 0xC5 -> HEBREW POINT TSERE
u'\u05b6' # 0xC6 -> HEBREW POINT SEGOL
u'\u05b7' # 0xC7 -> HEBREW POINT PATAH
u'\u05b8' # 0xC8 -> HEBREW POINT QAMATS
u'\u05b9' # 0xC9 -> HEBREW POINT HOLAM
u'\ufffe' # 0xCA -> UNDEFINED
u'\u05bb' # 0xCB -> HEBREW POINT QUBUTS
u'\u05bc' # 0xCC -> HEBREW POINT DAGESH OR MAPIQ
u'\u05bd' # 0xCD -> HEBREW POINT METEG
u'\u05be' # 0xCE -> HEBREW PUNCTUATION MAQAF
u'\u05bf' # 0xCF -> HEBREW POINT RAFE
u'\u05c0' # 0xD0 -> HEBREW PUNCTUATION PASEQ
u'\u05c1' # 0xD1 -> HEBREW POINT SHIN DOT
u'\u05c2' # 0xD2 -> HEBREW POINT SIN DOT
u'\u05c3' # 0xD3 -> HEBREW PUNCTUATION SOF PASUQ
u'\u05f0' # 0xD4 -> HEBREW LIGATURE YIDDISH DOUBLE VAV
u'\u05f1' # 0xD5 -> HEBREW LIGATURE YIDDISH VAV YOD
u'\u05f2' # 0xD6 -> HEBREW LIGATURE YIDDISH DOUBLE YOD
u'\u05f3' # 0xD7 -> HEBREW PUNCTUATION GERESH
u'\u05f4' # 0xD8 -> HEBREW PUNCTUATION GERSHAYIM
u'\ufffe' # 0xD9 -> UNDEFINED
u'\ufffe' # 0xDA -> UNDEFINED
u'\ufffe' # 0xDB -> UNDEFINED
u'\ufffe' # 0xDC -> UNDEFINED
u'\ufffe' # 0xDD -> UNDEFINED
u'\ufffe' # 0xDE -> UNDEFINED
u'\ufffe' # 0xDF -> UNDEFINED
u'\u05d0' # 0xE0 -> HEBREW LETTER ALEF
u'\u05d1' # 0xE1 -> HEBREW LETTER BET
u'\u05d2' # 0xE2 -> HEBREW LETTER GIMEL
u'\u05d3' # 0xE3 -> HEBREW LETTER DALET
u'\u05d4' # 0xE4 -> HEBREW LETTER HE
u'\u05d5' # 0xE5 -> HEBREW LETTER VAV
u'\u05d6' # 0xE6 -> HEBREW LETTER ZAYIN
u'\u05d7' # 0xE7 -> HEBREW LETTER HET
u'\u05d8' # 0xE8 -> HEBREW LETTER TET
u'\u05d9' # 0xE9 -> HEBREW LETTER YOD
u'\u05da' # 0xEA -> HEBREW LETTER FINAL KAF
u'\u05db' # 0xEB -> HEBREW LETTER KAF
u'\u05dc' # 0xEC -> HEBREW LETTER LAMED
u'\u05dd' # 0xED -> HEBREW LETTER FINAL MEM
u'\u05de' # 0xEE -> HEBREW LETTER MEM
u'\u05df' # 0xEF -> HEBREW LETTER FINAL NUN
u'\u05e0' # 0xF0 -> HEBREW LETTER NUN
u'\u05e1' # 0xF1 -> HEBREW LETTER SAMEKH
u'\u05e2' # 0xF2 -> HEBREW LETTER AYIN
u'\u05e3' # 0xF3 -> HEBREW LETTER FINAL PE
u'\u05e4' # 0xF4 -> HEBREW LETTER PE
u'\u05e5' # 0xF5 -> HEBREW LETTER FINAL TSADI
u'\u05e6' # 0xF6 -> HEBREW LETTER TSADI
u'\u05e7' # 0xF7 -> HEBREW LETTER QOF
u'\u05e8' # 0xF8 -> HEBREW LETTER RESH
u'\u05e9' # 0xF9 -> HEBREW LETTER SHIN
u'\u05ea' # 0xFA -> HEBREW LETTER TAV
u'\ufffe' # 0xFB -> UNDEFINED
u'\ufffe' # 0xFC -> UNDEFINED
u'\u200e' # 0xFD -> LEFT-TO-RIGHT MARK
u'\u200f' # 0xFE -> RIGHT-TO-LEFT MARK
u'\ufffe' # 0xFF -> UNDEFINED
)
### Encoding table
# Inverse mapping (unicode -> byte) derived from decoding_table at import time.
encoding_table=codecs.charmap_build(decoding_table)
| gpl-3.0 |
endlessm/chromium-browser | tools/lldb/lldb_chrome.py | 4 | 1446 | # Copyright (c) 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
LLDB Support for Chromium types in Xcode
Add the following to your ~/.lldbinit:
command script import {Path to SRC Root}/tools/lldb/lldb_chrome.py
"""
import lldb
def __lldb_init_module(debugger, internal_dict):
    """Hook called by LLDB on import; registers the base::string16 summary."""
    summary_cmd = ('type summary add -F '
                   'lldb_chrome.basestring16_SummaryProvider base::string16')
    debugger.HandleCommand(summary_cmd)
def basestring16_SummaryProvider(valobj, internal_dict):
    """Render a base::string16 value as a quoted UTF-16-decoded string.

    NOTE(review): this walks libc++'s internal std::basic_string layout
    (__r_.__value_ union of a short/SSO representation __s and a long/heap
    representation __l) — it is tied to the exact libc++ ABI shipped with
    the Chromium toolchain; verify on toolchain upgrades.
    """
    s = valobj.GetValueForExpressionPath('.__r_.__value_.__s')
    l = valobj.GetValueForExpressionPath('.__r_.__value_.__l')
    size = s.GetChildMemberWithName('__size_').GetValueAsUnsigned(0)
    # Low bit of the short size byte flags long-mode.
    is_short_string = size & 128 == 0  # Assumes _LIBCPP_BIG_ENDIAN is defined.
    if is_short_string:
        # In the short layout the length is stored shifted left by one;
        # presumably the flag bit occupies the low position — confirm.
        length = size >> 1
        data = s.GetChildMemberWithName('__data_').GetPointeeData(0, length)
    else:
        length = l.GetChildMemberWithName('__size_').GetValueAsUnsigned(0)
        data = l.GetChildMemberWithName('__data_').GetPointeeData(0, length)
    error = lldb.SBError()
    # Two bytes per UTF-16 code unit.
    bytes_to_read = 2 * length
    if not bytes_to_read:
        return '""'
    byte_string = data.ReadRawData(error, 0, bytes_to_read)
    if error.fail:
        return 'Summary error: %s' % error.description
    else:
        return '"' + byte_string.decode('utf-16') + '"'
| bsd-3-clause |
BiznetGIO/horizon | openstack_dashboard/management/commands/make_web_conf.py | 3 | 11356 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import multiprocessing
import os
import re
import socket
import subprocess
import sys
import warnings
from django.conf import settings
from django.core.management import base
from django import template
# Suppress DeprecationWarnings which clutter the output to the point of
# rendering it unreadable.
warnings.simplefilter('ignore')
cmd_name = __name__.split('.')[-1]
CURDIR = os.path.realpath(os.path.dirname(__file__))
PROJECT_PATH = os.path.realpath(os.path.join(CURDIR, '../..'))
STATIC_PATH = os.path.realpath(os.path.join(PROJECT_PATH, '../static'))
# Known apache regular expression to retrieve it's version
APACHE_VERSION_REG = r'Apache/(?P<version>[\d.]*)'
# Known apache commands to retrieve it's version
APACHE2_VERSION_CMDS = (
(('/usr/sbin/apache2ctl', '-V'), APACHE_VERSION_REG),
(('/usr/sbin/apache2', '-v'), APACHE_VERSION_REG),
)
# Known apache log directory locations
APACHE_LOG_DIRS = (
'/var/log/httpd', # RHEL / Red Hat / CentOS / Fedora Linux
'/var/log/apache2', # Debian / Ubuntu Linux
)
# Default log directory
DEFAULT_LOG_DIR = '/var/log'
def _getattr(obj, name, default):
"""Like getattr but return `default` if None or False.
By default, getattr(obj, name, default) returns default only if
attr does not exist, here, we return `default` even if attr evaluates to
None or False.
"""
value = getattr(obj, name, default)
if value:
return value
else:
return default
# Template context shared by the WSGI and apache vhost templates.
context = template.Context({
    'DJANGO_SETTINGS_MODULE': os.environ['DJANGO_SETTINGS_MODULE'],
    'HOSTNAME': socket.getfqdn(),
    'PROJECT_PATH': os.path.realpath(
        _getattr(settings, 'ROOT_PATH', PROJECT_PATH)),
    'STATIC_PATH': os.path.realpath(
        _getattr(settings, 'STATIC_ROOT', STATIC_PATH)),
    'SSLCERT': '/etc/pki/tls/certs/ca.crt',
    'SSLKEY': '/etc/pki/tls/private/ca.key',
    'CACERT': None,
    # One worker per CPU plus one.
    'PROCESSES': multiprocessing.cpu_count() + 1,
})
# Derive project naming from the resolved project path.
context['PROJECT_ROOT'] = os.path.dirname(context['PROJECT_PATH'])
context['PROJECT_DIR_NAME'] = os.path.basename(
    context['PROJECT_PATH'].split(context['PROJECT_ROOT'])[1])
context['PROJECT_NAME'] = context['PROJECT_DIR_NAME']
context['WSGI_FILE'] = os.path.join(
    context['PROJECT_PATH'], 'wsgi/horizon.wsgi')
# Virtual-host name: replace the machine's short hostname with the project
# name, keeping the domain part (or a fallback domain when there is none).
VHOSTNAME = context['HOSTNAME'].split('.')
VHOSTNAME[0] = context['PROJECT_NAME']
context['VHOSTNAME'] = '.'.join(VHOSTNAME)
if len(VHOSTNAME) > 1:
    context['DOMAINNAME'] = '.'.join(VHOSTNAME[1:])
else:
    context['DOMAINNAME'] = 'openstack.org'
context['ADMIN'] = 'webmaster@%s' % context['DOMAINNAME']
# Detect a virtualenv so the generated WSGI can activate it.
context['ACTIVATE_THIS'] = None
virtualenv = os.environ.get('VIRTUAL_ENV')
if virtualenv:
    activate_this = os.path.join(
        virtualenv, 'bin/activate_this.py')
    if os.path.exists(activate_this):
        context['ACTIVATE_THIS'] = activate_this
# Try to detect apache's version by probing the known apachectl binaries.
# We fall back on 2.4 when detection fails.
context['APACHE2_VERSION'] = 2.4
APACHE2_VERSION = None
for cmd in APACHE2_VERSION_CMDS:
    if os.path.exists(cmd[0][0]):
        try:
            output = subprocess.check_output(cmd[0], stderr=subprocess.STDOUT)
            # BUG FIX: check_output returns bytes on Python 3, and matching
            # bytes against the str pattern in cmd[1] raises TypeError.
            # Decode before searching.
            if isinstance(output, bytes):
                output = output.decode('utf-8', 'replace')
            res = re.search(cmd[1], output)
            if res:
                APACHE2_VERSION = res.group('version')
                break
        except (subprocess.CalledProcessError, OSError):
            # Command failed, or the binary vanished between the exists()
            # check and the call; try the next known location.
            pass
if APACHE2_VERSION:
    # Keep only "major.minor" and expose it as a float, e.g. "2.4.18" -> 2.4.
    ver_nums = APACHE2_VERSION.split('.')
    if len(ver_nums) >= 2:
        try:
            context['APACHE2_VERSION'] = float('.'.join(ver_nums[:2]))
        except ValueError:
            pass
def find_apache_log_dir():
    """Return the first known apache log directory that exists, else the default."""
    # os.path.isdir already implies existence.
    existing = (candidate for candidate in APACHE_LOG_DIRS
                if os.path.isdir(candidate))
    return next(existing, DEFAULT_LOG_DIR)
context['LOGDIR'] = find_apache_log_dir()
class Command(base.BaseCommand):
    """Generate the horizon WSGI file or an apache vhost configuration."""

    args = ''
    # FIX: "acpache" typo corrected to "apache" in the user-facing help text.
    help = """Create %(wsgi_file)s
or the contents of an apache %(p_name)s.conf file (on stdout).
The apache configuration is generated on stdout because the place of this
file is distribution dependent.
examples::
    manage.py %(cmd_name)s --wsgi # creates %(wsgi_file)s
    manage.py %(cmd_name)s --apache # creates an apache vhost conf file (on \
stdout).
    manage.py %(cmd_name)s --apache --ssl --mail=%(admin)s \
--project=%(p_name)s --hostname=%(hostname)s
To create an apache configuration file, redirect the output towards the
location you desire, e.g.::
    manage.py %(cmd_name)s --apache > \
/etc/httpd/conf.d/openstack_dashboard.conf
    """ % {
        'cmd_name': cmd_name,
        'p_name': context['PROJECT_NAME'],
        'wsgi_file': context['WSGI_FILE'],
        'admin': context['ADMIN'],
        'hostname': context['VHOSTNAME'], }

    def add_arguments(self, parser):
        """Register the command-line options for this command."""
        # TODO(ygbo): Add an --nginx option.
        parser.add_argument(
            "-a", "--apache",
            default=False, action="store_true", dest="apache",
            help="generate an apache vhost configuration"
        )
        parser.add_argument(
            "--cacert",
            dest="cacert",
            help=("Use with the --apache and --ssl option to define the path"
                  " to the SSLCACertificateFile"),
            metavar="CACERT"
        )
        parser.add_argument(
            "-f", "--force",
            default=False, action="store_true", dest="force",
            help="force overwriting of an existing %s file" %
                 context['WSGI_FILE']
        )
        parser.add_argument(
            "-H", "--hostname",
            dest="hostname",
            help=("Use with the --apache option to define the server's"
                  " hostname (default : %s)") % context['VHOSTNAME'],
            metavar="HOSTNAME"
        )
        parser.add_argument(
            "--logdir",
            dest="logdir",
            help=("Use with the --apache option to define the path to "
                  "the apache log directory(default : %s)"
                  % context['LOGDIR']),
            metavar="CACERT"
        )
        parser.add_argument(
            "-m", "--mail",
            dest="mail",
            help=("Use with the --apache option to define the web site"
                  " administrator's email (default : %s)") %
                 context['ADMIN'],
            metavar="MAIL"
        )
        parser.add_argument(
            "-n", "--namedhost",
            default=False, action="store_true", dest="namedhost",
            help=("Use with the --apache option. The apache vhost "
                  "configuration will work only when accessed with "
                  "the proper hostname (see --hostname).")
        )
        parser.add_argument(
            "--processes",
            dest="processes",
            help=("Use with the --apache option to define the number of "
                  "apache processes (by default the number of cpus +1 which "
                  "is %s on this machine).") % context['PROCESSES'],
            metavar="PROCESSES"
        )
        parser.add_argument(
            "-p", "--project",
            dest="project",
            help=("Use with the --apache option to define the project "
                  "name (default : %s)") % context['PROJECT_NAME'],
            metavar="PROJECT"
        )
        parser.add_argument(
            "-s", "--ssl",
            default=False, action="store_true", dest="ssl",
            help=("Use with the --apache option. The apache vhost "
                  "configuration will use an SSL configuration")
        )
        parser.add_argument(
            "--sslcert",
            dest="sslcert",
            help=("Use with the --apache and --ssl option to define "
                  "the path to the SSLCertificateFile (default : %s)"
                  ) % context['SSLCERT'],
            metavar="SSLCERT"
        )
        parser.add_argument(
            "--sslkey",
            dest="sslkey",
            help=("Use with the --apache and --ssl option to define "
                  "the path to the SSLCertificateKeyFile "
                  "(default : %s)") % context['SSLKEY'],
            metavar="SSLKEY"
        )
        parser.add_argument(
            "--apache-version",
            dest="apache_version",
            type=float,
            help=("Use with the --apache option to define the apache "
                  "major (as a floating point number) version "
                  "(default : %s)."
                  % context['APACHE2_VERSION']),
            metavar="APACHE_VERSION"
        )
        parser.add_argument(
            "-w", "--wsgi",
            default=False, action="store_true", dest="wsgi",
            help="generate the horizon.wsgi file"
        )

    def handle(self, *args, **options):
        """Fold CLI options into the template context, then render either
        the WSGI file (written to disk) or the apache vhost (to stdout)."""
        force = options.get('force')
        context['SSL'] = options.get('ssl')

        if options.get('mail'):
            context['ADMIN'] = options['mail']
        if options.get('cacert'):
            context['CACERT'] = options['cacert']
        if options.get('logdir'):
            context['LOGDIR'] = options['logdir'].rstrip('/')
        if options.get('processes'):
            context['PROCESSES'] = options['processes']
        if options.get('project'):
            context['PROJECT_NAME'] = options['project']
        if options.get('hostname'):
            context['VHOSTNAME'] = options['hostname']
        if options.get('sslcert'):
            context['SSLCERT'] = options['sslcert']
        if options.get('sslkey'):
            context['SSLKEY'] = options['sslkey']
        if options.get('apache_version'):
            context['APACHE2_VERSION'] = options['apache_version']

        if options.get('namedhost'):
            context['NAMEDHOST'] = context['VHOSTNAME']
        else:
            context['NAMEDHOST'] = '*'

        # Generate the WSGI.
        if options.get('wsgi'):
            with open(
                os.path.join(CURDIR, 'horizon.wsgi.template'), 'r'
            ) as fp:
                wsgi_template = template.Template(fp.read())
            if not os.path.exists(context['WSGI_FILE']) or force:
                with open(context['WSGI_FILE'], 'w') as fp:
                    fp.write(wsgi_template.render(context))
                print('Generated "%s"' % context['WSGI_FILE'])
            else:
                sys.exit('"%s" already exists, use --force to overwrite' %
                         context['WSGI_FILE'])

        # Generate the apache configuration.
        elif options.get('apache'):
            with open(
                os.path.join(CURDIR, 'apache_vhost.conf.template'), 'r'
            ) as fp:
                # Renamed from the copy-pasted "wsgi_template" for clarity.
                vhost_template = template.Template(fp.read())
            sys.stdout.write(vhost_template.render(context))

        else:
            self.print_help('manage.py', cmd_name)
| apache-2.0 |
cloudbau/nova | nova/tests/fake_hosts.py | 42 | 1314 | # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Provides some fake hosts to test host and service related functions
"""
from nova.tests.objects import test_service
# Canonical host fixtures: two compute hosts in the default "nova" zone.
HOST_LIST = [
    {"host_name": "host_c1", "service": "compute", "zone": "nova"},
    {"host_name": "host_c2", "service": "compute", "zone": "nova"}]
# The same list wrapped the way the OS API "list hosts" call returns it.
OS_API_HOST_LIST = {"hosts": HOST_LIST}
# Identical content to HOST_LIST, kept as a separate object so tests that
# filter by zone do not alias (and accidentally mutate) the default fixture.
HOST_LIST_NOVA_ZONE = [
    {"host_name": "host_c1", "service": "compute", "zone": "nova"},
    {"host_name": "host_c2", "service": "compute", "zone": "nova"}]
# Service records derived from the shared fake service template, one per host.
service_base = test_service.fake_service
SERVICES_LIST = [
    dict(service_base, host='host_c1', topic='compute'),
    dict(service_base, host='host_c2', topic='compute')]
| apache-2.0 |
BetterWorks/djangosaml2 | djangosaml2/utils.py | 2 | 2263 | # Copyright (C) 2012 Yaco Sistemas (http://www.yaco.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf import settings
from saml2.s_utils import UnknownSystemEntity
def get_custom_setting(name, default=None):
    """Return the Django setting *name*, or *default* when it is not set."""
    return getattr(settings, name, default)
def available_idps(config, langpref=None):
    """Return a mapping of IdP entityID -> display name.

    Scans every metadata source attached to *config* and keeps the
    entities that expose an ``idpsso_descriptor`` with a
    ``single_sign_on_service`` endpoint (i.e. real SSO-capable IdPs).
    *langpref* selects the language of the display name ("en" by
    default).
    """
    if langpref is None:
        langpref = "en"
    idps = set()
    # config.metadata.metadata maps source name -> metadata object; the
    # source name itself is not needed here.
    for metadata in config.metadata.metadata.values():
        result = metadata.any('idpsso_descriptor', 'single_sign_on_service')
        if result:
            # `result` is a dict keyed by entityID.
            idps.update(result)
    return {idp: config.metadata.name(idp, langpref) for idp in idps}
def get_idp_sso_supported_bindings(idp_entity_id=None, config=None):
    """Return the SSO bindings supported by an IdP.

    Looks up the ``single_sign_on_service`` endpoints of the IdP's
    ``idpsso_descriptor`` in the metadata store and returns their binding
    keys (an empty list for an unknown entity).  When *idp_entity_id* is
    None the single configured IdP is assumed; when *config* is None the
    SP configuration is loaded lazily.  This is not clear in the pysaml2
    code, so we wrap it in a util.
    """
    if config is None:
        # avoid circular import
        from djangosaml2.conf import get_config
        config = get_config()
    # load metadata store from config
    meta = getattr(config, 'metadata', {})
    # if idp is None, assume only one exists so just use that
    if idp_entity_id is None:
        # .keys() returns dict_keys in python3.5+
        idp_entity_id = list(available_idps(config).keys()).pop()
    try:
        return meta.service(idp_entity_id, 'idpsso_descriptor', 'single_sign_on_service').keys()
    except UnknownSystemEntity:
        return []
def get_location(http_info):
    """Return the redirect URL carried by a pysaml2 ``http_info`` dict.

    An HTTP-Redirect binding is encoded by pysaml2 as exactly one
    ``('Location', url)`` header pair; anything else indicates a
    programming error, hence the assertions.
    """
    assert 'headers' in http_info
    header_list = http_info['headers']
    assert len(header_list) == 1
    name, value = header_list[0]
    assert name == 'Location'
    return value
| apache-2.0 |
root-mirror/root | tutorials/roofit/rf101_basics.py | 11 | 1963 | ## \file
## \ingroup tutorial_roofit
## \notebook
## This tutorial illustrates the basic features of RooFit.
##
## \macro_code
##
## \date February 2018
## \authors Clemens Lange, Wouter Verkerke (C++ version)
import ROOT
# Set up model
# ---------------------
# Declare variables x,mean,sigma with associated name, title, initial
# value and allowed range
x = ROOT.RooRealVar("x", "x", -10, 10)
mean = ROOT.RooRealVar("mean", "mean of gaussian", 1, -10, 10)
sigma = ROOT.RooRealVar("sigma", "width of gaussian", 1, 0.1, 10)
# Build gaussian pdf in terms of x,mean and sigma
gauss = ROOT.RooGaussian("gauss", "gaussian PDF", x, mean, sigma)
# Construct plot frame in 'x'
xframe = x.frame(ROOT.RooFit.Title("Gaussian pdf")) # RooPlot
# Plot model and change parameter values
# ---------------------------------------------------------------------------
# Plot gauss in frame (i.e. in x)
gauss.plotOn(xframe)
# Change the value of sigma to 3
sigma.setVal(3)
# Plot gauss in frame (i.e. in x) and draw frame on canvas
gauss.plotOn(xframe, ROOT.RooFit.LineColor(ROOT.kRed))
# Generate events
# -----------------------------
# Generate a dataset of 1000 events in x from gauss
data = gauss.generate(ROOT.RooArgSet(x), 10000) # ROOT.RooDataSet
# Make a second plot frame in x and draw both the
# data and the pdf in the frame
xframe2 = x.frame(ROOT.RooFit.Title(
"Gaussian pdf with data")) # RooPlot
data.plotOn(xframe2)
gauss.plotOn(xframe2)
# Fit model to data
# -----------------------------
# Fit pdf to data
gauss.fitTo(data)
# Print values of mean and sigma (that now reflect fitted values and
# errors)
mean.Print()
sigma.Print()
# Draw all frames on a canvas
c = ROOT.TCanvas("rf101_basics", "rf101_basics", 800, 400)
c.Divide(2)
c.cd(1)
ROOT.gPad.SetLeftMargin(0.15)
xframe.GetYaxis().SetTitleOffset(1.6)
xframe.Draw()
c.cd(2)
ROOT.gPad.SetLeftMargin(0.15)
xframe2.GetYaxis().SetTitleOffset(1.6)
xframe2.Draw()
c.SaveAs("rf101_basics.png")
| lgpl-2.1 |
craigem/effectivepython | item_03.py | 1 | 1035 | #!/usr/bin/env python3
'''Item 3 from Effective Python'''
import logging
# Example 1
def to_str(bytes_or_str):
    """Normalise *bytes_or_str* to ``str``.

    Effective Python, Item 3: in Python 3 you want a single boundary
    helper that accepts either ``str`` or ``bytes`` and always hands
    back ``str`` (decoding binary input as UTF-8).
    """
    is_binary = isinstance(bytes_or_str, bytes)
    return bytes_or_str.decode('utf-8') if is_binary else bytes_or_str
print(repr(to_str(b'foo')))
print(repr(to_str('foo')))
# Example 2
def to_bytes(bytes_or_str):
    """Normalise *bytes_or_str* to ``bytes``.

    Counterpart of ``to_str``: text input is encoded as UTF-8, binary
    input is returned unchanged.
    """
    is_text = isinstance(bytes_or_str, str)
    return bytes_or_str.encode('utf-8') if is_text else bytes_or_str
print(repr(to_bytes(b'foo')))
print(repr(to_bytes('foo')))
# Example 5
# NOTE: the bare "except:" below is the book's deliberate (anti-)example:
# it swallows *any* failure of the write and logs the traceback instead.
try:
    import os
    with open('random.bin', 'wb') as f:
        f.write(os.urandom(10))
except:
    logging.exception('Expected')
else:
    # Write succeeded: stop here so Example 6 only runs after a failure.
    exit()
# Example 6
# Same write without the protective try block; an error here propagates.
with open('random1.bin', 'wb') as f:
    f.write(os.urandom(10))
| gpl-3.0 |
orangeduck/PyAutoC | Python27/Lib/test/test_dis.py | 19 | 4548 | # Minimal tests for dis module
from test.test_support import run_unittest
import unittest
import sys
import dis
import StringIO
def _f(a):
print a
return 1
dis_f = """\
%-4d 0 LOAD_FAST 0 (a)
3 PRINT_ITEM
4 PRINT_NEWLINE
%-4d 5 LOAD_CONST 1 (1)
8 RETURN_VALUE
"""%(_f.func_code.co_firstlineno + 1,
_f.func_code.co_firstlineno + 2)
def bug708901():
for res in range(1,
10):
pass
dis_bug708901 = """\
%-4d 0 SETUP_LOOP 23 (to 26)
3 LOAD_GLOBAL 0 (range)
6 LOAD_CONST 1 (1)
%-4d 9 LOAD_CONST 2 (10)
12 CALL_FUNCTION 2
15 GET_ITER
>> 16 FOR_ITER 6 (to 25)
19 STORE_FAST 0 (res)
%-4d 22 JUMP_ABSOLUTE 16
>> 25 POP_BLOCK
>> 26 LOAD_CONST 0 (None)
29 RETURN_VALUE
"""%(bug708901.func_code.co_firstlineno + 1,
bug708901.func_code.co_firstlineno + 2,
bug708901.func_code.co_firstlineno + 3)
def bug1333982(x=[]):
assert 0, ([s for s in x] +
1)
pass
dis_bug1333982 = """\
%-4d 0 LOAD_CONST 1 (0)
3 POP_JUMP_IF_TRUE 38
6 LOAD_GLOBAL 0 (AssertionError)
9 BUILD_LIST 0
12 LOAD_FAST 0 (x)
15 GET_ITER
>> 16 FOR_ITER 12 (to 31)
19 STORE_FAST 1 (s)
22 LOAD_FAST 1 (s)
25 LIST_APPEND 2
28 JUMP_ABSOLUTE 16
%-4d >> 31 LOAD_CONST 2 (1)
34 BINARY_ADD
35 RAISE_VARARGS 2
%-4d >> 38 LOAD_CONST 0 (None)
41 RETURN_VALUE
"""%(bug1333982.func_code.co_firstlineno + 1,
bug1333982.func_code.co_firstlineno + 2,
bug1333982.func_code.co_firstlineno + 3)
_BIG_LINENO_FORMAT = """\
%3d 0 LOAD_GLOBAL 0 (spam)
3 POP_TOP
4 LOAD_CONST 0 (None)
7 RETURN_VALUE
"""
class DisTests(unittest.TestCase):
def do_disassembly_test(self, func, expected):
s = StringIO.StringIO()
save_stdout = sys.stdout
sys.stdout = s
dis.dis(func)
sys.stdout = save_stdout
got = s.getvalue()
# Trim trailing blanks (if any).
lines = got.split('\n')
lines = [line.rstrip() for line in lines]
expected = expected.split("\n")
import difflib
if expected != lines:
self.fail(
"events did not match expectation:\n" +
"\n".join(difflib.ndiff(expected,
lines)))
def test_opmap(self):
self.assertEqual(dis.opmap["STOP_CODE"], 0)
self.assertIn(dis.opmap["LOAD_CONST"], dis.hasconst)
self.assertIn(dis.opmap["STORE_NAME"], dis.hasname)
def test_opname(self):
self.assertEqual(dis.opname[dis.opmap["LOAD_FAST"]], "LOAD_FAST")
def test_boundaries(self):
self.assertEqual(dis.opmap["EXTENDED_ARG"], dis.EXTENDED_ARG)
self.assertEqual(dis.opmap["STORE_NAME"], dis.HAVE_ARGUMENT)
def test_dis(self):
self.do_disassembly_test(_f, dis_f)
def test_bug_708901(self):
self.do_disassembly_test(bug708901, dis_bug708901)
def test_bug_1333982(self):
# This one is checking bytecodes generated for an `assert` statement,
# so fails if the tests are run with -O. Skip this test then.
if __debug__:
self.do_disassembly_test(bug1333982, dis_bug1333982)
def test_big_linenos(self):
def func(count):
namespace = {}
func = "def foo():\n " + "".join(["\n "] * count + ["spam\n"])
exec func in namespace
return namespace['foo']
# Test all small ranges
for i in xrange(1, 300):
expected = _BIG_LINENO_FORMAT % (i + 2)
self.do_disassembly_test(func(i), expected)
# Test some larger ranges too
for i in xrange(300, 5000, 10):
expected = _BIG_LINENO_FORMAT % (i + 2)
self.do_disassembly_test(func(i), expected)
def test_main():
run_unittest(DisTests)
if __name__ == "__main__":
test_main()
| bsd-2-clause |
megaumi/django | django/db/models/fields/files.py | 129 | 19629 | import datetime
import os
import warnings
from django import forms
from django.core import checks
from django.core.files.base import File
from django.core.files.images import ImageFile
from django.core.files.storage import default_storage
from django.db.models import signals
from django.db.models.fields import Field
from django.utils import six
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import force_str, force_text
from django.utils.inspect import func_supports_parameter
from django.utils.translation import ugettext_lazy as _
class FieldFile(File):
def __init__(self, instance, field, name):
super(FieldFile, self).__init__(None, name)
self.instance = instance
self.field = field
self.storage = field.storage
self._committed = True
def __eq__(self, other):
# Older code may be expecting FileField values to be simple strings.
# By overriding the == operator, it can remain backwards compatibility.
if hasattr(other, 'name'):
return self.name == other.name
return self.name == other
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.name)
# The standard File contains most of the necessary properties, but
# FieldFiles can be instantiated without a name, so that needs to
# be checked for here.
def _require_file(self):
if not self:
raise ValueError("The '%s' attribute has no file associated with it." % self.field.name)
def _get_file(self):
self._require_file()
if not hasattr(self, '_file') or self._file is None:
self._file = self.storage.open(self.name, 'rb')
return self._file
def _set_file(self, file):
self._file = file
def _del_file(self):
del self._file
file = property(_get_file, _set_file, _del_file)
def _get_path(self):
self._require_file()
return self.storage.path(self.name)
path = property(_get_path)
def _get_url(self):
self._require_file()
return self.storage.url(self.name)
url = property(_get_url)
def _get_size(self):
self._require_file()
if not self._committed:
return self.file.size
return self.storage.size(self.name)
size = property(_get_size)
def open(self, mode='rb'):
self._require_file()
self.file.open(mode)
# open() doesn't alter the file's contents, but it does reset the pointer
open.alters_data = True
# In addition to the standard File API, FieldFiles have extra methods
# to further manipulate the underlying file, as well as update the
# associated model instance.
def save(self, name, content, save=True):
name = self.field.generate_filename(self.instance, name)
if func_supports_parameter(self.storage.save, 'max_length'):
self.name = self.storage.save(name, content, max_length=self.field.max_length)
else:
warnings.warn(
'Backwards compatibility for storage backends without '
'support for the `max_length` argument in '
'Storage.save() will be removed in Django 1.10.',
RemovedInDjango110Warning, stacklevel=2
)
self.name = self.storage.save(name, content)
setattr(self.instance, self.field.name, self.name)
# Update the filesize cache
self._size = content.size
self._committed = True
# Save the object because it has changed, unless save is False
if save:
self.instance.save()
save.alters_data = True
def delete(self, save=True):
if not self:
return
# Only close the file if it's already open, which we know by the
# presence of self._file
if hasattr(self, '_file'):
self.close()
del self.file
self.storage.delete(self.name)
self.name = None
setattr(self.instance, self.field.name, self.name)
# Delete the filesize cache
if hasattr(self, '_size'):
del self._size
self._committed = False
if save:
self.instance.save()
delete.alters_data = True
def _get_closed(self):
file = getattr(self, '_file', None)
return file is None or file.closed
closed = property(_get_closed)
def close(self):
file = getattr(self, '_file', None)
if file is not None:
file.close()
def __getstate__(self):
# FieldFile needs access to its associated model field and an instance
# it's attached to in order to work properly, but the only necessary
# data to be pickled is the file's name itself. Everything else will
# be restored later, by FileDescriptor below.
return {'name': self.name, 'closed': False, '_committed': True, '_file': None}
class FileDescriptor(object):
"""
The descriptor for the file attribute on the model instance. Returns a
FieldFile when accessed so you can do stuff like::
>>> from myapp.models import MyModel
>>> instance = MyModel.objects.get(pk=1)
>>> instance.file.size
Assigns a file object on assignment so you can do::
>>> with open('/tmp/hello.world', 'r') as f:
... instance.file = File(f)
"""
def __init__(self, field):
self.field = field
def __get__(self, instance=None, owner=None):
if instance is None:
raise AttributeError(
"The '%s' attribute can only be accessed from %s instances."
% (self.field.name, owner.__name__))
# This is slightly complicated, so worth an explanation.
# instance.file`needs to ultimately return some instance of `File`,
# probably a subclass. Additionally, this returned object needs to have
# the FieldFile API so that users can easily do things like
# instance.file.path and have that delegated to the file storage engine.
# Easy enough if we're strict about assignment in __set__, but if you
# peek below you can see that we're not. So depending on the current
# value of the field we have to dynamically construct some sort of
# "thing" to return.
# The instance dict contains whatever was originally assigned
# in __set__.
file = instance.__dict__[self.field.name]
# If this value is a string (instance.file = "path/to/file") or None
# then we simply wrap it with the appropriate attribute class according
# to the file field. [This is FieldFile for FileFields and
# ImageFieldFile for ImageFields; it's also conceivable that user
# subclasses might also want to subclass the attribute class]. This
# object understands how to convert a path to a file, and also how to
# handle None.
if isinstance(file, six.string_types) or file is None:
attr = self.field.attr_class(instance, self.field, file)
instance.__dict__[self.field.name] = attr
# Other types of files may be assigned as well, but they need to have
# the FieldFile interface added to them. Thus, we wrap any other type of
# File inside a FieldFile (well, the field's attr_class, which is
# usually FieldFile).
elif isinstance(file, File) and not isinstance(file, FieldFile):
file_copy = self.field.attr_class(instance, self.field, file.name)
file_copy.file = file
file_copy._committed = False
instance.__dict__[self.field.name] = file_copy
# Finally, because of the (some would say boneheaded) way pickle works,
# the underlying FieldFile might not actually itself have an associated
# file. So we need to reset the details of the FieldFile in those cases.
elif isinstance(file, FieldFile) and not hasattr(file, 'field'):
file.instance = instance
file.field = self.field
file.storage = self.field.storage
# That was fun, wasn't it?
return instance.__dict__[self.field.name]
def __set__(self, instance, value):
instance.__dict__[self.field.name] = value
class FileField(Field):
# The class to wrap instance attributes in. Accessing the file object off
# the instance will always return an instance of attr_class.
attr_class = FieldFile
# The descriptor to use for accessing the attribute off of the class.
descriptor_class = FileDescriptor
description = _("File")
def __init__(self, verbose_name=None, name=None, upload_to='', storage=None, **kwargs):
self._primary_key_set_explicitly = 'primary_key' in kwargs
self._unique_set_explicitly = 'unique' in kwargs
self.storage = storage or default_storage
self.upload_to = upload_to
kwargs['max_length'] = kwargs.get('max_length', 100)
super(FileField, self).__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super(FileField, self).check(**kwargs)
errors.extend(self._check_unique())
errors.extend(self._check_primary_key())
return errors
def _check_unique(self):
if self._unique_set_explicitly:
return [
checks.Error(
"'unique' is not a valid argument for a %s." % self.__class__.__name__,
hint=None,
obj=self,
id='fields.E200',
)
]
else:
return []
def _check_primary_key(self):
if self._primary_key_set_explicitly:
return [
checks.Error(
"'primary_key' is not a valid argument for a %s." % self.__class__.__name__,
hint=None,
obj=self,
id='fields.E201',
)
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super(FileField, self).deconstruct()
if kwargs.get("max_length") == 100:
del kwargs["max_length"]
kwargs['upload_to'] = self.upload_to
if self.storage is not default_storage:
kwargs['storage'] = self.storage
return name, path, args, kwargs
def get_internal_type(self):
return "FileField"
def get_prep_lookup(self, lookup_type, value):
if hasattr(value, 'name'):
value = value.name
return super(FileField, self).get_prep_lookup(lookup_type, value)
def get_prep_value(self, value):
"Returns field's value prepared for saving into a database."
value = super(FileField, self).get_prep_value(value)
# Need to convert File objects provided via a form to unicode for database insertion
if value is None:
return None
return six.text_type(value)
def pre_save(self, model_instance, add):
"Returns field's value just before saving."
file = super(FileField, self).pre_save(model_instance, add)
if file and not file._committed:
# Commit the file to storage prior to saving the model
file.save(file.name, file, save=False)
return file
def contribute_to_class(self, cls, name, **kwargs):
super(FileField, self).contribute_to_class(cls, name, **kwargs)
setattr(cls, self.name, self.descriptor_class(self))
def get_directory_name(self):
return os.path.normpath(force_text(datetime.datetime.now().strftime(force_str(self.upload_to))))
def get_filename(self, filename):
return os.path.normpath(self.storage.get_valid_name(os.path.basename(filename)))
def generate_filename(self, instance, filename):
# If upload_to is a callable, make sure that the path it returns is
# passed through get_valid_name() of the underlying storage.
if callable(self.upload_to):
directory_name, filename = os.path.split(self.upload_to(instance, filename))
filename = self.storage.get_valid_name(filename)
return os.path.normpath(os.path.join(directory_name, filename))
return os.path.join(self.get_directory_name(), self.get_filename(filename))
def save_form_data(self, instance, data):
# Important: None means "no change", other false value means "clear"
# This subtle distinction (rather than a more explicit marker) is
# needed because we need to consume values that are also sane for a
# regular (non Model-) Form to find in its cleaned_data dictionary.
if data is not None:
# This value will be converted to unicode and stored in the
# database, so leaving False as-is is not acceptable.
if not data:
data = ''
setattr(instance, self.name, data)
def formfield(self, **kwargs):
defaults = {'form_class': forms.FileField, 'max_length': self.max_length}
# If a file has been provided previously, then the form doesn't require
# that a new file is provided this time.
# The code to mark the form field as not required is used by
# form_for_instance, but can probably be removed once form_for_instance
# is gone. ModelForm uses a different method to check for an existing file.
if 'initial' in kwargs:
defaults['required'] = False
defaults.update(kwargs)
return super(FileField, self).formfield(**defaults)
class ImageFileDescriptor(FileDescriptor):
    """
    Just like the FileDescriptor, but for ImageFields. The only difference is
    assigning the width/height to the width_field/height_field, if appropriate.
    """
    def __set__(self, instance, value):
        # Remember whether the attribute already held a value so we can tell
        # a reassignment apart from the initial Model.__init__() population.
        previous_file = instance.__dict__.get(self.field.name)
        super(ImageFileDescriptor, self).__set__(instance, value)

        # To prevent recalculating image dimensions when we are instantiating
        # an object from the database (bug #11084), only update dimensions if
        # the field had a value before this assignment.  Since the default
        # value for FileField subclasses is an instance of field.attr_class,
        # previous_file will only be None when we are called from
        # Model.__init__().  The ImageField.update_dimension_fields method
        # hooked up to the post_init signal handles the Model.__init__() cases.
        # Assignment happening outside of Model.__init__() will trigger the
        # update right here.
        if previous_file is not None:
            self.field.update_dimension_fields(instance, force=True)
class ImageFieldFile(ImageFile, FieldFile):
    """FieldFile that also exposes image dimensions (via ImageFile)."""

    def delete(self, save=True):
        # Clear the image dimensions cache: it would be stale once the
        # underlying file is removed from storage.
        if hasattr(self, '_dimensions_cache'):
            del self._dimensions_cache
        super(ImageFieldFile, self).delete(save)
class ImageField(FileField):
attr_class = ImageFieldFile
descriptor_class = ImageFileDescriptor
description = _("Image")
def __init__(self, verbose_name=None, name=None, width_field=None,
height_field=None, **kwargs):
self.width_field, self.height_field = width_field, height_field
super(ImageField, self).__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super(ImageField, self).check(**kwargs)
errors.extend(self._check_image_library_installed())
return errors
def _check_image_library_installed(self):
try:
from PIL import Image # NOQA
except ImportError:
return [
checks.Error(
'Cannot use ImageField because Pillow is not installed.',
hint=('Get Pillow at https://pypi.python.org/pypi/Pillow '
'or run command "pip install Pillow".'),
obj=self,
id='fields.E210',
)
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super(ImageField, self).deconstruct()
if self.width_field:
kwargs['width_field'] = self.width_field
if self.height_field:
kwargs['height_field'] = self.height_field
return name, path, args, kwargs
def contribute_to_class(self, cls, name, **kwargs):
super(ImageField, self).contribute_to_class(cls, name, **kwargs)
# Attach update_dimension_fields so that dimension fields declared
# after their corresponding image field don't stay cleared by
# Model.__init__, see bug #11196.
# Only run post-initialization dimension update on non-abstract models
if not cls._meta.abstract:
signals.post_init.connect(self.update_dimension_fields, sender=cls)
def update_dimension_fields(self, instance, force=False, *args, **kwargs):
"""
Updates field's width and height fields, if defined.
This method is hooked up to model's post_init signal to update
dimensions after instantiating a model instance. However, dimensions
won't be updated if the dimensions fields are already populated. This
avoids unnecessary recalculation when loading an object from the
database.
Dimensions can be forced to update with force=True, which is how
ImageFileDescriptor.__set__ calls this method.
"""
# Nothing to update if the field doesn't have dimension fields.
has_dimension_fields = self.width_field or self.height_field
if not has_dimension_fields:
return
# getattr will call the ImageFileDescriptor's __get__ method, which
# coerces the assigned value into an instance of self.attr_class
# (ImageFieldFile in this case).
file = getattr(instance, self.attname)
# Nothing to update if we have no file and not being forced to update.
if not file and not force:
return
dimension_fields_filled = not(
(self.width_field and not getattr(instance, self.width_field))
or (self.height_field and not getattr(instance, self.height_field))
)
# When both dimension fields have values, we are most likely loading
# data from the database or updating an image field that already had
# an image stored. In the first case, we don't want to update the
# dimension fields because we are already getting their values from the
# database. In the second case, we do want to update the dimensions
# fields and will skip this return because force will be True since we
# were called from ImageFileDescriptor.__set__.
if dimension_fields_filled and not force:
return
# file should be an instance of ImageFieldFile or should be None.
if file:
width = file.width
height = file.height
else:
# No file, so clear dimensions fields.
width = None
height = None
# Update the width and height fields.
if self.width_field:
setattr(instance, self.width_field, width)
if self.height_field:
setattr(instance, self.height_field, height)
def formfield(self, **kwargs):
defaults = {'form_class': forms.ImageField}
defaults.update(kwargs)
return super(ImageField, self).formfield(**defaults)
| bsd-3-clause |
Titan-C/sympy | sympy/physics/tests/test_physics_matrices.py | 116 | 2619 | from sympy.physics.matrices import msigma, mgamma, minkowski_tensor, pat_matrix, mdft
from sympy import zeros, eye, I, Matrix, sqrt, Rational
def test_parallel_axis_theorem():
    """Check pat_matrix against hand-computed inertia-shift matrices.

    Each case compares the result for masses 1 and 2; the matrix must
    scale linearly with the mass.
    """
    # This tests the parallel axis theorem matrix by comparing to test
    # matrices.
    # First case, 1 in all directions.
    mat1 = Matrix(((2, -1, -1), (-1, 2, -1), (-1, -1, 2)))
    assert pat_matrix(1, 1, 1, 1) == mat1
    assert pat_matrix(2, 1, 1, 1) == 2*mat1
    # Second case, 1 in x, 0 in all others
    mat2 = Matrix(((0, 0, 0), (0, 1, 0), (0, 0, 1)))
    assert pat_matrix(1, 1, 0, 0) == mat2
    assert pat_matrix(2, 1, 0, 0) == 2*mat2
    # Third case, 1 in y, 0 in all others
    mat3 = Matrix(((1, 0, 0), (0, 0, 0), (0, 0, 1)))
    assert pat_matrix(1, 0, 1, 0) == mat3
    assert pat_matrix(2, 0, 1, 0) == 2*mat3
    # Fourth case, 1 in z, 0 in all others
    mat4 = Matrix(((1, 0, 0), (0, 1, 0), (0, 0, 0)))
    assert pat_matrix(1, 0, 0, 1) == mat4
    assert pat_matrix(2, 0, 0, 1) == 2*mat4
def test_Pauli():
    """Verify the Pauli matrix algebra: products, involution, anticommutation."""
    #this and the following test are testing both Pauli and Dirac matrices
    #and also that the general Matrix class works correctly in a real world
    #situation
    sigma1 = msigma(1)
    sigma2 = msigma(2)
    sigma3 = msigma(3)
    assert sigma1 == sigma1
    assert sigma1 != sigma2
    # sigma*I -> I*sigma (see #354)
    # Cyclic products: sigma_i * sigma_j = I*sigma_k (i,j,k cyclic).
    assert sigma1*sigma2 == sigma3*I
    assert sigma3*sigma1 == sigma2*I
    assert sigma2*sigma3 == sigma1*I
    # Each Pauli matrix squares to the identity.
    assert sigma1*sigma1 == eye(2)
    assert sigma2*sigma2 == eye(2)
    assert sigma3*sigma3 == eye(2)
    assert sigma1*2*sigma1 == 2*eye(2)
    # Conjugation flips the sign: sigma1*sigma3*sigma1 == -sigma3.
    assert sigma1*sigma3*sigma1 == -sigma3
def test_Dirac():
    """Verify gamma-matrix identities against the Minkowski metric."""
    gamma0 = mgamma(0)
    gamma1 = mgamma(1)
    gamma2 = mgamma(2)
    gamma3 = mgamma(3)
    gamma5 = mgamma(5)
    # gamma*I -> I*gamma (see #354)
    # gamma5 is defined as I*g0*g1*g2*g3.
    assert gamma5 == gamma0 * gamma1 * gamma2 * gamma3 * I
    # Distinct gammas anticommute.
    assert gamma1 * gamma2 + gamma2 * gamma1 == zeros(4)
    # gamma_mu^2 = eta_{mu,mu} * Identity (no summation).
    assert gamma0 * gamma0 == eye(4) * minkowski_tensor[0, 0]
    assert gamma2 * gamma2 != eye(4) * minkowski_tensor[0, 0]
    assert gamma2 * gamma2 == eye(4) * minkowski_tensor[2, 2]
    # Same identity holds for the lower-index (True) variant.
    assert mgamma(5, True) == \
        mgamma(0, True)*mgamma(1, True)*mgamma(2, True)*mgamma(3, True)*I
def test_mdft():
assert mdft(1) == Matrix([[1]])
assert mdft(2) == 1/sqrt(2)*Matrix([[1,1],[1,-1]])
assert mdft(4) == Matrix([[Rational(1,2), Rational(1,2), Rational(1,2),\
Rational(1,2)],[Rational(1,2), -I/2, Rational(-1,2), I/2\
],[Rational(1,2), Rational(-1,2), Rational(1,2), Rational(-1,2)],\
[Rational(1,2), I/2, Rational(-1,2), -I/2]])
| bsd-3-clause |
khalibartan/Antidote-DM | Antidotes DM/Downloader/EBook.py | 2 | 2855 | import urllib2
import re
# Optionally read HTTP proxy credentials from Proxy.txt.  Expected layout
# (whitespace separated): address port user password.  `flag` records
# whether a usable proxy configuration was loaded (1) or not (0); any
# failure (missing file, short file, ...) silently disables the proxy.
try:
    flag=1
    target = open('Proxy.txt', 'r')
    target_lst = target.read()
    target_lst = target_lst.split()
    address = target_lst[0]
    port = target_lst[1]
    user = target_lst[2]
    password = target_lst[3]
    # Same authenticated proxy URL is used for all three schemes.
    http_proxy = "http://" + user + ":" + password + "@" + address + ":" + port
    https_proxy = "http://" + user + ":" + password + "@" + address + ":" + port
    ftp_proxy = "http://" + user + ":" + password + "@" + address + ":" + port
    proxydict = {
              "http" : http_proxy,
             "https" : https_proxy,
               "ftp" : ftp_proxy
             }
except:
    # No/invalid Proxy.txt: fall back to a direct connection.
    flag=0
class Search_ebook(object):
    """Scrape gen.lib.rus.ec search results for a free-text query.

    On construction the search URL is built and fetched immediately;
    `books_urls` holds the per-book download ("/get") links and
    `book_names` the matching titles.  NOTE(review): the pairing of
    URLs and names relies on the site's exact HTML layout — fragile.
    """
    def __init__(self, query):
        super(Search_ebook, self).__init__()
        self.query = query
        self.url=self.create_search_url(self.query)
        self.books_urls,self.book_names=self.get_books_info(self.url)

    def create_search_url(self, query):
        """Build the search URL, joining query words with '+'."""
        temp1='http://gen.lib.rus.ec/search.php?req='
        temp2='&open=0&view=simple&phrase=1&column=def'
        search_query=query.split()
        search_query=tuple(search_query)
        search_query='+'.join(search_query)
        url=temp1+search_query+temp2
        return url

    def get_books_info(self, url):
        """Fetch *url* and extract (download URLs, book titles) by raw
        string-index scraping of the result page."""
        if flag==1:
            # Proxy.txt was parsed successfully: route through the proxy.
            proxy = urllib2.ProxyHandler(proxydict)
            opener = urllib2.build_opener(proxy)
            urllib2.install_opener(opener)
        web_page=urllib2.urlopen(url)
        web_page=web_page.read()
        # Download links: every "/get..." href, cut just before ' title'.
        book_start_urls=[x.start() for x in re.finditer('/get',web_page)]
        book_end_urls=[]
        for i in book_start_urls:
            book_end_urls.append(web_page.find('title',i))
        books_urls=['http://gen.lib.rus.ec'+web_page[book_start_urls[i]:book_end_urls[i]-2] for i in range(len(book_start_urls))]
        # Titles: text between each 'book/' anchor and its closing </a>.
        book_start_names=[x.start() for x in re.finditer('book/',web_page)]
        book_end_names=[]
        for i in book_start_names:
            book_end_names.append(web_page.find('</a>',i))
        book_names=[web_page[book_start_names[i]:book_end_names[i]] for i in range(len(book_start_names))]
        # Strip the anchor attributes: keep everything after the first '>'.
        book_start_names=[]
        for i in book_names:
            book_start_names.append(i.find('>'))
        book_names=[book_names[i][book_start_names[i]+1:] for i in range(len(book_start_names))]
        books_names=[]
        for books in book_names:
            if '</font>' in books:
                # Title contains highlighted search terms: pull the plain
                # text fragments that follow each </font> tag.
                regex=re.compile('</font>[\w,\s,\d,:,(,)]*<')
                bookName=regex.findall(books)
                for x in bookName:
                    if len(x[7:-1])>2:
                        books_names.append(x[7:-1])
            else:
                books_names.append(books)
        return books_urls,books_names
##s=Search_ebook('linear algebra')
##print 'lol'
##print s.book_names
| gpl-2.0 |
chubbymaggie/reverse | plasma/lib/graph.py | 1 | 27118 | #!/usr/bin/env python3
#
# PLASMA : Generate an indented asm code (pseudo-C) with colored syntax.
# Copyright (C) 2015 Joel
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from time import time
from plasma.lib.utils import BRANCH_NEXT, BRANCH_NEXT_JUMP, debug__, list_starts_with
# For the loop's analysis
MAX_NODES = 800
# This class is used only for the decompilation mode. The analyzer create
# also a graph but only on-the-fly.
class Graph:
    def __init__(self, dis, entry_point_addr):
        """Build an empty control-flow graph rooted at *entry_point_addr*.

        *dis* is the disassembler used later by the analysis passes; the
        containers below are filled incrementally by new_node() and the
        loop-detection code.
        """
        # Each node contains a block (list) of instructions.
        self.nodes = {} # ad -> [instruction, (prefetch)]
        # For each address block, we store a list of next blocks.
        # If there are 2 elements it means that the precedent instruction
        # was a conditional jump :
        # 1st : direct next instruction
        # 2nd : for conditional jump : address of the jump
        self.link_out = {} # ad -> [nxt1, nxt2]
        self.link_in = {} # ad -> [prev, ...]
        self.entry_point_addr = entry_point_addr
        self.dis = dis
        # For one loop : contains all address of the loop only
        self.loops_set = {}
        # For one loop : contains all address of the loop and sub-loops
        self.loops_all = {}
        # Rest of all address which are not in a loop
        self.not_in_loop = set()
        self.loops_start = set()
        # Optimization
        self.cond_jumps_set = set()
        self.uncond_jumps_set = set()
        self.equiv = {}
        self.false_loops = set()
        # Loop dependencies
        self.deps = {}
        self.rev_deps = {}
        self.cache_path_exists = {}
        # For each loop we search the last node that if we enter in it,
        # we are sure to return to the loop.
        self.last_node_loop = {}
        self.all_deep_equiv = set()
        self.skipped_loops_analysis = False
        self.exit_or_ret = set()
# A jump is normally alone in a block, but for some architectures
# we save the prefetched instruction after.
def new_node(self, curr, prefetch, nxt):
ad = curr.address
self.nodes[ad] = [curr]
if nxt is not None:
self.link_out[ad] = nxt
if nxt is not None:
for n in nxt:
if n not in self.link_in:
self.link_in[n] = [ad]
else:
self.link_in[n].append(ad)
if prefetch is not None:
self.nodes[ad].append(prefetch)
def exists(self, inst):
return inst.address in self.nodes
# Concat instructions in single block
# jumps are in separated blocks
    def simplify(self):
        """Fuse straight-line blocks: a block with a single predecessor
        whose predecessor has a single successor is merged into it.

        Jump instructions are deliberately kept in their own blocks, and
        the entry point is never fused away.
        """
        nodes = list(self.nodes.keys())
        start = time()
        for ad in nodes:
            if ad in self.uncond_jumps_set or ad in self.cond_jumps_set:
                continue
            # Only fuse when `ad` has exactly one predecessor.
            if ad not in self.link_in or len(self.link_in[ad]) != 1 or \
                    ad == self.entry_point_addr:
                continue
            pred = self.link_in[ad][0]
            # don't fuse with jumps
            if pred in self.uncond_jumps_set or pred in self.cond_jumps_set:
                continue
            if pred not in self.link_out or len(self.link_out[pred]) != 1:
                continue
            # Redirect pred's outgoing edges to ad's successors (or drop
            # them when ad is terminal), then splice ad's instructions in.
            if ad in self.link_out:
                self.link_out[pred] = self.link_out[ad]
            else:
                del self.link_out[pred]
            self.nodes[pred] += self.nodes[ad]
            if ad in self.link_out:
                del self.link_out[ad]
            del self.link_in[ad]
            del self.nodes[ad]
            # replace all addr which refers to ad
            for k, lst_i in self.link_in.items():
                if ad in lst_i:
                    lst_i[lst_i.index(ad)] = pred
        elapsed = time()
        elapsed = elapsed - start
        debug__("Graph simplified in %fs (%d nodes)" % (elapsed, len(self.nodes)))
def dot_loop_deps(self):
output = open("graph_loop_deps.dot", "w+")
output.write('digraph {\n')
output.write('node [fontname="liberation mono" style=filled fillcolor=white shape=box];\n')
for k, dp in self.deps.items():
output.write('node_%x_%x [label="(%x, %x)"' % (k[0], k[1], k[0], k[1]))
if k in self.false_loops:
output.write(' fillcolor="#B6FFDD"')
if k in self.all_deep_equiv:
output.write(' color="#ff0000"')
output.write('];\n')
for sub in dp:
output.write('node_%x_%x -> node_%x_%x;\n'
% (k[0], k[1], sub[0], sub[1]))
output.write('}\n')
output.close()
def dot_graph(self, jmptables):
output = open("graph.dot", "w+")
output.write('digraph {\n')
# output.write('graph [bgcolor="#aaaaaa" pad=20];\n')
# output.write('node [fontname="liberation mono" style=filled fillcolor="#333333" fontcolor="#d3d3d3" shape=box];\n')
output.write('node [fontname="liberation mono" style=filled fillcolor=white shape=box];\n')
keys = list(self.nodes.keys())
keys.sort()
for k in keys:
lst_i = self.nodes[k]
output.write('node_%x [label="' % k)
for i in lst_i:
output.write('0x%x: %s %s\\l' % (i.address, i.mnemonic, i.op_str))
output.write('"')
if k in self.loops_start:
output.write(' fillcolor="#FFFCC4"')
elif k not in self.link_out:
output.write(' fillcolor="#ff7777"')
elif k not in self.link_in:
output.write(' fillcolor="#B6FFDD"')
output.write('];\n')
for k, i in self.link_out.items():
if k in jmptables:
for ad in jmptables[k].table:
output.write('node_%x -> node_%x;\n' % (k, ad))
elif len(i) == 2:
# true green branch (jump is taken)
output.write('node_%x -> node_%x [color="#58DA9C"];\n'
% (k, i[BRANCH_NEXT_JUMP]))
# false red branch (jump is not taken)
output.write('node_%x -> node_%x [color="#ff7777"];\n'
% (k, i[BRANCH_NEXT]))
else:
output.write('node_%x -> node_%x;\n' % (k, i[BRANCH_NEXT]))
output.write('}')
output.close()
    def __search_last_node_loop(self, l_prev_loop, l_start, l_set):
        """For the loop (l_prev_loop, l_start) with body `l_set`, record in
        self.last_node_loop the nodes from which the loop is always
        re-entered (walking backwards from the loop head).
        """
        # NOTE(review): this inner helper is never called anywhere in the
        # method -- it is dead code and a candidate for removal.
        def __rec_branch_go_out(ad):
            stack = [ad]
            visited = set()
            while stack:
                ad = stack.pop(-1)
                if ad not in l_set:
                    return True
                if ad == l_start or ad in visited:
                    continue
                visited.add(ad)
                for n in self.link_out[ad]:
                    stack.append(n)
            return False
        # Start from the end of the loop
        stack = []
        visited = {l_start}
        for prev in self.link_in[l_start]:
            if prev in l_set:
                stack.append(prev)
        res = []
        while stack:
            ad = stack.pop(-1)
            if ad in visited or ad not in l_set:
                continue
            visited.add(ad)
            for prev in self.link_in[ad]:
                if prev == l_start:
                    continue
                go_out = False
                # A predecessor that can escape the loop makes `ad` a
                # "last node": once in `ad`, we are committed to the loop.
                for n in self.link_out[prev]:
                    if n not in l_set:
                        res.append(ad)
                        go_out = True
                if not go_out:
                    stack.append(prev)
        for ad in res:
            if ad not in self.last_node_loop:
                self.last_node_loop[ad] = set()
            self.last_node_loop[ad].add((l_prev_loop, l_start))
def __is_inf_loop(self, l_set):
for ad in l_set:
if ad in self.link_out:
for nxt in self.link_out[ad]:
if nxt not in l_set:
return False
return True
def path_exists(self, from_addr, to_addr, loop_start):
def __path_exists(curr, local_visited):
stack = []
for n in self.link_out[from_addr]:
stack.append(n)
while stack:
curr = stack.pop(-1)
if curr == to_addr:
return True
if curr in local_visited:
continue
local_visited.add(curr)
if curr not in self.link_out or curr == loop_start:
continue
for n in self.link_out[curr]:
stack.append(n)
return False
if from_addr == to_addr:
return True
if from_addr not in self.link_out:
return False
if (from_addr, to_addr) in self.cache_path_exists:
return self.cache_path_exists[(from_addr, to_addr)]
local_visited = set()
res = __path_exists(from_addr, local_visited)
self.cache_path_exists[(from_addr, to_addr)] = res
return res
# Returns a set containing every addresses which are in paths from
# 'from_addr' to 'to_addr'.
    def find_paths(self, from_addr, to_addr, global_visited):
        """Return the set of every address lying on some path from
        `from_addr` to `to_addr` (called with from == to to detect a
        loop body). Nodes in `global_visited` are not traversed.

        NOTE(review): recursion depth follows path length; very deep
        CFGs could hit the interpreter recursion limit -- the MAX_NODES
        cap in loop_detection bounds this in practice.
        """
        def __rec_find_paths(curr, local_visited, path_set):
            nonlocal isfirst
            # `isfirst` lets the start node match itself only after at
            # least one edge has been followed.
            if curr == to_addr and not isfirst:
                path_set.add(curr)
                return
            isfirst = False
            if curr in local_visited:
                return
            local_visited.add(curr)
            if curr in global_visited or curr not in self.link_out:
                return
            for n in self.link_out[curr]:
                __rec_find_paths(n, local_visited, path_set)
                # Propagate membership back up the call chain.
                if n in path_set:
                    path_set.add(curr)
        isfirst = True
        path_set = set()
        local_visited = set()
        __rec_find_paths(from_addr, local_visited, path_set)
        return path_set
def __try_find_loops(self, entry, waiting, par_loops, l_set, is_sub_loop):
detected_loops = {}
keys = set(waiting.keys())
for ad in keys:
if l_set is not None and ad not in l_set:
continue
if (entry, ad) in self.loops_set:
continue
l = self.find_paths(ad, ad, par_loops)
# If the set is empty, it's not a loop
if l:
self.loops_set[(entry, ad)] = l
is_sub_loop.add(ad)
detected_loops[ad] = (entry, ad)
return detected_loops
def __manage_waiting(self, stack, visited, waiting, l_set, done):
keys = set(waiting.keys())
for ad in keys:
if l_set is not None and ad not in l_set:
continue
if len(waiting[ad]) == 0:
del waiting[ad]
done.add(ad)
stack.append((-1, ad))
    def __until_stack_empty(self, stack, waiting, visited,
                            par_loops, l_set, is_sub_loop, done):
        """Drain the DFS stack of (prev, ad) edges; join points (several
        predecessors) are parked in `waiting` until all their incoming
        edges have been seen. Returns True if any progress was made.

        NOTE(review): `is_sub_loop` is accepted but never used here --
        it mirrors the signature of the sibling helpers.
        """
        has_moved = False
        while stack:
            prev, ad = stack.pop(-1)
            if ad in self.link_in and ad not in done:
                l_in = self.link_in[ad]
                # Join point (or outside the current loop body): record
                # which predecessors are still unseen instead of entering.
                if len(l_in) > 1 or l_set is not None and ad not in l_set:
                    if ad in waiting:
                        if prev in waiting[ad]:
                            waiting[ad].remove(prev)
                    else:
                        unseen = set(l_in)
                        unseen.remove(prev)
                        waiting[ad] = unseen
                    continue
            if ad in visited:
                continue
            visited.add(ad)
            if ad in self.link_out:
                for n in self.link_out[ad]:
                    # Never re-enter a parent loop head.
                    if n in par_loops:
                        continue
                    stack.append((ad, n))
                    has_moved = True
        return has_moved
    def __get_new_loops(self, waiting, detected_loops, l_set, is_sub_loop):
        """Clean `waiting` after loop detection and return the set of
        loop-head addresses that became fully resolved.

        NOTE(review): `is_sub_loop` is accepted but never used here.
        """
        new_loops = set()
        # Remove internal links to the beginning of the loop
        # If later we enter in the loop it means that len(waiting[ad]) == 0
        for ad, k in detected_loops.items():
            loop = self.loops_set[k]
            was_removed = False
            for rest in set(waiting[ad]):
                if rest in loop:
                    waiting[ad].remove(rest)
                    was_removed = True
            if was_removed:
                if len(waiting[ad]) == 0:
                    new_loops.add(ad)
                    del waiting[ad]
        # Remove external jumps which are outside the current loop
        for ad, unseen in waiting.items():
            if l_set is not None and ad not in l_set:
                continue
            for i in set(unseen):
                if l_set is not None and i not in l_set:
                    unseen.remove(i)
        return new_loops
    def __explore(self, entry, par_loops, visited, waiting, l_set, done):
        """Recursive CFG exploration from `entry`: detect loops, recurse
        into each newly found loop, then record loop bodies (loops_all)
        and the dependency tree (deps / rev_deps).
        """
        stack = []
        # Check if the first address (entry point of the function) is the
        # beginning of a loop.
        if not visited and entry in self.link_in:
            for p in self.link_in[entry]:
                stack.append((p, entry))
        else:
            if entry in self.link_out:
                for n in self.link_out[entry]:
                    stack.append((entry, n))
            visited.add(entry)
        is_sub_loop = set()
        while 1:
            if self.__until_stack_empty(
                    stack, waiting, visited, par_loops, l_set, is_sub_loop, done):
                self.__manage_waiting(stack, visited, waiting, l_set, done)
                continue
            detected_loops = self.__try_find_loops(
                entry, waiting, par_loops, l_set, is_sub_loop)
            new_loops = self.__get_new_loops(
                waiting, detected_loops, l_set, is_sub_loop)
            while new_loops:
                # Follow loops
                for ad in new_loops:
                    # TODO : optimize
                    v = set(visited)
                    v.add(ad)
                    pl = set(par_loops)
                    pl.add(ad)
                    l = self.loops_set[(entry, ad)]
                    self.__explore(ad, pl, v, waiting, l, set(done))
                detected_loops = self.__try_find_loops(
                    entry, waiting, par_loops, l_set, is_sub_loop)
                new_loops = self.__get_new_loops(
                    waiting, detected_loops, l_set, is_sub_loop)
            self.__manage_waiting(stack, visited, waiting, l_set, done)
            if not stack:
                break
        # Now for each current loop, we add the content of each sub-loops.
        # It means that a loop contains all sub-loops (which is not the case
        # in loops_set : in contains only the current loop).
        for ad in is_sub_loop:
            loop = set(self.loops_set[(entry, ad)])
            self.loops_all[(entry, ad)] = loop
            self.deps[(entry, ad)] = set()
            for (prev, start), l in self.loops_set.items():
                # Skip current loop
                if (prev, start) == (entry, ad):
                    continue
                # Is it a sub loop ?
                if prev == ad and start != entry and start in loop:
                    k1 = (entry, ad)
                    k2 = (prev, start)
                    if k2 not in self.rev_deps:
                        self.rev_deps[k2] = set()
                    self.rev_deps[k2].add(k1)
                    self.deps[k1].add(k2)
                    self.loops_all[(entry, ad)].update(self.loops_all[(prev, start)])
def all_false(self, loops_key):
for k in loops_key:
if k not in self.false_loops:
return False
return True
# Mark recursively parent loops
    def __rec_mark_parent_false(self, k):
        """Mark loop `k` as false, then walk upwards: a parent is also
        marked false once all of its sub-loops are false."""
        self.false_loops.add(k)
        if k not in self.rev_deps:
            return
        for par in self.rev_deps[k]:
            if par in self.false_loops:
                continue
            if self.all_false(self.deps[par]):
                self.__rec_mark_parent_false(par)
# Mark recursively sub loops
def __rec_mark_sub_false(self, k):
self.false_loops.add(k)
for sub in self.deps[k]:
if sub in self.false_loops:
continue
self.__rec_mark_sub_false(sub)
    def __yield_cmp_loops(self, keys1, not_in_false=True):
        """Yield each unordered pair of distinct loop keys from `keys1`,
        optionally skipping keys already marked false."""
        # optim: don't compare twice two loops
        keys2 = set(keys1)
        for k1 in keys1:
            keys2.remove(k1)
            if not_in_false and k1 in self.false_loops:
                continue
            for k2 in keys2:
                if not_in_false and k2 in self.false_loops:
                    continue
                yield k1, k2
    def __search_false_loops(self):
        """Detect "strange" loops created by a goto into a loop body and
        mark them (and their relatives) false.

        # example :
        #
        # if {
        #     goto label
        # }
        #
        # while {
        #     if {
        #         statement_1
        #         label:
        #         statement_2
        #     } else {
        #         statement_3
        #     }
        # }
        #
        # Check for example gotoinloop6 to see the result.
        """
        for (prev1, start1), (prev2, start2) in \
                self.__yield_cmp_loops(self.loops_all.keys()):
            l1 = self.loops_set[(prev1, start1)]
            l2 = self.loops_set[(prev2, start2)]
            # Two loops entangled in each other's body: the one whose
            # body is a subset of the other is the false one.
            if prev2 in l1 and \
                    start2 in l1 and \
                    start1 in l2:
                if l2.issubset(l1):
                    self.__rec_mark_parent_false((prev2, start2))
                    self.__rec_mark_sub_false((prev2, start2))
            elif prev1 in l2 and \
                    start1 in l2 and \
                    start2 in l1:
                if l1.issubset(l2):
                    self.__rec_mark_parent_false((prev1, start1))
                    self.__rec_mark_sub_false((prev1, start1))
    def __search_same_deep_equiv_loops(self):
        """Search equivalent loops at the same depth, comparing with
        'loops_all' (each item contains all sub-loops) instead of
        'loops_set' which contains only the loop itself.

        # example:
        #
        #      loop1
        #     /     \\
        #   loop2   loop3
        #
        # If loops_all[loop2] == loops_all[loop3], and if loop2 or loop3
        # is in false_loops, we remove these loops.
        """
        def do_add(k1, k2):
            # Union-find-lite: put k1/k2 into the same equivalence bucket
            # when their full loop bodies are identical.
            nonlocal idx_count, set_index, deep_equiv
            l1 = self.loops_all[k1]
            l2 = self.loops_all[k2]
            if l1 == l2:
                if k1 in set_index:
                    i = set_index[k1]
                    deep_equiv[i].add(k2)
                    self.all_deep_equiv.add(k2)
                    set_index[k2] = i
                elif k2 in set_index:
                    i = set_index[k2]
                    deep_equiv[i].add(k1)
                    self.all_deep_equiv.add(k1)
                    set_index[k1] = i
                else:
                    i = idx_count
                    idx_count += 1
                    deep_equiv[i] = {k1, k2}
                    set_index[k1] = i
                    set_index[k2] = i
                    self.all_deep_equiv.add(k1)
                    self.all_deep_equiv.add(k2)
        set_index = {}
        deep_equiv = {}
        idx_count = 0
        # Compare siblings under every parent, then the root loops.
        for k in self.deps:
            for k1, k2 in self.__yield_cmp_loops(self.deps[k], False):
                do_add(k1, k2)
        for k1, k2 in self.__yield_cmp_loops(self.roots, False):
            do_add(k1, k2)
        if not deep_equiv:
            return
        # Propagate falseness until a fixed point is reached.
        last_length = 0
        while last_length != len(self.false_loops):
            last_length = len(self.false_loops)
            for i, keys in deep_equiv.items():
                nb_false = 0
                for k in keys:
                    if k in self.false_loops:
                        nb_false += 1
                if nb_false > 0:
                    for k in set(keys):
                        if k in self.false_loops:
                            continue
                        subs = self.deps[k]
                        if len(subs) == 0 or self.all_false(subs):
                            keys.remove(k)
                            self.__rec_mark_parent_false(k)
    def __prune_loops(self):
        """Detect loops reachable through several parents and mark the
        redundant ones false, using paths in the dependency tree."""
        def set_paths(k, p):
            # Depth-first numbering of the dependency tree; a loop with
            # several parents collects several paths.
            nonlocal deps, loop_paths
            loop_paths[k].append(p)
            i = 0
            for sub in deps[k]:
                set_paths(sub, p + [i])
                i += 1
        #
        # Create loop paths
        # example:
        #
        #      loop1
        #     /     \
        #   loop2   loop3
        #
        # paths:
        # loop1 = [0]
        # loop2 = [0, 0]
        # loop3 = [0, 1]
        #
        deps = self.deps
        loop_paths = {}
        for k in deps:
            deps[k] = list(deps[k])
            loop_paths[k] = []
        i = 0
        for k in self.roots:
            set_paths(k, [i])
            i += 1
        # If there are more than one path for a loop, it means
        # that a loop has more than one parent. The goal is to
        # determine which parent is "wrong". If there is two parents
        # we can't say anything.
        prefix_to_remove = []
        for k, paths in loop_paths.items():
            if len(paths) > 2:
                stop_at_first_diff = False
                i = 0
                prefix = []
                while not stop_at_first_diff:
                    count = {}
                    for p in paths:
                        curr = p[i]
                        if curr in count:
                            count[curr] += 1
                        else:
                            count[curr] = 1
                    if len(count) > 1:
                        # Keep only the parent loop which has only one reference
                        # to this loop (and ONLY this loop must have ONLY ONE
                        # reference).
                        n = 0
                        for idx, c in count.items():
                            if c == 1:
                                n += 1
                        if n == 1:
                            # Remove all others loops
                            for loop_idx, c in count.items():
                                if c != 1:
                                    prefix.append(loop_idx)
                                    if prefix not in prefix_to_remove:
                                        prefix_to_remove.append(prefix)
                        stop_at_first_diff = True
                    else:
                        # here len(count) == 1
                        prefix.append(curr)
                    i += 1
        # Remove all loops which start with these prefix
        # if prefix_to_remove:
        #     debug__(loop_paths)
        #     for prefix in prefix_to_remove:
        #         debug__("prune %s" % repr(prefix))
        for k, paths in loop_paths.items():
            if k in self.false_loops:
                continue
            all_matches = True
            for p in paths:
                one_match = False
                for prefix in prefix_to_remove:
                    if list_starts_with(p, prefix):
                        one_match = True
                        break
                if not one_match:
                    all_matches = False
                    break
            if all_matches:
                self.false_loops.add(k)
    def __update_loops(self):
        """Drop false loops (roots of falseness or deep-equivalents) from
        loops_all / loops_set, recursing into their false sub-loops."""
        def rec_remove(k):
            if k not in self.loops_all:
                return
            del self.loops_all[k]
            del self.loops_set[k]
            for sub in self.deps[k]:
                if sub in self.false_loops:
                    rec_remove(sub)
        for k in self.false_loops:
            # Only start removal from top-level false loops or loops that
            # were found deep-equivalent to another one.
            if k not in self.rev_deps or k in self.all_deep_equiv:
                rec_remove(k)
    def loop_detection(self, entry, bypass_false_search=False):
        """Run the whole loop analysis from address `entry`: explore the
        CFG, prune/false-mark bogus loops, then compute derived tables
        (not_in_loop, infinite_loop, loops_start, last_node_loop).

        Analysis is skipped entirely when the CFG exceeds MAX_NODES.
        """
        start = time()
        # Equivalent loops at a same deep in the loops dependencies tree
        self.deep_equiv = set()
        # For one loop : contains all address of the loop only
        self.loops_set = {}
        # For one loop : contains all address of the loop and sub-loops
        self.loops_all = {}
        # Loop dependencies
        self.deps = {}
        self.rev_deps = {}
        # Loops marked as "False"
        self.false_loops = set()
        if len(self.nodes) > MAX_NODES:
            self.skipped_loops_analysis = True
            return
        self.__explore(entry, set(), set(), {}, None, set())
        # Roots are loops nobody depends on.
        self.roots = self.loops_set.keys() - self.rev_deps.keys()
        self.__prune_loops()
        if not bypass_false_search:
            self.__search_false_loops()
            self.__search_same_deep_equiv_loops()
        self.__update_loops()
        # Compute all address which are not in a loop
        in_loop = set()
        for l in self.loops_set.items():
            in_loop.update(l[1])
        # Rest of all address which are not in a loop
        self.not_in_loop = self.nodes.keys() - in_loop
        # Search infinite loops
        self.infinite_loop = set()
        for l_curr_loop, l_set in self.loops_all.items():
            if self.__is_inf_loop(l_set):
                self.infinite_loop.add(l_curr_loop)
        # Save first address of loops
        self.loops_start = set()
        for _, l_start in self.loops_all:
            self.loops_start.add(l_start)
        # For each loop we search the last node that if we enter in it,
        # we are sure to return to the loop.
        self.last_node_loop = {}
        for (l_prev_loop, l_start), l_set in self.loops_all.items():
            if (l_prev_loop, l_start) not in self.infinite_loop:
                self.__search_last_node_loop(l_prev_loop, l_start, l_set)
        elapsed = time()
        elapsed = elapsed - start
        debug__("Exploration: found %d loop(s) in %fs" %
                (len(self.loops_all), elapsed))
| gpl-3.0 |
mdavid/horizon | openstack_dashboard/contrib/sahara/content/data_processing/data_image_registry/tests.py | 25 | 5220 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from mox3.mox import IsA # noqa
from openstack_dashboard import api as dash_api
from openstack_dashboard.contrib.sahara import api
from openstack_dashboard.test import helpers as test
# URLs of the Sahara image-registry panel exercised by the tests below.
INDEX_URL = reverse(
    'horizon:project:data_processing.data_image_registry:index')
REGISTER_URL = reverse(
    'horizon:project:data_processing.data_image_registry:register')
class DataProcessingImageRegistryTests(test.TestCase):
    """Tests for the image registry panel: index, register, unregister
    and tag editing. API calls are stubbed with mox; the recorded call
    order must match what the views actually perform.
    """

    @test.create_stubs({api.sahara: ('image_list',)})
    def test_index(self):
        # Index view only needs the list of registered images.
        api.sahara.image_list(IsA(http.HttpRequest)) \
            .AndReturn(self.images.list())
        self.mox.ReplayAll()
        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(
            res,
            'project/data_processing.data_image_registry/image_registry.html')
        self.assertContains(res, 'Image Registry')
        self.assertContains(res, 'Image')
        self.assertContains(res, 'Tags')

    @test.create_stubs({api.sahara: ('image_get',
                                     'image_update',
                                     'image_tags_update',
                                     'image_list'),
                        dash_api.glance: ('image_list_detailed',)})
    def test_register(self):
        image = self.images.first()
        image_id = image.id
        test_username = 'myusername'
        test_description = 'mydescription'
        api.sahara.image_get(IsA(http.HttpRequest),
                             image_id).MultipleTimes().AndReturn(image)
        # The register form lists the user's active glance images.
        dash_api.glance.image_list_detailed(IsA(http.HttpRequest),
                                            filters={'owner': self.user.id,
                                                     'status': 'active'}) \
            .AndReturn((self.images.list(), False, False))
        api.sahara.image_update(IsA(http.HttpRequest),
                                image_id,
                                test_username,
                                test_description) \
            .AndReturn(True)
        api.sahara.image_tags_update(IsA(http.HttpRequest),
                                     image_id,
                                     {}) \
            .AndReturn(True)
        api.sahara.image_list(IsA(http.HttpRequest)) \
            .AndReturn([])
        self.mox.ReplayAll()
        res = self.client.post(
            REGISTER_URL,
            {'image_id': image_id,
             'user_name': test_username,
             'description': test_description,
             'tags_list': '{}'})
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
        self.assertMessageCount(success=1)

    @test.create_stubs({api.sahara: ('image_list',
                                     'image_unregister')})
    def test_unregister(self):
        image = self.images.first()
        api.sahara.image_list(IsA(http.HttpRequest)) \
            .AndReturn(self.images.list())
        api.sahara.image_unregister(IsA(http.HttpRequest), image.id)
        self.mox.ReplayAll()
        # Table delete action posts back to the index view.
        form_data = {'action': 'image_registry__delete__%s' % image.id}
        res = self.client.post(INDEX_URL, form_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
        self.assertMessageCount(success=1)

    @test.create_stubs({api.sahara: ('image_get',
                                     'image_update',
                                     'image_tags_update')})
    def test_edit_tags(self):
        image = self.registered_images.first()
        api.sahara.image_get(IsA(http.HttpRequest),
                             image.id).MultipleTimes().AndReturn(image)
        api.sahara.image_update(IsA(http.HttpRequest),
                                image.id,
                                image.username,
                                image.description) \
            .AndReturn(True)
        api.sahara.image_tags_update(IsA(http.HttpRequest),
                                     image.id,
                                     {"0": "mytag"}) \
            .AndReturn(True)
        self.mox.ReplayAll()
        edit_tags_url = reverse(
            'horizon:project:data_processing.data_image_registry:edit_tags',
            args=[image.id])
        res = self.client.post(
            edit_tags_url,
            {'image_id': image.id,
             'user_name': image.username,
             'description': image.description,
             'tags_list': '{"0": "mytag"}'})
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
        self.assertMessageCount(success=1)
| apache-2.0 |
TwinkleChawla/nova | nova/test.py | 14 | 15705 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base classes for our unit tests.
Allows overriding of flags for use of fakes, and some black magic for
inline callbacks.
"""
import contextlib
import datetime
import eventlet
eventlet.monkey_patch(os=False)
import copy
import inspect
import mock
import os
import fixtures
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_log.fixture import logging_error as log_fixture
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from oslotest import moxstubout
import six
import testtools
from nova import context
from nova import db
from nova.network import manager as network_manager
from nova.objects import base as objects_base
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import conf_fixture
from nova.tests.unit import policy_fixture
from nova import utils
CONF = cfg.CONF
CONF.import_opt('enabled', 'nova.api.openstack', group='osapi_v21')
logging.register_options(CONF)
# Tests capture output themselves; keep oslo.log off stderr.
CONF.set_override('use_stderr', False)
logging.setup(CONF, 'nova')
_TRUE_VALUES = ('True', 'true', '1', 'yes')
# contextlib.nested was removed in Python 3; provide an ExitStack-based
# equivalent there and fall back to the real one on Python 2.
if six.PY3:
    @contextlib.contextmanager
    def nested(*contexts):
        with contextlib.ExitStack() as stack:
            yield [stack.enter_context(c) for c in contexts]
else:
    nested = contextlib.nested
class SampleNetworks(fixtures.Fixture):
    """Create sample networks in the database."""

    def __init__(self, host=None):
        # NOTE(review): super().__init__() is not called; fixtures.Fixture
        # appears to tolerate this -- confirm before relying on it.
        self.host = host

    def setUp(self):
        super(SampleNetworks, self).setUp()
        ctxt = context.get_admin_context()
        network = network_manager.VlanManager(host=self.host)
        bridge_interface = CONF.flat_interface or CONF.vlan_interface
        # Create the test networks from the configured defaults.
        network.create_networks(ctxt,
                                label='test',
                                cidr='10.0.0.0/8',
                                multi_host=CONF.multi_host,
                                num_networks=CONF.num_networks,
                                network_size=CONF.network_size,
                                cidr_v6=CONF.fixed_range_v6,
                                gateway=CONF.gateway,
                                gateway_v6=CONF.gateway_v6,
                                bridge=CONF.flat_network_bridge,
                                bridge_interface=bridge_interface,
                                vpn_start=CONF.vpn_start,
                                vlan_start=CONF.vlan_start,
                                dns1=CONF.flat_network_dns)
        for net in db.network_get_all(ctxt):
            network.set_network_host(ctxt, net)
class TestingException(Exception):
    """Generic exception raised on purpose from tests."""
    pass
class skipIf(object):
    """Decorator skipping a test function, or every test of a class
    (via its setUp), when `condition` is true.

    Anything other than a plain function or a class raises TypeError.
    """

    def __init__(self, condition, reason):
        self.condition = condition
        self.reason = reason

    def __call__(self, func_or_cls):
        condition = self.condition
        reason = self.reason
        if inspect.isfunction(func_or_cls):
            @six.wraps(func_or_cls)
            def wrapped(*args, **kwargs):
                if condition:
                    raise testtools.TestCase.skipException(reason)
                return func_or_cls(*args, **kwargs)
            return wrapped
        elif inspect.isclass(func_or_cls):
            # Skip every test of the class by raising from setUp.
            orig_func = getattr(func_or_cls, 'setUp')

            @six.wraps(orig_func)
            def new_func(self, *args, **kwargs):
                if condition:
                    raise testtools.TestCase.skipException(reason)
                orig_func(self, *args, **kwargs)
            func_or_cls.setUp = new_func
            return func_or_cls
        else:
            # BUG FIX: the message used to say "skipUnless", which is a
            # different decorator entirely; name this class correctly.
            raise TypeError('skipIf can be used only with functions or '
                            'classes')
def _patch_mock_to_raise_for_invalid_assert_calls():
    """Monkey-patch mock.Mock.__getattr__ so a typo'd assert helper
    (e.g. m.assert_called_wiht) raises AttributeError instead of
    silently returning a child mock that never fails.
    """
    valid_asserts = frozenset([
        'assert_called_with',
        'assert_called_once_with',
        'assert_has_calls',
        'assert_any_calls'])

    def raise_for_invalid_assert_calls(wrapped):
        def wrapper(_self, name):
            if name.startswith('assert') and name not in valid_asserts:
                raise AttributeError('%s is not a valid mock assert method'
                                     % name)
            return wrapped(_self, name)
        return wrapper

    mock.Mock.__getattr__ = raise_for_invalid_assert_calls(
        mock.Mock.__getattr__)

# NOTE(gibi): needs to be called only once at import time
# to patch the mock lib
_patch_mock_to_raise_for_invalid_assert_calls()
class TestCase(testtools.TestCase):
    """Test case base class for all unit tests.

    Due to the slowness of DB access, please consider deriving from
    `NoDBTestCase` first.
    """
    # Subclasses flip USES_DB off to skip the Database fixture.
    USES_DB = True
    REQUIRES_LOCKING = False
    # Multiplier applied to the OS_TEST_TIMEOUT environment value.
    TIMEOUT_SCALING_FACTOR = 1

    def setUp(self):
        """Run before each test method to initialize test environment."""
        super(TestCase, self).setUp()
        self.useFixture(nova_fixtures.Timeout(
            os.environ.get('OS_TEST_TIMEOUT', 0),
            self.TIMEOUT_SCALING_FACTOR))
        self.useFixture(fixtures.NestedTempfile())
        self.useFixture(fixtures.TempHomeDir())
        self.useFixture(log_fixture.get_logging_handle_error_fixture())
        self.useFixture(nova_fixtures.OutputStreamCapture())
        self.useFixture(nova_fixtures.StandardLogging())
        # NOTE(sdague): because of the way we were using the lock
        # wrapper we eneded up with a lot of tests that started
        # relying on global external locking being set up for them. We
        # consider all of these to be *bugs*. Tests should not require
        # global external locking, or if they do, they should
        # explicitly set it up themselves.
        #
        # The following REQUIRES_LOCKING class parameter is provided
        # as a bridge to get us there. No new tests should be added
        # that require it, and existing classes and tests should be
        # fixed to not need it.
        if self.REQUIRES_LOCKING:
            lock_path = self.useFixture(fixtures.TempDir()).path
            self.fixture = self.useFixture(
                config_fixture.Config(lockutils.CONF))
            self.fixture.config(lock_path=lock_path,
                                group='oslo_concurrency')
        self.useFixture(conf_fixture.ConfFixture(CONF))
        self.useFixture(nova_fixtures.RPCFixture('nova.test'))
        if self.USES_DB:
            self.useFixture(nova_fixtures.Database())
        # NOTE(blk-u): WarningsFixture must be after the Database fixture
        # because sqlalchemy-migrate messes with the warnings filters.
        self.useFixture(nova_fixtures.WarningsFixture())
        # NOTE(danms): Make sure to reset us back to non-remote objects
        # for each test to avoid interactions. Also, backup the object
        # registry.
        objects_base.NovaObject.indirection_api = None
        self._base_test_obj_backup = copy.copy(
            objects_base.NovaObjectRegistry._registry._obj_classes)
        self.addCleanup(self._restore_obj_registry)
        # NOTE(mnaser): All calls to utils.is_neutron() are cached in
        # nova.utils._IS_NEUTRON. We set it to None to avoid any
        # caching of that value.
        utils._IS_NEUTRON = None
        mox_fixture = self.useFixture(moxstubout.MoxStubout())
        self.mox = mox_fixture.mox
        self.stubs = mox_fixture.stubs
        self.addCleanup(self._clear_attrs)
        self.useFixture(fixtures.EnvironmentVariable('http_proxy'))
        self.policy = self.useFixture(policy_fixture.PolicyFixture())
        self.useFixture(nova_fixtures.PoisonFunctions())

    def _restore_obj_registry(self):
        objects_base.NovaObjectRegistry._registry._obj_classes = \
            self._base_test_obj_backup

    def _clear_attrs(self):
        # Delete attributes that don't start with _ so they don't pin
        # memory around unnecessarily for the duration of the test
        # suite
        for key in [k for k in self.__dict__.keys() if k[0] != '_']:
            # NOTE(gmann): Skip attribute 'id' because if tests are being
            # generated using testscenarios then, 'id' attribute is being
            # added during cloning the tests. And later that 'id' attribute
            # is being used by test suite to generate the results for each
            # newly generated tests by testscenarios.
            if key != 'id':
                del self.__dict__[key]

    def flags(self, **kw):
        """Override flag variables for a test."""
        group = kw.pop('group', None)
        for k, v in six.iteritems(kw):
            CONF.set_override(k, v, group)

    def start_service(self, name, host=None, **kwargs):
        # The fixture takes care of stopping the service on cleanup.
        svc = self.useFixture(
            nova_fixtures.ServiceFixture(name, host, **kwargs))
        return svc.service

    def assertJsonEqual(self, expected, observed):
        """Asserts that 2 complex data structures are json equivalent.

        We use data structures which serialize down to json throughout
        the code, and often times we just need to know that these are
        json equivalent. This means that list order is not important,
        and should be sorted.

        Because this is a recursive set of assertions, when failure
        happens we want to expose both the local failure and the
        global view of the 2 data structures being compared. So a
        MismatchError which includes the inner failure as the
        mismatch, and the passed in expected / observed as matchee /
        matcher.
        """
        if isinstance(expected, six.string_types):
            expected = jsonutils.loads(expected)
        if isinstance(observed, six.string_types):
            observed = jsonutils.loads(observed)

        def sort_key(x):
            # Normalize unorderable values into strings / sorted items so
            # heterogeneous containers can still be compared.
            if isinstance(x, (set, list)) or isinstance(x, datetime.datetime):
                return str(x)
            if isinstance(x, dict):
                items = ((sort_key(key), sort_key(value))
                         for key, value in x.items())
                return sorted(items)
            return x

        def inner(expected, observed):
            if isinstance(expected, dict) and isinstance(observed, dict):
                self.assertEqual(len(expected), len(observed))
                expected_keys = sorted(expected)
                observed_keys = sorted(observed)
                self.assertEqual(expected_keys, observed_keys)
                for key in list(six.iterkeys(expected)):
                    inner(expected[key], observed[key])
            elif (isinstance(expected, (list, tuple, set)) and
                    isinstance(observed, (list, tuple, set))):
                self.assertEqual(len(expected), len(observed))
                expected_values_iter = iter(sorted(expected, key=sort_key))
                observed_values_iter = iter(sorted(observed, key=sort_key))
                for i in range(len(expected)):
                    inner(next(expected_values_iter),
                          next(observed_values_iter))
            else:
                self.assertEqual(expected, observed)

        try:
            inner(expected, observed)
        except testtools.matchers.MismatchError as e:
            inner_mismatch = e.mismatch
            # inverting the observed / expected because testtools
            # error messages assume expected is second. Possibly makes
            # reading the error messages less confusing.
            raise testtools.matchers.MismatchError(observed, expected,
                                                   inner_mismatch, verbose=True)

    def assertPublicAPISignatures(self, baseinst, inst):
        # Verify `inst` exposes exactly the public methods of `baseinst`,
        # with matching argument specs.
        def get_public_apis(inst):
            methods = {}

            def findmethods(object):
                return inspect.ismethod(object) or inspect.isfunction(object)

            for (name, value) in inspect.getmembers(inst, findmethods):
                if name.startswith("_"):
                    continue
                methods[name] = value
            return methods

        baseclass = baseinst.__class__.__name__
        basemethods = get_public_apis(baseinst)
        implmethods = get_public_apis(inst)
        extranames = []
        for name in sorted(implmethods.keys()):
            if name not in basemethods:
                extranames.append(name)
        self.assertEqual([], extranames,
                         "public APIs not listed in base class %s" %
                         baseclass)
        for name in sorted(implmethods.keys()):
            baseargs = inspect.getargspec(basemethods[name])
            implargs = inspect.getargspec(implmethods[name])
            self.assertEqual(baseargs, implargs,
                             "%s args don't match base class %s" %
                             (name, baseclass))
class APICoverage(object):
    """Mixin asserting a test class covers every public API method.

    Set `cover_api` to the API class under test; test_api_methods then
    checks there is a test_<name> method for each public attribute.
    """
    cover_api = None

    def test_api_methods(self):
        # assertIsNotNone gives a clearer failure message than
        # assertTrue(x is not None).
        self.assertIsNotNone(self.cover_api)
        api_methods = [x for x in dir(self.cover_api)
                       if not x.startswith('_')]
        test_methods = [x[5:] for x in dir(self)
                        if x.startswith('test_')]
        self.assertThat(
            test_methods,
            testtools.matchers.ContainsAll(api_methods))
class TimeOverride(fixtures.Fixture):
    """Fixture to start and remove time override."""

    def setUp(self):
        super(TimeOverride, self).setUp()
        # Freeze oslo.utils' notion of "now"; the cleanup restores real
        # time even when the test fails.
        timeutils.set_time_override()
        self.addCleanup(timeutils.clear_time_override)
class NoDBTestCase(TestCase):
    """`NoDBTestCase` differs from TestCase in that DB access is not supported.

    This makes tests run significantly faster. If possible, all new tests
    should derive from this class.
    """
    # Skips the Database fixture in TestCase.setUp().
    USES_DB = False
class BaseHookTestCase(NoDBTestCase):
    def assert_has_hook(self, expected_name, func):
        # `__hook_name__` is presumably attached by nova's hook decorator
        # -- confirm against nova.hooks before relying on this elsewhere.
        self.assertTrue(hasattr(func, '__hook_name__'))
        self.assertEqual(expected_name, func.__hook_name__)
class MatchType(object):
    """Equality helper that matches any instance of a given type.

    Intended for use with ``mock.assert_called_with()`` to assert that a
    particular parameter has a specific data type. It enables a stricter
    check than the built-in ``mock.ANY`` helper and is the equivalent of
    the ``mox.IsA()`` function from the legacy mox library.

    Example usage:

        mock_some_method.assert_called_once_with(
            "hello",
            MatchType(objects.Instance),
            mock.ANY,
            "world",
            MatchType(objects.KeyPair))
    """

    def __init__(self, wanttype):
        self.wanttype = wanttype

    def __eq__(self, other):
        # Strict type match: subclasses of wanttype do NOT match.
        return type(other) == self.wanttype

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "<MatchType:%s>" % (self.wanttype,)
| apache-2.0 |
nstockton/nvda-add-ons | applications/IfInterpreters/appModules/magnetic.py | 1 | 1910 | # -*- coding: utf-8 -*-
# A part of NonVisual Desktop Access (NVDA)
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
# Copyright (C) 2020 Nick Stockton <nstockton@gmail.com>
# Portions of This Work Copyright (C) 2006-2013 NV Access Limited
# Built-in NVDA Modules
import api
from NVDAObjects.behaviors import Terminal
from NVDAObjects.window import Window, DisplayModelEditableText, DisplayModelLiveText
import oleacc
# Local shared functions and classes
from .ifcommon import *
CONFIG_FILE_NAME = "magnetic_config.ini"
class MyDisplayModelLiveText(GameDisplayModelLiveText):
    """Live-text overlay for the Magnetic interpreter window that also
    folds the status bar contents into the reported text.
    """

    def event_textChange(self):
        # After the base class reports new text, move the review cursor
        # back to the game's input prompt.
        super(MyDisplayModelLiveText, self).event_textChange()
        self.gotoPrompt()

    def _getTextLines(self):
        """Return ``(lines, attributes)`` for the live region.

        NOTE(review): ``myGetTextLines`` comes from the wildcard
        ``ifcommon`` import (not visible here) -- confirm its contract.
        """
        output = self.myGetTextLines(self)
        # Walk the sibling hierarchy to what is assumed to be the status
        # bar object of the Magnetic window -- TODO confirm this
        # navigation path against the application's window tree.
        obj = self.parent.next.next.firstChild
        if obj.windowClassName == "msctls_statusbar32" and obj.windowControlID == 59393 and obj.IAccessibleRole == oleacc.ROLE_SYSTEM_STATUSBAR:
            for child in obj.children:
                # Skip the pane holding the static application title.
                if child and child.windowText != "Windows Magnetic":
                    output.extend(self.myGetTextLines(child))
        # Second tuple element (attributes) is intentionally empty.
        return (output, [])
class IO(Window):
    """Overlay class for the game's main I/O window: starts/stops live
    text monitoring and manages the add-on configuration lifecycle.
    """

    def event_gainFocus(self):
        super(IO, self).event_gainFocus()
        # Lazily load the add-on configuration the first time the game
        # window receives focus.
        if not ADDON_CONFIG:
            loadAddonConfig(CONFIG_FILE_NAME)
        self.startMonitoring()
        # NOTE(review): this assigns onto self.TextInfo -- presumably the
        # TextInfo class used by this object; confirm the attribute is
        # intended to be set class-wide rather than per call.
        self.TextInfo.stripOuterWhitespace = True

    def event_loseFocus(self):
        self.stopMonitoring()
        # Ensure a configuration exists before persisting it, then save
        # any changes made during the session.
        if not ADDON_CONFIG:
            loadAddonConfig(CONFIG_FILE_NAME)
        saveAddonConfig()
class AppModule(GameAppModule):
    """App module for the Windows Magnetic interpreter."""

    def chooseNVDAObjectOverlayClasses(self, obj, clsList):
        # Match the interpreter's output client area by window class,
        # control id and accessible role before overlaying it.
        if obj.windowClassName == "AfxFrameOrView70s" and obj.windowControlID == 59648 and obj.IAccessibleRole == oleacc.ROLE_SYSTEM_CLIENT:
            # Editable-text behavior would fight with live-text
            # monitoring, so drop it when present.
            try:
                clsList.remove(DisplayModelEditableText)
            except ValueError:
                pass
            # Prepend our overlays so they take precedence over the
            # remaining classes in the MRO NVDA builds from clsList.
            clsList[0:0] = (IO, Terminal, MyDisplayModelLiveText)
| gpl-2.0 |
niklabh/netmag | netmag/wsgi.py | 1 | 1419 | """
WSGI config for netmag project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os

# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or set the
# module unconditionally:
# os.environ["DJANGO_SETTINGS_MODULE"] = "netmag.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "netmag.settings")

# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()

# Apply WSGI middleware here, wrapping ``application`` as needed, e.g.:
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| mit |
shuggiefisher/potato | django/test/simple.py | 150 | 15012 | import unittest as real_unittest
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.models import get_app, get_apps
from django.test import _doctest as doctest
from django.test.utils import setup_test_environment, teardown_test_environment
from django.test.testcases import OutputChecker, DocTestRunner, TestCase
from django.utils import unittest
try:
all
except NameError:
from django.utils.itercompat import all
__all__ = ('DjangoTestRunner', 'DjangoTestSuiteRunner', 'run_tests')
# The module name for tests outside models.py
TEST_MODULE = 'tests'
doctestOutputChecker = OutputChecker()
class DjangoTestRunner(unittest.TextTestRunner):
    """Deprecated alias for ``TextTestRunner``.

    Kept only for backwards compatibility; instantiation emits a
    ``PendingDeprecationWarning`` and delegates everything to the base
    class unchanged.
    """

    def __init__(self, *args, **kwargs):
        import warnings
        message = ("DjangoTestRunner is deprecated; it's functionality is "
                   "indistinguishable from TextTestRunner")
        warnings.warn(message, PendingDeprecationWarning)
        super(DjangoTestRunner, self).__init__(*args, **kwargs)
def get_tests(app_module):
    """Return the ``tests`` module that lives alongside ``app_module``.

    Returns ``None`` when no ``tests`` module exists next to the app's
    ``models`` module. When the module exists but fails to import, the
    original ImportError is re-raised so the real failure is reported
    instead of the tests being silently skipped.

    :param app_module: the application's models module.
    """
    try:
        app_path = app_module.__name__.split('.')[:-1]
        test_module = __import__('.'.join(app_path + [TEST_MODULE]), {}, {}, TEST_MODULE)
    # Bug fix: was ``except ImportError, e`` -- Python-2-only syntax that
    # also bound an unused variable; the bare form works on 2 and 3.
    except ImportError:
        # Couldn't import tests.py. Was it due to a missing file, or
        # due to an import error in a tests.py that actually exists?
        import os.path
        # NOTE(review): the ``imp`` module is deprecated and removed in
        # Python 3.12; this code path predates ``importlib``.
        from imp import find_module
        try:
            mod = find_module(TEST_MODULE, [os.path.dirname(app_module.__file__)])
        except ImportError:
            # 'tests' module doesn't exist. Move on.
            test_module = None
        else:
            # The module exists, so there must be an import error in the
            # test module itself. We don't need the module; so if the
            # module was a single file module (i.e., tests.py), close the file
            # handle returned by find_module. Otherwise, the test module
            # is a directory, and there is nothing to close.
            if mod[0]:
                mod[0].close()
            raise
    return test_module
def build_suite(app_module):
    """Create a complete Django test suite for the provided application
    module: unit tests and doctests from ``models.py`` plus, when present,
    the parallel ``tests`` module. A module-level ``suite()`` hook, if
    defined, takes precedence over automatic discovery.
    """
    suite = unittest.TestSuite()

    # Load unit and doctests in the models.py module. If module has
    # a suite() method, use it. Otherwise build the test suite ourselves.
    if hasattr(app_module, 'suite'):
        suite.addTest(app_module.suite())
    else:
        suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(app_module))
        try:
            suite.addTest(doctest.DocTestSuite(app_module,
                                               checker=doctestOutputChecker,
                                               runner=DocTestRunner))
        except ValueError:
            # DocTestSuite raises ValueError when there are no doctests
            # in models.py; that is not an error here.
            pass

    # Check to see if a separate 'tests' module exists parallel to the
    # models module
    test_module = get_tests(app_module)
    if test_module:
        # Load unit and doctests in the tests.py module. If module has
        # a suite() method, use it. Otherwise build the test suite ourselves.
        if hasattr(test_module, 'suite'):
            suite.addTest(test_module.suite())
        else:
            suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(test_module))
            try:
                suite.addTest(doctest.DocTestSuite(test_module,
                                                   checker=doctestOutputChecker,
                                                   runner=DocTestRunner))
            except ValueError:
                # No doc tests in tests.py
                pass
    return suite
def build_test(label):
    """Construct a test case with the specified label. Label should be of the
    form model.TestClass or model.TestClass.test_method. Returns an
    instantiated test or test suite corresponding to the label provided.

    Raises ValueError when the label is malformed or matches neither a
    TestCase nor a doctest.
    """
    parts = label.split('.')
    if len(parts) < 2 or len(parts) > 3:
        raise ValueError("Test label '%s' should be of the form app.TestCase or app.TestCase.test_method" % label)

    #
    # First, look for TestCase instances with a name that matches
    #
    app_module = get_app(parts[0])
    test_module = get_tests(app_module)
    TestClass = getattr(app_module, parts[1], None)

    # Couldn't find the test class in models.py; look in tests.py
    if TestClass is None:
        if test_module:
            TestClass = getattr(test_module, parts[1], None)

    try:
        # issubclass raises TypeError when TestClass is None or not a
        # class at all; that is handled below by falling through to the
        # doctest lookup.
        if issubclass(TestClass, (unittest.TestCase, real_unittest.TestCase)):
            if len(parts) == 2:  # label is app.TestCase
                try:
                    return unittest.TestLoader().loadTestsFromTestCase(TestClass)
                except TypeError:
                    raise ValueError("Test label '%s' does not refer to a test class" % label)
            else:  # label is app.TestCase.test_method
                return TestClass(parts[2])
    except TypeError:
        # TestClass isn't a TestClass - it must be a method or normal class
        pass

    #
    # If there isn't a TestCase, look for a doctest that matches
    #
    tests = []
    for module in app_module, test_module:
        try:
            doctests = doctest.DocTestSuite(module,
                                            checker=doctestOutputChecker,
                                            runner=DocTestRunner)
            # Now iterate over the suite, looking for doctests whose name
            # matches the pattern that was given
            for test in doctests:
                if test._dt_test.name in (
                        '%s.%s' % (module.__name__, '.'.join(parts[1:])),
                        '%s.__test__.%s' % (module.__name__, '.'.join(parts[1:]))):
                    tests.append(test)
        except ValueError:
            # No doctests found.
            pass

    # If no tests were found, then we were given a bad test label.
    if not tests:
        raise ValueError("Test label '%s' does not refer to a test" % label)

    # Construct a suite out of the tests that matched.
    return unittest.TestSuite(tests)
def partition_suite(suite, classes, bins):
    """Partition a test suite by test type.

    ``classes`` is a sequence of types and ``bins`` a sequence of
    TestSuites with exactly one more element than ``classes``. Tests whose
    type matches ``classes[i]`` (first match wins) are added to
    ``bins[i]``; tests matching none of the classes land in ``bins[-1]``.
    Nested suites are flattened recursively.
    """
    for test in suite:
        if isinstance(test, unittest.TestSuite):
            # Recurse so that only concrete test cases are binned.
            partition_suite(test, classes, bins)
            continue
        target = bins[-1]
        for index, klass in enumerate(classes):
            if isinstance(test, klass):
                target = bins[index]
                break
        target.addTest(test)


def reorder_suite(suite, classes):
    """Reorder a test suite by test type.

    All tests of type ``classes[0]`` are placed first, then tests of type
    ``classes[1]``, and so on. Tests matching none of the classes are
    placed last.
    """
    bins = [unittest.TestSuite() for _ in range(len(classes) + 1)]
    partition_suite(suite, classes, bins)
    # Concatenate the per-type bins back into a single flat suite.
    for extra in bins[1:]:
        bins[0].addTests(extra)
    return bins[0]
def dependency_ordered(test_databases, dependencies):
    """Reorder ``test_databases`` so that every database appears after all
    of the databases it depends on (per TEST_DEPENDENCIES).

    ``test_databases`` is a list of ``(signature, (db_name, aliases))``
    pairs and ``dependencies`` maps an alias to the aliases it depends on;
    both arguments are consumed/mutated. Raises ImproperlyConfigured when
    the dependency graph contains a cycle.
    """
    ordered = []
    resolved = set()

    while test_databases:
        changed = False
        deferred = []

        # Try each remaining entry; entries whose dependencies are not yet
        # resolved are deferred to the next round.
        while test_databases:
            signature, (db_name, aliases) = test_databases.pop()
            satisfied = True
            for alias in aliases:
                if alias not in dependencies:
                    # No declared dependencies: trivially resolvable.
                    resolved.add(alias)
                    continue
                if all(dep in resolved for dep in dependencies[alias]):
                    # All prerequisites of this alias are satisfied.
                    dependencies.pop(alias)
                    resolved.add(alias)
                else:
                    satisfied = False
            if satisfied:
                ordered.append((signature, (db_name, aliases)))
                changed = True
            else:
                deferred.append((signature, (db_name, aliases)))

        # A full pass with no progress means the graph is cyclic.
        if not changed:
            raise ImproperlyConfigured("Circular dependency in TEST_DEPENDENCIES")
        test_databases = deferred
    return ordered
class DjangoTestSuiteRunner(object):
    """Default Django test runner.

    Builds a suite from app labels, prepares the test environment and
    test databases, runs the suite, and tears everything down again.
    """

    def __init__(self, verbosity=1, interactive=True, failfast=False, **kwargs):
        # Bug fix: failfast previously defaulted to True, which aborted
        # the whole suite on the first failure and contradicted both the
        # run_tests() wrapper below and upstream Django (failfast=False).
        self.verbosity = verbosity
        self.interactive = interactive
        self.failfast = failfast

    def setup_test_environment(self, **kwargs):
        """Prepare global state (mail outbox, template rendering, signal
        handlers) for a test run."""
        setup_test_environment()
        settings.DEBUG = False
        unittest.installHandler()

    def build_suite(self, test_labels, extra_tests=None, **kwargs):
        """Build the suite for ``test_labels`` (every installed app when no
        labels are given), appending any ``extra_tests``; TestCase-based
        tests are reordered to run first."""
        suite = unittest.TestSuite()

        if test_labels:
            for label in test_labels:
                if '.' in label:
                    # Dotted labels address a specific TestCase or method.
                    suite.addTest(build_test(label))
                else:
                    app = get_app(label)
                    suite.addTest(build_suite(app))
        else:
            for app in get_apps():
                suite.addTest(build_suite(app))

        if extra_tests:
            for test in extra_tests:
                suite.addTest(test)

        return reorder_suite(suite, (TestCase,))

    def setup_databases(self, **kwargs):
        """Create the test databases, honouring TEST_MIRROR, duplicate
        DATABASES entries and TEST_DEPENDENCIES ordering.

        Returns ``(old_names, mirrors)`` for teardown_databases().
        """
        from django.db import connections, DEFAULT_DB_ALIAS

        # First pass -- work out which databases actually need to be created,
        # and which ones are test mirrors or duplicate entries in DATABASES
        mirrored_aliases = {}
        test_databases = {}
        dependencies = {}
        for alias in connections:
            connection = connections[alias]
            if connection.settings_dict['TEST_MIRROR']:
                # If the database is marked as a test mirror, save
                # the alias.
                mirrored_aliases[alias] = connection.settings_dict['TEST_MIRROR']
            else:
                # Store a tuple with DB parameters that uniquely identify it.
                # If we have two aliases with the same values for that tuple,
                # we only need to create the test database once.
                item = test_databases.setdefault(
                    connection.creation.test_db_signature(),
                    (connection.settings_dict['NAME'], [])
                )
                item[1].append(alias)

                if 'TEST_DEPENDENCIES' in connection.settings_dict:
                    dependencies[alias] = connection.settings_dict['TEST_DEPENDENCIES']
                else:
                    # Non-default databases implicitly depend on 'default'.
                    if alias != DEFAULT_DB_ALIAS:
                        dependencies[alias] = connection.settings_dict.get('TEST_DEPENDENCIES', [DEFAULT_DB_ALIAS])

        # Second pass -- actually create the databases.
        old_names = []
        mirrors = []
        # Bug fix: dependency_ordered() pops from its argument, so it needs
        # a real list; dict.items() is a non-popping view on Python 3.
        for signature, (db_name, aliases) in dependency_ordered(
                list(test_databases.items()), dependencies):
            # Actually create the database for the first connection
            connection = connections[aliases[0]]
            old_names.append((connection, db_name, True))
            test_db_name = connection.creation.create_test_db(self.verbosity, autoclobber=not self.interactive)
            for alias in aliases[1:]:
                connection = connections[alias]
                if db_name:
                    # Duplicate entry: point it at the database created above.
                    old_names.append((connection, db_name, False))
                    connection.settings_dict['NAME'] = test_db_name
                else:
                    # If settings_dict['NAME'] isn't defined, we have a backend where
                    # the name isn't important -- e.g., SQLite, which uses :memory:.
                    # Force create the database instead of assuming it's a duplicate.
                    old_names.append((connection, db_name, True))
                    connection.creation.create_test_db(self.verbosity, autoclobber=not self.interactive)

        for alias, mirror_alias in mirrored_aliases.items():
            mirrors.append((alias, connections[alias].settings_dict['NAME']))
            connections[alias].settings_dict['NAME'] = connections[mirror_alias].settings_dict['NAME']

        return old_names, mirrors

    def run_suite(self, suite, **kwargs):
        """Run the suite with a plain TextTestRunner."""
        return unittest.TextTestRunner(verbosity=self.verbosity, failfast=self.failfast).run(suite)

    def teardown_databases(self, old_config, **kwargs):
        """Destroy the test databases and restore original names."""
        from django.db import connections
        old_names, mirrors = old_config
        # Point all the mirrors back to the originals
        for alias, old_name in mirrors:
            connections[alias].settings_dict['NAME'] = old_name
        # Destroy all the non-mirror databases
        for connection, old_name, destroy in old_names:
            if destroy:
                connection.creation.destroy_test_db(old_name, self.verbosity)
            else:
                connection.settings_dict['NAME'] = old_name

    def teardown_test_environment(self, **kwargs):
        """Undo setup_test_environment()."""
        unittest.removeHandler()
        teardown_test_environment()

    def suite_result(self, suite, result, **kwargs):
        """Return the number of failed/errored tests for the exit code."""
        return len(result.failures) + len(result.errors)

    def run_tests(self, test_labels, extra_tests=None, **kwargs):
        """
        Run the unit tests for all the test labels in the provided list.
        Labels must be of the form:
         - app.TestClass.test_method
             Run a single specific test method
         - app.TestClass
             Run all the test methods in a given class
         - app
             Search for doctests and unittests in the named application.

        When looking for tests, the test runner will look in the models and
        tests modules for the application.

        A list of 'extra' tests may also be provided; these tests
        will be added to the test suite.

        Returns the number of tests that failed.
        """
        self.setup_test_environment()
        suite = self.build_suite(test_labels, extra_tests)
        old_config = self.setup_databases()
        result = self.run_suite(suite)
        self.teardown_databases(old_config)
        self.teardown_test_environment()
        return self.suite_result(suite, result)
def run_tests(test_labels, verbosity=1, interactive=True, failfast=False, extra_tests=None):
    """Deprecated functional entry point; use DjangoTestSuiteRunner."""
    import warnings
    warnings.warn(
        'The run_tests() test runner has been deprecated in favor of DjangoTestSuiteRunner.',
        DeprecationWarning
    )
    runner = DjangoTestSuiteRunner(
        verbosity=verbosity, interactive=interactive, failfast=failfast)
    return runner.run_tests(test_labels, extra_tests=extra_tests)
| bsd-3-clause |
3dfxsoftware/cbss-addons | mrp_workorder_lot/wizard/mrp_consume_produce.py | 1 | 14643 | # -*- encoding: utf-8 -*-
###########################################################################
# Module Writen to OpenERP, Open Source Management Solution
#
# Copyright (c) 2012 Vauxoo - http://www.vauxoo.com
# All Rights Reserved.
# info@vauxoo.com
############################################################################
# Coded by: julio (julio@vauxoo.com)
############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv, fields
from openerp.tools.translate import _
class mrp_consume(osv.TransientModel):
    """Consume wizard extension tying product consumption to Work Order
    Lots (``mrp.workorder.lot``) and driving their state transitions
    (draft -> picking -> open).
    """
    _inherit = 'mrp.consume'

    def _get_default_wo_lot(self, cr, uid, context=None):
        """
        @return: The first Work Order Lot to produce (cardinal order).
        """
        context = context or {}
        res = False
        production_obj = self.pool.get('mrp.production')
        active_id = context.get('active_id', False)
        active_model = context.get('active_model', False)
        if active_id:
            if active_model == 'mrp.production':
                production_id = active_id
                # Only lots still in draft are candidates.
                wol_brws = production_obj.browse(
                    cr, uid, production_id, context=context).wo_lot_ids
                res = [wol_brw.id
                       for wol_brw in wol_brws
                       if wol_brw.state == 'draft']
            elif active_model == 'mrp.workorder.lot':
                res = [active_id]
            else:
                raise osv.except_osv(
                    _('Error!!'),
                    _('This wizard only can be call from the manufacturing'
                      ' order form or the Work Orders by Active Lot menu.'))
        # First candidate wins; False when there is none.
        return res and res[0] or False

    def _get_default_mo_id(self, cr, uid, context=None):
        """
        Return the production id.
        """
        context = context or {}
        wol_obj = self.pool.get('mrp.workorder.lot')
        res = False
        active_id = context.get('active_id', False)
        active_model = context.get('active_model', False)
        if active_id:
            if active_model == 'mrp.production':
                res = active_id
            elif active_model == 'mrp.workorder.lot':
                # Resolve the production order through the selected lot.
                res = wol_obj.browse(
                    cr, uid, active_id, context=context).production_id.id
            else:
                raise osv.except_osv(
                    _('Error!!'),
                    _('This wizard only can be call from the manufacturing'
                      ' order form or the Work Orders by Active Lot menu.'))
        return res

    # Wizard fields (OpenERP 7 ``_columns`` declaration style).
    _columns = {
        'production_id': fields.many2one(
            'mrp.production',
            string='Manufacturing Order',
            help='Manufacturing Order'),
        'wo_lot_id': fields.many2one(
            'mrp.workorder.lot',
            required=True,
            string='Work Orders Lots',
            help='Work Orders Lots.'),
    }

    _defaults = {
        'production_id': _get_default_mo_id,
        'wo_lot_id': _get_default_wo_lot,
    }

    def onchange_wo_lot_ids(self, cr, uid, ids, production_id, wo_lot_id,
                            consume_line_ids, context=None):
        """
        Loads product information from the work order selected.
        @param production_id: manufacturing order id.
        @param wo_lot_id: selected work order lot.
        @param consume_line_ids: current cosumne product lines.
        """
        context = context or {}
        consume_line_list = list()
        production_obj = self.pool.get('mrp.production')
        production_brw = production_obj.browse(
            cr, uid, production_id, context=context)
        if wo_lot_id:
            # Nothing left to consume on this production order.
            if not production_brw.move_lines:
                raise osv.except_osv(
                    _('Error!'),
                    _('You have not more Product to Consume, please add new'
                      ' lines by clicking the Product Request/Return Button.'))
            consume_line_list = self._get_consume_line_list_with_wol_percent(
                cr, uid, production_id, wo_lot_id, context=context)
        return {'value': {'consume_line_ids': consume_line_list}}

    def _get_consume_line_list_with_wol_percent(self, cr, uid, production_id,
                                                wo_lot_id, context=None):
        """
        Get a list of consume lines to create with a modification of the
        product qty with work order lot related percentage.
        @param production_id: manufacturing order id.
        @param wo_lot_id: work order lot id.
        @return: a list of consume lines to create.
        """
        context = context or {}
        production_obj = self.pool.get('mrp.production')
        wol_obj = self.pool.get('mrp.workorder.lot')
        production_brw = production_obj.browse(
            cr, uid, production_id, context=context)
        wol_brw = wol_obj.browse(cr, uid, wo_lot_id, context=context)
        # NOTE(review): _get_consume_lines_list() is defined elsewhere in
        # this addon; presumably returns dicts containing at least
        # 'product_id' and 'quantity' keys -- confirm.
        consume_line_list = self._get_consume_lines_list(
            cr, uid, production_id, context=context)
        # Map product_id -> scheduled quantity for this production order.
        sheduled_qty = dict(set(
            [(product_line.product_id.id, product_line.product_qty)
             for product_line in production_brw.product_lines]
        ))
        for consume_line in consume_line_list:
            # Scale the scheduled quantity by the lot's percentage share.
            consume_line.update({
                'quantity': sheduled_qty[consume_line['product_id']]
                * wol_brw.percentage/100.0})
        return consume_line_list

    def action_active_lot(self, cr, uid, ids, context=None):
        """
        Get the work order lot in the consume wizard and update its state
        to picking state.
        @return: True
        """
        context = context or {}
        wol_obj = self.pool.get('mrp.workorder.lot')
        consume = self.browse(cr, uid, ids, context=context)[0]
        wol_obj.write(cr, uid, consume.wo_lot_id.id,
                      {'state': 'picking'}, context=context)
        return True

    def action_consume(self, cr, uid, ids, context=None):
        """
        Overwrite action_consume() method to change the work order lot state
        from picking to open state.
        """
        context = context or {}
        wol_obj = self.pool.get('mrp.workorder.lot')
        # NOTE(review): the super() result is captured but the method
        # returns a kanban action dict instead -- confirm this is the
        # intended flow for both calling contexts.
        res = super(mrp_consume, self).action_consume(
            cr, uid, ids, context=context)
        if context.get('active_model', False) == 'mrp.workorder.lot':
            wol_id = context.get('active_id', False)
            if wol_id:
                wol_obj.write(cr, uid, wol_id, {'state': 'open'},
                              context=context)
            else:
                raise osv.except_osv(
                    _('Error!'),
                    _('No valid operation. no work order lot active_id.')
                )
        #~ refresh kaban view
        view_id, search_view_id, action_help = \
            self._get_kanban_view_data(cr, uid, context=context)
        # NOTE(review): act_obj is assigned but unused here; the lookup is
        # repeated inside _get_kanban_view_data().
        act_obj = self.pool.get('ir.actions.act_window')
        return {
            'view_id': view_id,
            'view_type': 'form',
            'view_mode': 'kanban',
            'views': [(view_id, 'kanban')],
            'search_view_id': search_view_id,
            'res_model': 'mrp.workorder.lot',
            'type': 'ir.actions.act_window',
            'target': 'inlineview',
            'context': {'search_default_wol_picking': True},
            'help': action_help
        }

    def _get_kanban_view_data(self, cr, uid, context=None):
        """
        @return: a tuple (view_id, search_view_id, action_help)
        related to the kaban view for ready to picking work order lots.
        """
        context = context or {}
        ir_obj = self.pool.get('ir.model.data')
        act_obj = self.pool.get('ir.actions.act_window')
        module_name = 'mrp_workorder_lot'
        # Resolve XML-ids declared by this addon into database ids.
        dummy, view_id = ir_obj.get_object_reference(
            cr, uid, module_name, 'mrp_workorder_lot_kanban_view')
        dummy, search_view_id = ir_obj.get_object_reference(
            cr, uid, module_name, 'mrp_wol_search_view')
        dummy, action_window_id = ir_obj.get_object_reference(
            cr, uid, module_name, 'mrp_wol_picking_kanban_action')
        action_help = act_obj.browse(
            cr, uid, action_window_id, context=context).help
        return (view_id, search_view_id, action_help)
class mrp_produce(osv.TransientModel):
    """Produce wizard extension: selects the Work Order Lot being
    finished, stamps serial numbers on the produced moves and closes the
    lot (state -> done).
    """
    _inherit = 'mrp.produce'

    def _get_default_mo_id(self, cr, uid, context=None):
        """
        Return the production id.
        """
        context = context or {}
        wol_obj = self.pool.get('mrp.workorder.lot')
        res = False
        active_id = context.get('active_id', False)
        active_model = context.get('active_model', False)
        if active_id:
            if active_model == 'mrp.production':
                res = active_id
            elif active_model == 'mrp.workorder.lot':
                # Resolve the production order through the selected lot.
                res = wol_obj.browse(
                    cr, uid, active_id, context=context).production_id.id
            else:
                raise osv.except_osv(
                    _('Error!!'),
                    _('This wizard only can be call from the manufacturing'
                      ' order form or the Work Orders by Active Lot menu.'))
        return res

    def _get_default_wo_lot(self, cr, uid, context=None):
        """
        @return: The first Work Order Lot ready to Produce (cardinal order).
        """
        context = context or {}
        res = False
        production_obj = self.pool.get('mrp.production')
        active_id = context.get('active_id', False)
        active_model = context.get('active_model', False)
        if active_id:
            if active_model == 'mrp.production':
                production_id = active_id
                # Only lots in the 'ready' state may be produced.
                wol_brws = production_obj.browse(
                    cr, uid, production_id, context=context).wo_lot_ids
                res = [wol_brw.id
                       for wol_brw in wol_brws
                       if wol_brw.state == 'ready']
            elif active_model == 'mrp.workorder.lot':
                res = [active_id]
            else:
                raise osv.except_osv(
                    _('Error!!'),
                    _('This wizard only can be call from the manufacturing'
                      ' order form or the Work Orders by Active Lot menu.'))
        if not res:
            # NOTE(review): message likely means "You can NOT Produce..."
            # -- runtime string left untouched; fix upstream if confirmed.
            raise osv.except_osv(
                _('Warning!!'),
                _('You can Produce because you have not Ready to Finish'
                  ' Work Order Lots.'))
        return res and res[0] or False

    # Wizard fields (OpenERP 7 ``_columns`` declaration style).
    _columns = {
        'production_id': fields.many2one(
            'mrp.production',
            string='Manufacturing Order',
            help='Manufacturing Order'),
        'wo_lot_id': fields.many2one(
            'mrp.workorder.lot',
            required=True,
            string='Work Orders Lots',
            help='Work Orders Lots.'),
    }

    _defaults = {
        'production_id': _get_default_mo_id,
        'wo_lot_id': _get_default_wo_lot,
    }

    def action_produce(self, cr, uid, ids, context=None):
        """
        Overwrite the action_produce() method to set the Work Order Lot to
        Done state when the lot is produced and also add the serial number to
        produced products moves created.
        """
        context = context or {}
        wol_obj = self.pool.get('mrp.workorder.lot')
        sm_obj = self.pool.get('stock.move')
        #~ create convencianal moves
        # NOTE(review): super() is invoked once per wizard record but
        # always with the full ``ids`` list -- confirm this is intended
        # (it re-processes all records when len(ids) > 1).
        for produce in self.browse(cr, uid, ids, context=context):
            res = super(mrp_produce, self).action_produce(
                cr, uid, ids, context=context)
        #~ add the serial number to the moves
        for produce in self.browse(cr, uid, ids, context=context):
            # Map product_id -> chosen serial (production lot) id.
            prodlot_id = dict(
                [(produce_line.product_id.id, produce_line.prodlot_id.id)
                 for produce_line in produce.produce_line_ids])
            for move in produce.production_id.move_created_ids2:
                sm_obj.write(
                    cr, uid, move.id,
                    {'prodlot_id': prodlot_id[move.product_id.id]},
                    context=context)
            #~ set work order lot to done
            wol_obj.write(cr, uid, produce.wo_lot_id.id, {'state': 'done'},
                          context=context)
        return res

    def _get_produce_line_prodlot_id(self, cr, uid, product_id, context=None):
        """
        Return the first production lot id found for the given product.
        @param product_id: product id.
        """
        # Note: First my intention was to use the move_brw.prodlot_id to get
        # the prodlot_id but this field is not set, I imagine that is set
        # before the move is consumed.
        context = context or {}
        prodlot_obj = self.pool.get('stock.production.lot')
        prodlot_ids = \
            prodlot_obj.search(
                cr, uid, [('product_id', '=', product_id)], context=context) \
            or False
        return prodlot_ids and prodlot_ids[0] or False

    def _get_produce_line_values(self, cr, uid, move_id, context=None):
        """
        return the dictionary that fill the produce lines with the move values.
        @param move_id: move id.
        """
        context = context or {}
        res = super(mrp_produce, self)._get_produce_line_values(
            cr, uid, move_id, context=context)
        # Pre-select a serial number for the line's product, when one exists.
        res.update({'prodlot_id': self._get_produce_line_prodlot_id(
            cr, uid, res['product_id'], context=context)})
        return res
class mrp_produce_line(osv.TransientModel):
    """Produce-line wizard extension adding a serial number selection."""
    _inherit = 'mrp.produce.line'

    # Serial number chosen for this produced line; written onto the
    # resulting stock move by mrp_produce.action_produce().
    _columns = {
        'prodlot_id': fields.many2one(
            'stock.production.lot',
            'Serial Number',
            help='Production Serial Number for Production Lot.'),
    }
| gpl-2.0 |
fintech-circle/edx-platform | openedx/core/djangoapps/auth_exchange/tests/test_forms.py | 30 | 4027 | # pylint: disable=no-member
"""
Tests for OAuth token exchange forms
"""
import unittest
from django.conf import settings
from django.contrib.sessions.middleware import SessionMiddleware
from django.test import TestCase
from django.test.client import RequestFactory
import httpretty
from provider import scope
import social.apps.django_app.utils as social_utils
from third_party_auth.tests.utils import ThirdPartyOAuthTestMixinFacebook, ThirdPartyOAuthTestMixinGoogle
from ..forms import AccessTokenExchangeForm
from .utils import AccessTokenExchangeTestMixin
from .mixins import DOPAdapterMixin, DOTAdapterMixin
class AccessTokenExchangeFormTest(AccessTokenExchangeTestMixin):
    """
    Mixin that defines test cases for AccessTokenExchangeForm.

    NOTE(review): ``self.BACKEND``, ``self.oauth2_adapter``, ``self.user``
    and ``self.oauth_client`` are supplied by the adapter/provider mixins
    combined with this class -- confirm in those mixins.
    """
    def setUp(self):
        super(AccessTokenExchangeFormTest, self).setUp()
        # Build a request carrying a session and a configured
        # python-social-auth strategy/backend, as the form expects.
        self.request = RequestFactory().post("dummy_url")
        redirect_uri = 'dummy_redirect_url'
        SessionMiddleware().process_request(self.request)
        self.request.social_strategy = social_utils.load_strategy(self.request)
        # pylint: disable=no-member
        self.request.backend = social_utils.load_backend(self.request.social_strategy, self.BACKEND, redirect_uri)

    def _assert_error(self, data, expected_error, expected_error_description):
        """Assert form validation fails with exactly the given error."""
        form = AccessTokenExchangeForm(request=self.request, oauth2_adapter=self.oauth2_adapter, data=data)
        self.assertEqual(
            form.errors,
            {"error": expected_error, "error_description": expected_error_description}
        )
        # A failed exchange must not leave a dangling auth pipeline.
        self.assertNotIn("partial_pipeline", self.request.session)

    def _assert_success(self, data, expected_scopes):
        """Assert the form validates and resolves user/client/scopes."""
        form = AccessTokenExchangeForm(request=self.request, oauth2_adapter=self.oauth2_adapter, data=data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data["user"], self.user)
        self.assertEqual(form.cleaned_data["client"], self.oauth_client)
        self.assertEqual(scope.to_names(form.cleaned_data["scope"]), expected_scopes)
# The skip guard is necessary because cms does not implement third party
# auth; these suites only run where ENABLE_THIRD_PARTY_AUTH is on (lms).
@unittest.skipUnless(settings.FEATURES.get("ENABLE_THIRD_PARTY_AUTH"), "third party auth not enabled")
@httpretty.activate
class DOPAccessTokenExchangeFormTestFacebook(
        DOPAdapterMixin,
        AccessTokenExchangeFormTest,
        ThirdPartyOAuthTestMixinFacebook,
        TestCase,
):
    """
    Tests for AccessTokenExchangeForm used with Facebook, tested against
    django-oauth2-provider (DOP).
    """
    pass


# Same skip guard: cms does not implement third party auth.
@unittest.skipUnless(settings.FEATURES.get("ENABLE_THIRD_PARTY_AUTH"), "third party auth not enabled")
@httpretty.activate
class DOTAccessTokenExchangeFormTestFacebook(
        DOTAdapterMixin,
        AccessTokenExchangeFormTest,
        ThirdPartyOAuthTestMixinFacebook,
        TestCase,
):
    """
    Tests for AccessTokenExchangeForm used with Facebook, tested against
    django-oauth-toolkit (DOT).
    """
    pass


# Same skip guard: cms does not implement third party auth.
@unittest.skipUnless(settings.FEATURES.get("ENABLE_THIRD_PARTY_AUTH"), "third party auth not enabled")
@httpretty.activate
class DOPAccessTokenExchangeFormTestGoogle(
        DOPAdapterMixin,
        AccessTokenExchangeFormTest,
        ThirdPartyOAuthTestMixinGoogle,
        TestCase,
):
    """
    Tests for AccessTokenExchangeForm used with Google, tested against
    django-oauth2-provider (DOP).
    """
    pass


# Same skip guard: cms does not implement third party auth.
@unittest.skipUnless(settings.FEATURES.get("ENABLE_THIRD_PARTY_AUTH"), "third party auth not enabled")
@httpretty.activate
class DOTAccessTokenExchangeFormTestGoogle(
        DOTAdapterMixin,
        AccessTokenExchangeFormTest,
        ThirdPartyOAuthTestMixinGoogle,
        TestCase,
):
    """
    Tests for AccessTokenExchangeForm used with Google, tested against
    django-oauth-toolkit (DOT).
    """
    pass
| agpl-3.0 |
Em-Pan/swift | test/unit/proxy/test_mem_server.py | 32 | 1731 | # Copyright (c) 2010-2013 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from test.unit.proxy import test_server
from test.unit.proxy.test_server import teardown
from swift.obj import mem_server
def setup():
    """Module-level test setup hook: run the shared proxy test setup
    against the in-memory object server implementation."""
    test_server.do_setup(mem_server)
class TestController(test_server.TestController):
pass
# Re-run the proxy server suite unchanged against the in-memory backend.
class TestProxyServer(test_server.TestProxyServer):
pass
# Object controller suite against the in-memory backend; the overrides below
# disable tests that depend on on-disk behaviour of the real object server.
class TestObjectController(test_server.TestObjectController):
def test_PUT_no_etag_fallocate(self):
# mem server doesn't call fallocate(), believe it or not
pass
# these tests all go looking in the filesystem
def test_policy_IO(self):
pass
def test_PUT_ec(self):
pass
def test_PUT_ec_multiple_segments(self):
pass
def test_PUT_ec_fragment_archive_etag_mismatch(self):
pass
# Re-run the container controller suite unchanged against the in-memory backend.
class TestContainerController(test_server.TestContainerController):
pass
# Re-run the account controller suite unchanged against the in-memory backend.
class TestAccountController(test_server.TestAccountController):
pass
# Re-run the fake-GET-response account tests unchanged against the in-memory backend.
class TestAccountControllerFakeGetResponse(
test_server.TestAccountControllerFakeGetResponse):
pass
if __name__ == '__main__':
# Mirror the module-level hooks when run directly: build the in-memory
# fixtures, run the suite, and always tear the fixtures down afterwards.
setup()
try:
unittest.main()
finally:
teardown()
| apache-2.0 |
stefanw/dataset | dataset/persistence/table.py | 1 | 18458 | import logging
from hashlib import sha1
from sqlalchemy.sql import and_, expression
from sqlalchemy.sql.expression import ClauseElement
from sqlalchemy.schema import Column, Index
from sqlalchemy import alias, false
from dataset.persistence.util import guess_type, normalize_column_name
from dataset.persistence.util import ResultIter
from dataset.util import DatasetException
log = logging.getLogger(__name__)
class Table(object):
"""Represents a table in a database and exposes common operations."""
def __init__(self, database, table):
"""Initialise the table from database schema."""
self.indexes = dict((i.name, i) for i in table.indexes)
self.database = database
self.table = table
self._is_dropped = False
@property
def columns(self):
"""Get a listing of all columns that exist in the table."""
return list(self.table.columns.keys())
@property
def _normalized_columns(self):
    # Lazily normalised column names; a single-use iterator is all that
    # the membership checks in _prune_row()/_has_column() need.
    return (normalize_column_name(name) for name in self.columns)
def drop(self):
"""
Drop the table from the database.
Delete both the schema and all the contents within it.
Note: the object will raise an Exception if you use it after
dropping the table. If you want to re-create the table, make
sure to get a fresh instance from the :py:class:`Database <dataset.Database>`.
"""
self.database._acquire()
self._is_dropped = True
self.database._tables.pop(self.table.name, None)
self.table.drop(self.database.engine)
self.database._release()
return True
def _check_dropped(self):
if self._is_dropped:
raise DatasetException('the table has been dropped. this object should not be used again.')
def _prune_row(self, row):
    """Return ``row`` restricted to keys that map to existing columns."""
    # Normalise the incoming keys first, then keep only known columns.
    normalized = {normalize_column_name(key): value for key, value in row.items()}
    known = set(self._normalized_columns)
    return {key: value for key, value in normalized.items() if key in known}
def insert(self, row, ensure=None, types={}):
    """
    Add a row (type: dict) by inserting it into the table.

    If ``ensure`` is set (defaulting to the database-wide setting) and any
    key of the row is not a table column, the column is created
    automatically. During column creation, ``types`` is consulted for an
    explicit SQLAlchemy column type; otherwise the type is guessed from
    the row value, defaulting to a simple unicode field.
    ::

        data = dict(title='I am a banana!')
        table.insert(data)

    Returns the inserted row's primary key.
    """
    self._check_dropped()
    if ensure is None:
        ensure = self.database.ensure_schema
    if ensure:
        self._ensure_columns(row, types=types)
    else:
        row = self._prune_row(row)
    result = self.database.executable.execute(self.table.insert(row))
    # Not every statement reports a primary key (e.g. keyless tables).
    if result.inserted_primary_key:
        return result.inserted_primary_key[0]
def insert_ignore(self, row, keys, ensure=None, types={}):
    """
    Add a row (type: dict) into the table if the row does not exist.

    If a row with matching ``keys`` already exists, nothing is inserted
    and ``False`` is returned; otherwise the new row's primary key is
    returned.  (BUG FIX: the docstring previously claimed matching rows
    *would* be added, which is the opposite of the behaviour.)

    Setting ``ensure`` results in automatically creating missing columns,
    i.e., keys of the row that are not table columns.
    During column creation, ``types`` will be checked for a key
    matching the name of a column to be created, and the given
    SQLAlchemy column type will be used. Otherwise, the type is
    guessed from the row value, defaulting to a simple unicode
    field.
    ::

        data = dict(id=10, title='I am a banana!')
        table.insert_ignore(data, ['id'])
    """
    row, existing = self._upsert_pre_check(row, keys, ensure)
    if existing is None:
        return self.insert(row, ensure=ensure, types=types)
    return False
def insert_many(self, rows, chunk_size=1000, ensure=None, types={}):
    """
    Add many rows at a time, committing in chunks of ``chunk_size``.

    This is significantly faster than adding them one by one. See
    :py:meth:`insert() <dataset.Table.insert>` for details on the other
    parameters.
    ::

        rows = [dict(name='Dolly')] * 10000
        table.insert_many(rows)
    """
    if ensure is None:
        ensure = self.database.ensure_schema

    def _flush(batch):
        # One multi-row INSERT per chunk.
        if ensure:
            for item in batch:
                self._ensure_columns(item, types=types)
        else:
            batch = [self._prune_row(item) for item in batch]
        self.table.insert().execute(batch)

    self._check_dropped()
    batch = []
    for count, row in enumerate(rows, start=1):
        batch.append(row)
        if count % chunk_size == 0:
            _flush(batch)
            batch = []
    if batch:
        _flush(batch)
def update(self, row, keys, ensure=None, types={}):
    """
    Update a row in the table and return the number of rows affected.

    The column names listed in ``keys`` act as filters: rows whose values
    match ``row`` on those columns are updated with the remaining values.
    ::

        # update all entries with id matching 10, setting their title columns
        data = dict(id=10, title='I am a banana!')
        table.update(data, ['id'])

    If keys in ``row`` name columns not present in the table, they are
    created based on ``ensure`` and ``types``, matching the behaviour of
    :py:meth:`insert() <dataset.Table.insert>`.
    """
    if not isinstance(keys, (list, tuple)):
        keys = [keys]  # allow a single key name to be passed as a string
    self._check_dropped()
    # Nothing to set (every key is a filter) or nothing to filter by.
    if not keys or len(keys) == len(row):
        return False
    clause = [(key, row.get(key)) for key in keys]
    if ensure is None:
        ensure = self.database.ensure_schema
    if ensure:
        self._ensure_columns(row, types=types)
    else:
        row = self._prune_row(row)
    # The filter columns themselves must not be overwritten.
    values = {key: value for key, value in row.items() if key not in keys}
    try:
        filters = self._args_to_clause(dict(clause))
        statement = self.table.update(filters, values)
        return self.database.executable.execute(statement).rowcount
    except KeyError:
        return 0
def _upsert_pre_check(self, row, keys, ensure):
    # Shared preamble for upsert()/insert_ignore(): normalise ``keys``,
    # prepare the lookup (index creation or row pruning) and return the
    # row together with any existing match.
    if not isinstance(keys, (list, tuple)):
        keys = [keys]
    self._check_dropped()
    if ensure is None:
        ensure = self.database.ensure_schema
    if ensure:
        # Speed up the existence check with an index over the key columns.
        self.create_index(keys)
    else:
        row = self._prune_row(row)
    lookup = {key: row.get(key) for key in keys}
    return row, self.find_one(**lookup)
def upsert(self, row, keys, ensure=None, types={}):
    """
    An UPSERT is a smart combination of insert and update.

    If rows with matching ``keys`` exist they will be updated, otherwise a
    new row is inserted in the table.
    ::

        data = dict(id=10, title='I am a banana!')
        table.upsert(data, ['id'])
    """
    row, existing = self._upsert_pre_check(row, keys, ensure)
    if existing is None:
        return self.insert(row, ensure=ensure, types=types)
    updated = self.update(row, keys, ensure=ensure, types=types)
    try:
        # Exactly one row touched: report its id; otherwise report success.
        return existing['id'] if updated == 1 else updated > 0
    except KeyError:
        # The matched row has no 'id' column.
        return updated > 0
def delete(self, *_clauses, **_filter):
    """
    Delete rows from the table and return whether any were removed.

    Keyword arguments can be used to add column-based filters. The filter
    criterion will always be equality:

    .. code-block:: python

        table.delete(place='Berlin')

    If no arguments are given, all records are deleted.
    """
    self._check_dropped()
    if _filter:
        statement = self.table.delete(
            self._args_to_clause(_filter, clauses=_clauses))
    else:
        # No filters given: wipe the whole table.
        statement = self.table.delete()
    return self.database.executable.execute(statement).rowcount > 0
def _has_column(self, column):
# True if ``column`` (after name normalisation) exists on the table.
return normalize_column_name(column) in self._normalized_columns
def _ensure_columns(self, row, types={}):
    # Create any columns referenced by ``row`` that do not yet exist,
    # preserving the key order of the inserted row.
    for name in row.keys():
        if self._has_column(name):
            continue
        # An explicit type mapping wins; otherwise infer from the value.
        if name in types:
            col_type = types[name]
        else:
            col_type = guess_type(row[name])
        log.debug("Creating column: %s (%s) on %r" % (name,
                                                      col_type, self.table.name))
        self.create_column(name, col_type)
def _args_to_clause(self, args, ensure=None, clauses=()):
    # Build an AND-combined SQLAlchemy clause from keyword equality
    # filters plus any pre-built clause elements.
    if ensure is None:
        ensure = self.database.ensure_schema
    if ensure:
        self._ensure_columns(args)
    parts = list(clauses)
    for key, value in args.items():
        if not self._has_column(key):
            # Unknown column: the filter can never match anything.
            parts.append(false())
        elif isinstance(value, (list, tuple)):
            parts.append(self.table.c[key].in_(value))
        else:
            parts.append(self.table.c[key] == value)
    return and_(*parts)
def create_column(self, name, type):
    """
    Explicitly create a new column ``name`` of a specified type.

    ``type`` must be a `SQLAlchemy column type <http://docs.sqlalchemy.org/en/rel_0_8/core/types.html>`_.
    ::

        table.create_column('created_at', sqlalchemy.DateTime)
    """
    self._check_dropped()
    self.database._acquire()
    try:
        if normalize_column_name(name) in self._normalized_columns:
            return  # column already present; nothing to do
        self.database.op.add_column(
            self.table.name, Column(name, type), self.table.schema)
        # Refresh the reflected table so the new column becomes visible.
        self.table = self.database.update_table(self.table.name)
    finally:
        self.database._release()
def drop_column(self, name):
    """
    Drop the column ``name``.
    ::

        table.drop_column('created_at')
    """
    if self.database.engine.dialect.name == 'sqlite':
        raise NotImplementedError("SQLite does not support dropping columns.")
    self._check_dropped()
    self.database._acquire()
    try:
        if name in self.table.columns.keys():
            self.database.op.drop_column(
                self.table.name, name, self.table.schema)
            # Refresh the reflected table so the column disappears.
            self.table = self.database.update_table(self.table.name)
    finally:
        self.database._release()
def create_index(self, columns, name=None, **kw):
    """
    Create an index to speed up queries on a table.

    If no ``name`` is given, a deterministic name is derived from the
    table name and a hash of the column list.  (Doc fix: the name is not
    random.)
    ::

        table.create_index(['name', 'country'])
    """
    self._check_dropped()
    if not name:
        sig = '||'.join(columns)
        # This is a work-around for a bug in <=0.6.1 which would create
        # indexes based on hash() rather than a proper hash.
        key = abs(hash(sig))
        name = 'ix_%s_%s' % (self.table.name, key)
        if name in self.indexes:
            return self.indexes[name]
        key = sha1(sig.encode('utf-8')).hexdigest()[:16]
        name = 'ix_%s_%s' % (self.table.name, key)
    if name in self.indexes:
        return self.indexes[name]
    try:
        self.database._acquire()
        columns = [self.table.c[c] for c in columns]
        idx = Index(name, *columns, **kw)
        idx.create(self.database.engine)
    except Exception:
        # BUG FIX: was a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit.  Index creation failures are still
        # tolerated (idx -> None), preserving the best-effort behaviour.
        idx = None
    finally:
        self.database._release()
    self.indexes[name] = idx
    return idx
def find_one(self, *args, **kwargs):
"""
Get a single result from the table.
Works just like :py:meth:`find() <dataset.Table.find>` but returns one result, or None.
::
row = table.find_one(country='United States')
"""
kwargs['_limit'] = 1
iterator = self.find(*args, **kwargs)
try:
return next(iterator)
except StopIteration:
return None
def _args_to_order_by(self, order_by):
if order_by[0] == '-':
return self.table.c[order_by[1:]].desc()
else:
return self.table.c[order_by].asc()
def find(self, *_clauses, **kwargs):
"""
Perform a simple search on the table.
Simply pass keyword arguments as ``filter``.
::
results = table.find(country='France')
results = table.find(country='France', year=1980)
Using ``_limit``::
# just return the first 10 rows
results = table.find(country='France', _limit=10)
You can sort the results by single or multiple columns. Append a minus sign
to the column name for descending order::
# sort results by a column 'year'
results = table.find(country='France', order_by='year')
# return all rows sorted by multiple columns (by year in descending order)
results = table.find(order_by=['country', '-year'])
For more complex queries, please use :py:meth:`db.query() <dataset.Database.query>`
instead.
"""
# Reserved keyword arguments are popped off first; whatever remains in
# ``kwargs`` is treated as a column equality filter.
_limit = kwargs.pop('_limit', None)
_offset = kwargs.pop('_offset', 0)
_step = kwargs.pop('_step', 5000)
order_by = kwargs.pop('order_by', 'id')
return_count = kwargs.pop('return_count', False)
return_query = kwargs.pop('return_query', False)
_filter = kwargs
self._check_dropped()
if not isinstance(order_by, (list, tuple)):
order_by = [order_by]
# Ordering requests for columns that do not exist are silently dropped.
order_by = [o for o in order_by if (o.startswith('-') and o[1:] or o) in self.table.columns]
order_by = [self._args_to_order_by(o) for o in order_by]
args = self._args_to_clause(_filter, ensure=False, clauses=_clauses)
# query total number of rows first
count_query = alias(self.table.select(whereclause=args, limit=_limit, offset=_offset),
name='count_query_alias').count()
rp = self.database.executable.execute(count_query)
total_row_count = rp.fetchone()[0]
if return_count:
return total_row_count
if _limit is None:
_limit = total_row_count
# ``_step`` chunks result iteration; 0/False/None disables chunking.
if _step is None or _step is False or _step == 0:
_step = total_row_count
query = self.table.select(whereclause=args, limit=_limit,
offset=_offset, order_by=order_by)
if return_query:
return query
# Lazily iterate the result set in ``_step``-sized fetches.
return ResultIter(self.database.executable.execute(query),
row_type=self.database.row_type, step=_step)
def count(self, *args, **kwargs):
"""Return the count of results for the given filter set."""
# Delegates to find() with return_count=True, so all find() filters work.
return self.find(*args, return_count=True, **kwargs)
def __len__(self):
"""Return the number of rows in the table."""
# Enables ``len(table)``.
return self.count()
def distinct(self, *args, **_filter):
    """
    Return all rows of a table, removing duplicates over the named columns.

    Internally this creates a `DISTINCT statement <http://www.w3schools.com/sql/sql_distinct.asp>`_.
    ::

        # returns only one row per year, ignoring the rest
        table.distinct('year')
        # works with multiple columns, too
        table.distinct('year', 'country')
        # you can also combine this with a filter
        table.distinct('year', country='China')
    """
    self._check_dropped()
    filters = []
    columns = []
    try:
        for arg in args:
            # Pre-built clause elements act as filters; strings name columns.
            if isinstance(arg, ClauseElement):
                filters.append(arg)
            else:
                columns.append(self.table.c[arg])
        for column, value in _filter.items():
            filters.append(self.table.c[column] == value)
    except KeyError:
        # An unknown column name can never match anything.
        return []
    query = expression.select(columns, distinct=True,
                              whereclause=and_(*filters),
                              order_by=[c.asc() for c in columns])
    return self.database.query(query)
def __getitem__(self, item):
"""
Get distinct column values.
This is an alias for distinct which allows the table to be queried as using
square bracket syntax.
::
# Same as distinct:
print list(table['year'])
"""
if not isinstance(item, tuple):
item = item,
return self.distinct(*item)
def all(self):
"""
Return all rows of the table as simple dictionaries.
This is simply a shortcut to *find()* called with no arguments.
::
rows = table.all()
"""
# No filters: find() returns every row.
return self.find()
def __iter__(self):
"""
Return all rows of the table as simple dictionaries.
Allows for iterating over all rows in the table without explicetly
calling :py:meth:`all() <dataset.Table.all>`.
::
for row in table:
print(row)
"""
# all() returns an iterable result wrapper, so it can be handed back directly.
return self.all()
def __repr__(self):
"""Get table representation."""
# e.g. <Table(users)>
return '<Table(%s)>' % self.table.name
| mit |
mmauroy/SickRage | lib/sqlalchemy/sql/dml.py | 78 | 29493 | # sql/dml.py
# Copyright (C) 2009-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Provide :class:`.Insert`, :class:`.Update` and :class:`.Delete`.
"""
from .base import Executable, _generative, _from_objects, DialectKWArgs
from .elements import ClauseElement, _literal_as_text, Null, and_, _clone
from .selectable import _interpret_as_from, _interpret_as_select, HasPrefixes
from .. import util
from .. import exc
class UpdateBase(DialectKWArgs, HasPrefixes, Executable, ClauseElement):
"""Form the base for ``INSERT``, ``UPDATE``, and ``DELETE`` statements.
"""
__visit_name__ = 'update_base'
# DML statements autocommit by default when executed outside a transaction.
_execution_options = \
Executable._execution_options.union({'autocommit': True})
# (selectable, dialect_name) -> hint text; populated by with_hint().
_hints = util.immutabledict()
_prefixes = ()
def _process_colparams(self, parameters):
    """Normalise VALUES parameters into ``(params, is_multi)`` form.

    A positional tuple/list of values is converted to a dict keyed by the
    table's columns; a list of such rows selects multi-parameter mode,
    which not every construct supports.
    """
    def to_dict(param):
        if not isinstance(param, (list, tuple)):
            return param
        # Positional values: zip against the table's column order.
        return dict(
            (col.key, value)
            for col, value in zip(self.table.c, param)
        )

    is_multi = (isinstance(parameters, (list, tuple)) and
                bool(parameters) and
                isinstance(parameters[0], (list, tuple, dict)))
    if not is_multi:
        return to_dict(parameters), False
    if not self._supports_multi_parameters:
        raise exc.InvalidRequestError(
            "This construct does not support "
            "multiple parameter sets.")
    return [to_dict(param) for param in parameters], True
def params(self, *arg, **kw):
"""Set the parameters for the statement.
This method raises ``NotImplementedError`` on the base class,
and is overridden by :class:`.ValuesBase` to provide the
SET/VALUES clause of UPDATE and INSERT.
"""
raise NotImplementedError(
"params() is not supported for INSERT/UPDATE/DELETE statements."
" To set the values for an INSERT or UPDATE statement, use"
" stmt.values(**parameters).")
def bind(self):
"""Return a 'bind' linked to this :class:`.UpdateBase`
or a :class:`.Table` associated with it.
"""
# Fall back to the target table's bind when none was set explicitly.
return self._bind or self.table.bind
def _set_bind(self, bind):
self._bind = bind
# Read/write property assembled from the two accessors above.
bind = property(bind, _set_bind)
@_generative
def returning(self, *cols):
    """Add a :term:`RETURNING` or equivalent clause to this statement.

    e.g.::

        stmt = table.update().\\
            where(table.c.data == 'value').\\
            values(status='X').\\
            returning(table.c.server_flag, table.c.updated_timestamp)

        for server_flag, updated_timestamp in connection.execute(stmt):
            print(server_flag, updated_timestamp)

    The given column expressions should derive from the statement's
    target table; plain :class:`.Column` objects as well as labelled SQL
    expressions are accepted.  For INSERT and UPDATE the returned values
    are the newly inserted/updated ones; for DELETE, those of the deleted
    rows.  They are available from the result set via
    :meth:`.ResultProxy.fetchone` and similar, with SQLAlchemy
    approximating the behaviour at the result level for DBAPIs without
    native support (i.e. cx_oracle).

    Not all databases/DBAPIs support RETURNING, and support varies
    greatly between those that do (e.g. restrictions around
    executemany()); an exception is raised upon compilation and/or
    execution for backends without support.  Consult the documentation
    for the database in use.

    .. seealso::

        :meth:`.ValuesBase.return_defaults` - an alternative method
        tailored towards efficient fetching of server-side defaults and
        triggers for single-row INSERTs or UPDATEs.

    """
    self._returning = cols
@_generative
def with_hint(self, text, selectable=None, dialect_name="*"):
    """Add a table hint for a single table to this
    INSERT/UPDATE/DELETE statement.

    .. note::

        :meth:`.UpdateBase.with_hint` currently applies only to
        Microsoft SQL Server. For MySQL INSERT/UPDATE/DELETE hints, use
        :meth:`.UpdateBase.prefix_with`.

    The hint text is rendered in the backend-appropriate location,
    relative to the statement's target :class:`.Table` or to the
    ``selectable`` given.  E.g. a SQL Server-only hint::

        mytable.insert().with_hint("WITH (PAGLOCK)", dialect_name="mssql")

    .. versionadded:: 0.7.6

    :param text: Text of the hint.
    :param selectable: optional :class:`.Table` within an UPDATE or
        DELETE FROM clause to be the subject of the hint - applies only
        to certain backends; defaults to the statement's target table.
    :param dialect_name: defaults to ``*``; if set to a particular
        dialect name, the hint applies only when that dialect is in use.
    """
    target = self.table if selectable is None else selectable
    # _hints is an immutable dict; union() produces an updated copy.
    self._hints = self._hints.union({(target, dialect_name): text})
class ValuesBase(UpdateBase):
"""Supplies support for :meth:`.ValuesBase.values` to
INSERT and UPDATE constructs."""
__visit_name__ = 'values_base'
# Subclasses opt in to multi-row VALUES clauses (Insert does).
_supports_multi_parameters = False
_has_multi_parameters = False
# Populated by Insert.from_select() for INSERT..SELECT statements.
select = None
def __init__(self, table, values, prefixes):
    """Store the target table and pre-process any initial VALUES."""
    self.table = _interpret_as_from(table)
    (self.parameters,
     self._has_multi_parameters) = self._process_colparams(values)
    if prefixes:
        self._setup_prefixes(prefixes)
@_generative
def values(self, *args, **kwargs):
    """specify a fixed VALUES clause for an INSERT statement, or the SET
    clause for an UPDATE.

    Values may be given as keyword arguments mapping column names to
    values, or as a single positional dictionary or tuple (a tuple must
    contain the same number of entries as the target :class:`.Table` has
    columns)::

        users.insert().values(name="some name")
        users.update().where(users.c.id==5).values(name="some name")
        users.insert().values({"name": "some name"})
        users.insert().values((5, "some name"))

    Backends that support multiply-rendered VALUES (SQLite, Postgresql,
    MySQL) additionally accept a list of dictionaries/tuples::

        users.insert().values([
            {"name": "some name"},
            {"name": "some other name"},
        ])

    Note this is *not* the same as DBAPI ``executemany()`` via
    :meth:`.Connection.execute` — it renders a single multi-row INSERT.
    Multiple calls to this method accumulate: single-dictionary
    parameters merge key-by-key, multi-row lists are extended.  Mixing
    the two styles, or combining a list with keyword arguments, raises
    an error, as does calling this on a construct that already inserts
    from a SELECT.

    .. versionadded:: 0.8
        Support for multiple-VALUES INSERT statements.

    .. seealso::

        :ref:`inserts_and_updates` - SQL Expression
        Language Tutorial

        :func:`~.expression.insert` - produce an ``INSERT`` statement

        :func:`~.expression.update` - produce an ``UPDATE`` statement

    """
    if self.select is not None:
        raise exc.InvalidRequestError(
            "This construct already inserts from a SELECT")
    if self._has_multi_parameters and kwargs:
        raise exc.InvalidRequestError(
            "This construct already has multiple parameter sets.")
    if args:
        if len(args) > 1:
            raise exc.ArgumentError(
                "Only a single dictionary/tuple or list of "
                "dictionaries/tuples is accepted positionally.")
        positional = args[0]
    else:
        positional = {}
    if self.parameters is None:
        # First call: adopt the processed parameters directly.
        self.parameters, self._has_multi_parameters = \
            self._process_colparams(positional)
    elif self._has_multi_parameters:
        # Already multi-row: only further multi-row lists may be appended.
        self.parameters = list(self.parameters)
        params, self._has_multi_parameters = \
            self._process_colparams(positional)
        if not self._has_multi_parameters:
            raise exc.ArgumentError(
                "Can't mix single-values and multiple values "
                "formats in one statement")
        self.parameters.extend(params)
    else:
        # Single-row: merge the new values over a copy of the old ones.
        self.parameters = self.parameters.copy()
        params, self._has_multi_parameters = \
            self._process_colparams(positional)
        if self._has_multi_parameters:
            raise exc.ArgumentError(
                "Can't mix single-values and multiple values "
                "formats in one statement")
        self.parameters.update(params)
    if kwargs:
        if self._has_multi_parameters:
            raise exc.ArgumentError(
                "Can't pass kwargs and multiple parameter sets "
                "simultaenously")
        else:
            self.parameters.update(kwargs)
@_generative
def return_defaults(self, *cols):
    """Make use of a :term:`RETURNING` clause for the purpose
    of fetching server-side expressions and defaults.

    E.g.::

        stmt = table.insert().values(data='newdata').return_defaults()
        result = connection.execute(stmt)
        server_created_at = result.returned_defaults['created_at']

    On backends supporting RETURNING, all server-generated column values
    are added to the RETURNING clause (provided
    :meth:`.UpdateBase.returning` is not used simultaneously) and exposed
    through the :attr:`.ResultProxy.returned_defaults` dictionary, keyed
    by :class:`.Column` object as well as by ``.key``.

    Differences from :meth:`.UpdateBase.returning`:

    1. Intended only for "ORM-style" single-row INSERT/UPDATE statements;
       the returned row is consumed implicitly.
    2. Compatible with "implicit returning" of auto-generated primary
       keys, which :meth:`.UpdateBase.returning` circumvents.
    3. Works on any backend: without RETURNING support the feature is
       skipped and :attr:`.ResultProxy.returned_defaults` is ``None``.

    Used by the ORM to implement the ``eager_defaults`` feature of
    :func:`.mapper`.

    :param cols: optional list of column key names or :class:`.Column`
        objects.  If omitted, all server-evaluated column expressions are
        added to the returning list.

    .. versionadded:: 0.9.0

    .. seealso::

        :meth:`.UpdateBase.returning`

        :attr:`.ResultProxy.returned_defaults`

    """
    # Empty ``cols`` means "all server-side defaults", flagged as True.
    self._return_defaults = cols or True
class Insert(ValuesBase):
"""Represent an INSERT construct.
The :class:`.Insert` object is created using the
:func:`~.expression.insert()` function.
.. seealso::
:ref:`coretutorial_insert_expressions`
"""
__visit_name__ = 'insert'
# INSERT supports multi-row VALUES clauses on capable backends.
_supports_multi_parameters = True
def __init__(self,
             table,
             values=None,
             inline=False,
             bind=None,
             prefixes=None,
             returning=None,
             return_defaults=False,
             **dialect_kw):
    """Construct an :class:`.Insert` object.

    Similar functionality is available via the
    :meth:`~.TableClause.insert` method on
    :class:`~.schema.Table`.

    :param table: :class:`.TableClause` which is the subject of the
        insert.
    :param values: collection of values to be inserted; see
        :meth:`.Insert.values` for a description of allowed formats.
        Can be omitted entirely, in which case the VALUES clause is
        rendered at execution time from the parameters passed to
        :meth:`.Connection.execute`.
    :param inline: if True, SQL defaults will be compiled 'inline' into
        the statement and not pre-executed.

    If both ``values`` and compile-time bind parameters are present, the
    compile-time bind parameters override the information specified
    within ``values`` on a per-key basis.  Keys within ``values`` can be
    either :class:`~sqlalchemy.schema.Column` objects or their string
    identifiers; each value may be a literal, a Column object, or a
    SELECT statement (a SELECT referencing this table is correlated
    against the INSERT).

    .. seealso::

        :ref:`coretutorial_insert_expressions` - SQL Expression Tutorial

        :ref:`inserts_and_updates` - SQL Expression Tutorial

    """
    ValuesBase.__init__(self, table, values, prefixes)
    self._bind = bind
    self.select = self.select_names = None
    self.inline = inline
    self._returning = returning
    self._validate_dialect_kwargs(dialect_kw)
    self._return_defaults = return_defaults
def get_children(self, **kwargs):
    # The embedded SELECT (INSERT..FROM SELECT) is the only child element.
    return () if self.select is None else (self.select,)
@_generative
def from_select(self, names, select):
    """Return a new :class:`.Insert` construct which represents
    an ``INSERT...FROM SELECT`` statement.

    e.g.::

        sel = select([table1.c.a, table1.c.b]).where(table1.c.c > 5)
        ins = table2.insert().from_select(['a', 'b'], sel)

    :param names: a sequence of string column names or :class:`.Column`
        objects representing the target columns.
    :param select: a :func:`.select` construct, :class:`.FromClause` or
        other construct resolving into a :class:`.FromClause` (such as an
        ORM :class:`.Query` object).  The order of its columns should
        correspond to ``names``; this is not checked before passing along
        to the database, which would normally raise if the lists don't
        correspond.

    .. note::

        Some backends (e.g. Oracle) require the :class:`.Insert` to be
        constructed with ``inline=True`` so that ``RETURNING`` is not
        implicitly rendered for the ``INSERT..SELECT`` combination::

            ins = table2.insert(inline=True).from_select(['a', 'b'], sel)

    .. note::

        A SELECT..INSERT construct in SQL has no VALUES clause, so
        Python-side column defaults (see
        :ref:`metadata_defaults_toplevel`) will **not** take effect here.

    .. versionadded:: 0.8.3

    """
    if self.parameters:
        raise exc.InvalidRequestError(
            "This construct already inserts value expressions")
    # Placeholder Null() parameters record the target column list; the
    # SELECT supplies the actual values.
    self.parameters, self._has_multi_parameters = \
        self._process_colparams(dict((n, Null()) for n in names))
    self.select_names = names
    self.select = _interpret_as_select(select)
def _copy_internals(self, clone=_clone, **kw):
# TODO: coverage
# Deep-copy mutable state so generative copies do not share it.
self.parameters = self.parameters.copy()
if self.select is not None:
self.select = _clone(self.select)
class Update(ValuesBase):
"""Represent an Update construct.
The :class:`.Update` object is created using the :func:`update()` function.
"""
# UPDATE inherits single-row VALUES handling from ValuesBase.
__visit_name__ = 'update'
def __init__(self,
table,
whereclause=None,
values=None,
inline=False,
bind=None,
prefixes=None,
returning=None,
return_defaults=False,
**dialect_kw):
"""Construct an :class:`.Update` object.
E.g.::
from sqlalchemy import update
stmt = update(users).where(users.c.id==5).\\
values(name='user #5')
Similar functionality is available via the
:meth:`~.TableClause.update` method on
:class:`.Table`::
stmt = users.update().\\
where(users.c.id==5).\\
values(name='user #5')
:param table: A :class:`.Table` object representing the database
table to be updated.
:param whereclause: Optional SQL expression describing the ``WHERE``
condition of the ``UPDATE`` statement. Modern applications
may prefer to use the generative :meth:`~Update.where()`
method to specify the ``WHERE`` clause.
The WHERE clause can refer to multiple tables.
For databases which support this, an ``UPDATE FROM`` clause will
be generated, or on MySQL, a multi-table update. The statement
will fail on databases that don't have support for multi-table
update statements. A SQL-standard method of referring to
additional tables in the WHERE clause is to use a correlated
subquery::
users.update().values(name='ed').where(
users.c.name==select([addresses.c.email_address]).\\
where(addresses.c.user_id==users.c.id).\\
as_scalar()
)
.. versionchanged:: 0.7.4
The WHERE clause can refer to multiple tables.
:param values:
Optional dictionary which specifies the ``SET`` conditions of the
``UPDATE``. If left as ``None``, the ``SET``
conditions are determined from those parameters passed to the
statement during the execution and/or compilation of the
statement. When compiled standalone without any parameters,
the ``SET`` clause generates for all columns.
Modern applications may prefer to use the generative
:meth:`.Update.values` method to set the values of the
UPDATE statement.
:param inline:
if True, SQL defaults present on :class:`.Column` objects via
the ``default`` keyword will be compiled 'inline' into the statement
and not pre-executed. This means that their values will not
be available in the dictionary returned from
:meth:`.ResultProxy.last_updated_params`.
If both ``values`` and compile-time bind parameters are present, the
compile-time bind parameters override the information specified
within ``values`` on a per-key basis.
The keys within ``values`` can be either :class:`.Column`
objects or their string identifiers (specifically the "key" of the
:class:`.Column`, normally but not necessarily equivalent to
its "name"). Normally, the
:class:`.Column` objects used here are expected to be
part of the target :class:`.Table` that is the table
to be updated. However when using MySQL, a multiple-table
UPDATE statement can refer to columns from any of
the tables referred to in the WHERE clause.
The values referred to in ``values`` are typically:
* a literal data value (i.e. string, number, etc.)
* a SQL expression, such as a related :class:`.Column`,
a scalar-returning :func:`.select` construct,
etc.
When combining :func:`.select` constructs within the values
clause of an :func:`.update` construct,
the subquery represented by the :func:`.select` should be
*correlated* to the parent table, that is, providing criterion
which links the table inside the subquery to the outer table
being updated::
users.update().values(
name=select([addresses.c.email_address]).\\
where(addresses.c.user_id==users.c.id).\\
as_scalar()
)
.. seealso::
:ref:`inserts_and_updates` - SQL Expression
Language Tutorial
"""
ValuesBase.__init__(self, table, values, prefixes)
self._bind = bind
self._returning = returning
if whereclause is not None:
self._whereclause = _literal_as_text(whereclause)
else:
self._whereclause = None
self.inline = inline
self._validate_dialect_kwargs(dialect_kw)
self._return_defaults = return_defaults
def get_children(self, **kwargs):
if self._whereclause is not None:
return self._whereclause,
else:
return ()
def _copy_internals(self, clone=_clone, **kw):
# TODO: coverage
self._whereclause = clone(self._whereclause, **kw)
self.parameters = self.parameters.copy()
@_generative
def where(self, whereclause):
"""return a new update() construct with the given expression added to
its WHERE clause, joined to the existing clause via AND, if any.
"""
if self._whereclause is not None:
self._whereclause = and_(self._whereclause,
_literal_as_text(whereclause))
else:
self._whereclause = _literal_as_text(whereclause)
@property
def _extra_froms(self):
# TODO: this could be made memoized
# if the memoization is reset on each generative call.
froms = []
seen = set([self.table])
if self._whereclause is not None:
for item in _from_objects(self._whereclause):
if not seen.intersection(item._cloned_set):
froms.append(item)
seen.update(item._cloned_set)
return froms
class Delete(UpdateBase):
    """Represent a DELETE construct.

    The :class:`.Delete` object is created using the :func:`delete()` function.

    """

    __visit_name__ = 'delete'

    def __init__(self,
                 table,
                 whereclause=None,
                 bind=None,
                 returning=None,
                 prefixes=None,
                 **dialect_kw):
        """Construct :class:`.Delete` object.

        Similar functionality is available via the
        :meth:`~.TableClause.delete` method on
        :class:`~.schema.Table`.

        :param table: The table to be updated.

        :param whereclause: A :class:`.ClauseElement` describing the ``WHERE``
         condition of the ``UPDATE`` statement. Note that the
         :meth:`~Delete.where()` generative method may be used instead.

        .. seealso::

            :ref:`deletes` - SQL Expression Tutorial

        """
        self._bind = bind
        self.table = _interpret_as_from(table)
        self._returning = returning

        if prefixes:
            self._setup_prefixes(prefixes)

        # Normalize the WHERE clause; None means "no WHERE clause".
        if whereclause is not None:
            self._whereclause = _literal_as_text(whereclause)
        else:
            self._whereclause = None

        self._validate_dialect_kwargs(dialect_kw)

    def get_children(self, **kwargs):
        # The WHERE clause is the only child element exposed to visitors.
        if self._whereclause is not None:
            return self._whereclause,
        else:
            return ()

    @_generative
    def where(self, whereclause):
        """Add the given WHERE clause to a newly returned delete construct."""
        # NOTE: @_generative operates on a copy of self.
        if self._whereclause is not None:
            self._whereclause = and_(self._whereclause,
                                     _literal_as_text(whereclause))
        else:
            self._whereclause = _literal_as_text(whereclause)

    def _copy_internals(self, clone=_clone, **kw):
        # TODO: coverage
        # Clone the WHERE clause so a generative copy is independent.
        self._whereclause = clone(self._whereclause, **kw)
| gpl-3.0 |
Damnever/pigar | setup.py | 1 | 2469 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import codecs

from setuptools import setup, find_packages

# Extract the version string from pigar/version.py without importing the
# package (an import could fail before dependencies are installed).
with open('pigar/version.py', 'r') as f:
    _version_match = re.search(
        r'__version__\s*=\s*[\'"]([^\'"]*)[\'"]', f.read(), re.M
    )
# BUG FIX: the original called .group(1) directly, so a missing
# ``__version__`` line raised AttributeError instead of the intended
# RuntimeError.  Check the match object (and an empty capture) first.
if _version_match is None or not _version_match.group(1):
    raise RuntimeError('Cannot find version information')
version = _version_match.group(1)

# Short PyPI description; the changelog is appended below.
long_description = """
[![](https://img.shields.io/github/workflow/status/damnever/pigar/pigar%20tests?style=flat-square)](https://github.com/damnever/pigar/actions)
- Generating requirements.txt for Python project.
- Handling the difference between different Python versions.
- Jupyter notebook (`*.ipynb`) support.
- Including the import statements from `exec`/`eval`, doctest of docstring, etc.
- Searching packages by import name.
- Checking the latest versions for Python project.
You can find more information on [GitHub](https://github.com/damnever/pigar).
"""  # noqa

with codecs.open('CHANGELOG.md', encoding='utf-8') as f:
    change_logs = f.read()

install_requires = [
    'colorama>=0.3.9', 'requests>=2.20.0', 'nbformat>=4.4.0',
    'futures;python_version<"3.2"'
]

setup(
    name='pigar',
    version=version,
    description=(
        'A fantastic tool to generate requirements for your'
        ' Python project, and more than that.'
    ),
    long_description=long_description + '\n\n' + change_logs,
    long_description_content_type="text/markdown",
    url='https://github.com/damnever/pigar',
    author='damnever',
    author_email='dxc.wolf@gmail.com',
    license='The BSD 3-Clause License',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Topic :: Utilities',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3',
    ],
    keywords='requirements.txt,automation,tool,module-search',
    packages=find_packages(),
    install_requires=install_requires,
    include_package_data=True,
    entry_points={'console_scripts': [
        'pigar=pigar.__main__:main',
    ]},
)
| bsd-3-clause |
a1ezzz/wasp-backup | wasp_backup/apps.py | 1 | 7497 | # -*- coding: utf-8 -*-
# wasp_backup/apps.py
#
# Copyright (C) 2017 the wasp-backup authors and contributors
# <see AUTHORS file>
#
# This file is part of wasp-backup.
#
# wasp-backup is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wasp-backup is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with wasp-backup. If not, see <http://www.gnu.org/licenses/>.
# TODO: document the code
# TODO: write tests for the code
# noinspection PyUnresolvedReferences
from wasp_backup.version import __author__, __version__, __credits__, __license__, __copyright__, __email__
# noinspection PyUnresolvedReferences
from wasp_backup.version import __status__
from wasp_general.task.scheduler.task_source import WInstantTaskSource
from wasp_general.cli.formatter import na_formatter
from wasp_launcher.core import WAppsGlobals
from wasp_launcher.core_scheduler import WSchedulerTaskSourceInstaller, WLauncherTaskSource
from wasp_launcher.core_broker import WCommandKit, WBrokerCommand, WResponsiveBrokerCommand
from wasp_backup.core import WBackupMeta
from wasp_backup.file_backup import WFileBackupCommand
from wasp_backup.check import WCheckBackupCommand
from wasp_backup.program_backup import WProgramBackupCommand
from wasp_backup.retention import WRetentionBackupCommand
class WResponsiveCreateBackupCommand(WResponsiveBrokerCommand):
    # Broker command that runs a file backup as a scheduled task.

    class FileBackupCommand(WFileBackupCommand, WBrokerCommand):
        # Adapts WFileBackupCommand to the broker-command interface.

        def __init__(self):
            WFileBackupCommand.__init__(self, WAppsGlobals.log)
            WBrokerCommand.__init__(
                self, self.command_token(), *self.argument_descriptors(),
                relationships=self.relationships()
            )

        def brief_description(self):
            # One-line description shown by the broker help.
            return self.__description__

    class ScheduledTask(WResponsiveBrokerCommand.ScheduledTask):

        def state_details(self):
            archiver = self.basic_command().archiver()
            # BUG FIX: the original test was inverted ("is not None"), which
            # reported "not running" while archiving was in progress and
            # then dereferenced a None archiver (AttributeError) otherwise.
            if archiver is None:
                return 'Archiving is not running. May be finalizing'
            result = 'Archiving file: %s' % na_formatter(archiver.last_file())
            details = archiver.archiving_details()
            if details is not None:
                result += '\n' + details
            return result

        def thread_started(self):
            # Hand the scheduler's stop event to the command before starting.
            self.basic_command().stop_event(self.stop_event())
            WResponsiveBrokerCommand.ScheduledTask.thread_started(self)

    __task_source_name__ = WBackupMeta.__task_source_name__
    __scheduler_instance__ = WBackupMeta.__scheduler_instance_name__

    def __init__(self):
        WResponsiveBrokerCommand.__init__(
            self, WResponsiveCreateBackupCommand.FileBackupCommand(),
            scheduled_task_cls=WResponsiveCreateBackupCommand.ScheduledTask
        )
class WResponsiveCheckBackupCommand(WResponsiveBrokerCommand):
    # Broker command that verifies an existing backup via the scheduler.

    class CheckBackupCommand(WCheckBackupCommand, WBrokerCommand):
        # Adapts WCheckBackupCommand to the broker-command interface.

        def __init__(self):
            WCheckBackupCommand.__init__(self, WAppsGlobals.log)
            WBrokerCommand.__init__(
                self, self.command_token(), *self.argument_descriptors(),
                relationships=self.relationships()
            )

        def brief_description(self):
            # One-line description shown by the broker help.
            return self.__description__

    class ScheduledTask(WResponsiveBrokerCommand.ScheduledTask):

        def state_details(self):
            # Report check progress; fall back to a generic message when no
            # checker is active or it has no details yet.
            checker = self.basic_command().checker()
            if checker is not None:
                details = checker.check_details()
                if details is not None:
                    return '\n' + details
            return 'Checking is not running. May be finalizing'

        def thread_started(self):
            # Hand the scheduler's stop event to the command before starting.
            self.basic_command().stop_event(self.stop_event())
            WResponsiveBrokerCommand.ScheduledTask.thread_started(self)

    __task_source_name__ = WBackupMeta.__task_source_name__
    __scheduler_instance__ = WBackupMeta.__scheduler_instance_name__

    def __init__(self):
        WResponsiveBrokerCommand.__init__(
            self, WResponsiveCheckBackupCommand.CheckBackupCommand(),
            scheduled_task_cls=WResponsiveCheckBackupCommand.ScheduledTask
        )
class WResponsiveProgramBackupCommand(WResponsiveBrokerCommand):
    # Broker command that backs up the output of an external program.

    class ProgramBackupCommand(WProgramBackupCommand, WBrokerCommand):
        # Adapts WProgramBackupCommand to the broker-command interface.

        def __init__(self):
            WProgramBackupCommand.__init__(self, WAppsGlobals.log)
            WBrokerCommand.__init__(
                self, self.command_token(), *self.argument_descriptors(),
                relationships=self.relationships()
            )

        def brief_description(self):
            # One-line description shown by the broker help.
            return self.__description__

    class ScheduledTask(WResponsiveBrokerCommand.ScheduledTask):

        def state_details(self):
            archiver = self.basic_command().archiver()
            # BUG FIX: the original test was inverted ("is not None"), which
            # reported "not running" while archiving was in progress and
            # then called archiving_details() on a None archiver
            # (AttributeError) otherwise.
            if archiver is None:
                return 'Archiving is not running. May be finalizing'
            result = ''
            details = archiver.archiving_details()
            if details is not None:
                result += '\n' + details
            return result

        def thread_started(self):
            # Hand the scheduler's stop event to the command before starting.
            self.basic_command().stop_event(self.stop_event())
            WResponsiveBrokerCommand.ScheduledTask.thread_started(self)

    __task_source_name__ = WBackupMeta.__task_source_name__
    __scheduler_instance__ = WBackupMeta.__scheduler_instance_name__

    def __init__(self):
        WResponsiveBrokerCommand.__init__(
            self, WResponsiveProgramBackupCommand.ProgramBackupCommand(),
            scheduled_task_cls=WResponsiveProgramBackupCommand.ScheduledTask
        )
class WResponsiveRetentionCommand(WResponsiveBrokerCommand):
    # Broker command that applies a retention policy to stored backups.

    class RetentionCommand(WRetentionBackupCommand, WBrokerCommand):
        # Adapts WRetentionBackupCommand to the broker-command interface.

        def __init__(self):
            WRetentionBackupCommand.__init__(self, WAppsGlobals.log)
            WBrokerCommand.__init__(
                self, self.command_token(), *self.argument_descriptors(),
                relationships=self.relationships()
            )

        def brief_description(self):
            # One-line description shown by the broker help.
            return self.__description__

    class ScheduledTask(WResponsiveBrokerCommand.ScheduledTask):

        def state_details(self):
            # TODO: make it more descriptive!
            return 'Task is running?!'

        def thread_started(self):
            # TODO: make it more responsive!
            # NOTE(review): unlike the sibling commands, the scheduler stop
            # event is not wired through here (the call below is deliberately
            # commented out) -- confirm whether retention tasks are meant to
            # be non-interruptible.
            #self.basic_command().stop_event(self.stop_event())
            WResponsiveBrokerCommand.ScheduledTask.thread_started(self)

    __task_source_name__ = WBackupMeta.__task_source_name__
    __scheduler_instance__ = WBackupMeta.__scheduler_instance_name__

    def __init__(self):
        WResponsiveBrokerCommand.__init__(
            self, WResponsiveRetentionCommand.RetentionCommand(),
            scheduled_task_cls=WResponsiveRetentionCommand.ScheduledTask
        )
class WBackupBrokerCommandKit(WCommandKit):
    """Command kit grouping every wasp-backup broker command."""

    __registry_tag__ = 'com.binblob.wasp-backup.broker-commands'

    @classmethod
    def description(cls):
        # Shown by the broker when listing available kits.
        return 'backup creation/restoring commands'

    @classmethod
    def commands(cls):
        # One freshly-constructed command object per backup operation.
        command_classes = (
            WResponsiveCreateBackupCommand,
            WResponsiveCheckBackupCommand,
            WResponsiveProgramBackupCommand,
            WResponsiveRetentionCommand,
        )
        return tuple(command_cls() for command_cls in command_classes)
class WBackupSchedulerInstaller(WSchedulerTaskSourceInstaller):
    # Registers the backup task source with the launcher's scheduler.

    __scheduler_instance__ = WBackupMeta.__scheduler_instance_name__

    class InstantTaskSource(WInstantTaskSource, WLauncherTaskSource):
        # Task source that runs backup tasks submitted by the broker
        # immediately (no cron-style schedule).

        __task_source_name__ = WBackupMeta.__task_source_name__

        def __init__(self, scheduler):
            WInstantTaskSource.__init__(self, scheduler)
            WLauncherTaskSource.__init__(self)

        def name(self):
            # Identifier by which broker commands address this source.
            return self.__task_source_name__

        def description(self):
            return 'Backup tasks from broker'

    __registry_tag__ = 'com.binblob.wasp-backup.scheduler.sources'

    def sources(self):
        # Single task source, returned as a one-element tuple.
        return WBackupSchedulerInstaller.InstantTaskSource,
| lgpl-3.0 |
Cactuslegs/audacity-of-nope | lib-src/lv2/lilv/waflib/Tools/c_osx.py | 329 | 4274 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,shutil,sys,platform
from waflib import TaskGen,Task,Build,Options,Utils,Errors
from waflib.TaskGen import taskgen_method,feature,after_method,before_method
# Template for a bundle's Contents/Info.plist; the single %s placeholder
# receives the executable name (see create_task_macplist below).
app_info='''
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist SYSTEM "file://localhost/System/Library/DTDs/PropertyList.dtd">
<plist version="0.9">
<dict>
<key>CFBundlePackageType</key>
<string>APPL</string>
<key>CFBundleGetInfoString</key>
<string>Created by Waf</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>NOTE</key>
<string>THIS IS A GENERATED FILE, DO NOT MODIFY</string>
<key>CFBundleExecutable</key>
<string>%s</string>
</dict>
</plist>
'''
@feature('c','cxx')
def set_macosx_deployment_target(self):
    # Propagate MACOSX_DEPLOYMENT_TARGET from the build environment into the
    # process environment so the compiler/linker honour it; on Darwin, when
    # neither is set, fall back to the running OS version (major.minor).
    if self.env['MACOSX_DEPLOYMENT_TARGET']:
        os.environ['MACOSX_DEPLOYMENT_TARGET']=self.env['MACOSX_DEPLOYMENT_TARGET']
    elif'MACOSX_DEPLOYMENT_TARGET'not in os.environ:
        if Utils.unversioned_sys_platform()=='darwin':
            os.environ['MACOSX_DEPLOYMENT_TARGET']='.'.join(platform.mac_ver()[0].split('.')[:2])
@taskgen_method
def create_bundle_dirs(self,name,out):
    # Create the <name>.app/Contents/MacOS directory skeleton next to the
    # output node and return the bundle's root node.
    bld=self.bld
    dir=out.parent.find_or_declare(name)
    dir.mkdir()
    macos=dir.find_or_declare(['Contents','MacOS'])
    macos.mkdir()
    return dir
def bundle_name_for_output(out):
    # Derive the .app bundle name from an output node's file name by
    # replacing the final extension (if any) with '.app'.
    stem, dot, _ext = out.name.rpartition('.')
    if dot:
        return stem + '.app'
    return out.name + '.app'
@feature('cprogram','cxxprogram')
@after_method('apply_link')
def create_task_macapp(self):
    # Package a linked program as a macOS .app bundle when MACAPP (env) or
    # the task generator's ``mac_app`` attribute is set: copy the binary
    # into <name>.app/Contents/MacOS, schedule installation under
    # /Applications, and copy/install any declared ``mac_resources``.
    if self.env['MACAPP']or getattr(self,'mac_app',False):
        out=self.link_task.outputs[0]
        name=bundle_name_for_output(out)
        dir=self.create_bundle_dirs(name,out)
        n1=dir.find_or_declare(['Contents','MacOS',out.name])
        self.apptask=self.create_task('macapp',self.link_task.outputs,n1)
        inst_to=getattr(self,'install_path','/Applications')+'/%s/Contents/MacOS/'%name
        self.bld.install_files(inst_to,n1,chmod=Utils.O755)
        if getattr(self,'mac_resources',None):
            res_dir=n1.parent.parent.make_node('Resources')
            inst_to=getattr(self,'install_path','/Applications')+'/%s/Resources'%name
            for x in self.to_list(self.mac_resources):
                node=self.path.find_node(x)
                if not node:
                    raise Errors.WafError('Missing mac_resource %r in %r'%(x,self))
                parent=node.parent
                # a resource entry may be a directory (copied recursively)
                # or a single file
                if os.path.isdir(node.abspath()):
                    nodes=node.ant_glob('**')
                else:
                    nodes=[node]
                for node in nodes:
                    rel=node.path_from(parent)
                    tsk=self.create_task('macapp',node,res_dir.make_node(rel))
                    self.bld.install_as(inst_to+'/%s'%rel,node)
        if getattr(self.bld,'is_install',None):
            # skip the ordinary (non-bundle) binary installation
            self.install_task.hasrun=Task.SKIP_ME
@feature('cprogram','cxxprogram')
@after_method('apply_link')
def create_task_macplist(self):
    # Generate Contents/Info.plist for the .app bundle.  The plist content
    # comes from the ``mac_plist`` attribute (either a file path or a
    # literal string), falling back to the built-in ``app_info`` template.
    if self.env['MACAPP']or getattr(self,'mac_app',False):
        out=self.link_task.outputs[0]
        name=bundle_name_for_output(out)
        dir=self.create_bundle_dirs(name,out)
        n1=dir.find_or_declare(['Contents','Info.plist'])
        self.plisttask=plisttask=self.create_task('macplist',[],n1)
        if getattr(self,'mac_plist',False):
            node=self.path.find_resource(self.mac_plist)
            if node:
                # mac_plist named an existing file: use it as the task input
                plisttask.inputs.append(node)
            else:
                # otherwise treat mac_plist as the literal plist text
                plisttask.code=self.mac_plist
        else:
            plisttask.code=app_info%self.link_task.outputs[0].name
        inst_to=getattr(self,'install_path','/Applications')+'/%s/Contents/'%name
        self.bld.install_files(inst_to,n1)
@feature('cshlib','cxxshlib')
@before_method('apply_link','propagate_uselib_vars')
def apply_bundle(self):
    # Build the shared library as a macOS loadable bundle when MACBUNDLE
    # (env) or the ``mac_bundle`` attribute is set: clear the regular shlib
    # link flags, switch to the bundle file-name pattern and propagate the
    # MACBUNDLE use-flags.
    if self.env['MACBUNDLE']or getattr(self,'mac_bundle',False):
        self.env['LINKFLAGS_cshlib']=self.env['LINKFLAGS_cxxshlib']=[]
        self.env['cshlib_PATTERN']=self.env['cxxshlib_PATTERN']=self.env['macbundle_PATTERN']
        use=self.use=self.to_list(getattr(self,'use',[]))
        if not'MACBUNDLE'in use:
            use.append('MACBUNDLE')
# Directory skeleton of an .app bundle.
app_dirs=['Contents','Contents/MacOS','Contents/Resources']
class macapp(Task.Task):
    # Task copying one file into the .app bundle (metadata preserved).
    color='PINK'
    def run(self):
        self.outputs[0].parent.mkdir()
        shutil.copy2(self.inputs[0].srcpath(),self.outputs[0].abspath())
class macplist(Task.Task):
    # Task writing the bundle's Info.plist, either from a literal ``code``
    # attribute or from the content of its input file.
    color='PINK'
    ext_in=['.bin']
    def run(self):
        if getattr(self,'code',None):
            txt=self.code
        else:
            txt=self.inputs[0].read()
        self.outputs[0].write(txt)
| gpl-2.0 |
istresearch/traptor | tests/traptor_integration_tests.py | 1 | 2572 | """
Traptor integration tests.
This test file is used to ensure that a Traptor is able to pull rules from Redis and begin collection.
Requirements:
* Docker
* Python 2.7 environment with Redis installed
To run this test do the following:
* Create and activate a virtual environment (venv or Anaconda)
* Rename `traptor.env.sample` to `traptor.env` and fill in all fields. Some defaults are provided.
* Ensure that the Redis database you're using in this file matches the docker-compose.yml file (should be 2)
* Install the requirements: `pip install -r requirements.txt`
* Shut down any running containers: `docker-compose down`
* Start Traptor and Redis: `docker-compose up --build -d`
* Tail the logs: `tail -f logs/traptor.log`
* Run the integration test file: `python tests/traptor_integration_tests.py`
* Stare at log files until you're having fun
If everything is working you should see Traptor get it's rules from Redis and begin processing tweets.
If you'd like to see more of how the sausage is made, change the log level from `INFO` to `DEBUG`.
"""
import os

from redis import StrictRedis, ConnectionError
from time import sleep

HOST_FOR_TESTING = os.getenv('REDIS_HOST', 'localhost')
TRAPTOR_TYPE = os.getenv('TRAPTOR_TYPE', 'track')
TRAPTOR_ID = int(os.getenv('TRAPTOR_ID', 0))
RULE_ID = 12348

# Create a connection to Redis.
# BUG FIX: StrictRedis() is lazy -- it does not open a socket until the
# first command is issued -- so the original try/except around the
# constructor could never catch a ConnectionError.  ping() forces a round
# trip so an unreachable host is reported immediately, and the connection
# is reset to None so the guard below actually takes effect.
redis_connection = None
try:
    redis_connection = StrictRedis(host=HOST_FOR_TESTING, port=6379, db=2)
    redis_connection.ping()
except ConnectionError as ce:
    print("Unable to connect to {}. Error: {}".format(HOST_FOR_TESTING, ce))
    redis_connection = None

# If we have a connection to Redis, wait 15 seconds and add a rule
if redis_connection is not None:
    print("Giving you 15 seconds so you can start tailing the logs")
    sleep(15)

    # Collect on tweets with the keyword `python`
    # Change the rule value if you want to collect on a different keyword or hashtag. Go nuts!
    test_track_rule = {
        "tag": "Traptor.Test",
        "value": "python",
        "status": "active",
        "description": "Test track rule for python",
        "appid": "test-appid",
        "date_added": "2017-04-02 16:58:34",
        "rule_type": "track",
        "rule_id": RULE_ID
    }

    try:
        redis_key = "traptor-{}:{}:{}".format(TRAPTOR_TYPE, TRAPTOR_ID, RULE_ID)
        redis_connection.hmset(redis_key, test_track_rule)
        print("Rule added")
        print("Redis Key: {}".format(redis_key))
        print("Rule: {}".format(test_track_rule))
    except ConnectionError as ce:
        print("Unable to add rule to Redis: {}".format(ce))
| mit |
ohnonot/fbpanel-genmon2 | .config/argparse.py | 490 | 87791 | # Author: Steven J. Bethard <steven.bethard@gmail.com>.
"""Command-line parsing library
This module is an optparse-inspired command-line parsing library that:
- handles both optional and positional arguments
- produces highly informative usage messages
- supports parsers that dispatch to sub-parsers
The following is a simple usage example that sums integers from the
command-line and writes the result to a file::
parser = argparse.ArgumentParser(
description='sum the integers at the command line')
parser.add_argument(
'integers', metavar='int', nargs='+', type=int,
help='an integer to be summed')
parser.add_argument(
'--log', default=sys.stdout, type=argparse.FileType('w'),
help='the file where the sum should be written')
args = parser.parse_args()
args.log.write('%s' % sum(args.integers))
args.log.close()
The module contains the following public classes:
- ArgumentParser -- The main entry point for command-line parsing. As the
example above shows, the add_argument() method is used to populate
the parser with actions for optional and positional arguments. Then
the parse_args() method is invoked to convert the args at the
command-line into an object with attributes.
- ArgumentError -- The exception raised by ArgumentParser objects when
there are errors with the parser's actions. Errors raised while
parsing the command-line are caught by ArgumentParser and emitted
as command-line messages.
- FileType -- A factory for defining types of files to be created. As the
example above shows, instances of FileType are typically passed as
the type= argument of add_argument() calls.
- Action -- The base class for parser actions. Typically actions are
selected by passing strings like 'store_true' or 'append_const' to
the action= argument of add_argument(). However, for greater
customization of ArgumentParser actions, subclasses of Action may
be defined and passed as the action= argument.
- HelpFormatter, RawDescriptionHelpFormatter, RawTextHelpFormatter,
ArgumentDefaultsHelpFormatter -- Formatter classes which
may be passed as the formatter_class= argument to the
ArgumentParser constructor. HelpFormatter is the default,
RawDescriptionHelpFormatter and RawTextHelpFormatter tell the parser
not to change the formatting for help text, and
ArgumentDefaultsHelpFormatter adds information about argument defaults
to the help.
All other classes in this module are considered implementation details.
(Also note that HelpFormatter and RawDescriptionHelpFormatter are only
considered public as object names -- the API of the formatter objects is
still considered an implementation detail.)
"""
# Version of the standalone argparse backport (PyPI distribution).
__version__ = '1.2.1'

# Public API; names not listed here are implementation details even when
# they lack a leading underscore.
__all__ = [
    'ArgumentParser',
    'ArgumentError',
    'ArgumentTypeError',
    'FileType',
    'HelpFormatter',
    'ArgumentDefaultsHelpFormatter',
    'RawDescriptionHelpFormatter',
    'RawTextHelpFormatter',
    'Namespace',
    'Action',
    'ONE_OR_MORE',
    'OPTIONAL',
    'PARSER',
    'REMAINDER',
    'SUPPRESS',
    'ZERO_OR_MORE',
]
import copy as _copy
import os as _os
import re as _re
import sys as _sys
import textwrap as _textwrap
from gettext import gettext as _
# Python < 2.4 compatibility: 'set' became a builtin in 2.4; the 'sets'
# module provides an equivalent on 2.3.
try:
    set
except NameError:
    # for python < 2.4 compatibility (sets module is there since 2.3):
    from sets import Set as set

# Python 3 compatibility: 'basestring' exists only on Python 2.
try:
    basestring
except NameError:
    basestring = str

# Python < 2.4 compatibility: the 'sorted' builtin appeared in 2.4.
try:
    sorted
except NameError:
    # for python < 2.4 compatibility:
    def sorted(iterable, reverse=False):
        result = list(iterable)
        result.sort()
        if reverse:
            result.reverse()
        return result


def _callable(obj):
    # callable() is absent on Python 3.0/3.1; detect callables structurally
    # (instances with __call__, or classes, which have __bases__).
    return hasattr(obj, '__call__') or hasattr(obj, '__bases__')
# Sentinel telling the parser/formatter to omit an entry entirely.
SUPPRESS = '==SUPPRESS=='

# nargs markers; they mirror regular-expression quantifiers where possible.
OPTIONAL = '?'
ZERO_OR_MORE = '*'
ONE_OR_MORE = '+'
PARSER = 'A...'      # one name plus all remaining args (used by sub-parsers)
REMAINDER = '...'    # everything that is left, unvalidated

# Namespace attribute under which unrecognized arguments are collected.
_UNRECOGNIZED_ARGS_ATTR = '_unrecognized_args'
# =============================
# Utility functions and classes
# =============================
class _AttributeHolder(object):
"""Abstract base class that provides __repr__.
The __repr__ method returns a string in the format::
ClassName(attr=name, attr=name, ...)
The attributes are determined either by a class-level attribute,
'_kwarg_names', or by inspecting the instance __dict__.
"""
def __repr__(self):
type_name = type(self).__name__
arg_strings = []
for arg in self._get_args():
arg_strings.append(repr(arg))
for name, value in self._get_kwargs():
arg_strings.append('%s=%r' % (name, value))
return '%s(%s)' % (type_name, ', '.join(arg_strings))
def _get_kwargs(self):
return sorted(self.__dict__.items())
def _get_args(self):
return []
def _ensure_value(namespace, name, value):
if getattr(namespace, name, None) is None:
setattr(namespace, name, value)
return getattr(namespace, name)
# ===============
# Formatting Help
# ===============
class HelpFormatter(object):
"""Formatter for generating usage messages and argument help strings.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
    def __init__(self,
                 prog,
                 indent_increment=2,
                 max_help_position=24,
                 width=None):

        # default setting for width: terminal width from $COLUMNS minus a
        # small right margin, falling back to 80 columns.
        if width is None:
            try:
                width = int(_os.environ['COLUMNS'])
            except (KeyError, ValueError):
                width = 80
            width -= 2

        self._prog = prog
        self._indent_increment = indent_increment
        self._max_help_position = max_help_position
        self._width = width

        # Mutable formatting state while sections are being built.
        self._current_indent = 0
        self._level = 0
        self._action_max_length = 0

        # Sections form a tree; _root_section is the implicit top level.
        self._root_section = self._Section(self, None)
        self._current_section = self._root_section

        # Pre-compiled helpers: collapse runs of whitespace, and squeeze
        # three-or-more consecutive newlines down to two.
        self._whitespace_matcher = _re.compile(r'\s+')
        self._long_break_matcher = _re.compile(r'\n\n\n+')
# ===============================
# Section and indentation methods
# ===============================
    def _indent(self):
        # Enter one level of indentation.
        self._current_indent += self._indent_increment
        self._level += 1

    def _dedent(self):
        # Leave one level of indentation; must balance a prior _indent().
        self._current_indent -= self._indent_increment
        assert self._current_indent >= 0, 'Indent decreased below 0.'
        self._level -= 1
class _Section(object):
def __init__(self, formatter, parent, heading=None):
self.formatter = formatter
self.parent = parent
self.heading = heading
self.items = []
def format_help(self):
# format the indented section
if self.parent is not None:
self.formatter._indent()
join = self.formatter._join_parts
for func, args in self.items:
func(*args)
item_help = join([func(*args) for func, args in self.items])
if self.parent is not None:
self.formatter._dedent()
# return nothing if the section was empty
if not item_help:
return ''
# add the heading if the section was non-empty
if self.heading is not SUPPRESS and self.heading is not None:
current_indent = self.formatter._current_indent
heading = '%*s%s:\n' % (current_indent, '', self.heading)
else:
heading = ''
# join the section-initial newline, the heading and the help
return join(['\n', heading, item_help, '\n'])
    def _add_item(self, func, args):
        # Defer rendering: record a callable and its arguments; the section
        # invokes them later, when help is finally formatted.
        self._current_section.items.append((func, args))
# ========================
# Message building methods
# ========================
    def start_section(self, heading):
        # Open a nested help section; subsequent add_* calls land in it.
        self._indent()
        section = self._Section(self, self._current_section, heading)
        self._add_item(section.format_help, [])
        self._current_section = section

    def end_section(self):
        # Close the current section and pop back to its parent.
        self._current_section = self._current_section.parent
        self._dedent()

    def add_text(self, text):
        if text is not SUPPRESS and text is not None:
            self._add_item(self._format_text, [text])

    def add_usage(self, usage, actions, groups, prefix=None):
        if usage is not SUPPRESS:
            args = usage, actions, groups, prefix
            self._add_item(self._format_usage, args)

    def add_argument(self, action):
        if action.help is not SUPPRESS:

            # find all invocations
            get_invocation = self._format_action_invocation
            invocations = [get_invocation(action)]
            for subaction in self._iter_indented_subactions(action):
                invocations.append(get_invocation(subaction))

            # update the maximum item length (used to align help columns)
            invocation_length = max([len(s) for s in invocations])
            action_length = invocation_length + self._current_indent
            self._action_max_length = max(self._action_max_length,
                                          action_length)

            # add the item to the list
            self._add_item(self._format_action, [action])

    def add_arguments(self, actions):
        for action in actions:
            self.add_argument(action)
# =======================
# Help-formatting methods
# =======================
    def format_help(self):
        # Render the whole section tree, then normalise excess blank lines
        # and ensure the result ends with exactly one newline.
        help = self._root_section.format_help()
        if help:
            help = self._long_break_matcher.sub('\n\n', help)
            help = help.strip('\n') + '\n'
        return help

    def _join_parts(self, part_strings):
        # Concatenate rendered fragments, dropping empties and SUPPRESS.
        return ''.join([part
                        for part in part_strings
                        if part and part is not SUPPRESS])
    def _format_usage(self, usage, actions, groups, prefix):
        # Build the "usage: ..." banner, wrapping it over multiple lines
        # when it exceeds the available width.
        if prefix is None:
            prefix = _('usage: ')

        # if usage is specified, use that
        if usage is not None:
            usage = usage % dict(prog=self._prog)

        # if no optionals or positionals are available, usage is just prog
        elif usage is None and not actions:
            usage = '%(prog)s' % dict(prog=self._prog)

        # if optionals and positionals are available, calculate usage
        elif usage is None:
            prog = '%(prog)s' % dict(prog=self._prog)

            # split optionals from positionals
            optionals = []
            positionals = []
            for action in actions:
                if action.option_strings:
                    optionals.append(action)
                else:
                    positionals.append(action)

            # build full usage string
            format = self._format_actions_usage
            action_usage = format(optionals + positionals, groups)
            usage = ' '.join([s for s in [prog, action_usage] if s])

            # wrap the usage parts if it's too long
            text_width = self._width - self._current_indent
            if len(prefix) + len(usage) > text_width:

                # break usage into wrappable parts: bracketed groups stay
                # together, everything else splits on whitespace
                part_regexp = r'\(.*?\)+|\[.*?\]+|\S+'
                opt_usage = format(optionals, groups)
                pos_usage = format(positionals, groups)
                opt_parts = _re.findall(part_regexp, opt_usage)
                pos_parts = _re.findall(part_regexp, pos_usage)
                assert ' '.join(opt_parts) == opt_usage
                assert ' '.join(pos_parts) == pos_usage

                # helper for wrapping lines
                def get_lines(parts, indent, prefix=None):
                    lines = []
                    line = []
                    if prefix is not None:
                        line_len = len(prefix) - 1
                    else:
                        line_len = len(indent) - 1
                    for part in parts:
                        if line_len + 1 + len(part) > text_width:
                            lines.append(indent + ' '.join(line))
                            line = []
                            line_len = len(indent) - 1
                        line.append(part)
                        line_len += len(part) + 1
                    if line:
                        lines.append(indent + ' '.join(line))
                    if prefix is not None:
                        lines[0] = lines[0][len(indent):]
                    return lines

                # if prog is short, follow it with optionals or positionals
                if len(prefix) + len(prog) <= 0.75 * text_width:
                    indent = ' ' * (len(prefix) + len(prog) + 1)
                    if opt_parts:
                        lines = get_lines([prog] + opt_parts, indent, prefix)
                        lines.extend(get_lines(pos_parts, indent))
                    elif pos_parts:
                        lines = get_lines([prog] + pos_parts, indent, prefix)
                    else:
                        lines = [prog]

                # if prog is long, put it on its own line
                else:
                    indent = ' ' * len(prefix)
                    parts = opt_parts + pos_parts
                    lines = get_lines(parts, indent)
                    if len(lines) > 1:
                        lines = []
                        lines.extend(get_lines(opt_parts, indent))
                        lines.extend(get_lines(pos_parts, indent))
                    lines = [prog] + lines

                # join lines into usage
                usage = '\n'.join(lines)

        # prefix with 'usage:'
        return '%s%s\n\n' % (prefix, usage)
    def _format_actions_usage(self, actions, groups):
        """Render the usage fragment for *actions*.

        Mutually exclusive *groups* contribute bracket/paren wrappers and
        '|' separators; these are collected in `inserts` keyed by position
        in *actions* and spliced into the part list afterwards.
        """
        # find group indices and identify actions in groups
        group_actions = set()
        inserts = {}
        for group in groups:
            try:
                start = actions.index(group._group_actions[0])
            except ValueError:
                continue
            else:
                # only treat the group as contiguous if its actions appear
                # back-to-back in *actions*; otherwise no brackets are added
                end = start + len(group._group_actions)
                if actions[start:end] == group._group_actions:
                    for action in group._group_actions:
                        group_actions.add(action)
                    if not group.required:
                        if start in inserts:
                            inserts[start] += ' ['
                        else:
                            inserts[start] = '['
                        inserts[end] = ']'
                    else:
                        if start in inserts:
                            inserts[start] += ' ('
                        else:
                            inserts[start] = '('
                        inserts[end] = ')'
                    for i in range(start + 1, end):
                        inserts[i] = '|'
        # collect all actions format strings
        parts = []
        for i, action in enumerate(actions):
            # suppressed arguments are marked with None
            # remove | separators for suppressed arguments
            if action.help is SUPPRESS:
                parts.append(None)
                if inserts.get(i) == '|':
                    inserts.pop(i)
                elif inserts.get(i + 1) == '|':
                    inserts.pop(i + 1)
            # produce all arg strings
            elif not action.option_strings:
                part = self._format_args(action, action.dest)
                # if it's in a group, strip the outer []
                if action in group_actions:
                    if part[0] == '[' and part[-1] == ']':
                        part = part[1:-1]
                # add the action string to the list
                parts.append(part)
            # produce the first way to invoke the option in brackets
            else:
                option_string = action.option_strings[0]
                # if the Optional doesn't take a value, format is:
                #    -s or --long
                if action.nargs == 0:
                    part = '%s' % option_string
                # if the Optional takes a value, format is:
                #    -s ARGS or --long ARGS
                else:
                    default = action.dest.upper()
                    args_string = self._format_args(action, default)
                    part = '%s %s' % (option_string, args_string)
                # make it look optional if it's not required or in a group
                if not action.required and action not in group_actions:
                    part = '[%s]' % part
                # add the action string to the list
                parts.append(part)
        # insert things at the necessary indices (reverse order so earlier
        # indices are not shifted by later insertions)
        for i in sorted(inserts, reverse=True):
            parts[i:i] = [inserts[i]]
        # join all the action items with spaces
        text = ' '.join([item for item in parts if item is not None])
        # clean up separators for mutually exclusive groups
        open = r'[\[(]'
        close = r'[\])]'
        text = _re.sub(r'(%s) ' % open, r'\1', text)
        text = _re.sub(r' (%s)' % close, r'\1', text)
        text = _re.sub(r'%s *%s' % (open, close), r'', text)
        text = _re.sub(r'\(([^|]*)\)', r'\1', text)
        text = text.strip()
        # return the text
        return text
def _format_text(self, text):
if '%(prog)' in text:
text = text % dict(prog=self._prog)
text_width = self._width - self._current_indent
indent = ' ' * self._current_indent
return self._fill_text(text, text_width, indent) + '\n\n'
    def _format_action(self, action):
        """Render one action's invocation column plus its wrapped help text,
        followed by any indented sub-actions (e.g. subparser choices)."""
        # determine the required width and the entry label
        help_position = min(self._action_max_length + 2,
                            self._max_help_position)
        help_width = self._width - help_position
        action_width = help_position - self._current_indent - 2
        action_header = self._format_action_invocation(action)
        # no help; start on same line and add a final newline
        if not action.help:
            tup = self._current_indent, '', action_header
            action_header = '%*s%s\n' % tup
        # short action name; start on the same line and pad two spaces
        elif len(action_header) <= action_width:
            tup = self._current_indent, '', action_width, action_header
            action_header = '%*s%-*s ' % tup
            indent_first = 0
        # long action name; start on the next line
        else:
            tup = self._current_indent, '', action_header
            action_header = '%*s%s\n' % tup
            indent_first = help_position
        # collect the pieces of the action help
        parts = [action_header]
        # if there was help for the action, add lines of help text
        if action.help:
            help_text = self._expand_help(action)
            help_lines = self._split_lines(help_text, help_width)
            parts.append('%*s%s\n' % (indent_first, '', help_lines[0]))
            for line in help_lines[1:]:
                parts.append('%*s%s\n' % (help_position, '', line))
        # or add a newline if the description doesn't end with one
        elif not action_header.endswith('\n'):
            parts.append('\n')
        # if there are any sub-actions, add their help as well
        for subaction in self._iter_indented_subactions(action):
            parts.append(self._format_action(subaction))
        # return a single string
        return self._join_parts(parts)
def _format_action_invocation(self, action):
if not action.option_strings:
metavar, = self._metavar_formatter(action, action.dest)(1)
return metavar
else:
parts = []
# if the Optional doesn't take a value, format is:
# -s, --long
if action.nargs == 0:
parts.extend(action.option_strings)
# if the Optional takes a value, format is:
# -s ARGS, --long ARGS
else:
default = action.dest.upper()
args_string = self._format_args(action, default)
for option_string in action.option_strings:
parts.append('%s %s' % (option_string, args_string))
return ', '.join(parts)
def _metavar_formatter(self, action, default_metavar):
if action.metavar is not None:
result = action.metavar
elif action.choices is not None:
choice_strs = [str(choice) for choice in action.choices]
result = '{%s}' % ','.join(choice_strs)
else:
result = default_metavar
def format(tuple_size):
if isinstance(result, tuple):
return result
else:
return (result, ) * tuple_size
return format
def _format_args(self, action, default_metavar):
get_metavar = self._metavar_formatter(action, default_metavar)
if action.nargs is None:
result = '%s' % get_metavar(1)
elif action.nargs == OPTIONAL:
result = '[%s]' % get_metavar(1)
elif action.nargs == ZERO_OR_MORE:
result = '[%s [%s ...]]' % get_metavar(2)
elif action.nargs == ONE_OR_MORE:
result = '%s [%s ...]' % get_metavar(2)
elif action.nargs == REMAINDER:
result = '...'
elif action.nargs == PARSER:
result = '%s ...' % get_metavar(1)
else:
formats = ['%s' for _ in range(action.nargs)]
result = ' '.join(formats) % get_metavar(action.nargs)
return result
    def _expand_help(self, action):
        """Interpolate %(...)s placeholders (e.g. %(default)s, %(prog)s)
        in an action's help text from the action's own attributes."""
        params = dict(vars(action), prog=self._prog)
        # drop SUPPRESS-valued attributes so they cannot be interpolated
        for name in list(params):
            if params[name] is SUPPRESS:
                del params[name]
        # show readable names for callables/types, e.g. %(type)s -> 'int'
        for name in list(params):
            if hasattr(params[name], '__name__'):
                params[name] = params[name].__name__
        if params.get('choices') is not None:
            choices_str = ', '.join([str(c) for c in params['choices']])
            params['choices'] = choices_str
        return self._get_help_string(action) % params
def _iter_indented_subactions(self, action):
try:
get_subactions = action._get_subactions
except AttributeError:
pass
else:
self._indent()
for subaction in get_subactions():
yield subaction
self._dedent()
def _split_lines(self, text, width):
text = self._whitespace_matcher.sub(' ', text).strip()
return _textwrap.wrap(text, width)
def _fill_text(self, text, width, indent):
text = self._whitespace_matcher.sub(' ', text).strip()
return _textwrap.fill(text, width, initial_indent=indent,
subsequent_indent=indent)
    def _get_help_string(self, action):
        """Hook for subclasses to augment the raw help text (see
        ArgumentDefaultsHelpFormatter); the base version returns it as-is."""
        return action.help
class RawDescriptionHelpFormatter(HelpFormatter):
    """Help message formatter that keeps description text formatted as
    written, only indenting it.

    Only the name of this class is considered a public API. All the methods
    provided by the class are considered an implementation detail.
    """

    def _fill_text(self, text, width, indent):
        # keep the author's line breaks; just prepend the indent to each line
        lines = text.splitlines(True)
        return ''.join(indent + line for line in lines)
class RawTextHelpFormatter(RawDescriptionHelpFormatter):
    """Help message formatter that keeps all help text formatted as written.

    Only the name of this class is considered a public API. All the methods
    provided by the class are considered an implementation detail.
    """

    def _split_lines(self, text, width):
        # width is deliberately ignored: the author's line breaks win
        return text.splitlines()
class ArgumentDefaultsHelpFormatter(HelpFormatter):
    """Help message formatter that appends '(default: ...)' to argument help.

    Only the name of this class is considered a public API. All the methods
    provided by the class are considered an implementation detail.
    """

    def _get_help_string(self, action):
        help = action.help
        # leave help alone if the author already references the default
        if '%(default)' in action.help:
            return help
        if action.default is SUPPRESS:
            return help
        # only annotate actions that can actually have a meaningful default:
        # any optional, or a positional with '?' / '*' nargs
        defaulting_nargs = [OPTIONAL, ZERO_OR_MORE]
        if action.option_strings or action.nargs in defaulting_nargs:
            help += ' (default: %(default)s)'
        return help
# =====================
# Options and Arguments
# =====================
def _get_action_name(argument):
    """Best-effort display name for *argument*, or None if nothing usable."""
    if argument is None:
        return None
    if argument.option_strings:
        return '/'.join(argument.option_strings)
    if argument.metavar not in (None, SUPPRESS):
        return argument.metavar
    if argument.dest not in (None, SUPPRESS):
        return argument.dest
    return None
class ArgumentError(Exception):
    """An error from creating or using an argument (optional or positional).

    The string value of this exception is the message, augmented with
    information about the argument that caused it.
    """

    def __init__(self, argument, message):
        self.argument_name = _get_action_name(argument)
        self.message = message

    def __str__(self):
        values = dict(message=self.message,
                      argument_name=self.argument_name)
        if self.argument_name is None:
            return '%(message)s' % values
        return 'argument %(argument_name)s: %(message)s' % values
class ArgumentTypeError(Exception):
    """An error from trying to convert a command line string to a type."""
# ==============
# Action classes
# ==============
class Action(_AttributeHolder):
    """Information about how to convert command line strings to Python objects.
    Action objects are used by an ArgumentParser to represent the information
    needed to parse a single argument from one or more strings from the
    command line. The keyword arguments to the Action constructor are also
    all attributes of Action instances.
    Keyword Arguments:
        - option_strings -- A list of command-line option strings which
            should be associated with this action.
        - dest -- The name of the attribute to hold the created object(s)
        - nargs -- The number of command-line arguments that should be
            consumed. By default, one argument will be consumed and a single
            value will be produced.  Other values include:
                - N (an integer) consumes N arguments (and produces a list)
                - '?' consumes zero or one arguments
                - '*' consumes zero or more arguments (and produces a list)
                - '+' consumes one or more arguments (and produces a list)
            Note that the difference between the default and nargs=1 is that
            with the default, a single value will be produced, while with
            nargs=1, a list containing a single value will be produced.
        - const -- The value to be produced if the option is specified and the
            option uses an action that takes no values.
        - default -- The value to be produced if the option is not specified.
        - type -- The type which the command-line arguments should be converted
            to, should be one of 'string', 'int', 'float', 'complex' or a
            callable object that accepts a single string argument. If None,
            'string' is assumed.
        - choices -- A container of values that should be allowed. If not None,
            after a command-line argument has been converted to the appropriate
            type, an exception will be raised if it is not a member of this
            collection.
        - required -- True if the action must always be specified at the
            command line. This is only meaningful for optional command-line
            arguments.
        - help -- The help string describing the argument.
        - metavar -- The name to be used for the option's argument with the
            help string. If None, the 'dest' value will be used as the name.
    """
    def __init__(self,
                 option_strings,
                 dest,
                 nargs=None,
                 const=None,
                 default=None,
                 type=None,
                 choices=None,
                 required=False,
                 help=None,
                 metavar=None):
        # every constructor keyword becomes a same-named instance attribute
        self.option_strings = option_strings
        self.dest = dest
        self.nargs = nargs
        self.const = const
        self.default = default
        self.type = type
        self.choices = choices
        self.required = required
        self.help = help
        self.metavar = metavar
    def _get_kwargs(self):
        # attributes shown by _AttributeHolder's repr;
        # NOTE: 'required' is deliberately absent from this list
        names = [
            'option_strings',
            'dest',
            'nargs',
            'const',
            'default',
            'type',
            'choices',
            'help',
            'metavar',
        ]
        return [(name, getattr(self, name)) for name in names]
    def __call__(self, parser, namespace, values, option_string=None):
        # subclasses must override; invoked by the parser when the
        # argument is seen on the command line
        raise NotImplementedError(_('.__call__() not defined'))
class _StoreAction(Action):
    """Default 'store' action: keep the converted value(s) on the namespace."""

    def __init__(self,
                 option_strings,
                 dest,
                 nargs=None,
                 const=None,
                 default=None,
                 type=None,
                 choices=None,
                 required=False,
                 help=None,
                 metavar=None):
        # storing nothing makes no sense; point users at the const actions
        if nargs == 0:
            raise ValueError('nargs for store actions must be > 0; if you '
                             'have nothing to store, actions such as store '
                             'true or store const may be more appropriate')
        if const is not None and nargs != OPTIONAL:
            raise ValueError('nargs must be %r to supply const' % OPTIONAL)
        super(_StoreAction, self).__init__(option_strings, dest, nargs, const,
                                           default, type, choices, required,
                                           help, metavar)

    def __call__(self, parser, namespace, values, option_string=None):
        # simply overwrite whatever was there before
        setattr(namespace, self.dest, values)
class _StoreConstAction(Action):
    """'store_const': record a fixed constant when the option is seen."""

    def __init__(self,
                 option_strings,
                 dest,
                 const,
                 default=None,
                 required=False,
                 help=None,
                 metavar=None):
        # NOTE: metavar is accepted but not forwarded -- a zero-arg option
        # never displays one (mirrors the upstream argparse signature)
        super(_StoreConstAction, self).__init__(option_strings, dest, 0,
                                                const, default,
                                                required=required, help=help)

    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, self.const)
class _StoreTrueAction(_StoreConstAction):
    """'store_true': a flag that records True when present (default False)."""

    def __init__(self,
                 option_strings,
                 dest,
                 default=False,
                 required=False,
                 help=None):
        super(_StoreTrueAction, self).__init__(option_strings, dest,
                                               const=True, default=default,
                                               required=required, help=help)
class _StoreFalseAction(_StoreConstAction):
    """'store_false': a flag that records False when present (default True)."""

    def __init__(self,
                 option_strings,
                 dest,
                 default=True,
                 required=False,
                 help=None):
        super(_StoreFalseAction, self).__init__(option_strings, dest,
                                                const=False, default=default,
                                                required=required, help=help)
class _AppendAction(Action):
    """'append': collect each occurrence's value into a list on the namespace."""

    def __init__(self,
                 option_strings,
                 dest,
                 nargs=None,
                 const=None,
                 default=None,
                 type=None,
                 choices=None,
                 required=False,
                 help=None,
                 metavar=None):
        if nargs == 0:
            raise ValueError('nargs for append actions must be > 0; if arg '
                             'strings are not supplying the value to append, '
                             'the append const action may be more appropriate')
        if const is not None and nargs != OPTIONAL:
            raise ValueError('nargs must be %r to supply const' % OPTIONAL)
        super(_AppendAction, self).__init__(option_strings, dest, nargs,
                                            const, default, type, choices,
                                            required, help, metavar)

    def __call__(self, parser, namespace, values, option_string=None):
        # copy first so a default list shared between namespaces is never
        # mutated in place
        accumulated = _copy.copy(_ensure_value(namespace, self.dest, []))
        accumulated.append(values)
        setattr(namespace, self.dest, accumulated)
class _AppendConstAction(Action):
    """'append_const': append a fixed constant each time the option is seen."""

    def __init__(self,
                 option_strings,
                 dest,
                 const,
                 default=None,
                 required=False,
                 help=None,
                 metavar=None):
        super(_AppendConstAction, self).__init__(option_strings, dest, 0,
                                                 const, default,
                                                 required=required,
                                                 help=help, metavar=metavar)

    def __call__(self, parser, namespace, values, option_string=None):
        # copy first so a default list shared between namespaces is never
        # mutated in place
        accumulated = _copy.copy(_ensure_value(namespace, self.dest, []))
        accumulated.append(self.const)
        setattr(namespace, self.dest, accumulated)
class _CountAction(Action):
    """'count': tally how many times the option appears (e.g. -vvv)."""

    def __init__(self,
                 option_strings,
                 dest,
                 default=None,
                 required=False,
                 help=None):
        super(_CountAction, self).__init__(option_strings, dest, nargs=0,
                                           default=default,
                                           required=required, help=help)

    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest,
                _ensure_value(namespace, self.dest, 0) + 1)
class _HelpAction(Action):
    """'help': print the parser's help text and exit."""

    def __init__(self,
                 option_strings,
                 dest=SUPPRESS,
                 default=SUPPRESS,
                 help=None):
        super(_HelpAction, self).__init__(option_strings, dest=dest,
                                          default=default, nargs=0,
                                          help=help)

    def __call__(self, parser, namespace, values, option_string=None):
        parser.print_help()
        parser.exit()
class _VersionAction(Action):
    """'version': print version information and exit."""

    def __init__(self,
                 option_strings,
                 version=None,
                 dest=SUPPRESS,
                 default=SUPPRESS,
                 help="show program's version number and exit"):
        super(_VersionAction, self).__init__(option_strings, dest=dest,
                                             default=default, nargs=0,
                                             help=help)
        self.version = version

    def __call__(self, parser, namespace, values, option_string=None):
        # fall back to the (deprecated) parser-level version attribute
        version = self.version
        if version is None:
            version = parser.version
        formatter = parser._get_formatter()
        formatter.add_text(version)
        parser.exit(message=formatter.format_help())
class _SubParsersAction(Action):
    """Action that dispatches the remaining command line to a named
    sub-parser registered via add_parser()."""

    class _ChoicesPseudoAction(Action):
        # lightweight stand-in so each sub-command's name and help string
        # appear in the parent parser's help listing
        def __init__(self, name, help):
            sup = super(_SubParsersAction._ChoicesPseudoAction, self)
            sup.__init__(option_strings=[], dest=name, help=help)
    def __init__(self,
                 option_strings,
                 prog,
                 parser_class,
                 dest=SUPPRESS,
                 help=None,
                 metavar=None):
        self._prog_prefix = prog
        self._parser_class = parser_class
        # choices aliases this dict, so add_parser() updates choices too
        self._name_parser_map = {}
        self._choices_actions = []
        super(_SubParsersAction, self).__init__(
            option_strings=option_strings,
            dest=dest,
            nargs=PARSER,
            choices=self._name_parser_map,
            help=help,
            metavar=metavar)
    def add_parser(self, name, **kwargs):
        """Create, register under *name*, and return a new sub-parser."""
        # set prog from the existing prefix
        if kwargs.get('prog') is None:
            kwargs['prog'] = '%s %s' % (self._prog_prefix, name)
        # create a pseudo-action to hold the choice help
        if 'help' in kwargs:
            help = kwargs.pop('help')
            choice_action = self._ChoicesPseudoAction(name, help)
            self._choices_actions.append(choice_action)
        # create the parser and add it to the map
        parser = self._parser_class(**kwargs)
        self._name_parser_map[name] = parser
        return parser
    def _get_subactions(self):
        # consumed by HelpFormatter._iter_indented_subactions
        return self._choices_actions
    def __call__(self, parser, namespace, values, option_string=None):
        # first value names the sub-parser; the rest is its command line
        parser_name = values[0]
        arg_strings = values[1:]
        # set the parser name if requested
        if self.dest is not SUPPRESS:
            setattr(namespace, self.dest, parser_name)
        # select the parser
        try:
            parser = self._name_parser_map[parser_name]
        except KeyError:
            tup = parser_name, ', '.join(self._name_parser_map)
            msg = _('unknown parser %r (choices: %s)' % tup)
            raise ArgumentError(self, msg)
        # parse all the remaining options into the namespace
        # store any unrecognized options on the object, so that the top
        # level parser can decide what to do with them
        namespace, arg_strings = parser.parse_known_args(arg_strings, namespace)
        if arg_strings:
            vars(namespace).setdefault(_UNRECOGNIZED_ARGS_ATTR, [])
            getattr(namespace, _UNRECOGNIZED_ARGS_ATTR).extend(arg_strings)
# ==============
# Type classes
# ==============
class FileType(object):
    """Factory for creating file object types

    Instances of FileType are typically passed as type= arguments to the
    ArgumentParser add_argument() method.

    Keyword Arguments:
        - mode -- A string indicating how the file is to be opened. Accepts the
            same values as the builtin open() function.
        - bufsize -- The file's desired buffer size. Accepts the same values as
            the builtin open() function; None (the default) means "use open()'s
            own default buffering".
    """

    def __init__(self, mode='r', bufsize=None):
        self._mode = mode
        self._bufsize = bufsize

    def __call__(self, string):
        """Open *string* per the configured mode; '-' means stdin/stdout.

        Raises ValueError for '-' with a mode that is neither read nor write.
        """
        # the special argument "-" means sys.std{in,out}
        if string == '-':
            if 'r' in self._mode:
                return _sys.stdin
            elif 'w' in self._mode:
                return _sys.stdout
            else:
                msg = _('argument "-" with mode %r' % self._mode)
                raise ValueError(msg)

        # all other arguments are used as file names.
        # BUG FIX: compare against None rather than truthiness so that an
        # explicit bufsize of 0 (unbuffered) is honored instead of being
        # silently ignored.
        if self._bufsize is not None:
            return open(string, self._mode, self._bufsize)
        else:
            return open(string, self._mode)

    def __repr__(self):
        args = [self._mode, self._bufsize]
        args_str = ', '.join([repr(arg) for arg in args if arg is not None])
        return '%s(%s)' % (type(self).__name__, args_str)
# ===========================
# Optional and Positional Parsing
# ===========================
class Namespace(_AttributeHolder):
    """Simple object for storing attributes.

    Implements equality by attribute names and values, and provides a simple
    string representation.
    """

    def __init__(self, **kwargs):
        for name in kwargs:
            setattr(self, name, kwargs[name])

    # explicitly unhashable: value-equal, mutable objects must not be
    # used as dict keys / set members
    __hash__ = None

    def __eq__(self, other):
        # BUG FIX: comparing against a foreign type used to raise TypeError
        # inside vars(other); defer to the other operand instead (this
        # matches the behavior later adopted by stdlib argparse).
        if not isinstance(other, Namespace):
            return NotImplemented
        return vars(self) == vars(other)

    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __contains__(self, key):
        return key in self.__dict__
class _ActionsContainer(object):
    """Shared machinery for objects that hold argument actions (the parser
    itself and argument groups): action/type registries, per-dest defaults,
    and option-string conflict resolution."""
    def __init__(self,
                 description,
                 prefix_chars,
                 argument_default,
                 conflict_handler):
        super(_ActionsContainer, self).__init__()
        self.description = description
        self.argument_default = argument_default
        self.prefix_chars = prefix_chars
        self.conflict_handler = conflict_handler
        # set up registries
        self._registries = {}
        # register actions
        self.register('action', None, _StoreAction)
        self.register('action', 'store', _StoreAction)
        self.register('action', 'store_const', _StoreConstAction)
        self.register('action', 'store_true', _StoreTrueAction)
        self.register('action', 'store_false', _StoreFalseAction)
        self.register('action', 'append', _AppendAction)
        self.register('action', 'append_const', _AppendConstAction)
        self.register('action', 'count', _CountAction)
        self.register('action', 'help', _HelpAction)
        self.register('action', 'version', _VersionAction)
        self.register('action', 'parsers', _SubParsersAction)
        # raise an exception if the conflict handler is invalid
        self._get_handler()
        # action storage
        self._actions = []
        self._option_string_actions = {}
        # groups
        self._action_groups = []
        self._mutually_exclusive_groups = []
        # defaults storage
        self._defaults = {}
        # determines whether an "option" looks like a negative number
        self._negative_number_matcher = _re.compile(r'^-\d+$|^-\d*\.\d+$')
        # whether or not there are any optionals that look like negative
        # numbers -- uses a list so it can be shared and edited
        self._has_negative_number_optionals = []
    # ====================
    # Registration methods
    # ====================
    def register(self, registry_name, value, object):
        """Map *value* to *object* in the named registry (e.g. 'action')."""
        registry = self._registries.setdefault(registry_name, {})
        registry[value] = object
    def _registry_get(self, registry_name, value, default=None):
        return self._registries[registry_name].get(value, default)
    # ==================================
    # Namespace default accessor methods
    # ==================================
    def set_defaults(self, **kwargs):
        """Set per-dest default values, updating existing actions too."""
        self._defaults.update(kwargs)
        # if these defaults match any existing arguments, replace
        # the previous default on the object with the new one
        for action in self._actions:
            if action.dest in kwargs:
                action.default = kwargs[action.dest]
    def get_default(self, dest):
        """Return the effective default for *dest*, or None."""
        for action in self._actions:
            if action.dest == dest and action.default is not None:
                return action.default
        return self._defaults.get(dest, None)
    # =======================
    # Adding argument actions
    # =======================
    def add_argument(self, *args, **kwargs):
        """
        add_argument(dest, ..., name=value, ...)
        add_argument(option_string, option_string, ..., name=value, ...)
        """
        # if no positional args are supplied or only one is supplied and
        # it doesn't look like an option string, parse a positional
        # argument
        chars = self.prefix_chars
        if not args or len(args) == 1 and args[0][0] not in chars:
            if args and 'dest' in kwargs:
                raise ValueError('dest supplied twice for positional argument')
            kwargs = self._get_positional_kwargs(*args, **kwargs)
        # otherwise, we're adding an optional argument
        else:
            kwargs = self._get_optional_kwargs(*args, **kwargs)
        # if no default was supplied, use the parser-level default
        if 'default' not in kwargs:
            dest = kwargs['dest']
            if dest in self._defaults:
                kwargs['default'] = self._defaults[dest]
            elif self.argument_default is not None:
                kwargs['default'] = self.argument_default
        # create the action object, and add it to the parser
        action_class = self._pop_action_class(kwargs)
        if not _callable(action_class):
            raise ValueError('unknown action "%s"' % action_class)
        action = action_class(**kwargs)
        # raise an error if the action type is not callable
        type_func = self._registry_get('type', action.type, action.type)
        if not _callable(type_func):
            raise ValueError('%r is not callable' % type_func)
        return self._add_action(action)
    def add_argument_group(self, *args, **kwargs):
        group = _ArgumentGroup(self, *args, **kwargs)
        self._action_groups.append(group)
        return group
    def add_mutually_exclusive_group(self, **kwargs):
        group = _MutuallyExclusiveGroup(self, **kwargs)
        self._mutually_exclusive_groups.append(group)
        return group
    def _add_action(self, action):
        """Index *action* by its option strings and store it."""
        # resolve any conflicts
        self._check_conflict(action)
        # add to actions list
        self._actions.append(action)
        action.container = self
        # index the action by any option strings it has
        for option_string in action.option_strings:
            self._option_string_actions[option_string] = action
        # set the flag if any option strings look like negative numbers
        for option_string in action.option_strings:
            if self._negative_number_matcher.match(option_string):
                if not self._has_negative_number_optionals:
                    self._has_negative_number_optionals.append(True)
        # return the created action
        return action
    def _remove_action(self, action):
        self._actions.remove(action)
    def _add_container_actions(self, container):
        """Copy all actions and groups from *container* into self
        (used to implement the parents= parser argument)."""
        # collect groups by titles
        title_group_map = {}
        for group in self._action_groups:
            if group.title in title_group_map:
                msg = _('cannot merge actions - two groups are named %r')
                raise ValueError(msg % (group.title))
            title_group_map[group.title] = group
        # map each action to its group
        group_map = {}
        for group in container._action_groups:
            # if a group with the title exists, use that, otherwise
            # create a new group matching the container's group
            if group.title not in title_group_map:
                title_group_map[group.title] = self.add_argument_group(
                    title=group.title,
                    description=group.description,
                    conflict_handler=group.conflict_handler)
            # map the actions to their new group
            for action in group._group_actions:
                group_map[action] = title_group_map[group.title]
        # add container's mutually exclusive groups
        # NOTE: if add_mutually_exclusive_group ever gains title= and
        # description= then this code will need to be expanded as above
        for group in container._mutually_exclusive_groups:
            mutex_group = self.add_mutually_exclusive_group(
                required=group.required)
            # map the actions to their new mutex group
            for action in group._group_actions:
                group_map[action] = mutex_group
        # add all actions to this container or their group
        for action in container._actions:
            group_map.get(action, self)._add_action(action)
    def _get_positional_kwargs(self, dest, **kwargs):
        # make sure required is not specified
        if 'required' in kwargs:
            msg = _("'required' is an invalid argument for positionals")
            raise TypeError(msg)
        # mark positional arguments as required if at least one is
        # always required
        if kwargs.get('nargs') not in [OPTIONAL, ZERO_OR_MORE]:
            kwargs['required'] = True
        if kwargs.get('nargs') == ZERO_OR_MORE and 'default' not in kwargs:
            kwargs['required'] = True
        # return the keyword arguments with no option strings
        return dict(kwargs, dest=dest, option_strings=[])
    def _get_optional_kwargs(self, *args, **kwargs):
        # determine short and long option strings
        option_strings = []
        long_option_strings = []
        for option_string in args:
            # error on strings that don't start with an appropriate prefix
            if not option_string[0] in self.prefix_chars:
                msg = _('invalid option string %r: '
                        'must start with a character %r')
                tup = option_string, self.prefix_chars
                raise ValueError(msg % tup)
            # strings starting with two prefix characters are long options
            option_strings.append(option_string)
            if option_string[0] in self.prefix_chars:
                if len(option_string) > 1:
                    if option_string[1] in self.prefix_chars:
                        long_option_strings.append(option_string)
        # infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x'
        dest = kwargs.pop('dest', None)
        if dest is None:
            if long_option_strings:
                dest_option_string = long_option_strings[0]
            else:
                dest_option_string = option_strings[0]
            dest = dest_option_string.lstrip(self.prefix_chars)
            if not dest:
                msg = _('dest= is required for options like %r')
                raise ValueError(msg % option_string)
            dest = dest.replace('-', '_')
        # return the updated keyword arguments
        return dict(kwargs, dest=dest, option_strings=option_strings)
    def _pop_action_class(self, kwargs, default=None):
        action = kwargs.pop('action', default)
        return self._registry_get('action', action, action)
    def _get_handler(self):
        # determine function from conflict handler string
        handler_func_name = '_handle_conflict_%s' % self.conflict_handler
        try:
            return getattr(self, handler_func_name)
        except AttributeError:
            msg = _('invalid conflict_resolution value: %r')
            raise ValueError(msg % self.conflict_handler)
    def _check_conflict(self, action):
        # find all options that conflict with this option
        confl_optionals = []
        for option_string in action.option_strings:
            if option_string in self._option_string_actions:
                confl_optional = self._option_string_actions[option_string]
                confl_optionals.append((option_string, confl_optional))
        # resolve any conflicts
        if confl_optionals:
            conflict_handler = self._get_handler()
            conflict_handler(action, confl_optionals)
    def _handle_conflict_error(self, action, conflicting_actions):
        message = _('conflicting option string(s): %s')
        conflict_string = ', '.join([option_string
                                     for option_string, action
                                     in conflicting_actions])
        raise ArgumentError(action, message % conflict_string)
    def _handle_conflict_resolve(self, action, conflicting_actions):
        # remove all conflicting options
        for option_string, action in conflicting_actions:
            # remove the conflicting option
            action.option_strings.remove(option_string)
            self._option_string_actions.pop(option_string, None)
            # if the option now has no option string, remove it from the
            # container holding it
            if not action.option_strings:
                action.container._remove_action(action)
class _ArgumentGroup(_ActionsContainer):
    """A titled sub-grouping of arguments that shares its container's
    registries, action list and defaults."""

    def __init__(self, container, title=None, description=None, **kwargs):
        # inherit any unspecified settings from the owning container
        kwargs.setdefault('conflict_handler', container.conflict_handler)
        kwargs.setdefault('prefix_chars', container.prefix_chars)
        kwargs.setdefault('argument_default', container.argument_default)
        super(_ArgumentGroup, self).__init__(description=description,
                                             **kwargs)
        # group attributes
        self.title = title
        self._group_actions = []
        # share most attributes with the container so registration through
        # either object stays in sync
        self._registries = container._registries
        self._actions = container._actions
        self._option_string_actions = container._option_string_actions
        self._defaults = container._defaults
        self._has_negative_number_optionals = \
            container._has_negative_number_optionals

    def _add_action(self, action):
        added = super(_ArgumentGroup, self)._add_action(action)
        self._group_actions.append(added)
        return added

    def _remove_action(self, action):
        super(_ArgumentGroup, self)._remove_action(action)
        self._group_actions.remove(action)
class _MutuallyExclusiveGroup(_ArgumentGroup):
    """A group whose member options may not be combined on the command line."""

    def __init__(self, container, required=False):
        super(_MutuallyExclusiveGroup, self).__init__(container)
        self.required = required
        self._container = container

    def _add_action(self, action):
        # required options can never be mutually exclusive with anything
        if action.required:
            raise ValueError(_('mutually exclusive arguments must be optional'))
        # register on the real container, but track membership here
        added = self._container._add_action(action)
        self._group_actions.append(added)
        return added

    def _remove_action(self, action):
        self._container._remove_action(action)
        self._group_actions.remove(action)
class ArgumentParser(_AttributeHolder, _ActionsContainer):
    """Object for parsing command line strings into Python objects.

    Keyword Arguments:
        - prog -- The name of the program (default: sys.argv[0])
        - usage -- A usage message (default: auto-generated from arguments)
        - description -- A description of what the program does
        - epilog -- Text following the argument descriptions
        - parents -- Parsers whose arguments should be copied into this one
        - formatter_class -- HelpFormatter class for printing help messages
        - prefix_chars -- Characters that prefix optional arguments
        - fromfile_prefix_chars -- Characters that prefix files containing
            additional arguments
        - argument_default -- The default value for all arguments
        - conflict_handler -- String indicating how to handle conflicts
        - add_help -- Add a -h/--help option
    """
    # NOTE(review): ``parents=[]`` is a mutable default; harmless here only
    # because the list is iterated, never mutated.
    def __init__(self,
                 prog=None,
                 usage=None,
                 description=None,
                 epilog=None,
                 version=None,
                 parents=[],
                 formatter_class=HelpFormatter,
                 prefix_chars='-',
                 fromfile_prefix_chars=None,
                 argument_default=None,
                 conflict_handler='error',
                 add_help=True):
        """Initialize parser state, register the default -h/--help (and the
        deprecated -v/--version) options, then copy in arguments and
        defaults from any parent parsers."""
        if version is not None:
            import warnings
            warnings.warn(
                """The "version" argument to ArgumentParser is deprecated. """
                """Please use """
                """"add_argument(..., action='version', version="N", ...)" """
                """instead""", DeprecationWarning)

        superinit = super(ArgumentParser, self).__init__
        superinit(description=description,
                  prefix_chars=prefix_chars,
                  argument_default=argument_default,
                  conflict_handler=conflict_handler)

        # default setting for prog
        if prog is None:
            prog = _os.path.basename(_sys.argv[0])

        self.prog = prog
        self.usage = usage
        self.epilog = epilog
        self.version = version
        self.formatter_class = formatter_class
        self.fromfile_prefix_chars = fromfile_prefix_chars
        self.add_help = add_help

        add_group = self.add_argument_group
        self._positionals = add_group(_('positional arguments'))
        self._optionals = add_group(_('optional arguments'))
        self._subparsers = None

        # register types
        def identity(string):
            return string
        self.register('type', None, identity)

        # add help and version arguments if necessary
        # (using explicit default to override global argument_default)
        if '-' in prefix_chars:
            default_prefix = '-'
        else:
            default_prefix = prefix_chars[0]
        if self.add_help:
            self.add_argument(
                default_prefix+'h', default_prefix*2+'help',
                action='help', default=SUPPRESS,
                help=_('show this help message and exit'))
        if self.version:
            self.add_argument(
                default_prefix+'v', default_prefix*2+'version',
                action='version', default=SUPPRESS,
                version=self.version,
                help=_("show program's version number and exit"))

        # add parent arguments and defaults
        # NOTE: done last so parent options can conflict-resolve against
        # the just-registered help/version options.
        for parent in parents:
            self._add_container_actions(parent)
            try:
                defaults = parent._defaults
            except AttributeError:
                pass
            else:
                self._defaults.update(defaults)
# =======================
# Pretty __repr__ methods
# =======================
def _get_kwargs(self):
names = [
'prog',
'usage',
'description',
'version',
'formatter_class',
'conflict_handler',
'add_help',
]
return [(name, getattr(self, name)) for name in names]
# ==================================
# Optional/Positional adding methods
# ==================================
    def add_subparsers(self, **kwargs):
        """Create (at most once) the special action that dispatches to
        sub-command parsers and register it with this parser."""
        if self._subparsers is not None:
            self.error(_('cannot have multiple subparser arguments'))

        # add the parser class to the arguments if it's not present
        kwargs.setdefault('parser_class', type(self))

        # an explicit title/description gets its own argument group;
        # otherwise the subparsers action lands with the positionals
        if 'title' in kwargs or 'description' in kwargs:
            title = _(kwargs.pop('title', 'subcommands'))
            description = _(kwargs.pop('description', None))
            self._subparsers = self.add_argument_group(title, description)
        else:
            self._subparsers = self._positionals

        # prog defaults to the usage message of this parser, skipping
        # optional arguments and with no "usage:" prefix
        if kwargs.get('prog') is None:
            formatter = self._get_formatter()
            positionals = self._get_positional_actions()
            groups = self._mutually_exclusive_groups
            formatter.add_usage(self.usage, positionals, groups, '')
            kwargs['prog'] = formatter.format_help().strip()

        # create the parsers action and add it to the positionals list
        parsers_class = self._pop_action_class(kwargs, 'parsers')
        action = parsers_class(option_strings=[], **kwargs)
        self._subparsers._add_action(action)

        # return the created parsers action
        return action
def _add_action(self, action):
if action.option_strings:
self._optionals._add_action(action)
else:
self._positionals._add_action(action)
return action
def _get_optional_actions(self):
return [action
for action in self._actions
if action.option_strings]
def _get_positional_actions(self):
return [action
for action in self._actions
if not action.option_strings]
# =====================================
# Command line argument parsing methods
# =====================================
def parse_args(self, args=None, namespace=None):
args, argv = self.parse_known_args(args, namespace)
if argv:
msg = _('unrecognized arguments: %s')
self.error(msg % ' '.join(argv))
return args
    def parse_known_args(self, args=None, namespace=None):
        """Like parse_args() but returns ``(namespace, unrecognized)``
        instead of erroring on unrecognized argument strings."""
        # args default to the system args
        if args is None:
            args = _sys.argv[1:]

        # default Namespace built from parser defaults
        if namespace is None:
            namespace = Namespace()

        # add any action defaults that aren't present
        for action in self._actions:
            if action.dest is not SUPPRESS:
                if not hasattr(namespace, action.dest):
                    if action.default is not SUPPRESS:
                        default = action.default
                        # string defaults go through the action's type
                        # function, exactly like command-line input would
                        if isinstance(action.default, basestring):
                            default = self._get_value(action, default)
                        setattr(namespace, action.dest, default)

        # add any parser defaults that aren't present
        for dest in self._defaults:
            if not hasattr(namespace, dest):
                setattr(namespace, dest, self._defaults[dest])

        # parse the arguments and exit if there are any errors
        try:
            namespace, args = self._parse_known_args(args, namespace)
            if hasattr(namespace, _UNRECOGNIZED_ARGS_ATTR):
                args.extend(getattr(namespace, _UNRECOGNIZED_ARGS_ATTR))
                delattr(namespace, _UNRECOGNIZED_ARGS_ATTR)
            return namespace, args
        except ArgumentError:
            err = _sys.exc_info()[1]
            self.error(str(err))
    def _parse_known_args(self, arg_strings, namespace):
        """Core parsing loop.

        Returns ``(namespace, extras)`` where *extras* are argument
        strings this parser did not recognize.  Calls self.error() (which
        exits) on missing required arguments or too few positionals.
        """
        # replace arg strings that are file references
        if self.fromfile_prefix_chars is not None:
            arg_strings = self._read_args_from_files(arg_strings)

        # map all mutually exclusive arguments to the other arguments
        # they can't occur with
        action_conflicts = {}
        for mutex_group in self._mutually_exclusive_groups:
            group_actions = mutex_group._group_actions
            for i, mutex_action in enumerate(mutex_group._group_actions):
                conflicts = action_conflicts.setdefault(mutex_action, [])
                conflicts.extend(group_actions[:i])
                conflicts.extend(group_actions[i + 1:])

        # find all option indices, and determine the arg_string_pattern
        # which has an 'O' if there is an option at an index,
        # an 'A' if there is an argument, or a '-' if there is a '--'
        option_string_indices = {}
        arg_string_pattern_parts = []
        arg_strings_iter = iter(arg_strings)
        for i, arg_string in enumerate(arg_strings_iter):
            # all args after -- are non-options
            if arg_string == '--':
                arg_string_pattern_parts.append('-')
                # the inner loop drains the shared iterator, so everything
                # after '--' is marked 'A' and never re-examined above
                for arg_string in arg_strings_iter:
                    arg_string_pattern_parts.append('A')
            # otherwise, add the arg to the arg strings
            # and note the index if it was an option
            else:
                option_tuple = self._parse_optional(arg_string)
                if option_tuple is None:
                    pattern = 'A'
                else:
                    option_string_indices[i] = option_tuple
                    pattern = 'O'
                arg_string_pattern_parts.append(pattern)

        # join the pieces together to form the pattern
        arg_strings_pattern = ''.join(arg_string_pattern_parts)

        # converts arg strings to the appropriate and then takes the action
        seen_actions = set()
        seen_non_default_actions = set()

        def take_action(action, argument_strings, option_string=None):
            # Convert the strings, enforce mutual-exclusion conflicts, then
            # invoke the action (unless the converted value is SUPPRESS).
            seen_actions.add(action)
            argument_values = self._get_values(action, argument_strings)

            # error if this argument is not allowed with other previously
            # seen arguments, assuming that actions that use the default
            # value don't really count as "present"
            if argument_values is not action.default:
                seen_non_default_actions.add(action)
                for conflict_action in action_conflicts.get(action, []):
                    if conflict_action in seen_non_default_actions:
                        msg = _('not allowed with argument %s')
                        action_name = _get_action_name(conflict_action)
                        raise ArgumentError(action, msg % action_name)

            # take the action if we didn't receive a SUPPRESS value
            # (e.g. from a default)
            if argument_values is not SUPPRESS:
                action(self, namespace, argument_values, option_string)

        # function to convert arg_strings into an optional action
        def consume_optional(start_index):
            # get the optional identified at this index
            option_tuple = option_string_indices[start_index]
            action, option_string, explicit_arg = option_tuple

            # identify additional optionals in the same arg string
            # (e.g. -xyz is the same as -x -y -z if no args are required)
            match_argument = self._match_argument
            action_tuples = []
            while True:
                # if we found no optional action, skip it
                if action is None:
                    extras.append(arg_strings[start_index])
                    return start_index + 1

                # if there is an explicit argument, try to match the
                # optional's string arguments to only this
                if explicit_arg is not None:
                    arg_count = match_argument(action, 'A')

                    # if the action is a single-dash option and takes no
                    # arguments, try to parse more single-dash options out
                    # of the tail of the option string
                    chars = self.prefix_chars
                    if arg_count == 0 and option_string[1] not in chars:
                        action_tuples.append((action, [], option_string))
                        char = option_string[0]
                        option_string = char + explicit_arg[0]
                        new_explicit_arg = explicit_arg[1:] or None
                        optionals_map = self._option_string_actions
                        if option_string in optionals_map:
                            action = optionals_map[option_string]
                            explicit_arg = new_explicit_arg
                        else:
                            msg = _('ignored explicit argument %r')
                            raise ArgumentError(action, msg % explicit_arg)

                    # if the action expect exactly one argument, we've
                    # successfully matched the option; exit the loop
                    elif arg_count == 1:
                        stop = start_index + 1
                        args = [explicit_arg]
                        action_tuples.append((action, args, option_string))
                        break

                    # error if a double-dash option did not use the
                    # explicit argument
                    else:
                        msg = _('ignored explicit argument %r')
                        raise ArgumentError(action, msg % explicit_arg)

                # if there is no explicit argument, try to match the
                # optional's string arguments with the following strings
                # if successful, exit the loop
                else:
                    start = start_index + 1
                    selected_patterns = arg_strings_pattern[start:]
                    arg_count = match_argument(action, selected_patterns)
                    stop = start + arg_count
                    args = arg_strings[start:stop]
                    action_tuples.append((action, args, option_string))
                    break

            # add the Optional to the list and return the index at which
            # the Optional's string args stopped
            assert action_tuples
            for action, args, option_string in action_tuples:
                take_action(action, args, option_string)
            return stop

        # the list of Positionals left to be parsed; this is modified
        # by consume_positionals()
        positionals = self._get_positional_actions()

        # function to convert arg_strings into positional actions
        def consume_positionals(start_index):
            # match as many Positionals as possible
            match_partial = self._match_arguments_partial
            selected_pattern = arg_strings_pattern[start_index:]
            arg_counts = match_partial(positionals, selected_pattern)

            # slice off the appropriate arg strings for each Positional
            # and add the Positional and its args to the list
            for action, arg_count in zip(positionals, arg_counts):
                args = arg_strings[start_index: start_index + arg_count]
                start_index += arg_count
                take_action(action, args)

            # slice off the Positionals that we just parsed and return the
            # index at which the Positionals' string args stopped
            positionals[:] = positionals[len(arg_counts):]
            return start_index

        # consume Positionals and Optionals alternately, until we have
        # passed the last option string
        extras = []
        start_index = 0
        if option_string_indices:
            max_option_string_index = max(option_string_indices)
        else:
            max_option_string_index = -1
        while start_index <= max_option_string_index:
            # consume any Positionals preceding the next option
            next_option_string_index = min([
                index
                for index in option_string_indices
                if index >= start_index])
            if start_index != next_option_string_index:
                positionals_end_index = consume_positionals(start_index)

                # only try to parse the next optional if we didn't consume
                # the option string during the positionals parsing
                if positionals_end_index > start_index:
                    start_index = positionals_end_index
                    continue
                else:
                    start_index = positionals_end_index

            # if we consumed all the positionals we could and we're not
            # at the index of an option string, there were extra arguments
            if start_index not in option_string_indices:
                strings = arg_strings[start_index:next_option_string_index]
                extras.extend(strings)
                start_index = next_option_string_index

            # consume the next optional and any arguments for it
            start_index = consume_optional(start_index)

        # consume any positionals following the last Optional
        stop_index = consume_positionals(start_index)

        # if we didn't consume all the argument strings, there were extras
        extras.extend(arg_strings[stop_index:])

        # if we didn't use all the Positional objects, there were too few
        # arg strings supplied.
        if positionals:
            self.error(_('too few arguments'))

        # make sure all required actions were present
        for action in self._actions:
            if action.required:
                if action not in seen_actions:
                    name = _get_action_name(action)
                    self.error(_('argument %s is required') % name)

        # make sure all required groups had one option present
        for group in self._mutually_exclusive_groups:
            if group.required:
                for action in group._group_actions:
                    if action in seen_non_default_actions:
                        break

                # if no actions were used, report the error
                else:
                    names = [_get_action_name(action)
                             for action in group._group_actions
                             if action.help is not SUPPRESS]
                    msg = _('one of the arguments %s is required')
                    self.error(msg % ' '.join(names))

        # return the updated namespace and the extra arguments
        return namespace, extras
def _read_args_from_files(self, arg_strings):
# expand arguments referencing files
new_arg_strings = []
for arg_string in arg_strings:
# for regular arguments, just add them back into the list
if arg_string[0] not in self.fromfile_prefix_chars:
new_arg_strings.append(arg_string)
# replace arguments referencing files with the file content
else:
try:
args_file = open(arg_string[1:])
try:
arg_strings = []
for arg_line in args_file.read().splitlines():
for arg in self.convert_arg_line_to_args(arg_line):
arg_strings.append(arg)
arg_strings = self._read_args_from_files(arg_strings)
new_arg_strings.extend(arg_strings)
finally:
args_file.close()
except IOError:
err = _sys.exc_info()[1]
self.error(str(err))
# return the modified argument list
return new_arg_strings
    def convert_arg_line_to_args(self, arg_line):
        # Hook for subclasses: by default each line of an arguments file
        # yields exactly one argument (override to e.g. split on spaces).
        return [arg_line]
    def _match_argument(self, action, arg_strings_pattern):
        """Return how many leading arg strings of the pattern this action
        consumes; raise ArgumentError when the pattern cannot match."""
        # match the pattern for this action to the arg strings
        nargs_pattern = self._get_nargs_pattern(action)
        match = _re.match(nargs_pattern, arg_strings_pattern)

        # raise an exception if we weren't able to find a match
        if match is None:
            nargs_errors = {
                None: _('expected one argument'),
                OPTIONAL: _('expected at most one argument'),
                ONE_OR_MORE: _('expected at least one argument'),
            }
            default = _('expected %s argument(s)') % action.nargs
            msg = nargs_errors.get(action.nargs, default)
            raise ArgumentError(action, msg)

        # return the number of arguments matched
        return len(match.group(1))
def _match_arguments_partial(self, actions, arg_strings_pattern):
# progressively shorten the actions list by slicing off the
# final actions until we find a match
result = []
for i in range(len(actions), 0, -1):
actions_slice = actions[:i]
pattern = ''.join([self._get_nargs_pattern(action)
for action in actions_slice])
match = _re.match(pattern, arg_strings_pattern)
if match is not None:
result.extend([len(string) for string in match.groups()])
break
# return the list of arg string counts
return result
    def _parse_optional(self, arg_string):
        """Classify *arg_string*.

        Returns None when the string should be treated as a positional;
        otherwise an ``(action, option_string, explicit_arg)`` tuple, where
        *action* is None for a string that looks like an option but is
        unknown to this parser (possibly valid in a subparser).
        """
        # if it's an empty string, it was meant to be a positional
        if not arg_string:
            return None

        # if it doesn't start with a prefix, it was meant to be positional
        if not arg_string[0] in self.prefix_chars:
            return None

        # if the option string is present in the parser, return the action
        if arg_string in self._option_string_actions:
            action = self._option_string_actions[arg_string]
            return action, arg_string, None

        # if it's just a single character, it was meant to be positional
        if len(arg_string) == 1:
            return None

        # if the option string before the "=" is present, return the action
        if '=' in arg_string:
            option_string, explicit_arg = arg_string.split('=', 1)
            if option_string in self._option_string_actions:
                action = self._option_string_actions[option_string]
                return action, option_string, explicit_arg

        # search through all possible prefixes of the option string
        # and all actions in the parser for possible interpretations
        option_tuples = self._get_option_tuples(arg_string)

        # if multiple actions match, the option string was ambiguous
        if len(option_tuples) > 1:
            options = ', '.join([option_string
                for action, option_string, explicit_arg in option_tuples])
            tup = arg_string, options
            self.error(_('ambiguous option: %s could match %s') % tup)

        # if exactly one action matched, this segmentation is good,
        # so return the parsed action
        elif len(option_tuples) == 1:
            option_tuple, = option_tuples
            return option_tuple

        # if it was not found as an option, but it looks like a negative
        # number, it was meant to be positional
        # unless there are negative-number-like options
        if self._negative_number_matcher.match(arg_string):
            if not self._has_negative_number_optionals:
                return None

        # if it contains a space, it was meant to be a positional
        if ' ' in arg_string:
            return None

        # it was meant to be an optional but there is no such option
        # in this parser (though it might be a valid option in a subparser)
        return None, arg_string, None
    def _get_option_tuples(self, option_string):
        """Return all (action, option_string, explicit_arg) interpretations
        of an abbreviated option string.

        Only reached from _parse_optional() after the length-1 check, so
        ``option_string[1]`` is always safe to index.
        """
        result = []

        # option strings starting with two prefix characters are only
        # split at the '='
        chars = self.prefix_chars
        if option_string[0] in chars and option_string[1] in chars:
            if '=' in option_string:
                option_prefix, explicit_arg = option_string.split('=', 1)
            else:
                option_prefix = option_string
                explicit_arg = None
            for option_string in self._option_string_actions:
                if option_string.startswith(option_prefix):
                    action = self._option_string_actions[option_string]
                    tup = action, option_string, explicit_arg
                    result.append(tup)

        # single character options can be concatenated with their arguments
        # but multiple character options always have to have their argument
        # separate
        elif option_string[0] in chars and option_string[1] not in chars:
            option_prefix = option_string
            explicit_arg = None
            short_option_prefix = option_string[:2]
            short_explicit_arg = option_string[2:]
            for option_string in self._option_string_actions:
                if option_string == short_option_prefix:
                    action = self._option_string_actions[option_string]
                    tup = action, option_string, short_explicit_arg
                    result.append(tup)
                elif option_string.startswith(option_prefix):
                    action = self._option_string_actions[option_string]
                    tup = action, option_string, explicit_arg
                    result.append(tup)

        # shouldn't ever get here
        else:
            self.error(_('unexpected option string: %s') % option_string)

        # return the collected option tuples
        return result
    def _get_nargs_pattern(self, action):
        """Build the regex fragment that matches this action's arguments
        against the O/A/- pattern string built in _parse_known_args()."""
        # in all examples below, we have to allow for '--' args
        # which are represented as '-' in the pattern
        nargs = action.nargs

        # the default (None) is assumed to be a single argument
        if nargs is None:
            nargs_pattern = '(-*A-*)'
        # allow zero or one arguments
        elif nargs == OPTIONAL:
            nargs_pattern = '(-*A?-*)'
        # allow zero or more arguments
        elif nargs == ZERO_OR_MORE:
            nargs_pattern = '(-*[A-]*)'
        # allow one or more arguments
        elif nargs == ONE_OR_MORE:
            nargs_pattern = '(-*A[A-]*)'
        # allow any number of options or arguments
        elif nargs == REMAINDER:
            nargs_pattern = '([-AO]*)'
        # allow one argument followed by any number of options or arguments
        elif nargs == PARSER:
            nargs_pattern = '(-*A[-AO]*)'
        # all others should be integers
        else:
            nargs_pattern = '(-*%s-*)' % '-*'.join('A' * nargs)

        # if this is an optional action, -- is not allowed
        if action.option_strings:
            nargs_pattern = nargs_pattern.replace('-*', '')
            nargs_pattern = nargs_pattern.replace('-', '')

        # return the pattern
        return nargs_pattern
# ========================
# Value conversion methods
# ========================
    def _get_values(self, action, arg_strings):
        """Convert arg_strings according to action.nargs and action.type,
        validating against action.choices where appropriate."""
        # for everything but PARSER args, strip out '--'
        if action.nargs not in [PARSER, REMAINDER]:
            arg_strings = [s for s in arg_strings if s != '--']

        # optional argument produces a default when not present
        if not arg_strings and action.nargs == OPTIONAL:
            if action.option_strings:
                value = action.const
            else:
                value = action.default
            if isinstance(value, basestring):
                value = self._get_value(action, value)
                self._check_value(action, value)

        # when nargs='*' on a positional, if there were no command-line
        # args, use the default if it is anything other than None
        elif (not arg_strings and action.nargs == ZERO_OR_MORE and
              not action.option_strings):
            if action.default is not None:
                value = action.default
            else:
                value = arg_strings
            self._check_value(action, value)

        # single argument or optional argument produces a single value
        elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]:
            arg_string, = arg_strings
            value = self._get_value(action, arg_string)
            self._check_value(action, value)

        # REMAINDER arguments convert all values, checking none
        elif action.nargs == REMAINDER:
            value = [self._get_value(action, v) for v in arg_strings]

        # PARSER arguments convert all values, but check only the first
        elif action.nargs == PARSER:
            value = [self._get_value(action, v) for v in arg_strings]
            self._check_value(action, value[0])

        # all other types of nargs produce a list
        else:
            value = [self._get_value(action, v) for v in arg_strings]
            for v in value:
                self._check_value(action, v)

        # return the converted value
        return value
def _get_value(self, action, arg_string):
type_func = self._registry_get('type', action.type, action.type)
if not _callable(type_func):
msg = _('%r is not callable')
raise ArgumentError(action, msg % type_func)
# convert the value to the appropriate type
try:
result = type_func(arg_string)
# ArgumentTypeErrors indicate errors
except ArgumentTypeError:
name = getattr(action.type, '__name__', repr(action.type))
msg = str(_sys.exc_info()[1])
raise ArgumentError(action, msg)
# TypeErrors or ValueErrors also indicate errors
except (TypeError, ValueError):
name = getattr(action.type, '__name__', repr(action.type))
msg = _('invalid %s value: %r')
raise ArgumentError(action, msg % (name, arg_string))
# return the converted value
return result
def _check_value(self, action, value):
# converted value must be one of the choices (if specified)
if action.choices is not None and value not in action.choices:
tup = value, ', '.join(map(repr, action.choices))
msg = _('invalid choice: %r (choose from %s)') % tup
raise ArgumentError(action, msg)
# =======================
# Help-formatting methods
# =======================
def format_usage(self):
formatter = self._get_formatter()
formatter.add_usage(self.usage, self._actions,
self._mutually_exclusive_groups)
return formatter.format_help()
def format_help(self):
formatter = self._get_formatter()
# usage
formatter.add_usage(self.usage, self._actions,
self._mutually_exclusive_groups)
# description
formatter.add_text(self.description)
# positionals, optionals and user-defined groups
for action_group in self._action_groups:
formatter.start_section(action_group.title)
formatter.add_text(action_group.description)
formatter.add_arguments(action_group._group_actions)
formatter.end_section()
# epilog
formatter.add_text(self.epilog)
# determine help from format above
return formatter.format_help()
def format_version(self):
import warnings
warnings.warn(
'The format_version method is deprecated -- the "version" '
'argument to ArgumentParser is no longer supported.',
DeprecationWarning)
formatter = self._get_formatter()
formatter.add_text(self.version)
return formatter.format_help()
    def _get_formatter(self):
        # A fresh formatter per call; prog is baked in for usage lines.
        return self.formatter_class(prog=self.prog)
# =====================
# Help-printing methods
# =====================
    def print_usage(self, file=None):
        """Write the usage message to *file* (default: sys.stdout)."""
        if file is None:
            file = _sys.stdout
        self._print_message(self.format_usage(), file)
    def print_help(self, file=None):
        """Write the full help message to *file* (default: sys.stdout)."""
        if file is None:
            file = _sys.stdout
        self._print_message(self.format_help(), file)
    def print_version(self, file=None):
        """Deprecated: write the version message to *file*."""
        import warnings
        warnings.warn(
            'The print_version method is deprecated -- the "version" '
            'argument to ArgumentParser is no longer supported.',
            DeprecationWarning)
        self._print_message(self.format_version(), file)
def _print_message(self, message, file=None):
if message:
if file is None:
file = _sys.stderr
file.write(message)
# ===============
# Exiting methods
# ===============
    def exit(self, status=0, message=None):
        """Terminate the process, optionally writing *message* to stderr."""
        if message:
            self._print_message(message, _sys.stderr)
        _sys.exit(status)
    def error(self, message):
        """error(message: string)

        Prints a usage message incorporating the message to stderr and
        exits with status 2.

        If you override this in a subclass, it should not return -- it
        should either exit or raise an exception.
        """
        self.print_usage(_sys.stderr)
        self.exit(2, _('%s: error: %s\n') % (self.prog, message))
| mit |
dc3-plaso/plaso | plaso/cli/helpers/manager.py | 1 | 3543 | # -*- coding: utf-8 -*-
"""The CLI arguments helper manager objects."""
from plaso.lib import errors
class ArgumentHelperManager(object):
  """Class that implements the CLI argument helper manager."""

  # Maps lower-cased helper names to registered helper classes.
  _helper_classes = {}

  @classmethod
  def AddCommandLineArguments(
      cls, argument_group, argument_category=None, module_list=None):
    """Adds command line arguments to a configuration object.

    Args:
      argument_group (argparse._ArgumentGroup|argparse.ArgumentParser):
          argparse group.
      argument_category (Optional[str]): category of helpers to apply to
          the group, such as storage, output, where None will apply the
          arguments to all helpers. The category can be used to add arguments
          to a specific group of registered helpers.
      module_list (Optional[list[str]]): names of argument helpers to apply,
          where None will apply the arguments to all helpers.
    """
    # Process the helper classes in alphabetical order this is needed to
    # keep the argument order consistent.
    for _, helper in sorted(cls._helper_classes.items()):
      if argument_category and helper.CATEGORY != argument_category:
        continue
      if module_list and helper.NAME not in module_list:
        continue
      helper.AddArguments(argument_group)

  @classmethod
  def DeregisterHelper(cls, helper_class):
    """Deregisters a helper class.

    The helper classes are identified based on their lower case name.

    Args:
      helper_class (type): class object of the argument helper.

    Raises:
      KeyError: if helper class is not set for the corresponding name.
    """
    helper_name = helper_class.NAME.lower()
    if helper_name not in cls._helper_classes:
      raise KeyError(u'Helper class not set for name: {0:s}.'.format(
          helper_class.NAME))

    del cls._helper_classes[helper_name]

  @classmethod
  def GetHelperNames(cls):
    """Retrieves the registered argument helper names.

    Returns:
      list[str]: sorted argument helper names.
    """
    return sorted(cls._helper_classes.keys())

  @classmethod
  def ParseOptions(cls, options, config_object):
    """Parses and validates arguments using the appropriate helpers.

    Helpers are applied in alphabetical order for deterministic behavior,
    consistent with AddCommandLineArguments.

    Args:
      options (argparse.Namespace): parser options.
      config_object (object): object to be configured by an argument helper.
    """
    # Fixed: removed a redundant iter() wrapper and made the iteration
    # order deterministic (sorted) to match AddCommandLineArguments.
    for _, helper in sorted(cls._helper_classes.items()):
      try:
        helper.ParseOptions(options, config_object)
      except errors.BadConfigObject:
        # The helper does not apply to this type of configuration object.
        pass

  @classmethod
  def RegisterHelper(cls, helper_class):
    """Registers a helper class.

    The helper classes are identified based on their lower case name.

    Args:
      helper_class (type): class object of the argument helper.

    Raises:
      KeyError: if helper class is already set for the corresponding name.
    """
    helper_name = helper_class.NAME.lower()
    if helper_name in cls._helper_classes:
      raise KeyError(u'Helper class already set for name: {0:s}.'.format(
          helper_class.NAME))

    cls._helper_classes[helper_name] = helper_class

  @classmethod
  def RegisterHelpers(cls, helper_classes):
    """Registers helper classes.

    The helper classes are identified based on their lower case name.

    Args:
      helper_classes (list[type]): class objects of the argument helpers.

    Raises:
      KeyError: if helper class is already set for the corresponding name.
    """
    for helper_class in helper_classes:
      cls.RegisterHelper(helper_class)
| apache-2.0 |
ashishnitinpatil/vnitstudnotifs | django/contrib/gis/db/backends/postgis/adapter.py | 222 | 1557 | """
This object provides quoting for GEOS geometries into PostgreSQL/PostGIS.
"""
from __future__ import unicode_literals
from psycopg2 import Binary
from psycopg2.extensions import ISQLQuote
class PostGISAdapter(object):
    """psycopg2 adapter that quotes GEOS geometries as
    ``ST_GeomFromEWKB(...)`` expressions for PostgreSQL/PostGIS."""

    def __init__(self, geom):
        "Initializes on the geometry."
        # Getting the WKB (in string form, to allow easy pickling of
        # the adaptor) and the SRID from the geometry.
        self.ewkb = bytes(geom.ewkb)
        self.srid = geom.srid
        self._adapter = Binary(self.ewkb)

    def __conform__(self, proto):
        # Does the given protocol conform to what Psycopg2 expects?
        if proto == ISQLQuote:
            return self
        else:
            raise Exception('Error implementing psycopg2 protocol. Is psycopg2 installed?')

    def __eq__(self, other):
        # NOTE(review): no __ne__ or __hash__ defined; under Python 2,
        # ``!=`` falls back to identity -- confirm callers only use ==.
        if not isinstance(other, PostGISAdapter):
            return False
        return (self.ewkb == other.ewkb) and (self.srid == other.srid)

    def __str__(self):
        return self.getquoted()

    def prepare(self, conn):
        """
        This method allows escaping the binary in the style required by the
        server's `standard_conforming_string` setting.
        """
        # Delegates to the underlying psycopg2 Binary adapter.
        self._adapter.prepare(conn)

    def getquoted(self):
        "Returns a properly quoted string for use in PostgreSQL/PostGIS."
        # psycopg will figure out whether to use E'\\000' or '\000'
        return str('ST_GeomFromEWKB(%s)' % self._adapter.getquoted().decode())

    def prepare_database_save(self, unused):
        # Django model-save hook: the adapter itself is the value to store.
        return self
| bsd-3-clause |
dgarciam/Sick-Beard | sickbeard/ui.py | 18 | 5537 | # Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import datetime
import cherrypy
import sickbeard
# Notification type identifiers used by Notification/Notifications below.
MESSAGE = 'notice'
ERROR = 'error'
class Notifications(object):
    """
    A queue of Notification objects, split into regular messages and errors.
    """
    def __init__(self):
        self._messages = []
        self._errors = []

    def message(self, title, message=''):
        """
        Queue a regular notification.

        title: The title of the notification
        message: The message portion of the notification
        """
        self._messages.append(Notification(title, message, MESSAGE))

    def error(self, title, message=''):
        """
        Queue an error notification.

        title: The title of the notification
        message: The message portion of the notification
        """
        self._errors.append(Notification(title, message, ERROR))

    def get_notifications(self):
        """
        Return all notifications not yet shown to the current client,
        marking them as seen; expired notifications are dropped first.

        Returns: A list of Notification objects
        """
        # drop timed-out notifications before reporting anything
        self._errors = [e for e in self._errors if not e.is_expired()]
        self._messages = [m for m in self._messages if not m.is_expired()]

        # hand back only what this client hasn't seen yet (errors first)
        unseen = []
        for notification in self._errors + self._messages:
            if notification.is_new():
                unseen.append(notification.see())
        return unseen
# static notification queue object
# Module-level singleton shared by all web UI request handlers.
notifications = Notifications()
class Notification(object):
    """
    A single notification: remembers when it was created, which client IPs
    have already seen it, and how long it remains valid.
    """
    def __init__(self, title, message='', type=None, timeout=None):
        self.title = title
        self.message = message
        self._when = datetime.datetime.now()
        self._seen = []
        # fall back to a plain message type and a one-minute lifetime
        self.type = type or MESSAGE
        self._timeout = timeout or datetime.timedelta(minutes=1)

    def is_new(self):
        """
        True while the requesting client (identified by IP address) has not
        yet been shown this notification.
        """
        return cherrypy.request.remote.ip not in self._seen

    def is_expired(self):
        """
        True once the notification is older than its timeout.
        """
        age = datetime.datetime.now() - self._when
        return age > self._timeout

    def see(self):
        """
        Record that the current client IP has seen this notification and
        return self.
        """
        self._seen.append(cherrypy.request.remote.ip)
        return self
class ProgressIndicator():
    """Simple progress record: a completion percentage plus a status dict."""

    def __init__(self, percentComplete=0, currentStatus=None):
        """Initialize the indicator.

        percentComplete: integer 0-100
        currentStatus: dict with at least a 'title' key; a fresh dict is
            created per instance when omitted.
        """
        # Fix: the old default argument currentStatus={'title': ''} was a
        # single shared dict, so every indicator created with the default
        # mutated the same object.
        if currentStatus is None:
            currentStatus = {'title': ''}
        self.percentComplete = percentComplete
        self.currentStatus = currentStatus
class ProgressIndicators():
    """Registry mapping a task name to its list of progress indicators."""

    _pi = {'massUpdate': [],
           'massAdd': [],
           'dailyUpdate': []
           }

    @staticmethod
    def getIndicator(name):
        """Return the live indicator list for *name*, pruning finished ones.

        Unknown names yield an empty list. Indicators whose
        percentComplete() reports 100 are removed in place.
        """
        if name not in ProgressIndicators._pi:
            return []
        # Fix: iterate over a copy. The old code removed elements from the
        # list it was iterating, which skips the element following each
        # removal and leaves finished indicators behind when two complete
        # back to back.
        for curPI in ProgressIndicators._pi[name][:]:
            if curPI is not None and curPI.percentComplete() == 100:
                ProgressIndicators._pi[name].remove(curPI)
        # return the list of progress indicators associated with this name
        return ProgressIndicators._pi[name]

    @staticmethod
    def setIndicator(name, indicator):
        """Append *indicator* to the list registered under *name*."""
        ProgressIndicators._pi[name].append(indicator)
class QueueProgressIndicator():
    """
    Reports how far a named group of queue items has progressed, for the UI.
    """

    def __init__(self, name, queueItemList):
        self.queueItemList = queueItemList
        self.name = name

    def numTotal(self):
        """Total number of items being tracked."""
        return len(self.queueItemList)

    def numFinished(self):
        """Number of tracked items that have left the queue."""
        return sum(1 for item in self.queueItemList if not item.isInQueue())

    def numRemaining(self):
        """Number of tracked items still waiting in the queue."""
        return sum(1 for item in self.queueItemList if item.isInQueue())

    def nextName(self):
        """Name of the first tracked item found in the show queue."""
        candidates = [sickbeard.showQueueScheduler.action.currentItem] + sickbeard.showQueueScheduler.action.queue  # @UndefinedVariable
        for candidate in candidates:
            if candidate in self.queueItemList:
                return candidate.name
        return "Unknown"

    def percentComplete(self):
        """Finished items as an integer percentage of the total (0 if empty)."""
        total = self.numTotal()
        if total == 0:
            return 0
        return int(float(self.numFinished()) / float(total) * 100)
class LoadingTVShow():
    """Placeholder for a show whose data is still being loaded from disk."""

    def __init__(self, dir):
        # The real show object is filled in once loading completes.
        self.show = None
        self.dir = dir
| gpl-3.0 |
ryanmcgrath/twython | tests/test_auth.py | 10 | 3450 | from twython import Twython, TwythonError, TwythonAuthError
from .config import app_key, app_secret, screen_name, unittest
class TwythonAuthTestCase(unittest.TestCase):
    """Exercises Twython's OAuth 1 and OAuth 2 authentication flows.

    NOTE(review): every test method is decorated with @unittest.skip, so
    this suite currently runs nothing; the tests hit the live Twitter API
    and are awaiting an update.
    """

    def setUp(self):
        # Clients with the real app credentials (OAuth 1 and OAuth 2) plus
        # deliberately broken ones used to assert the failure paths.
        self.api = Twython(app_key, app_secret)
        self.bad_api = Twython('BAD_APP_KEY', 'BAD_APP_SECRET')
        self.bad_api_invalid_tokens = Twython('BAD_APP_KEY', 'BAD_APP_SECRET',
                                              'BAD_OT', 'BAD_OTS')
        self.oauth2_api = Twython(app_key, app_secret, oauth_version=2)
        self.oauth2_bad_api = Twython('BAD_APP_KEY', 'BAD_APP_SECRET',
                                      oauth_version=2)

    @unittest.skip('skipping non-updated test')
    def test_get_authentication_tokens(self):
        """Test getting authentication tokens works"""
        self.api.get_authentication_tokens(callback_url='http://google.com/',
                                           force_login=True,
                                           screen_name=screen_name)

    @unittest.skip('skipping non-updated test')
    def test_get_authentication_tokens_bad_tokens(self):
        """Test getting authentication tokens with bad tokens
        raises TwythonAuthError"""
        self.assertRaises(TwythonAuthError, self.bad_api.get_authentication_tokens,
                          callback_url='http://google.com/')

    @unittest.skip('skipping non-updated test')
    def test_get_authorized_tokens_bad_tokens(self):
        """Test getting final tokens fails with wrong tokens"""
        self.assertRaises(TwythonError, self.bad_api.get_authorized_tokens,
                          'BAD_OAUTH_VERIFIER')

    @unittest.skip('skipping non-updated test')
    def test_get_authorized_tokens_invalid_or_expired_tokens(self):
        """Test getting final token fails when invalid or expired tokens have been passed"""
        self.assertRaises(TwythonError, self.bad_api_invalid_tokens.get_authorized_tokens,
                          'BAD_OAUTH_VERIFIER')

    @unittest.skip('skipping non-updated test')
    def test_get_authentication_tokens_raises_error_when_oauth2(self):
        """Test when API is set for OAuth 2, get_authentication_tokens raises
        a TwythonError"""
        # OAuth 2 (app-only) flows have no request-token step.
        self.assertRaises(TwythonError, self.oauth2_api.get_authentication_tokens)

    @unittest.skip('skipping non-updated test')
    def test_get_authorization_tokens_raises_error_when_oauth2(self):
        """Test when API is set for OAuth 2, get_authorized_tokens raises
        a TwythonError"""
        self.assertRaises(TwythonError, self.oauth2_api.get_authorized_tokens,
                          'BAD_OAUTH_VERIFIER')

    @unittest.skip('skipping non-updated test')
    def test_obtain_access_token(self):
        """Test obtaining an Application Only OAuth 2 access token succeeds"""
        self.oauth2_api.obtain_access_token()

    @unittest.skip('skipping non-updated test')
    def test_obtain_access_token_bad_tokens(self):
        """Test obtaining an Application Only OAuth 2 access token using bad app tokens fails"""
        self.assertRaises(TwythonAuthError,
                          self.oauth2_bad_api.obtain_access_token)

    @unittest.skip('skipping non-updated test')
    def test_obtain_access_token_raises_error_when_oauth1(self):
        """Test when API is set for OAuth 1, obtain_access_token raises a
        TwythonError"""
        # obtain_access_token is the OAuth 2 (app-only) entry point.
        self.assertRaises(TwythonError, self.api.obtain_access_token)
| mit |
aronsky/home-assistant | homeassistant/components/camera/xiaomi.py | 2 | 5498 | """
This component provides support for Xiaomi Cameras.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/camera.xiaomi/
"""
import asyncio
import logging
import voluptuous as vol
from homeassistant.components.camera import Camera, PLATFORM_SCHEMA
from homeassistant.components.ffmpeg import DATA_FFMPEG
from homeassistant.const import (CONF_HOST, CONF_NAME, CONF_PATH,
CONF_PASSWORD, CONF_PORT, CONF_USERNAME)
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_aiohttp_proxy_stream
DEPENDENCIES = ['ffmpeg']
_LOGGER = logging.getLogger(__name__)
# Defaults for optional configuration keys.
DEFAULT_BRAND = 'Xiaomi Home Camera'
DEFAULT_PATH = '/media/mmcblk0p1/record'
DEFAULT_PORT = 21
DEFAULT_USERNAME = 'root'
# Configuration keys specific to this platform.
CONF_FFMPEG_ARGUMENTS = 'ffmpeg_arguments'
CONF_MODEL = 'model'
# The two supported hardware models.
MODEL_YI = 'yi'
MODEL_XIAOFANG = 'xiaofang'
# Voluptuous schema validating this platform's configuration entry.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_NAME): cv.string,
    vol.Required(CONF_HOST): cv.string,
    # Only the two supported hardware models are accepted.
    vol.Required(CONF_MODEL): vol.Any(MODEL_YI,
                                      MODEL_XIAOFANG),
    vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.string,
    vol.Optional(CONF_PATH, default=DEFAULT_PATH): cv.string,
    vol.Optional(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string,
    vol.Required(CONF_PASSWORD): cv.string,
    vol.Optional(CONF_FFMPEG_ARGUMENTS): cv.string
})
async def async_setup_platform(hass,
                               config,
                               async_add_entities,
                               discovery_info=None):
    """Set up a Xiaomi Camera.

    *config* has already been validated against PLATFORM_SCHEMA; one
    camera entity is created per configuration entry.
    """
    _LOGGER.debug('Received configuration for model %s', config[CONF_MODEL])
    async_add_entities([XiaomiCamera(hass, config)])
class XiaomiCamera(Camera):
    """Define an implementation of a Xiaomi Camera.

    The camera records clips to local storage exposed over its built-in
    FTP server; stills and MJPEG streams are produced by running ffmpeg
    against the newest clip.
    """

    def __init__(self, hass, config):
        """Initialize from the validated platform configuration."""
        super().__init__()
        self._extra_arguments = config.get(CONF_FFMPEG_ARGUMENTS)
        self._last_image = None  # cached JPEG bytes of the last still
        self._last_url = None    # FTP URL the cached still came from
        self._manager = hass.data[DATA_FFMPEG]
        self._name = config[CONF_NAME]
        self.host = config[CONF_HOST]
        self._model = config[CONF_MODEL]
        self.port = config[CONF_PORT]
        self.path = config[CONF_PATH]
        self.user = config[CONF_USERNAME]
        self.passwd = config[CONF_PASSWORD]

    @property
    def name(self):
        """Return the name of this camera."""
        return self._name

    @property
    def brand(self):
        """Return the camera brand."""
        return DEFAULT_BRAND

    @property
    def model(self):
        """Return the camera model."""
        return self._model

    def get_latest_video_url(self):
        """Retrieve the latest video file from the Xiaomi Camera FTP server.

        Returns an ftp:// URL string, or False when login fails, a path
        is missing, or no finished recording is available yet.
        """
        from ftplib import FTP, error_perm
        ftp = FTP(self.host)
        try:
            ftp.login(self.user, self.passwd)
        except error_perm as exc:
            _LOGGER.error('Camera login failed: %s', exc)
            return False
        try:
            ftp.cwd(self.path)
        except error_perm as exc:
            _LOGGER.error('Unable to find path: %s - %s', self.path, exc)
            return False
        # Recordings live in subfolders; entries containing a '.' are
        # ordinary files and are skipped.
        dirs = [d for d in ftp.nlst() if '.' not in d]
        if not dirs:
            _LOGGER.warning("There don't appear to be any folders")
            return False
        latest_dir = dirs[-1]
        try:
            ftp.cwd(latest_dir)
        except error_perm as exc:
            _LOGGER.error('Unable to find path: %s - %s', latest_dir, exc)
            return False
        if self._model == MODEL_XIAOFANG:
            # Xiaofang nests one more folder level per recording batch.
            dirs = [d for d in ftp.nlst() if '.' not in d]
            if not dirs:
                _LOGGER.warning("There don't appear to be any uploaded videos")
                return False
            # Keep latest_dir pointing at the deepest folder so the
            # empty-folder log below names the right one.
            latest_dir = dirs[-1]
            ftp.cwd(latest_dir)
        videos = [v for v in ftp.nlst() if '.tmp' not in v]
        if not videos:
            # Fix: latest_dir is now bound for both models; previously this
            # log line raised NameError for the 'yi' model.
            _LOGGER.info('Video folder "%s" is empty; delaying', latest_dir)
            return False
        if self._model == MODEL_XIAOFANG:
            # The newest file may still be recording; prefer the previous,
            # complete clip when one exists (fix: videos[-2] raised
            # IndexError when exactly one clip was present).
            video = videos[-2] if len(videos) > 1 else videos[-1]
        else:
            video = videos[-1]
        return 'ftp://{0}:{1}@{2}:{3}{4}/{5}'.format(
            self.user, self.passwd, self.host, self.port, ftp.pwd(), video)

    async def async_camera_image(self):
        """Return a still image response from the camera.

        A new frame is grabbed only when a newer clip URL appears;
        otherwise the cached frame is returned.
        """
        from haffmpeg import ImageFrame, IMAGE_JPEG
        url = await self.hass.async_add_job(self.get_latest_video_url)
        if url != self._last_url:
            ffmpeg = ImageFrame(self._manager.binary, loop=self.hass.loop)
            # shield() keeps the ffmpeg grab alive even if this request
            # is cancelled.
            self._last_image = await asyncio.shield(ffmpeg.get_image(
                url, output_format=IMAGE_JPEG,
                extra_cmd=self._extra_arguments), loop=self.hass.loop)
            self._last_url = url
        return self._last_image

    async def handle_async_mjpeg_stream(self, request):
        """Generate an HTTP MJPEG stream from the camera.

        NOTE(review): streams from self._last_url, which is only populated
        by a prior async_camera_image call -- confirm callers guarantee
        that ordering.
        """
        from haffmpeg import CameraMjpeg
        stream = CameraMjpeg(self._manager.binary, loop=self.hass.loop)
        await stream.open_camera(
            self._last_url, extra_cmd=self._extra_arguments)
        try:
            await async_aiohttp_proxy_stream(
                self.hass, request, stream,
                'multipart/x-mixed-replace;boundary=ffserver')
        finally:
            # Fix: always release the ffmpeg process, even when the client
            # disconnects mid-stream and the proxy raises.
            await stream.close()
| apache-2.0 |
wimleers/fileconveyor | fileconveyor/filter.py | 3 | 7838 | """filter.py Filter class for daemon"""
__author__ = "Wim Leers (work@wimleers.com)"
__version__ = "$Rev$"
__date__ = "$Date$"
__license__ = "GPL"
from sets import Set, ImmutableSet
import re
import types
import os
import os.path
import stat
# Define exceptions.
# FilterError is the root of the hierarchy; catch it to handle any
# filter-related failure in one place.
class FilterError(Exception): pass
# Base class for all condition-validation failures raised by
# Filter.set_conditions().
class InvalidConditionError(FilterError): pass
# Raised when set_conditions() is called with an empty condition dict.
class MissingConditionError(FilterError): pass
# One subclass per condition kind, so callers can react to a specific
# invalid condition.
class InvalidPathsConditionError(InvalidConditionError): pass
class InvalidExtensionsConditionError(InvalidConditionError): pass
class InvalidIgnoredDirsConditionError(InvalidConditionError): pass
class InvalidPatternConditionError(InvalidConditionError): pass
class InvalidSizeConditionError(InvalidConditionError): pass
# NOTE(review): MatchError is not raised anywhere in the visible code.
class MatchError(FilterError): pass
class Filter(object):
    """filter filepaths based on path, file extensions, ignored directories, file pattern and file size"""

    # NOTE(review): this module is Python 2 code (sets.Set, dict.has_key,
    # `raise E, "msg"`); keep it py2-compatible.
    # Complete set of condition names that set_conditions() accepts.
    valid_conditions = ImmutableSet(["paths", "extensions", "ignoredDirs", "pattern", "size"])
    # Both keys must appear inside a "size" condition dict.
    # "treshold" (sic) is the key name used throughout; the misspelling is
    # part of the public contract, keep it.
    required_sizeconditions = ImmutableSet(["conditionType", "treshold"])
    # Prevent forbidden characters in filepaths!
    # - Mac OS X: :
    # - Linux: /
    # - Windows: * " / \ [ ] : ; | = , < >
    # It's clear that if your filepaths are valid on Windows, they're valid
    # anywhere. So we go with that.
    forbidden_characters = {
        "paths" : '\*"\[\]:;\|=,<>', # / and \ are allowed
        "extensions" : '\*"/\\\[\]:;\|=,<>\.', # / and \ and . are disallowed
        "ignoredDirs" : '\*"/\\\[\]:;\|=,<>', # / and \ are disallowed
    }
    # Each condition value is a colon-separated list; these patterns
    # validate the whole list in a single match.
    patterns = {
        "paths" : re.compile('^(?:([^' + forbidden_characters["paths"] + ']+):)*[^' + forbidden_characters["paths"] + ']+$', re.UNICODE),
        "extensions" : re.compile('^(?:([^' + forbidden_characters["extensions"] + ']+):)*[^' + forbidden_characters["extensions"] + ']+$', re.UNICODE),
        "ignoredDirs" : re.compile('^(?:([^' + forbidden_characters["ignoredDirs"] + ']+):)*[^' + forbidden_characters["ignoredDirs"] + ']+$', re.UNICODE),
    }

    def __init__(self, conditions = None):
        """Create a Filter, optionally validating and storing *conditions*."""
        # matches() returns False until set_conditions() has succeeded.
        self.initialized = False
        self.conditions = {}
        # Precompiled regex for the "pattern" condition, set lazily.
        self.pattern = None
        if conditions is not None:
            self.set_conditions(conditions)

    def set_conditions(self, conditions):
        """Validate and then set the conditions of this Filter"""
        present_conditions = Set(conditions.keys())
        # Ensure all required conditions are set.
        if len(conditions) == 0:
            raise MissingConditionError("You must set at least one condition.")
        # Ensure only valid conditions are set.
        if len(present_conditions.difference(self.__class__.valid_conditions)):
            raise InvalidConditionError
        # Validate conditions. This may trigger exceptions, which should be
        # handled by the caller.
        self.__validate_conditions(conditions)
        # The conditions passed all validation tests: store it.
        self.conditions = conditions
        # Precompile the pattern condition, if there is one.
        if (self.conditions.has_key("pattern")):
            self.pattern = re.compile(self.conditions["pattern"], re.UNICODE)
        self.initialized = True
        return True

    def __validate_conditions(self, conditions):
        """Validate a given set of conditions"""
        # The paths condition must contain paths separated by colons.
        if conditions.has_key("paths"):
            if not self.__class__.patterns["paths"].match(conditions["paths"]):
                raise InvalidPathsConditionError
        # The extensions condition must contain extensions separated by colons.
        if conditions.has_key("extensions"):
            if not self.__class__.patterns["extensions"].match(conditions["extensions"]):
                raise InvalidExtensionsConditionError
        # The ignoredDirs condition must contain dirnames separated by colons.
        if conditions.has_key("ignoredDirs"):
            if not self.__class__.patterns["ignoredDirs"].match(conditions["ignoredDirs"]):
                raise InvalidIgnoredDirsConditionError
        # If a pattern condition is set, ensure that it's got a valid regular
        # expression.
        if conditions.has_key("pattern"):
            if conditions["pattern"] is None:
                raise InvalidPatternConditionError
            try:
                re.compile(conditions["pattern"], re.UNICODE)
            except re.error:
                raise InvalidPatternConditionError
        # If a size condition is set, ensure that it's got both a size
        # condition type and a treshold. And both of them must be valid.
        if conditions.has_key("size"):
            size = conditions["size"]
            if len(self.__class__.required_sizeconditions.difference(size.keys())):
                raise InvalidSizeConditionError, "The 'size' condition misses either of 'conditionType' and 'treshold'"
            if size["conditionType"] != "minimum" and size["conditionType"] != "maximum":
                raise InvalidSizeConditionError, "The 'size' condition has an invalid 'conditionType', valid values are 'maximum' and 'minimum'"
            try:
                # Coerce the treshold to int in place so matches() can
                # compare it directly against os.stat() sizes.
                size["treshold"] = int(size["treshold"])
            except ValueError:
                raise InvalidSizeConditionError, "The 'size' condition has an invalid 'treshold', only integer values are valid'"

    def matches(self, filepath, statfunc = os.stat, file_is_deleted = False):
        """Check if the given filepath matches the conditions of this Filter

        This function performs the different checks in an order that is
        optimized for speed: the conditions that are most likely to reduce
        the chance of a match are performed first.

        statfunc may be overridden (e.g. with a cached stat function);
        file_is_deleted skips the size check for files no longer on disk.
        """
        if not self.initialized:
            return False
        match = True
        (root, ext) = os.path.splitext(filepath)
        # Step 1: apply the paths condition.
        if match and self.conditions.has_key("paths"):
            # Append a trailing slash so "foo" only matches the directory
            # "foo/", not a filename prefix like "foobar".
            append_slash = lambda path: path + "/"
            paths = map(append_slash, self.conditions["paths"].split(":"))
            path_found = False
            for path in paths:
                if root.find(path) > -1:
                    path_found = True
                    break
            if not path_found:
                match = False
        # Step 2: apply the extensions condition.
        if match and self.conditions.has_key("extensions"):
            ext = ext.lstrip(".")
            if not ext in self.conditions["extensions"].split(":"):
                match = False
        # Step 3: apply the ignoredDirs condition.
        if match and self.conditions.has_key("ignoredDirs"):
            # A single shared path component is enough to reject the file.
            ignored_dirs = Set(self.conditions["ignoredDirs"].split(":"))
            dirs = Set(root.split(os.sep))
            if len(ignored_dirs.intersection(dirs)):
                match = False
        # Step 4: apply the pattern condition.
        if match and self.conditions.has_key("pattern"):
            if not self.pattern.match(filepath):
                match = False
        # Step 5: apply the size condition, except when file_is_deleted is
        # enabled.
        # (If a file is deleted, we can no longer check its size and therefor
        # we allow this to match.)
        if match and self.conditions.has_key("size") and not file_is_deleted:
            size = statfunc(filepath)[stat.ST_SIZE]
            condition_type = self.conditions["size"]["conditionType"]
            treshold = self.conditions["size"]["treshold"]
            # "minimum"/"maximum" are strict bounds: the size must be
            # strictly greater/less than the treshold.
            if condition_type == "minimum" and not treshold < size:
                match = False
            elif condition_type == "maximum" and not treshold > size:
                match = False
        return match
| unlicense |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.