repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
akintoey/django | tests/admin_inlines/admin.py | 293 | 5354 | from django import forms
from django.contrib import admin
from .models import (
Author, BinaryTree, CapoFamiglia, Chapter, ChildModel1, ChildModel2,
Consigliere, EditablePKBook, ExtraTerrestrial, Fashionista, Holder,
Holder2, Holder3, Holder4, Inner, Inner2, Inner3, Inner4Stacked,
Inner4Tabular, NonAutoPKBook, Novel, ParentModelWithCustomPk, Poll,
Profile, ProfileCollection, Question, ReadOnlyInline, ShoppingWeakness,
Sighting, SomeChildModel, SomeParentModel, SottoCapo, Title,
TitleCollection,
)
# Dedicated admin site instance used by the admin_inlines test suite.
site = admin.AdminSite(name="admin")
class BookInline(admin.TabularInline):
    # Inline over the auto-created M2M "through" model of Author.books.
    model = Author.books.through
# Inlines exercising models whose primary key is not an AutoField
# (non-auto PK) or is user-editable, in both tabular and stacked layouts.
class NonAutoPKBookTabularInline(admin.TabularInline):
    model = NonAutoPKBook


class NonAutoPKBookStackedInline(admin.StackedInline):
    model = NonAutoPKBook


class EditablePKBookTabularInline(admin.TabularInline):
    model = EditablePKBook


class EditablePKBookStackedInline(admin.StackedInline):
    model = EditablePKBook
class AuthorAdmin(admin.ModelAdmin):
    # Mixes the M2M through-model inline with the PK-variant inlines above.
    inlines = [BookInline,
               NonAutoPKBookTabularInline, NonAutoPKBookStackedInline,
               EditablePKBookTabularInline, EditablePKBookStackedInline]
class InnerInline(admin.StackedInline):
    model = Inner
    can_delete = False  # delete checkbox must not be rendered
    readonly_fields = ('readonly',)  # For bug #13174 tests.


class HolderAdmin(admin.ModelAdmin):
    # ModelAdmin-level media only; used to test admin/inline media merging.
    class Media:
        js = ('my_awesome_admin_scripts.js',)
class ReadOnlyInlineInline(admin.TabularInline):
    model = ReadOnlyInline
    readonly_fields = ['name']


# Inlines declaring their own Media, to test inline-level media collection.
class InnerInline2(admin.StackedInline):
    model = Inner2

    class Media:
        js = ('my_awesome_inline_scripts.js',)


class InnerInline3(admin.StackedInline):
    model = Inner3

    class Media:
        js = ('my_awesome_inline_scripts.js',)
class TitleForm(forms.ModelForm):
    """ModelForm that requires its two title fields to be identical."""

    def clean(self):
        """Validate the whole form: title1 and title2 must match."""
        data = self.cleaned_data
        if data.get("title1") != data.get("title2"):
            raise forms.ValidationError("The two titles must be the same")
        return data
class TitleInline(admin.TabularInline):
    model = Title
    form = TitleForm  # enforces that both titles match
    extra = 1
# Inlines with show_change_link enabled, for change-link rendering tests.
class Inner4StackedInline(admin.StackedInline):
    model = Inner4Stacked
    show_change_link = True


class Inner4TabularInline(admin.TabularInline):
    model = Inner4Tabular
    show_change_link = True


class Holder4Admin(admin.ModelAdmin):
    inlines = [Inner4StackedInline, Inner4TabularInline]
class InlineWeakness(admin.TabularInline):
    model = ShoppingWeakness
    extra = 1
class QuestionInline(admin.TabularInline):
    model = Question
    readonly_fields = ['call_me']

    def call_me(self, obj):
        # Callable readonly field; the string identifies which class served it.
        return 'Callable in QuestionInline'


class PollAdmin(admin.ModelAdmin):
    inlines = [QuestionInline]

    def call_me(self, obj):
        # Same-named callable on the ModelAdmin, to test lookup precedence.
        return 'Callable in PollAdmin'
class ChapterInline(admin.TabularInline):
    model = Chapter
    readonly_fields = ['call_me']

    def call_me(self, obj):
        # Callable readonly field defined only on the inline (no admin twin).
        return 'Callable in ChapterInline'


class NovelAdmin(admin.ModelAdmin):
    inlines = [ChapterInline]
# Two inlines for the same parent model (CapoFamiglia).
class ConsigliereInline(admin.TabularInline):
    model = Consigliere


class SottoCapoInline(admin.TabularInline):
    model = SottoCapo


class ProfileInline(admin.TabularInline):
    model = Profile
    extra = 1
# admin for #18433
class ChildModel1Inline(admin.TabularInline):
    model = ChildModel1


class ChildModel2Inline(admin.StackedInline):
    model = ChildModel2
# admin for #19425 and #18388
class BinaryTreeAdmin(admin.TabularInline):
    """Inline whose extra/max form counts shrink as children are added."""
    model = BinaryTree

    def get_extra(self, request, obj=None, **kwargs):
        """Offer two blank forms, minus any already-existing children."""
        base = 2
        return base - obj.binarytree_set.count() if obj else base

    def get_max_num(self, request, obj=None, **kwargs):
        """Cap the total number of child forms at three."""
        limit = 3
        return limit - obj.binarytree_set.count() if obj else limit
# admin for #19524
class SightingInline(admin.TabularInline):
    model = Sighting
# admin and form for #18263
class SomeChildModelForm(forms.ModelForm):
    class Meta:
        fields = '__all__'
        model = SomeChildModel
        widgets = {
            # 'position' is hidden from the user; rendered as a hidden input.
            'position': forms.HiddenInput,
        }


class SomeChildModelInline(admin.TabularInline):
    model = SomeChildModel
    form = SomeChildModelForm
site.register(TitleCollection, inlines=[TitleInline])
# Test bug #12561 and #12778
# only ModelAdmin media
site.register(Holder, HolderAdmin, inlines=[InnerInline])
# ModelAdmin and Inline media
site.register(Holder2, HolderAdmin, inlines=[InnerInline2])
# only Inline media
site.register(Holder3, inlines=[InnerInline3])
site.register(Poll, PollAdmin)
site.register(Novel, NovelAdmin)
site.register(Fashionista, inlines=[InlineWeakness])
site.register(Holder4, Holder4Admin)
site.register(Author, AuthorAdmin)
site.register(CapoFamiglia, inlines=[ConsigliereInline, SottoCapoInline, ReadOnlyInlineInline])
site.register(ProfileCollection, inlines=[ProfileInline])
site.register(ParentModelWithCustomPk, inlines=[ChildModel1Inline, ChildModel2Inline])
site.register(BinaryTree, inlines=[BinaryTreeAdmin])
site.register(ExtraTerrestrial, inlines=[SightingInline])
site.register(SomeParentModel, inlines=[SomeChildModelInline])
# Models registered without a custom ModelAdmin or inlines.
site.register([Question, Inner4Stacked, Inner4Tabular])
| bsd-3-clause |
bowang/tensorflow | tensorflow/contrib/estimator/python/estimator/dnn_test.py | 32 | 5283 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dnn.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
import numpy as np
import six
from tensorflow.contrib.estimator.python.estimator import dnn
from tensorflow.contrib.estimator.python.estimator import head as head_lib
from tensorflow.python.estimator.canned import dnn_testing_utils
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.estimator.export import export
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.feature_column import feature_column
from tensorflow.python.framework import ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
def _dnn_estimator_fn(weight_column=None, label_dimension=1, *args, **kwargs):
  """Returns a DNNEstimator that uses regression_head."""
  regression_head = head_lib.regression_head(
      weight_column=weight_column, label_dimension=label_dimension)
  return dnn.DNNEstimator(head=regression_head, *args, **kwargs)
# The three classes below reuse the shared regressor test suites from
# dnn_testing_utils, wiring them up to DNNEstimator via _dnn_estimator_fn.
class DNNEstimatorEvaluateTest(
    dnn_testing_utils.BaseDNNRegressorEvaluateTest, test.TestCase):

  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
    test.TestCase.__init__(self, methodName)
    dnn_testing_utils.BaseDNNRegressorEvaluateTest.__init__(
        self, _dnn_estimator_fn)


class DNNEstimatorPredictTest(
    dnn_testing_utils.BaseDNNRegressorPredictTest, test.TestCase):

  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
    test.TestCase.__init__(self, methodName)
    dnn_testing_utils.BaseDNNRegressorPredictTest.__init__(
        self, _dnn_estimator_fn)


class DNNEstimatorTrainTest(
    dnn_testing_utils.BaseDNNRegressorTrainTest, test.TestCase):

  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
    test.TestCase.__init__(self, methodName)
    dnn_testing_utils.BaseDNNRegressorTrainTest.__init__(
        self, _dnn_estimator_fn)
class DNNEstimatorIntegrationTest(test.TestCase):
  """End-to-end train/evaluate/predict/export flow for DNNEstimator."""

  def setUp(self):
    # Fresh model directory per test; removed in tearDown.
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      writer_cache.FileWriterCache.clear()
      shutil.rmtree(self._model_dir)

  def _test_complete_flow(
      self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
      label_dimension, batch_size):
    """Trains, evaluates, predicts and exports, checking basic invariants."""
    feature_columns = [
        feature_column.numeric_column('x', shape=(input_dimension,))]
    est = dnn.DNNEstimator(
        head=head_lib.regression_head(label_dimension=label_dimension),
        hidden_units=(2, 2),
        feature_columns=feature_columns,
        model_dir=self._model_dir)

    # TRAIN
    num_steps = 10
    est.train(train_input_fn, steps=num_steps)

    # EVALUATE
    scores = est.evaluate(eval_input_fn)
    self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
    self.assertIn('loss', six.iterkeys(scores))

    # PREDICT
    predictions = np.array([
        x[prediction_keys.PredictionKeys.PREDICTIONS]
        for x in est.predict(predict_input_fn)
    ])
    self.assertAllEqual((batch_size, label_dimension), predictions.shape)

    # EXPORT
    feature_spec = feature_column.make_parse_example_spec(feature_columns)
    serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
        feature_spec)
    export_dir = est.export_savedmodel(tempfile.mkdtemp(),
                                       serving_input_receiver_fn)
    self.assertTrue(gfile.Exists(export_dir))

  def test_numpy_input_fn(self):
    """Tests complete flow with numpy_input_fn."""
    label_dimension = 2
    batch_size = 10
    data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
    data = data.reshape(batch_size, label_dimension)
    # learn y = x
    train_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=data,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    eval_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=data,
        batch_size=batch_size,
        shuffle=False)
    predict_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        batch_size=batch_size,
        shuffle=False)
    self._test_complete_flow(
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        predict_input_fn=predict_input_fn,
        input_dimension=label_dimension,
        label_dimension=label_dimension,
        batch_size=batch_size)
# Run the test suite when executed as a script.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
atsolakid/edx-platform | common/djangoapps/enrollment/api.py | 11 | 14766 | """
Enrollment API for creating, updating, and deleting enrollments. Also provides access to enrollment information at a
course level, such as available course modes.
"""
from django.utils import importlib
import logging
from django.conf import settings
from django.core.cache import cache
from enrollment import errors
log = logging.getLogger(__name__)
DEFAULT_DATA_API = 'enrollment.data'
def get_enrollments(user_id):
    """Retrieve every course enrollment for the given user.

    Each entry describes how the user is enrolled in one course: creation
    time, mode, active flag, plus the course's details (course_id,
    enrollment/course start and end dates, available course modes, and
    whether the course is invite-only).

    Args:
        user_id (str): The username of the user to look up.

    Returns:
        A list of serializable enrollment dictionaries for the user.
    """
    data_api = _data_api()
    return data_api.get_course_enrollments(user_id)
def get_enrollment(user_id, course_id):
    """Retrieve one user's enrollment information for a specific course.

    Args:
        user_id (str): The user to get course enrollment information for.
        course_id (str): The course to get enrollment information for.

    Returns:
        A serializable dictionary of the course enrollment: creation time,
        mode, active flag, and the course's details (dates, course modes,
        invite_only flag).
    """
    data_api = _data_api()
    return data_api.get_course_enrollment(user_id, course_id)
def add_enrollment(user_id, course_id, mode='honor', is_active=True):
    """Enroll a user in a course.

    The requested mode is validated against the course's configured modes
    before the enrollment is created.

    Args:
        user_id (str): The user to enroll.
        course_id (str): The course to enroll the user in.
        mode (str): Enrollment mode slug, e.g. 'audit', 'honor', 'verified',
            'professional'. Defaults to 'honor'.
        is_active (bool): Whether the new enrollment starts active.
            Defaults to True.

    Returns:
        A serializable dictionary describing the new course enrollment.

    Raises:
        CourseModeNotFoundError: if ``mode`` is not offered by the course.
    """
    _validate_course_mode(course_id, mode)
    data_api = _data_api()
    return data_api.create_course_enrollment(user_id, course_id, mode, is_active)
def update_enrollment(user_id, course_id, mode=None, is_active=None, enrollment_attributes=None):
    """Update a course enrollment's mode and/or active status.

    Args:
        user_id (str): The user associated with the updated enrollment.
        course_id (str): The course associated with the updated enrollment.
        mode (str): The new course mode for this enrollment; validated
            against the course's available modes when given.
        is_active (bool): Sets whether the enrollment is active or not.
        enrollment_attributes (list): Optional attribute dicts
            (``namespace``/``name``/``value``) to store with the enrollment.

    Returns:
        A serializable dictionary representing the updated enrollment.

    Raises:
        EnrollmentNotFoundError: if no enrollment exists for user/course.
        CourseModeNotFoundError: if ``mode`` is not offered by the course.
    """
    if mode is not None:
        _validate_course_mode(course_id, mode)
    enrollment = _data_api().update_course_enrollment(user_id, course_id, mode=mode, is_active=is_active)
    if enrollment is None:
        msg = u"Course Enrollment not found for user {user} in course {course}".format(user=user_id, course=course_id)
        # logger.warn() is a deprecated alias of warning().
        log.warning(msg)
        raise errors.EnrollmentNotFoundError(msg)
    # Only reached when the enrollment update succeeded.
    if enrollment_attributes is not None:
        set_enrollment_attributes(user_id, course_id, enrollment_attributes)
    return enrollment
def get_course_enrollment_details(course_id, include_expired=False):
    """Get course enrollment details: course modes, enrollment dates, etc.

    Results are cached; the timeout comes from the
    ENROLLMENT_COURSE_DETAILS_CACHE_TIMEOUT setting (default 60 seconds).

    Args:
        course_id (str): The course to get enrollment information for.
        include_expired (bool): Whether expired course modes should be
            included in the returned data.

    Returns:
        A serializable dictionary of course enrollment information
        (course_id, enrollment/course start and end dates, course_modes,
        invite_only flag).
    """
    cache_key = u'enrollment.course.details.{course_id}.{include_expired}'.format(
        course_id=course_id,
        include_expired=include_expired
    )
    cached_enrollment_data = None
    try:
        cached_enrollment_data = cache.get(cache_key)
    except Exception:
        # The cache backend could raise an exception (for example, memcache keys that contain spaces)
        log.exception(u"Error occurred while retrieving course enrollment details from the cache")
    if cached_enrollment_data:
        log.info(u"Get enrollment data for course %s (cached)", course_id)
        return cached_enrollment_data
    # Cache miss (or cache error): fetch from the data API and re-cache.
    course_enrollment_details = _data_api().get_course_enrollment_info(course_id, include_expired)
    try:
        cache_time_out = getattr(settings, 'ENROLLMENT_COURSE_DETAILS_CACHE_TIMEOUT', 60)
        cache.set(cache_key, course_enrollment_details, cache_time_out)
    except Exception:
        # Catch any unexpected errors during caching.
        # NOTE(review): a failed cache *write* aborts the request even though
        # the details were already fetched successfully — confirm this
        # fail-hard behavior is intended rather than best-effort caching.
        log.exception(u"Error occurred while caching course enrollment details for course %s", course_id)
        raise errors.CourseEnrollmentError(u"An unexpected error occurred while retrieving course enrollment details.")
    log.info(u"Get enrollment data for course %s", course_id)
    return course_enrollment_details
def set_enrollment_attributes(user_id, course_id, attributes):
    """Set enrollment attributes on a user's enrollment in a course.

    Args:
        user_id (str): The user whose enrollment is updated.
        course_id (str): The course of the enrollment.
        attributes (list): Attribute dicts to store, each with
            ``namespace``, ``name`` and ``value`` keys (e.g. the credit
            provider id under the "credit" namespace).
    """
    data_api = _data_api()
    data_api.add_or_update_enrollment_attr(user_id, course_id, attributes)
def get_enrollment_attributes(user_id, course_id):
    """Retrieve enrollment attributes for the given user and course.

    Args:
        user_id: The user to get enrollment attributes for.
        course_id (str): The course to get enrollment attributes for.

    Returns:
        list: attribute dicts, each with ``namespace``, ``name`` and
        ``value`` keys.
    """
    data_api = _data_api()
    return data_api.get_enrollment_attributes(user_id, course_id)
def _validate_course_mode(course_id, mode):
    """Check that ``mode`` is one of the course's configured modes.

    Note: an earlier docstring claimed 'honor' was special-cased when no
    course modes are configured, but this function has no such special
    case — the mode must appear in the course's configured modes.

    Args:
        course_id (str): The course to check against for available course modes.
        mode (str): The slug for the course mode specified in the enrollment.

    Returns:
        None

    Raises:
        CourseModeNotFoundError: raised if the course mode is not found.
    """
    course_enrollment_info = _data_api().get_course_enrollment_info(course_id)
    course_modes = course_enrollment_info["course_modes"]
    available_modes = [m['slug'] for m in course_modes]
    if mode not in available_modes:
        msg = (
            u"Specified course mode '{mode}' unavailable for course {course_id}. "
            u"Available modes were: {available}"
        ).format(
            mode=mode,
            course_id=course_id,
            available=", ".join(available_modes)
        )
        # logger.warn() is a deprecated alias of warning().
        log.warning(msg)
        raise errors.CourseModeNotFoundError(msg, course_enrollment_info)
def _data_api():
    """Return the configured enrollment data API module.

    The module path comes from the ENROLLMENT_DATA_API setting, falling
    back to DEFAULT_DATA_API.

    Raises:
        EnrollmentApiLoadError: if the configured module cannot be imported.
    """
    # Use the stdlib importlib: django.utils.importlib is a deprecated
    # copy of it (removed in Django 1.9). The local import shadows the
    # module-level `from django.utils import importlib` binding.
    import importlib

    # We retrieve the settings in-line here (rather than using the
    # top-level constant), so that @override_settings will work
    # in the test suite.
    api_path = getattr(settings, "ENROLLMENT_DATA_API", DEFAULT_DATA_API)
    try:
        return importlib.import_module(api_path)
    except (ImportError, ValueError):
        log.exception(u"Could not load module at '{path}'".format(path=api_path))
        raise errors.EnrollmentApiLoadError(api_path)
| agpl-3.0 |
windyuuy/opera | chromium/src/third_party/python_26/Lib/encodings/cp1254.py | 593 | 13758 | """ Python Character Mapping Codec cp1254 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1254.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless one-shot encode/decode via the C-level charmap helpers.

    def encode(self, input, errors='strict'):
        return codecs.charmap_encode(input, errors, encoding_table)

    def decode(self, input, errors='strict'):
        return codecs.charmap_decode(input, errors, decoding_table)
# Incremental codec variants; charmap codecs are stateless, so each call
# simply maps its chunk and discards the length ([0] drops it).
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        return codecs.charmap_encode(input, self.errors, encoding_table)[0]


class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        return codecs.charmap_decode(input, self.errors, decoding_table)[0]
# Stream wrappers: all behavior is inherited from Codec and the codecs bases.
class StreamWriter(Codec, codecs.StreamWriter):
    pass


class StreamReader(Codec, codecs.StreamReader):
    pass
### encodings module API

def getregentry():
    # Registration hook looked up by the encodings package / codecs machinery.
    return codecs.CodecInfo(
        name='cp1254',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u20ac' # 0x80 -> EURO SIGN
u'\ufffe' # 0x81 -> UNDEFINED
u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
u'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
u'\u2020' # 0x86 -> DAGGER
u'\u2021' # 0x87 -> DOUBLE DAGGER
u'\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u2030' # 0x89 -> PER MILLE SIGN
u'\u0160' # 0x8A -> LATIN CAPITAL LETTER S WITH CARON
u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\u0152' # 0x8C -> LATIN CAPITAL LIGATURE OE
u'\ufffe' # 0x8D -> UNDEFINED
u'\ufffe' # 0x8E -> UNDEFINED
u'\ufffe' # 0x8F -> UNDEFINED
u'\ufffe' # 0x90 -> UNDEFINED
u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
u'\u2022' # 0x95 -> BULLET
u'\u2013' # 0x96 -> EN DASH
u'\u2014' # 0x97 -> EM DASH
u'\u02dc' # 0x98 -> SMALL TILDE
u'\u2122' # 0x99 -> TRADE MARK SIGN
u'\u0161' # 0x9A -> LATIN SMALL LETTER S WITH CARON
u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\u0153' # 0x9C -> LATIN SMALL LIGATURE OE
u'\ufffe' # 0x9D -> UNDEFINED
u'\ufffe' # 0x9E -> UNDEFINED
u'\u0178' # 0x9F -> LATIN CAPITAL LETTER Y WITH DIAERESIS
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\xa5' # 0xA5 -> YEN SIGN
u'\xa6' # 0xA6 -> BROKEN BAR
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xAC -> NOT SIGN
u'\xad' # 0xAD -> SOFT HYPHEN
u'\xae' # 0xAE -> REGISTERED SIGN
u'\xaf' # 0xAF -> MACRON
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
u'\xb4' # 0xB4 -> ACUTE ACCENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\xb8' # 0xB8 -> CEDILLA
u'\xb9' # 0xB9 -> SUPERSCRIPT ONE
u'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
u'\xbf' # 0xBF -> INVERTED QUESTION MARK
u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\u011e' # 0xD0 -> LATIN CAPITAL LETTER G WITH BREVE
u'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\u0130' # 0xDD -> LATIN CAPITAL LETTER I WITH DOT ABOVE
u'\u015e' # 0xDE -> LATIN CAPITAL LETTER S WITH CEDILLA
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
u'\u011f' # 0xF0 -> LATIN SMALL LETTER G WITH BREVE
u'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
u'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf7' # 0xF7 -> DIVISION SIGN
u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u0131' # 0xFD -> LATIN SMALL LETTER DOTLESS I
u'\u015f' # 0xFE -> LATIN SMALL LETTER S WITH CEDILLA
u'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)
### Encoding table
# Inverse mapping built from decoding_table by the C helper.
encoding_table = codecs.charmap_build(decoding_table)
| bsd-3-clause |
skyfromwell/paperwallet | encryption.py | 1 | 1425 | #remove all others only keep Bip38 here. Need to learn more about this.
from bitcoin.bip38 import Bip38
from bitcoin.key import CKey
from bitcoin.base58 import CBase58Data
__b58chars = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
__b58base = len(__b58chars)
def encode_pw(key, pw):
    """Encrypt a base58check-encoded (WIF) private key with BIP-0038.

    :param key: base58check-encoded private key string (WIF)
    :param pw: passphrase to use for the BIP-0038 encryption
    :return: encrypted key, base58check-encoded with version byte 0x01
    """
    # BUG FIX: the original reassigned ``key = CKey()`` *before* decoding,
    # so ``__decode_b58`` was fed the fresh CKey object instead of the WIF
    # string the caller passed in.  Decode the input string first.
    # [1:-4] strips the leading version byte and the 4-byte checksum,
    # leaving the raw secret.
    decode_string = __decode_b58(key)[1:-4]
    ckey = CKey()
    ckey.generate(decode_string)
    ckey.set_compressed(False)
    bt = Bip38(ckey, pw)
    return str(CBase58Data(bt.get_encrypted(), 0x01))
def __encode_b58(v):
    """Encode the byte string *v* in base58 (Bitcoin alphabet).

    Leading NUL bytes are preserved as leading '1' characters.
    """
    # Interpret the bytes as a single big-endian integer.
    num = 0
    for ch in v:
        num = num * 256 + ord(ch)
    # Peel off base58 digits least-significant first, then reverse.
    digits = []
    while num >= __b58base:
        num, rem = divmod(num, __b58base)
        digits.append(__b58chars[rem])
    digits.append(__b58chars[num])
    digits.reverse()
    # Count leading zero bytes; each maps to the first alphabet character.
    pad = 0
    for ch in v:
        if ch != '\0':
            break
        pad += 1
    return __b58chars[0] * pad + "".join(digits)
def __decode_b58(v):
    """Decode base58 string *v* back to a byte string.

    Leading '1' characters are restored as leading NUL bytes.
    """
    # Horner evaluation of the base58 digits into one integer.
    num = 0
    for ch in v:
        num = num * __b58base + __b58chars.find(ch)
    # Peel off bytes least-significant first, then reverse.
    chunks = []
    while num >= 256:
        num, rem = divmod(num, 256)
        chunks.append(chr(rem))
    chunks.append(chr(num))
    chunks.reverse()
    # Each leading '1' in the input stands for one NUL byte.
    pad = 0
    for ch in v:
        if ch != __b58chars[0]:
            break
        pad += 1
    return chr(0) * pad + "".join(chunks)
| gpl-3.0 |
UltronAI/Deep-Learning | Pattern-Recognition/hw2-Feature-Selection/skfeature/example/test_JMI.py | 1 | 1528 | import scipy.io
from sklearn.metrics import accuracy_score
from sklearn import cross_validation
from sklearn import svm
from skfeature.function.information_theoretical_based import JMI
def main():
# load data
mat = scipy.io.loadmat('../data/colon.mat')
X = mat['X'] # data
X = X.astype(float)
y = mat['Y'] # label
y = y[:, 0]
n_samples, n_features = X.shape # number of samples and number of features
# split data into 10 folds
ss = cross_validation.KFold(n_samples, n_folds=10, shuffle=True)
# perform evaluation on classification task
num_fea = 10 # number of selected features
clf = svm.LinearSVC() # linear SVM
correct = 0
for train, test in ss:
# obtain the index of each feature on the training set
idx,_,_ = JMI.jmi(X[train], y[train], n_selected_features=num_fea)
# obtain the dataset on the selected features
features = X[:, idx[0:num_fea]]
# train a classification model with the selected features on the training dataset
clf.fit(features[train], y[train])
# predict the class labels of test data
y_predict = clf.predict(features[test])
# obtain the classification accuracy on the test data
acc = accuracy_score(y[test], y_predict)
correct = correct + acc
# output the average classification accuracy over all 10 folds
print 'Accuracy:', float(correct)/10
if __name__ == '__main__':
main()
| mit |
scott-eddy/mavlink | pymavlink/examples/mavtester.py | 43 | 1145 | #!/usr/bin/env python
'''
test mavlink messages
'''
import sys, struct, time, os
from curses import ascii
from pymavlink import mavtest, mavutil
from argparse import ArgumentParser
# Command-line options: serial connection settings and the MAVLink
# source-system id this GCS should report.
parser = ArgumentParser(description=__doc__)
parser.add_argument(
    "--baudrate", type=int, default=115200,
    help="master port baud rate")
parser.add_argument(
    "--device", required=True,
    help="serial device")
parser.add_argument(
    "--source-system", dest='SOURCE_SYSTEM', type=int, default=255,
    help='MAVLink source system for this GCS')
args = parser.parse_args()
def wait_heartbeat(m):
    '''wait for a heartbeat so we know the target system IDs

    Blocks until a HEARTBEAT message arrives on connection *m*, which
    populates m.target_system / m.target_component.
    '''
    print("Waiting for APM heartbeat")
    msg = m.recv_match(type='HEARTBEAT', blocking=True)
    # BUG FIX: the original printed target_system twice; the second field
    # is the component id.
    print("Heartbeat from APM (system %u component %u)" % (m.target_system, m.target_component))
# create a mavlink serial instance
# (source_system identifies this GCS on the MAVLink network)
master = mavutil.mavlink_connection(args.device, baud=args.baudrate, source_system=args.SOURCE_SYSTEM)

# wait for the heartbeat msg to find the system ID
wait_heartbeat(master)

# exercise every message type the test generator knows about
print("Sending all message types")
mavtest.generate_outputs(master.mav)
| lgpl-3.0 |
noba3/KoTos | addons/plugin.video.rtl2-now.de/resources/lib/kodion/utils/function_cache.py | 4 | 3125 | from functools import partial
import hashlib
import datetime
from storage import Storage
class FunctionCache(Storage):
    """Storage-backed memoization cache for function calls.

    Results are keyed by an MD5 digest of the function's module, name and
    arguments, and expire after a caller-supplied time-to-live.
    """

    # Convenience TTL constants (seconds).
    ONE_MINUTE = 60
    ONE_HOUR = 60 * ONE_MINUTE
    ONE_DAY = 24 * ONE_HOUR
    ONE_WEEK = 7 * ONE_DAY
    ONE_MONTH = 4 * ONE_WEEK

    def __init__(self, filename, max_file_size_kb=-1):
        Storage.__init__(self, filename, max_file_size_kb=max_file_size_kb)
        self._enabled = True

    def clear(self):
        """Remove all cached entries."""
        self._clear()

    def enabled(self):
        """Enable the caching.

        NOTE(review): despite the name this is a mutator, not a query;
        the name is kept for API compatibility with existing callers.
        """
        self._enabled = True

    def disable(self):
        """Disable caching, e.g. for tests."""
        self._enabled = False

    def _create_id_from_func(self, partial_func):
        """Return a stable MD5 hex id for the given functools.partial.

        :param partial_func: functools.partial wrapping the cached call
        :return: hex digest identifying function + arguments
        """
        m = hashlib.md5()
        # encode() keeps this working on Python 3, where hashlib.update()
        # only accepts bytes; on Python 2 it is a no-op for ASCII text.
        m.update(partial_func.func.__module__.encode('utf-8'))
        m.update(partial_func.func.__name__.encode('utf-8'))
        m.update(str(partial_func.args).encode('utf-8'))
        m.update(str(partial_func.keywords).encode('utf-8'))
        return m.hexdigest()

    def _get_cached_data(self, partial_func):
        """Return (stored entry or None, cache_id) for the given call."""
        cache_id = self._create_id_from_func(partial_func)
        return self._get(cache_id), cache_id

    def get_cached_only(self, func, *args, **keywords):
        """Return the cached result for func(*args, **keywords), or None.

        Never invokes *func* (unless caching is disabled, in which case
        the call is passed straight through).
        """
        partial_func = partial(func, *args, **keywords)
        # if caching is disabled call the function
        if not self._enabled:
            return partial_func()
        data, _cache_id = self._get_cached_data(partial_func)
        if data is not None:
            return data[0]
        return None

    def get(self, seconds, func, *args, **keywords):
        """Return the cached result of func(*args, **keywords).

        :param seconds: time to live of a cached value, in seconds
        :param func: function whose result is cached
        :return: cached value if still fresh, otherwise the freshly
                 recomputed (and re-stored) value
        """
        def _seconds_difference(_first, _last):
            # timedelta.total_seconds() only exists from Python 2.7 on,
            # so compute it by hand.
            _delta = _last - _first
            return (24 * 60 * 60 * _delta.days + _delta.seconds
                    + _delta.microseconds / 1000000.)

        partial_func = partial(func, *args, **keywords)
        # if caching is disabled call the function directly
        if not self._enabled:
            return partial_func()

        cached_data = None
        cached_time = None
        data, cache_id = self._get_cached_data(partial_func)
        if data is not None:
            cached_data = data[0]
            cached_time = data[1]

        # BUG FIX: diff_seconds was previously only assigned inside the
        # cached_time branch, which could raise NameError for an entry
        # with a value but no timestamp; treat that case as stale.
        diff_seconds = None
        if cached_time is not None:
            diff_seconds = _seconds_difference(cached_time, datetime.datetime.now())

        if cached_data is None or diff_seconds is None or diff_seconds > seconds:
            cached_data = partial_func()
            self._set(cache_id, cached_data)
        return cached_data
| gpl-2.0 |
naterh/chipsec | source/tool/chipsec/hal/cpuid.py | 1 | 1845 | #!/usr/local/bin/python
#CHIPSEC: Platform Security Assessment Framework
#Copyright (c) 2010-2015, Intel Corporation
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; Version 2.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#Contact information:
#chipsec@intel.com
#
# -------------------------------------------------------------------------------
#
# CHIPSEC: Platform Hardware Security Assessment Framework
# (c) 2010-2012 Intel Corporation
#
# -------------------------------------------------------------------------------
## \addtogroup hal
# chipsec/hal/cpuid.py
# ======================
# CPUID information
# ~~~
# #usage:
# cpuid(0)
# ~~~
#
__version__ = '1.0'
import struct
import sys
import os.path
from chipsec.logger import logger
class CpuIDRuntimeError (RuntimeError):
    """Runtime error type for the CPUID HAL (not raised in this chunk;
    presumably raised by callers or helper backends)."""
    pass
class CpuID:
    """HAL access to the CPUID instruction via the platform helper."""

    def __init__(self, cs):
        self.cs = cs
        self.helper = cs.helper

    def cpuid(self, eax, ecx):
        """Execute CPUID with the given EAX/ECX and return (eax, ebx, ecx, edx)."""
        # Trace the input registers when verbose logging is enabled.
        if logger().VERBOSE:
            logger().log( "[cpuid] in: EAX=0x%08X, ECX=0x%08X" % (eax, ecx) )
        (reax, rebx, recx, redx) = self.helper.cpuid(eax, ecx)
        # Trace all four output registers as well.
        if logger().VERBOSE:
            logger().log( "[cpuid] out: EAX=0x%08X, EBX=0x%08X, ECX=0x%08X, EDX=0x%08X" % (reax, rebx, recx, redx) )
        return (reax, rebx, recx, redx)
| gpl-2.0 |
kevinastone/sentry | src/sentry/migrations/0152_auto__add_field_file_checksum__chg_field_file_name__add_unique_file_na.py | 36 | 32982 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: add File.checksum, narrow File.name to 128
        chars, and enforce uniqueness on (name, checksum)."""
        # Adding field 'File.checksum'
        db.add_column('sentry_file', 'checksum',
                      self.gf('django.db.models.fields.CharField')(max_length=32, null=True),
                      keep_default=False)

        # Changing field 'File.name'
        db.alter_column('sentry_file', 'name', self.gf('django.db.models.fields.CharField')(max_length=128))
        # Adding unique constraint on 'File', fields ['name', 'checksum']
        # (added last: the checksum column must exist first)
        db.create_unique('sentry_file', ['name', 'checksum'])
    def backwards(self, orm):
        """Revert forwards(): drop the unique constraint, remove
        File.checksum, and widen File.name back to 256 chars."""
        # Removing unique constraint on 'File', fields ['name', 'checksum']
        # (must be dropped before the checksum column it covers)
        db.delete_unique('sentry_file', ['name', 'checksum'])

        # Deleting field 'File.checksum'
        db.delete_column('sentry_file', 'checksum')

        # Changing field 'File.name'
        db.alter_column('sentry_file', 'name', self.gf('django.db.models.fields.CharField')(max_length=256))
models = {
'sentry.accessgroup': {
'Meta': {'unique_together': "(('team', 'name'),)", 'object_name': 'AccessGroup'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.User']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'symmetrical': 'False'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}),
'type': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '50'})
},
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Event']", 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.alert': {
'Meta': {'object_name': 'Alert'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'related_groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_alerts'", 'symmetrical': 'False', 'through': "orm['sentry.AlertRelatedGroup']", 'to': "orm['sentry.Group']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.alertrelatedgroup': {
'Meta': {'unique_together': "(('group', 'alert'),)", 'object_name': 'AlertRelatedGroup'},
'alert': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Alert']"}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.auditlogentry': {
'Meta': {'object_name': 'AuditLogEntry'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'audit_actors'", 'to': "orm['sentry.User']"}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'badge': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group', 'datetime'),)"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.file': {
'Meta': {'unique_together': "(('name', 'checksum'),)", 'object_name': 'File'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'storage': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'storage_options': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.helppage': {
'Meta': {'object_name': 'HelpPage'},
'content': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True'}),
'priority': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'object_name': 'Organization'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}),
'type': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '50'})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'), ('organization', 'slug'))", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'user_added': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.release': {
'Meta': {'unique_together': "(('project', 'version'),)", 'object_name': 'Release'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}),
'type': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '50'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
}
}
complete_apps = ['sentry'] | bsd-3-clause |
chenss/ChatRoom | 14.5 已经能运行(虽然有很多Warning)的Django-nonrel框架/django/core/files/base.py | 235 | 3888 | import os
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.utils.encoding import smart_str, smart_unicode
from django.core.files.utils import FileProxyMixin
class File(FileProxyMixin):
    """Thin wrapper around a file-like object, adding size/closed properties,
    chunked reading, and newline-based iteration. Attribute access not defined
    here is proxied to the underlying file via FileProxyMixin."""
    # Default read size for chunks(): 64 KiB.
    DEFAULT_CHUNK_SIZE = 64 * 2**10
    def __init__(self, file, name=None):
        # Fall back to the wrapped object's own name/mode when not given.
        self.file = file
        if name is None:
            name = getattr(file, 'name', None)
        self.name = name
        self.mode = getattr(file, 'mode', None)
    def __str__(self):
        return smart_str(self.name or '')
    def __unicode__(self):
        return smart_unicode(self.name or u'')
    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self or "None")
    def __nonzero__(self):
        # Truthiness mirrors "has a name" (Python 2 protocol method).
        return bool(self.name)
    def __len__(self):
        return self.size
    def _get_size(self):
        # Size is computed lazily and cached on first access: prefer the
        # wrapped object's own 'size', then fall back to the filesystem.
        if not hasattr(self, '_size'):
            if hasattr(self.file, 'size'):
                self._size = self.file.size
            elif os.path.exists(self.file.name):
                self._size = os.path.getsize(self.file.name)
            else:
                raise AttributeError("Unable to determine the file's size.")
        return self._size
    def _set_size(self, size):
        self._size = size
    size = property(_get_size, _set_size)
    def _get_closed(self):
        return not self.file or self.file.closed
    closed = property(_get_closed)
    def chunks(self, chunk_size=None):
        """
        Read the file and yield chunks of ``chunk_size`` bytes (defaults to
        ``UploadedFile.DEFAULT_CHUNK_SIZE``).
        """
        if not chunk_size:
            chunk_size = self.DEFAULT_CHUNK_SIZE
        if hasattr(self, 'seek'):
            self.seek(0)
        # Assume the pointer is at zero...
        counter = self.size
        while counter > 0:
            yield self.read(chunk_size)
            counter -= chunk_size
    def multiple_chunks(self, chunk_size=None):
        """
        Returns ``True`` if you can expect multiple chunks.
        NB: If a particular file representation is in memory, subclasses should
        always return ``False`` -- there's no good reason to read from memory in
        chunks.
        """
        if not chunk_size:
            chunk_size = self.DEFAULT_CHUNK_SIZE
        return self.size > chunk_size
    def __iter__(self):
        # Iterate over this file-like object by newlines
        buffer_ = None
        for chunk in self.chunks():
            chunk_buffer = StringIO(chunk)
            for line in chunk_buffer:
                if buffer_:
                    # Re-join a line that was split across chunk boundaries.
                    line = buffer_ + line
                    buffer_ = None
                # If this is the end of a line, yield
                # otherwise, wait for the next round
                if line[-1] in ('\n', '\r'):
                    yield line
                else:
                    buffer_ = line
        if buffer_ is not None:
            # Final line without a trailing newline.
            yield buffer_
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, tb):
        self.close()
    def open(self, mode=None):
        # Rewind if still open; otherwise reopen from disk by name.
        if not self.closed:
            self.seek(0)
        elif self.name and os.path.exists(self.name):
            self.file = open(self.name, mode or self.mode)
        else:
            raise ValueError("The file cannot be reopened.")
    def close(self):
        self.file.close()
class ContentFile(File):
    """
    A File-like object that takes just raw content, rather than an actual file.
    """
    def __init__(self, content):
        # Normalise None/falsy content to the empty string before wrapping.
        data = content or ''
        super(ContentFile, self).__init__(StringIO(data))
        self.size = len(data)
    def __str__(self):
        return 'Raw content'
    def __nonzero__(self):
        # Always truthy, even for empty content (unlike File, which keys on name).
        return True
    def open(self, mode=None):
        # Reopening just rewinds the in-memory buffer.
        self.seek(0)
    def close(self):
        # Keep the buffer alive so the content can be re-read after close().
        pass
| gpl-2.0 |
carrdelling/project_euler | problem17.py | 1 | 1728 | #!/usr/bin/env python
################################################################################
#
# Project Euler - Problem 17
#
# If the numbers 1 to 5 are written out in words: one, two, three, four, five,
# then there are 3 + 3 + 5 + 4 + 4 = 19 letters used in total.
#
# If all the numbers from 1 to 1000 (one thousand) inclusive were written out in
# words, how many letters would be used?
#
# NOTE: Do not count spaces or hyphens. For example, 342 (three hundred and
# forty-two) contains 23 letters and 115 (one hundred and fifteen) contains 20
# letters. The use of "and" when writing out numbers is in compliance with
# British usage.
#
# Joaquin Derrac - carrdelling@gmail.com
#
################################################################################
# Letter counts for number words (spaces and hyphens excluded, per the
# problem statement). hundreds[h] counts "<h> hundred and" (13-15 letters);
# hundreds[0] is 0 so numbers below 100 work unchanged.
units = {1: 3, 2: 3, 3: 5, 4: 4, 5: 4, 6: 3, 7: 5, 8: 5, 9: 4, 0: 0}
tens = {2: 6, 3: 6, 4: 5, 5: 5, 6: 5, 7: 7, 8: 6, 9: 6}
hundreds = {0: 0, 1: 13, 2: 13, 3: 15, 4: 14, 5: 14, 6: 13, 7: 15, 8: 15, 9: 14}
ten_to_nineteen = {10: 3, 11: 6, 12: 6, 13: 8, 14: 8, 15: 7, 16: 7, 17: 9,
                   18: 8, 19: 8}


def number_str_lenght(number):
    """Return the letter count of *number* (1-999) written out in British
    English (spaces/hyphens not counted, 'and' included, e.g. 342 -> 23)."""
    # Use divmod (floor division) instead of '/': plain division returns a
    # float on Python 3, which would break the integer dict lookups below.
    h, du = divmod(number, 100)
    d, u = divmod(du, 10)
    if du < 1:
        # Round hundreds ("three hundred"): drop the 3 letters of 'and'.
        num_length = hundreds[h] - 3
    elif 0 < du <= 9:
        num_length = hundreds[h] + units[u]
    elif 9 < du <= 19:
        # 10-19 are irregular words, looked up directly.
        num_length = hundreds[h] + ten_to_nineteen[du]
    else:
        num_length = hundreds[h] + tens[d] + units[u]
    return num_length
if __name__ == "__main__":
    # Sum the written-out lengths of 1..999, then add "one thousand" (11
    # letters) for the final number.
    solution = sum(number_str_lenght(n) for n in range(1, 1000)) + 11
    print(solution)
| gpl-2.0 |
2014c2g2/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/pkgdata.py | 603 | 2146 | """pkgdata is a simple, extensible way for a package to acquire data file
resources.
The getResource function is equivalent to the standard idioms, such as
the following minimal implementation::
import sys, os
def getResource(identifier, pkgname=__name__):
pkgpath = os.path.dirname(sys.modules[pkgname].__file__)
path = os.path.join(pkgpath, identifier)
return file(os.path.normpath(path), mode='rb')
When a __loader__ is present on the module given by __name__, it will defer
getResource to its get_data implementation and return it as a file-like
object (such as StringIO).
"""
__all__ = ['getResource']
import sys
import os
#from cStringIO import StringIO
from io import StringIO
try:
# Try to use setuptools if available.
from pkg_resources import resource_stream
_have_resource_stream = True
except ImportError:
_have_resource_stream = False
def getResource(identifier, pkgname=__name__):
    """Acquire a readable object for a given package name and identifier.
    An IOError will be raised if the resource can not be found.
    For example::
        mydata = getResource('mypkgdata.jpg').read()
    Note that the package name must be fully qualified, if given, such
    that it would be found in sys.modules.
    In some cases, getResource will return a real file object. In that
    case, it may be useful to use its name attribute to get the path
    rather than use it as a file-like object. For example, you may
    be handing data off to a C API.
    """
    # Prefer setuptools
    if _have_resource_stream:
        return resource_stream(pkgname, identifier)
    mod = sys.modules[pkgname]
    fn = getattr(mod, '__file__', None)
    if fn is None:
        # Bug fix: the format string previously had no argument applied, so
        # the error message literally contained '%r' and never named the
        # offending package.
        raise IOError("%r has no __file__!" % (pkgname,))
    path = os.path.join(os.path.dirname(fn), identifier)
    loader = getattr(mod, '__loader__', None)
    if loader is not None:
        try:
            data = loader.get_data(path)
        except IOError:
            # Loader has no such resource; fall through to the filesystem.
            pass
        else:
            # NOTE(review): loader.get_data returns bytes on Python 3, while
            # StringIO expects text -- confirm before relying on this path.
            return StringIO(data)
    return open(os.path.normpath(path), 'rb')
| gpl-3.0 |
azureplus/hue | desktop/core/ext-py/Django-1.6.10/django/contrib/sessions/middleware.py | 215 | 2031 | import time
from django.conf import settings
from django.utils.cache import patch_vary_headers
from django.utils.http import cookie_date
from django.utils.importlib import import_module
class SessionMiddleware(object):
    """Attach a SessionStore to each request and persist it on the response."""
    def process_request(self, request):
        # The backend is configurable (settings.SESSION_ENGINE); import it
        # lazily and seed the store from the session cookie, if present.
        engine = import_module(settings.SESSION_ENGINE)
        session_key = request.COOKIES.get(settings.SESSION_COOKIE_NAME, None)
        request.session = engine.SessionStore(session_key)
    def process_response(self, request, response):
        """
        If request.session was modified, or if the configuration is to save the
        session every time, save the changes and set a session cookie.
        """
        try:
            accessed = request.session.accessed
            modified = request.session.modified
        except AttributeError:
            # request.session was removed/replaced by other middleware;
            # nothing to persist in that case.
            pass
        else:
            if accessed:
                # The response content depends on the session cookie, so tell
                # caches to vary on it.
                patch_vary_headers(response, ('Cookie',))
            if modified or settings.SESSION_SAVE_EVERY_REQUEST:
                if request.session.get_expire_at_browser_close():
                    # Browser-session cookie: no max-age / expiry attributes.
                    max_age = None
                    expires = None
                else:
                    max_age = request.session.get_expiry_age()
                    expires_time = time.time() + max_age
                    expires = cookie_date(expires_time)
                # Save the session data and refresh the client cookie.
                # Skip session save for 500 responses, refs #3881.
                if response.status_code != 500:
                    request.session.save()
                    response.set_cookie(settings.SESSION_COOKIE_NAME,
                            request.session.session_key, max_age=max_age,
                            expires=expires, domain=settings.SESSION_COOKIE_DOMAIN,
                            path=settings.SESSION_COOKIE_PATH,
                            secure=settings.SESSION_COOKIE_SECURE or None,
                            httponly=settings.SESSION_COOKIE_HTTPONLY or None)
        return response
| apache-2.0 |
zhou533/shadowsocks | shadowsocks/shell.py | 270 | 12676 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import json
import sys
import getopt
import logging
from shadowsocks.common import to_bytes, to_str, IPNetwork
from shadowsocks import encrypt
VERBOSE_LEVEL = 5
verbose = 0
def check_python():
    """Exit with an error message unless running on Python 2.6+ or 3.3+."""
    major, minor = sys.version_info[0], sys.version_info[1]
    if major == 2:
        if minor < 6:
            print('Python 2.6+ required')
            sys.exit(1)
    elif major == 3:
        if minor < 3:
            print('Python 3.3+ required')
            sys.exit(1)
    else:
        print('Python version not supported')
        sys.exit(1)
def print_exception(e):
    """Log exception *e*; when verbose mode is on, also dump the traceback."""
    global verbose
    logging.error(e)
    if verbose <= 0:
        return
    import traceback
    traceback.print_exc()
def print_shadowsocks():
    """Print the program banner with the installed version when available."""
    try:
        import pkg_resources
        version = pkg_resources.get_distribution('shadowsocks').version
    except Exception:
        # setuptools missing or package not installed: banner still prints,
        # just without a version number.
        version = ''
    print('Shadowsocks %s' % version)
def find_config():
    """Return the path of config.json, or None when no candidate exists.

    The current working directory is checked first, then the package's
    parent directory.
    """
    candidates = (
        'config.json',
        os.path.join(os.path.dirname(__file__), '../', 'config.json'),
    )
    for path in candidates:
        if os.path.exists(path):
            return path
    return None
def check_config(config, is_local):
    """Validate/normalise *config*, warn on risky settings, exit on fatal ones.

    Mutates config in place (int-coercing ports) and verifies the cipher via
    encrypt.try_cipher. Exits the process on missing password, default
    password, or unsupported 'user' option.
    """
    if config.get('daemon', None) == 'stop':
        # no need to specify configuration for daemon stop
        return
    if is_local and not config.get('password', None):
        logging.error('password not specified')
        print_help(is_local)
        sys.exit(2)
    if not is_local and not config.get('password', None) \
            and not config.get('port_password', None):
        logging.error('password or port_password not specified')
        print_help(is_local)
        sys.exit(2)
    # Coerce ports to int; server_port may be a list (multi-port config).
    if 'local_port' in config:
        config['local_port'] = int(config['local_port'])
    if 'server_port' in config and type(config['server_port']) != list:
        config['server_port'] = int(config['server_port'])
    if config.get('local_address', '') in [b'0.0.0.0']:
        logging.warn('warning: local set to listen on 0.0.0.0, it\'s not safe')
    if config.get('server', '') in ['127.0.0.1', 'localhost']:
        logging.warn('warning: server set to listen on %s:%s, are you sure?' %
                     (to_str(config['server']), config['server_port']))
    # Warn about known-weak ciphers but do not refuse them.
    if (config.get('method', '') or '').lower() == 'table':
        logging.warn('warning: table is not safe; please use a safer cipher, '
                     'like AES-256-CFB')
    if (config.get('method', '') or '').lower() == 'rc4':
        logging.warn('warning: RC4 is not safe; please use a safer cipher, '
                     'like AES-256-CFB')
    if config.get('timeout', 300) < 100:
        logging.warn('warning: your timeout %d seems too short' %
                     int(config.get('timeout')))
    if config.get('timeout', 300) > 600:
        logging.warn('warning: your timeout %d seems too long' %
                     int(config.get('timeout')))
    if config.get('password') in [b'mypassword']:
        logging.error('DON\'T USE DEFAULT PASSWORD! Please change it in your '
                      'config.json!')
        sys.exit(1)
    if config.get('user', None) is not None:
        if os.name != 'posix':
            logging.error('user can be used only on Unix')
            sys.exit(1)
    # Fail fast if the password/method pair cannot initialise a cipher.
    encrypt.try_cipher(config['password'], config['method'])
def get_config(is_local):
    """Build the runtime config dict from config.json and CLI arguments.

    CLI options override values loaded from the config file; defaults fill
    any remaining gaps. Also configures logging according to verbosity and
    runs check_config() before returning. Exits on usage errors.
    """
    global verbose
    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)-s: %(message)s')
    # Local (sslocal) and server (ssserver) accept different option sets.
    if is_local:
        shortopts = 'hd:s:b:p:k:l:m:c:t:vq'
        longopts = ['help', 'fast-open', 'pid-file=', 'log-file=', 'user=',
                    'version']
    else:
        shortopts = 'hd:s:p:k:m:c:t:vq'
        longopts = ['help', 'fast-open', 'pid-file=', 'log-file=', 'workers=',
                    'forbidden-ip=', 'user=', 'manager-address=', 'version']
    try:
        config_path = find_config()
        optlist, args = getopt.getopt(sys.argv[1:], shortopts, longopts)
        # First pass: an explicit -c overrides the auto-discovered path.
        for key, value in optlist:
            if key == '-c':
                config_path = value
        if config_path:
            logging.info('loading config from %s' % config_path)
            with open(config_path, 'rb') as f:
                try:
                    config = parse_json_in_str(f.read().decode('utf8'))
                except ValueError as e:
                    logging.error('found an error in config.json: %s',
                                  e.message)
                    sys.exit(1)
        else:
            config = {}
        v_count = 0
        # Second pass: apply every CLI option on top of the file config.
        for key, value in optlist:
            if key == '-p':
                config['server_port'] = int(value)
            elif key == '-k':
                config['password'] = to_bytes(value)
            elif key == '-l':
                config['local_port'] = int(value)
            elif key == '-s':
                config['server'] = to_str(value)
            elif key == '-m':
                config['method'] = to_str(value)
            elif key == '-b':
                config['local_address'] = to_str(value)
            elif key == '-v':
                v_count += 1
                # '-vv' turns on more verbose mode
                config['verbose'] = v_count
            elif key == '-t':
                config['timeout'] = int(value)
            elif key == '--fast-open':
                config['fast_open'] = True
            elif key == '--workers':
                config['workers'] = int(value)
            elif key == '--manager-address':
                config['manager_address'] = value
            elif key == '--user':
                config['user'] = to_str(value)
            elif key == '--forbidden-ip':
                config['forbidden_ip'] = to_str(value).split(',')
            elif key in ('-h', '--help'):
                if is_local:
                    print_local_help()
                else:
                    print_server_help()
                sys.exit(0)
            elif key == '--version':
                print_shadowsocks()
                sys.exit(0)
            elif key == '-d':
                config['daemon'] = to_str(value)
            elif key == '--pid-file':
                config['pid-file'] = to_str(value)
            elif key == '--log-file':
                config['log-file'] = to_str(value)
            elif key == '-q':
                # '-q'/'-qq' lower verbosity below the default.
                v_count -= 1
                config['verbose'] = v_count
    except getopt.GetoptError as e:
        print(e, file=sys.stderr)
        print_help(is_local)
        sys.exit(2)
    if not config:
        logging.error('config not specified')
        print_help(is_local)
        sys.exit(2)
    # Fill in defaults for anything neither the file nor the CLI provided.
    config['password'] = to_bytes(config.get('password', b''))
    config['method'] = to_str(config.get('method', 'aes-256-cfb'))
    config['port_password'] = config.get('port_password', None)
    config['timeout'] = int(config.get('timeout', 300))
    config['fast_open'] = config.get('fast_open', False)
    config['workers'] = config.get('workers', 1)
    config['pid-file'] = config.get('pid-file', '/var/run/shadowsocks.pid')
    config['log-file'] = config.get('log-file', '/var/log/shadowsocks.log')
    config['verbose'] = config.get('verbose', False)
    config['local_address'] = to_str(config.get('local_address', '127.0.0.1'))
    config['local_port'] = config.get('local_port', 1080)
    if is_local:
        # The client must know which server to connect to.
        if config.get('server', None) is None:
            logging.error('server addr not specified')
            print_local_help()
            sys.exit(2)
        else:
            config['server'] = to_str(config['server'])
    else:
        config['server'] = to_str(config.get('server', '0.0.0.0'))
        try:
            config['forbidden_ip'] = \
                IPNetwork(config.get('forbidden_ip', '127.0.0.0/8,::1/128'))
        except Exception as e:
            logging.error(e)
            sys.exit(2)
    config['server_port'] = config.get('server_port', 8388)
    # Reconfigure logging to reflect the requested verbosity level.
    logging.getLogger('').handlers = []
    logging.addLevelName(VERBOSE_LEVEL, 'VERBOSE')
    if config['verbose'] >= 2:
        level = VERBOSE_LEVEL
    elif config['verbose'] == 1:
        level = logging.DEBUG
    elif config['verbose'] == -1:
        level = logging.WARN
    elif config['verbose'] <= -2:
        level = logging.ERROR
    else:
        level = logging.INFO
    verbose = config['verbose']
    logging.basicConfig(level=level,
                        format='%(asctime)s %(levelname)-8s %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')
    check_config(config, is_local)
    return config
def print_help(is_local):
    """Print the usage text for sslocal when *is_local*, else for ssserver."""
    printer = print_local_help if is_local else print_server_help
    printer()
def print_local_help():
    """Print the sslocal (client) command-line usage to stdout."""
    print('''usage: sslocal [OPTION]...
A fast tunnel proxy that helps you bypass firewalls.
You can supply configurations via either config file or command line arguments.
Proxy options:
  -c CONFIG              path to config file
  -s SERVER_ADDR         server address
  -p SERVER_PORT         server port, default: 8388
  -b LOCAL_ADDR          local binding address, default: 127.0.0.1
  -l LOCAL_PORT          local port, default: 1080
  -k PASSWORD            password
  -m METHOD              encryption method, default: aes-256-cfb
  -t TIMEOUT             timeout in seconds, default: 300
  --fast-open            use TCP_FASTOPEN, requires Linux 3.7+
General options:
  -h, --help             show this help message and exit
  -d start/stop/restart  daemon mode
  --pid-file PID_FILE    pid file for daemon mode
  --log-file LOG_FILE    log file for daemon mode
  --user USER            username to run as
  -v, -vv                verbose mode
  -q, -qq                quiet mode, only show warnings/errors
  --version              show version information
Online help: <https://github.com/shadowsocks/shadowsocks>
''')
def print_server_help():
    """Print the ssserver (server) command-line usage to stdout."""
    print('''usage: ssserver [OPTION]...
A fast tunnel proxy that helps you bypass firewalls.
You can supply configurations via either config file or command line arguments.
Proxy options:
  -c CONFIG              path to config file
  -s SERVER_ADDR         server address, default: 0.0.0.0
  -p SERVER_PORT         server port, default: 8388
  -k PASSWORD            password
  -m METHOD              encryption method, default: aes-256-cfb
  -t TIMEOUT             timeout in seconds, default: 300
  --fast-open            use TCP_FASTOPEN, requires Linux 3.7+
  --workers WORKERS      number of workers, available on Unix/Linux
  --forbidden-ip IPLIST  comma seperated IP list forbidden to connect
  --manager-address ADDR optional server manager UDP address, see wiki
General options:
  -h, --help             show this help message and exit
  -d start/stop/restart  daemon mode
  --pid-file PID_FILE    pid file for daemon mode
  --log-file LOG_FILE    log file for daemon mode
  --user USER            username to run as
  -v, -vv                verbose mode
  -q, -qq                quiet mode, only show warnings/errors
  --version              show version information
Online help: <https://github.com/shadowsocks/shadowsocks>
''')
def _decode_list(data):
rv = []
for item in data:
if hasattr(item, 'encode'):
item = item.encode('utf-8')
elif isinstance(item, list):
item = _decode_list(item)
elif isinstance(item, dict):
item = _decode_dict(item)
rv.append(item)
return rv
def _decode_dict(data):
rv = {}
for key, value in data.items():
if hasattr(value, 'encode'):
value = value.encode('utf-8')
elif isinstance(value, list):
value = _decode_list(value)
elif isinstance(value, dict):
value = _decode_dict(value)
rv[key] = value
return rv
def parse_json_in_str(data):
    """Parse *data* as JSON, converting all string values to UTF-8 bytes."""
    # parse json and convert everything from unicode to str
    # object_hook runs bottom-up on every parsed object, so nested
    # structures come back fully converted.
    return json.loads(data, object_hook=_decode_dict)
| apache-2.0 |
yetsky/extra | packages/my-application/python-all/files/usr/lib/python2.7/encodings/iso2022_jp_2.py | 816 | 1061 | #
# iso2022_jp_2.py: Python Unicode Codec for ISO2022_JP_2
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_iso2022, codecs
import _multibytecodec as mbc
codec = _codecs_iso2022.getcodec('iso2022_jp_2')
class Codec(codecs.Codec):
    # Stateless encode/decode delegated to the underlying C codec object.
    encode = codec.encode
    decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    # Stateful multibyte encoder; behaviour comes from the mixin plus 'codec'.
    codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    # Stateful multibyte decoder; behaviour comes from the mixin plus 'codec'.
    codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    # Stream-oriented reader wrapping the same C codec.
    codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    # Stream-oriented writer wrapping the same C codec.
    codec = codec
def getregentry():
    """Return the CodecInfo record used to register this codec with Python."""
    return codecs.CodecInfo(
        name='iso2022_jp_2',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
| gpl-2.0 |
aceway/cppite | src/py/cppite.py | 1 | 13042 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
########################################################
# ITE command start with: #//
# ITE command keywords:quit,exit,byebye,bye, begin, end,
# verbose, concise, dump_project, dump_make_file, dump_cpp,
# dump_fragment,load_fragment, compile, run, edit
#
########################################################
import os
import commands
import settings as st
import utils as ut
from cpp_fragment_tmpl import hpp_tmpl, cpp_tmpl
from CMakeLists_tmpl import cmakelists_tmpl
class CppIte:
    def __init__(self):
        # Raw C++ code lines entered so far (flushed into generated files).
        self.cpp_fragment = []
        # History of ITE commands, most recent last.
        self.ite_cmd = []
        # Extra headers / include dirs / static libs for project generation.
        self.include_files = []
        self.include_dirs = []
        self.static_files = []
        self.is_verbose=False
        # command full name and its shortkeys
        self.ite_cmd_keymap={
                'RUN':              ("R", "RU"),
                'COMPILE':          ("C", "CO", "COM", "COMP"),
                'VERBOSE':          ("V", "VE", "VERB"),
                'SIMPLE':           ("S", "SI", "SIM"),
                'CLEAR':            ("CL", "CLE", ),
                'SHOW':             ("SH", "SHO", ),
                'HELP':             ("H", "HEL", ),
                'RELOAD_SETTING':   ('RS', 'REST'),
                'CMD_CLEAR':        ("CCL", "CCLE", ),
                'CMD_HISTORY':      ("CH", "CHIS", ),
                'ADD_INCLUDE_FILE': ("AIF", ),
                'RM_INCLUDE_FILE':  ("RIF", "REMOVE_INCLUDE_FILE"),
                'ADD_INCLUDE_DIR':  ("AID", ),
                'RM_INCLUDE_DIR':   ("RID", "REMOVE_INCLUDE_DIR"),
                'LIST_INCLUDE_FILE':("LIF", ),
                'LIST_INCLUDE_DIR': ("LID", ),
                'ADD_STATIC_FILE':  ('ASF', ),
                'LIST_STATIC_FILE': ('LSF', ),
                'RM_STATIC_FILE':   ('RSF', "REMOVE_STATIC_FILE"),
                'LOAD_FRAG_FILE':   ('LFF', 'LDFF'),
                }
def is_ite_cmd(self, ri):
""" Test wether the raw input is a ITE(interactive test environment) command
or its c++ code fragment.
"""
if ri.strip().startswith( "#//" ):
self.ite_cmd.append( ri.strip().strip("#//") )
return True
else:
self.cpp_fragment.append( ri )
return False
    def do_ite_cmd(self):
        """ Do the ITE command """
        # Split the most recently queued command into name + arguments.
        cmd = self.ite_cmd[-1].strip().split(" ")
        ite_cmd=cmd[0].upper()
        args=cmd[1:]
        if ite_cmd in self.ite_cmd_keymap:
            ite_cmd=cmd[0].upper()
            args=cmd[1:]
        else:
            # Not a full command name: resolve short aliases to the canonical one.
            for k, v in self.ite_cmd_keymap.items():
                if ite_cmd in v:
                    ite_cmd=k.upper()
                    args=cmd[1:]
                    break
        if self.is_verbose:
            print "Do c++ ITE command:{c} {a}".format( c = ite_cmd, a=args )
        # Dispatch via the cmd_<name> naming convention.
        self._do_cmd( ite_cmd.lower(), args )
    def _do_cmd( self, cmd, *args, **keywords ):
        """
        Private command proxy: dispatch to the method named 'cmd_<cmd>' when it
        exists and is callable; print an error otherwise. Returns the command's
        result, or None on error/unknown command.
        """
        if hasattr( self, "cmd_" + cmd.strip().lower() ) \
            and callable( getattr(self, "cmd_" + cmd.strip().lower() ) ):
            func = getattr(self, "cmd_" + cmd.strip().lower() )
            try:
                # NOTE: apply() and 'except Exception, e' are Python 2 only.
                ret = apply( func, *args, **keywords )
            except Exception, e:
                print "{e}".format( e = e )
                ret = None
            return ret
        else:
            print "{c}Not surpported command:{cmd}{e}".format( c=st.color.FG_RED, cmd=cmd, e=st.color.END )
            return None
    def cmd_help(self, name=None):
        """Print the cppite command help info."""
        if name is None:
            # No argument: list every cmd_* method with its docstring and aliases.
            print "{c}cppite command start with '#//' in the console line, here is all the supported commands:{e}"\
                    .format(c=st.color.FG_GREEN, e=st.color.END)
            cmds = [ c for c in dir(self) if c.startswith("cmd_") ]
            for c in cmds:
                sc = ",".join( self.ite_cmd_keymap[ c[4:].upper() ] )
                print "{c}: {s}. Short command:{sc}\n".format( c=c[4:], s=getattr(self, c).__doc__, sc=sc)
        else:
            # Show help for one specific command, if it exists.
            name = name.lower()
            cmd_name = "cmd_{n}".format( n= name )
            if hasattr(self, cmd_name):
                sc = ",".join( self.ite_cmd_keymap[ name.upper() ] )
                print "{n}: {s}. Short command:{sc}".format( n=name, s= getattr(self, cmd_name).__doc__, sc=sc)
            else:
                print "{c}Not surpported command:{n}{e}".format( n=name, c=st.color.FG_RED, e=st.color.END )
    def cmd_reload_setting(self):
        """Reload the settings.py"""
        # Re-imports the settings module in place so edits take effect live.
        reload( st )
def cmd_cmd_history(self):
"""Show cppite commands history that you inputted before."""
for cmd in self.ite_cmd[:-1]:
print "{c}".format( c = cmd.strip() )
    def cmd_cmd_clear(self):
        """Clear cppite cached commands"""
        # Rebind (rather than mutate) so any external references keep the old list.
        self.ite_cmd = []
    def cmd_verbose(self):
        """Run in verbose mode, print process detail info."""
        self.is_verbose = True
    def cmd_simple(self):
        """Run in simple mode, only print the result but no process info."""
        self.is_verbose = False
def cmd_show(self):
"""Show the inputted c++ code that cached in cppite temp memory"""
if self.is_verbose:
print "{c}Show the cached c++ code:{e}".format( c=st.color.FG_GREEN, e=st.color.END )
for c in self.cpp_fragment:
print c
def cmd_clear(self):
"""Clear the inputted c++ code that cached in cppite temp memory"""
if self.is_verbose:
print "{c}Clear the cached c++ code:\n{cd}\n{e}". \
format( c=st.color.FG_YELLOW, cd="\n".join(self.cpp_fragment), e=st.color.END )
self.cpp_fragment = []
    def cmd_compile(self):
        """Compile the c++ code in cppite caching memory."""
        if self.is_verbose:
            print "Compile c++ code: {cpp}".format( cpp="\n".join(self.cpp_fragment) )
        # Regenerate sources and CMakeLists, then invoke the external build tool.
        self.gen_cpp_code_file()
        self.gen_cmakelist_file()
        return self.exec_bash_cmd( st.compile_tool )
def cmd_run(self):
"""Compile the inputted c++ code and run it"""
if self.is_verbose:
print "Run c++ code fragment: {cpp}".format( cpp="\n".join(self.cpp_fragment) )
if os.path.isfile( st.out_bin_exe ):
status, output = self.exec_bash_cmd( st.out_bin_exe )
if status == 0: print output
else:
print "{c}Cannot find and gen {bf}!{e}".format( c=st.color.FG_RED, bf=st.out_bin_exe, e=st.color.END )
def cmd_list_include_file(self):
"""List c++ include header files"""
print "Now c++ include header file:"
for hf in st.default_include_headers:
print "\t", hf
for hf in self.include_files:
print "\t", hf
def cmd_list_include_dir(self):
"""List c++ include header dirs"""
print "Now c++ include header dir:"
for hd in st.default_include_dirs:
print "\t", hd
for hd in self.include_dirs:
print "\t", hd
def cmd_list_static_file(self):
"""List cmake link static file"""
print "Now cmake link static files:"
for sf in st.default_static_files:
print "\t", sf
for sf in self.static_files:
print "\t", sf
def cmd_add_include_file(self, *file_list):
"""Add c++ include header files"""
if len(file_list) == 0:
print "Need header file name!"
for f in file_list:
if f.strip() in self.include_files:
pass
else:
self.include_files.append( f.strip() )
def cmd_add_include_dir(self, *dir_list):
"""Add c++ include header dirs"""
if len(dir_list) == 0:
print "Need dir name!"
for d in dir_list:
if d.strip() in self.include_dirs:
pass
else:
self.include_dirs.append( d.strip() )
def cmd_add_static_file(self, *file_list):
"""Add static file"""
for f in file_list:
if f.strip() in self.static_files:
pass
else:
self.static_files.append( f.strip() )
def cmd_rm_include_file(self, *file_list):
"""Remove c++ include header files"""
for f in file_list:
if f.strip() in self.include_files:
self.include_files.remove( f.strip() )
else:
pass
def cmd_rm_include_dir(self, *dir_list):
"""Remove c++ include header dirs"""
for d in dir_list:
if d.strip() in self.include_dirs:
self.include_dirs.remove( d.strip() )
else:
pass
def cmd_rm_static_file(self, *file_list):
"""Remove static file from cache"""
for f in file_list:
if f.strip() in self.static_files:
self.static_files.remove( f.strip() )
else:
pass
def cmd_load_frag_file(self, *the_file):
"""Load frag code from a file"""
if len(the_file) == 1:
if os.path.isfile( the_file[0] ):
with open(the_file[0], 'r') as rf:
for line in rf:
self.cpp_fragment.append( line );
else:
print "{c}It's not valid file:{f}.{e}".format( c = st.color.FG_RED, e = st.color.END, f=the_file[0] )
pass
else:
print "{c}Only one file once, but now({ln}):{tf}{e}".format( c = st.color.FG_RED, e = st.color.END, ln=len(the_file), tf=the_file )
def gen_cpp_code_file(self):
    """Use the input c++ code fragment(cached in the list) to generate c++ hpp/cpp file."""
    if self.is_verbose:
        print "Generating c++ code... {f}".format( f = st.cpp_code_dir )
    # Build a de-duplicated '#include <...>' list: defaults first, then the
    # user-added headers. Only extension-less names or .h/.hpp are accepted.
    includes=""
    for f in st.default_include_headers:
        if f.find('.') < 0 or f.endswith('.h') or f.endswith('.hpp'):
            the_include = "#include <{f}>\n".format( f=f )
            if includes.find( the_include ) < 0:
                includes += the_include
    for f in self.include_files:
        if f.find('.') < 0 or f.endswith('.h') or f.endswith('.hpp'):
            the_include = "#include <{f}>\n".format( f=f )
            if includes.find( the_include ) < 0:
                includes += the_include
    # Render the templates and write the header/source pair into the
    # configured code directory.
    hpp_code= hpp_tmpl.format( includes=includes )
    cpp_code = cpp_tmpl.format( head_file=st.hpp_filename, tmp_cpp= "\n".join(self.cpp_fragment) )
    with open( st.cpp_code_dir + st.hpp_filename, 'w') as hf:
        hf.write( hpp_code )
    with open( st.cpp_code_dir + st.cpp_filename, 'w') as cf:
        cf.write( cpp_code )
def gen_cmakelist_file(self):
    """Use the input and default config data to generate cmake's CMakeLists.txt"""
    # One entry per line, built-in defaults first, then user additions.
    include_dirs = "".join(
        "{d}\n".format(d=entry)
        for entry in list(st.default_include_dirs) + list(self.include_dirs))
    static_files = "".join(
        "{s}\n".format(s=entry)
        for entry in list(st.default_static_files) + list(self.static_files))
    rendered = cmakelists_tmpl.format(add_include_dirs=include_dirs,
                                      add_static_libs=static_files)
    with open(st.cmakelists_dir + st.cmakelists_filename, 'w') as cmf:
        cmf.write(rendered)
def exec_bash_cmd(self, cmd):
    """
    Call the bash command or scripts, and get the return info.

    Returns (status, output) from commands.getstatusoutput(); on failure the
    output has password-like tokens masked before it is reported.
    """
    # NOTE(review): the_data is assembled here but never returned -- only the
    # (status, output) tuple escapes this method. Confirm whether the_data is
    # dead code or was meant to be the return value.
    the_data = {}
    cmd = "{sh} ".format(sh=cmd)
    (status, output) = commands.getstatusoutput(cmd)
    if status == 0:
        the_data['code'] = 0
        the_data['data'] = output
        the_data['desc'] = "OK"
    else:
        info = output.split(" ")
        new_info = []
        # Mask passwords: rewrite any "-p<secret>"-style token (bare, or
        # wrapped in single/double quotes) before the output is shown.
        for d in info:
            if len(d) > 2 and d.lower().startswith("-p"):
                d = "-p******"
            elif len(d) > 2 and d.lower().startswith('"-p'):
                d = "-p******"
            elif len(d) > 2 and d.lower().startswith("'-p"):
                d = "-p******"
            else:
                d = d
            new_info.append(d)
        output = " ".join(new_info)
        the_data['code'] = -1
        the_data['data'] = "<br>{op}".format(op=output)
        the_data['desc'] = "{op}".format(op=output)
    # Report failures in red; echo successes in green only when verbose.
    if status != 0:
        print "{c}{out}{e}".format( c=st.color.FG_RED, out=output, e=st.color.END )
    elif self.is_verbose:
        print "{c}{out}{e}".format( c=st.color.FG_GREEN, out=output, e=st.color.END )
    return status, output
| mit |
jimmy-ren/RPN2T | external/_caffe/scripts/copy_notebook.py | 75 | 1089 | #!/usr/bin/env python
"""
Takes as arguments:
1. the path to a JSON file (such as an IPython notebook).
2. the path to output file
If 'metadata' dict in the JSON file contains 'include_in_docs': true,
then copies the file to output file, appending the 'metadata' property
as YAML front-matter, adding the field 'category' with value 'notebook'.
"""
import os
import sys
import json
# Copy the notebook to the output path, prefixing the metadata dict as YAML
# front-matter when metadata contains 'include_in_docs': true.
filename = sys.argv[1]
output_filename = sys.argv[2]
content = json.load(open(filename))

if 'include_in_docs' in content['metadata'] and content['metadata']['include_in_docs']:
    yaml_frontmatter = ['---']
    # items() instead of the Python-2-only iteritems(): behaviour is identical
    # on Python 2 and this keeps the script working on Python 3.
    for key, val in content['metadata'].items():
        if key == 'example_name':
            # Docs use 'title' for what the notebook calls 'example_name'.
            key = 'title'
        if val == '':
            val = os.path.basename(filename)
        yaml_frontmatter.append('{}: {}'.format(key, val))
    yaml_frontmatter += ['category: notebook']
    yaml_frontmatter += ['original_path: ' + filename]
    with open(output_filename, 'w') as fo:
        fo.write('\n'.join(yaml_frontmatter + ['---']) + '\n')
        fo.write(open(filename).read())
| mit |
bussiere/pypyjs | website/demo/home/rfk/repos/pypy/lib-python/2.7/json/tests/test_pass1.py | 37 | 1967 | from json.tests import PyTest, CTest
# from http://json.org/JSON_checker/test/pass1.json
JSON = r'''
[
"JSON Test Pattern pass1",
{"object with 1 member":["array with 1 element"]},
{},
[],
-42,
true,
false,
null,
{
"integer": 1234567890,
"real": -9876.543210,
"e": 0.123456789e-12,
"E": 1.234567890E+34,
"": 23456789012E666,
"zero": 0,
"one": 1,
"space": " ",
"quote": "\"",
"backslash": "\\",
"controls": "\b\f\n\r\t",
"slash": "/ & \/",
"alpha": "abcdefghijklmnopqrstuvwyz",
"ALPHA": "ABCDEFGHIJKLMNOPQRSTUVWYZ",
"digit": "0123456789",
"special": "`1~!@#$%^&*()_+-={':[,]}|;.</>?",
"hex": "\u0123\u4567\u89AB\uCDEF\uabcd\uef4A",
"true": true,
"false": false,
"null": null,
"array":[ ],
"object":{ },
"address": "50 St. James Street",
"url": "http://www.JSON.org/",
"comment": "// /* <!-- --",
"# -- --> */": " ",
" s p a c e d " :[1,2 , 3
,
4 , 5 , 6 ,7 ],
"compact": [1,2,3,4,5,6,7],
"jsontext": "{\"object with 1 member\":[\"array with 1 element\"]}",
"quotes": "" \u0022 %22 0x22 034 "",
"\/\\\"\uCAFE\uBABE\uAB98\uFCDE\ubcda\uef4A\b\f\n\r\t`1~!@#$%^&*()_+-=[]{}|;:',./<>?"
: "A key can be any string"
},
0.5 ,98.6
,
99.44
,
1066
,"rosebud"]
'''
class TestPass1(object):
    """Round-trip the JSON_checker pass1 document through loads()/dumps().

    Mixins (PyTest/CTest) supply self.loads/self.dumps for the pure-Python
    and C implementations respectively.
    """

    def test_parse(self):
        # test in/out equivalence and parsing
        decoded = self.loads(JSON)
        reencoded = self.dumps(decoded)
        self.assertEqual(decoded, self.loads(reencoded))
        # The out-of-range float in the document must be rejected when
        # non-finite values are disallowed.
        try:
            self.dumps(decoded, allow_nan=False)
        except ValueError:
            pass
        else:
            self.fail("23456789012E666 should be out of range")
# Concrete test classes: the same checks run against the pure-Python and the
# C-accelerated json implementations (the mixins supply loads/dumps).
class TestPyPass1(TestPass1, PyTest): pass
class TestCPass1(TestPass1, CTest): pass
| mit |
bdh1011/wau | venv/lib/python2.7/site-packages/notebook/services/kernelspecs/handlers.py | 1 | 2798 | """Tornado handlers for kernel specifications.
Preliminary documentation at https://github.com/ipython/ipython/wiki/IPEP-25%3A-Registry-of-installed-kernels#rest-api
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import glob
import json
import os
pjoin = os.path.join
from tornado import web
from ...base.handlers import APIHandler, json_errors
from ...utils import url_path_join
def kernelspec_model(handler, name):
    """Load a KernelSpec by name and return the REST API model"""
    spec = handler.kernel_spec_manager.get_kernel_spec(name)
    resources = {}
    model = {'name': name, 'spec': spec.to_dict(), 'resources': resources}
    resource_dir = spec.resource_dir
    # Advertise the well-known kernel resources that actually exist on disk.
    for resource in ['kernel.js', 'kernel.css']:
        if os.path.exists(pjoin(resource_dir, resource)):
            resources[resource] = url_path_join(
                handler.base_url,
                'kernelspecs',
                name,
                resource
            )
    # Any logo-* file is exposed under its extension-less basename
    # (e.g. logo-64x64.png -> resources['logo-64x64']).
    for logo_file in glob.glob(pjoin(resource_dir, 'logo-*')):
        fname = os.path.basename(logo_file)
        no_ext, _ = os.path.splitext(fname)
        resources[no_ext] = url_path_join(
            handler.base_url,
            'kernelspecs',
            name,
            fname
        )
    return model
class MainKernelSpecHandler(APIHandler):
    """Collection endpoint: lists every installed kernelspec plus the default."""

    SUPPORTED_METHODS = ('GET', 'OPTIONS')

    @web.authenticated
    @json_errors
    def get(self):
        specs = {}
        model = {
            'default': self.kernel_manager.default_kernel_name,
            'kernelspecs': specs,
        }
        for kernel_name in self.kernel_spec_manager.find_kernel_specs():
            try:
                specs[kernel_name] = kernelspec_model(self, kernel_name)
            except Exception:
                # One broken spec must not take down the whole listing.
                self.log.error("Failed to load kernel spec: '%s'", kernel_name, exc_info=True)
        self.set_header("Content-Type", 'application/json')
        self.finish(json.dumps(model))

    @web.authenticated
    @json_errors
    def options(self):
        self.finish()
class KernelSpecHandler(APIHandler):
    """Single-spec endpoint: returns one kernelspec model, or 404."""

    SUPPORTED_METHODS = ('GET',)

    @web.authenticated
    @json_errors
    def get(self, kernel_name):
        try:
            model = kernelspec_model(self, kernel_name)
        except KeyError:
            # Unknown kernel name -> not found.
            raise web.HTTPError(404, u'Kernel spec %s not found' % kernel_name)
        else:
            self.set_header("Content-Type", 'application/json')
            self.finish(json.dumps(model))
# URL to handler mappings

# Kernel names are restricted to "word" characters (letters, digits, _).
kernel_name_regex = r"(?P<kernel_name>\w+)"

default_handlers = [
    (r"/api/kernelspecs", MainKernelSpecHandler),
    (r"/api/kernelspecs/%s" % kernel_name_regex, KernelSpecHandler),
]
| mit |
Ritsyy/fjord | vendor/packages/requests-2.7.0/requests/structures.py | 1160 | 2977 | # -*- coding: utf-8 -*-
"""
requests.structures
~~~~~~~~~~~~~~~~~~~
Data structures that power Requests.
"""
import collections

# collections.Mapping / collections.MutableMapping moved to collections.abc
# in Python 3.3 and the old aliases were removed in 3.10; fall back for
# interpreters that predate collections.abc.
try:
    from collections.abc import Mapping, MutableMapping
except ImportError:  # Python 2
    from collections import Mapping, MutableMapping
class CaseInsensitiveDict(MutableMapping):
    """
    A case-insensitive ``dict``-like object.

    Implements all methods and operations of
    ``MutableMapping`` as well as dict's ``copy``. Also
    provides ``lower_items``.

    All keys are expected to be strings. The structure remembers the
    case of the last key to be set, and ``iter(instance)``,
    ``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
    will contain case-sensitive keys. However, querying and contains
    testing is case insensitive::

        cid = CaseInsensitiveDict()
        cid['Accept'] = 'application/json'
        cid['aCCEPT'] == 'application/json'  # True
        list(cid) == ['Accept']  # True

    For example, ``headers['content-encoding']`` will return the
    value of a ``'Content-Encoding'`` response header, regardless
    of how the header name was originally stored.

    If the constructor, ``.update``, or equality comparison
    operations are given keys that have equal ``.lower()``s, the
    behavior is undefined.
    """
    # Subclassing the abc (not ``collections.MutableMapping``) keeps this
    # working on Python 3.10+, where the non-abc aliases were removed.

    def __init__(self, data=None, **kwargs):
        # Internal store maps lowercased key -> (original-cased key, value).
        self._store = dict()
        if data is None:
            data = {}
        self.update(data, **kwargs)

    def __setitem__(self, key, value):
        # Use the lowercased key for lookups, but store the actual
        # key alongside the value.
        self._store[key.lower()] = (key, value)

    def __getitem__(self, key):
        return self._store[key.lower()][1]

    def __delitem__(self, key):
        del self._store[key.lower()]

    def __iter__(self):
        # Yield the original-cased keys.
        return (casedkey for casedkey, mappedvalue in self._store.values())

    def __len__(self):
        return len(self._store)

    def lower_items(self):
        """Like iteritems(), but with all lowercase keys."""
        return (
            (lowerkey, keyval[1])
            for (lowerkey, keyval)
            in self._store.items()
        )

    def __eq__(self, other):
        if isinstance(other, Mapping):
            other = CaseInsensitiveDict(other)
        else:
            return NotImplemented
        # Compare insensitively
        return dict(self.lower_items()) == dict(other.lower_items())

    # Copy is required
    def copy(self):
        # _store.values() is an iterable of (key, value) pairs, which the
        # MutableMapping-provided update() accepts.
        return CaseInsensitiveDict(self._store.values())

    def __repr__(self):
        return str(dict(self.items()))
class LookupDict(dict):
    """Dictionary lookup object.

    Item access reads instance *attributes* (not dict entries), and a
    missing key falls through to a default instead of raising KeyError.
    """

    def __init__(self, name=None):
        self.name = name
        super(LookupDict, self).__init__()

    def __repr__(self):
        return '<lookup \'%s\'>' % (self.name)

    def __getitem__(self, key):
        # We allow fall-through here, so values default to None
        return self.__dict__.get(key)

    def get(self, key, default=None):
        return self.__dict__.get(key, default)
| bsd-3-clause |
stamhe/zulip | api/zulip/__init__.py | 115 | 17705 | # -*- coding: utf-8 -*-
# Copyright © 2012-2014 Zulip, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import simplejson
import requests
import time
import traceback
import urlparse
import sys
import os
import optparse
import platform
import urllib
import random
from distutils.version import LooseVersion
from ConfigParser import SafeConfigParser
import logging
__version__ = "0.2.4"

# Module-level logger; RandomExponentialBackoff falls back to print when it
# is unavailable.
logger = logging.getLogger(__name__)

# Check that we have a recent enough version
# Older versions don't provide the 'json' attribute on responses.
assert(LooseVersion(requests.__version__) >= LooseVersion('0.12.1'))

# In newer versions, the 'json' attribute is a function, not a property
requests_json_is_function = callable(requests.Response.json)

# All request paths are versioned under /api/v1/.
API_VERSTRING = "v1/"
class CountingBackoff(object):
    """Track consecutive failures and decide whether to keep retrying."""

    def __init__(self, maximum_retries=10, timeout_success_equivalent=None):
        self.number_of_retries = 0
        self.maximum_retries = maximum_retries
        # After this many seconds without an attempt, the failure count is
        # considered stale and reset (None disables the behaviour).
        self.timeout_success_equivalent = timeout_success_equivalent
        self.last_attempt_time = 0

    def keep_going(self):
        """True while the failure budget has not been exhausted."""
        self._check_success_timeout()
        return self.number_of_retries < self.maximum_retries

    def succeed(self):
        self.number_of_retries = 0
        self.last_attempt_time = time.time()

    def fail(self):
        self._check_success_timeout()
        # Clamp so the count never exceeds the configured maximum.
        self.number_of_retries = min(self.number_of_retries + 1,
                                     self.maximum_retries)
        self.last_attempt_time = time.time()

    def _check_success_timeout(self):
        if self.timeout_success_equivalent is None or self.last_attempt_time == 0:
            return
        if time.time() - self.last_attempt_time > self.timeout_success_equivalent:
            self.number_of_retries = 0
class RandomExponentialBackoff(CountingBackoff):
    # Backoff that also sleeps on each failure: the delay grows
    # exponentially (ratio sqrt(2)) with random jitter between x and 2x.
    def fail(self):
        super(RandomExponentialBackoff, self).fail()
        # Exponential growth with ratio sqrt(2); compute random delay
        # between x and 2x where x is growing exponentially
        delay_scale = int(2 ** (self.number_of_retries / 2.0 - 1)) + 1
        delay = delay_scale + random.randint(1, delay_scale)
        message = "Sleeping for %ss [max %s] before retrying." % (delay, delay_scale * 2)
        try:
            logger.warning(message)
        except NameError:
            # Fallback when the module-level logger is not defined.
            print message
        time.sleep(delay)
def _default_client():
    """Return the default client identifier string for this library."""
    return "ZulipPython/" + __version__
def generate_option_group(parser, prefix=''):
    """Attach the standard Zulip connection options to an optparse parser.

    Every long option name can be namespaced via *prefix* so several
    accounts can be configured on one command line. Returns the group so
    the caller can add it to the parser.
    """
    opt_group = optparse.OptionGroup(parser, 'Zulip API configuration')
    opt_group.add_option('--%ssite' % (prefix,),
                         dest="zulip_site",
                         help="Zulip server URI",
                         default=None)
    opt_group.add_option('--%sapi-key' % (prefix,),
                         dest="zulip_api_key",
                         action='store')
    opt_group.add_option('--%suser' % (prefix,),
                         dest='zulip_email',
                         help='Email address of the calling bot or user.')
    opt_group.add_option('--%sconfig-file' % (prefix,),
                         action='store',
                         dest="zulip_config_file",
                         help='Location of an ini file containing the\nabove information. (default ~/.zuliprc)')
    opt_group.add_option('-v', '--verbose',
                         action='store_true',
                         help='Provide detailed output.')
    # Hidden option: lets callers override the reported client string.
    opt_group.add_option('--%sclient' % (prefix,),
                         action='store',
                         default=None,
                         dest="zulip_client",
                         help=optparse.SUPPRESS_HELP)
    return opt_group
def init_from_options(options, client=None):
    """Build a Client from parsed command-line options.

    An explicit --client flag wins over the *client* argument, which in
    turn wins over the library default.
    """
    chosen_client = options.zulip_client
    if chosen_client is None:
        chosen_client = client if client is not None else _default_client()
    return Client(email=options.zulip_email, api_key=options.zulip_api_key,
                  config_file=options.zulip_config_file, verbose=options.verbose,
                  site=options.zulip_site, client=chosen_client)
def get_default_config_filename():
    """Return ~/.zuliprc, erroring out if only the legacy ~/.humbugrc exists."""
    home = os.environ["HOME"]
    config_file = os.path.join(home, ".zuliprc")
    legacy_file = os.path.join(home, ".humbugrc")
    # Refuse to silently ignore an old-style config file.
    if not os.path.exists(config_file) and os.path.exists(legacy_file):
        raise RuntimeError("The Zulip API configuration file is now ~/.zuliprc; please run:\n\n mv ~/.humbugrc ~/.zuliprc\n")
    return config_file
class Client(object):
    """Synchronous client for the Zulip REST API.

    Credentials come from explicit arguments or from an ini config file
    (~/.zuliprc by default). Endpoint methods (send_message, register, ...)
    are attached dynamically via the _register classmethod below.
    """

    def __init__(self, email=None, api_key=None, config_file=None,
                 verbose=False, retry_on_errors=True,
                 site=None, client=None):
        if client is None:
            client = _default_client()

        # Fill in whichever of api_key/email (and optionally site) are
        # missing from the on-disk config file.
        if None in (api_key, email):
            if config_file is None:
                config_file = get_default_config_filename()
            if not os.path.exists(config_file):
                raise RuntimeError("api_key or email not specified and %s does not exist"
                                   % (config_file,))
            config = SafeConfigParser()
            with file(config_file, 'r') as f:
                config.readfp(f, config_file)
            if api_key is None:
                api_key = config.get("api", "key")
            if email is None:
                email = config.get("api", "email")
            if site is None and config.has_option("api", "site"):
                site = config.get("api", "site")

        self.api_key = api_key
        self.email = email
        self.verbose = verbose
        # Normalize the server URL: force https when no scheme was given.
        if site is not None:
            if not site.startswith("http"):
                site = "https://" + site
            # Remove trailing "/"s from site to simplify the below logic for adding "/api"
            site = site.rstrip("/")
            self.base_url = site
        else:
            self.base_url = "https://api.zulip.com"

        if self.base_url != "https://api.zulip.com" and not self.base_url.endswith("/api"):
            self.base_url += "/api"
        self.base_url += "/"
        self.retry_on_errors = retry_on_errors
        self.client_name = client

    def get_user_agent(self):
        """Build a User-Agent string including OS vendor and version."""
        vendor = ''
        vendor_version = ''
        try:
            vendor = platform.system()
            vendor_version = platform.release()
        except IOError:
            # If the calling process is handling SIGCHLD, platform.system() can
            # fail with an IOError.  See http://bugs.python.org/issue9127
            pass

        if vendor == "Linux":
            vendor, vendor_version, dummy = platform.linux_distribution()
        elif vendor == "Windows":
            vendor_version = platform.win32_ver()[1]
        elif vendor == "Darwin":
            vendor_version = platform.mac_ver()[0]

        return "{client_name} ({vendor}; {vendor_version})".format(
                client_name=self.client_name,
                vendor=vendor,
                vendor_version=vendor_version,
                )

    def do_api_query(self, orig_request, url, method="POST", longpolling = False):
        """Perform one API request with retry/backoff handling.

        Returns the decoded JSON response dict, or a synthesized dict with
        'result' set to connection-error / unexpected-error / http-error.
        """
        # JSON-encode any non-string values so they survive form encoding.
        request = {}
        for (key, val) in orig_request.iteritems():
            if not (isinstance(val, str) or isinstance(val, unicode)):
                request[key] = simplejson.dumps(val)
            else:
                request[key] = val

        # Mutable state shared with the retry helpers below.
        query_state = {
            'had_error_retry': False,
            'request': request,
            'failures': 0,
        }

        def error_retry(error_string):
            # Decide whether to retry; prints progress dots when verbose.
            if not self.retry_on_errors or query_state["failures"] >= 10:
                return False
            if self.verbose:
                if not query_state["had_error_retry"]:
                    sys.stdout.write("zulip API(%s): connection error%s -- retrying." % \
                            (url.split(API_VERSTRING, 2)[0], error_string,))
                    query_state["had_error_retry"] = True
                else:
                    sys.stdout.write(".")
                sys.stdout.flush()
            # Retried requests must not block on the server side.
            query_state["request"]["dont_block"] = simplejson.dumps(True)
            time.sleep(1)
            query_state["failures"] += 1
            return True

        def end_error_retry(succeeded):
            if query_state["had_error_retry"] and self.verbose:
                if succeeded:
                    print "Success!"
                else:
                    print "Failed!"

        while True:
            try:
                if method == "GET":
                    kwarg = "params"
                else:
                    kwarg = "data"
                kwargs = {kwarg: query_state["request"]}
                res = requests.request(
                        method,
                        urlparse.urljoin(self.base_url, url),
                        auth=requests.auth.HTTPBasicAuth(self.email,
                                                         self.api_key),
                        verify=True, timeout=90,
                        headers={"User-agent": self.get_user_agent()},
                        **kwargs)

                # On 50x errors, try again after a short sleep
                if str(res.status_code).startswith('5'):
                    if error_retry(" (server %s)" % (res.status_code,)):
                        continue
                    # Otherwise fall through and process the python-requests error normally
            except (requests.exceptions.Timeout, requests.exceptions.SSLError) as e:
                # Timeouts are either a Timeout or an SSLError; we
                # want the later exception handlers to deal with any
                # non-timeout other SSLErrors
                if (isinstance(e, requests.exceptions.SSLError) and
                    str(e) != "The read operation timed out"):
                    raise
                if longpolling:
                    # When longpolling, we expect the timeout to fire,
                    # and the correct response is to just retry
                    continue
                else:
                    end_error_retry(False)
                    return {'msg': "Connection error:\n%s" % traceback.format_exc(),
                            "result": "connection-error"}
            except requests.exceptions.ConnectionError:
                if error_retry(""):
                    continue
                end_error_retry(False)
                return {'msg': "Connection error:\n%s" % traceback.format_exc(),
                        "result": "connection-error"}
            except Exception:
                # We'll split this out into more cases as we encounter new bugs.
                return {'msg': "Unexpected error:\n%s" % traceback.format_exc(),
                        "result": "unexpected-error"}

            # Decode the body; requests' json is a method or a property
            # depending on the library version (see requests_json_is_function).
            try:
                if requests_json_is_function:
                    json_result = res.json()
                else:
                    json_result = res.json
            except Exception:
                json_result = None
            if json_result is not None:
                end_error_retry(True)
                return json_result
            end_error_retry(False)
            return {'msg': "Unexpected error from the server", "result": "http-error",
                    "status_code": res.status_code}

    @classmethod
    def _register(cls, name, url=None, make_request=(lambda request={}: request),
                  method="POST", computed_url=None, **query_kwargs):
        # Dynamically attach an endpoint method named *name* to the class;
        # make_request turns the caller's arguments into the request dict.
        if url is None:
            url = name
        def call(self, *args, **kwargs):
            request = make_request(*args, **kwargs)
            if computed_url is not None:
                req_url = computed_url(request)
            else:
                req_url = url
            return self.do_api_query(request, API_VERSTRING + req_url, method=method, **query_kwargs)
        call.func_name = name
        setattr(cls, name, call)

    def call_on_each_event(self, callback, event_types=None, narrow=[]):
        """Long-poll loop: deliver each event to *callback*, re-registering
        the event queue whenever it goes away."""
        def do_register():
            while True:
                if event_types is None:
                    res = self.register()
                else:
                    res = self.register(event_types=event_types, narrow=narrow)

                if 'error' in res.get('result'):
                    if self.verbose:
                        print "Server returned error:\n%s" % res['msg']
                    time.sleep(1)
                else:
                    return (res['queue_id'], res['last_event_id'])

        queue_id = None
        while True:
            if queue_id is None:
                (queue_id, last_event_id) = do_register()

            res = self.get_events(queue_id=queue_id, last_event_id=last_event_id)
            if 'error' in res.get('result'):
                if res["result"] == "http-error":
                    if self.verbose:
                        print "HTTP error fetching events -- probably a server restart"
                elif res["result"] == "connection-error":
                    if self.verbose:
                        print "Connection error fetching events -- probably server is temporarily down?"
                else:
                    if self.verbose:
                        print "Server returned error:\n%s" % res["msg"]
                    if res["msg"].startswith("Bad event queue id:"):
                        # Our event queue went away, probably because
                        # we were asleep or the server restarted
                        # abnormally.  We may have missed some
                        # events while the network was down or
                        # something, but there's not really anything
                        # we can do about it other than resuming
                        # getting new ones.
                        #
                        # Reset queue_id to register a new event queue.
                        queue_id = None
                # TODO: Make this back off once it's more reliable
                time.sleep(1)
                continue

            for event in res['events']:
                last_event_id = max(last_event_id, int(event['id']))
                callback(event)

    def call_on_each_message(self, callback):
        """Convenience wrapper that filters the event stream to messages."""
        def event_callback(event):
            if event['type'] == 'message':
                callback(event['message'])

        self.call_on_each_event(event_callback, ['message'])
def _mk_subs(streams, **kwargs):
result = kwargs
result['subscriptions'] = streams
return result
def _mk_rm_subs(streams):
return {'delete': streams}
def _mk_deregister(queue_id):
return {'queue_id': queue_id}
def _mk_events(event_types=None, narrow=[]):
if event_types is None:
return dict()
return dict(event_types=event_types, narrow=narrow)
def _kwargs_to_dict(**kwargs):
return kwargs
class ZulipStream(object):
    """
    A Zulip stream-like object

    Behaves like a writable file: every write() is sent immediately as a
    message of the configured type/recipient/subject.
    """

    def __init__(self, type, to, subject, **kwargs):
        # Remaining kwargs configure the underlying API client.
        self.client = Client(**kwargs)
        self.type = type
        self.to = to
        self.subject = subject

    def write(self, content):
        self.client.send_message({
            "type": self.type,
            "to": self.to,
            "subject": self.subject,
            "content": content,
        })

    def flush(self):
        # Messages are sent eagerly in write(); nothing to flush.
        pass
# Bind the concrete API endpoints onto Client. Each call attaches a method
# named after the first argument that routes through do_api_query().
Client._register('send_message', url='messages', make_request=(lambda request: request))
Client._register('update_message', method='PATCH', url='messages', make_request=(lambda request: request))
Client._register('get_messages', method='GET', url='messages/latest', longpolling=True)
Client._register('get_events', url='events', method='GET', longpolling=True, make_request=(lambda **kwargs: kwargs))
Client._register('register', make_request=_mk_events)
Client._register('export', method='GET', url='export')
Client._register('deregister', url="events", method="DELETE", make_request=_mk_deregister)
Client._register('get_profile', method='GET', url='users/me')
Client._register('get_streams', method='GET', url='streams', make_request=_kwargs_to_dict)
Client._register('get_members', method='GET', url='users')
Client._register('list_subscriptions', method='GET', url='users/me/subscriptions')
Client._register('add_subscriptions', url='users/me/subscriptions', make_request=_mk_subs)
Client._register('remove_subscriptions', method='PATCH', url='users/me/subscriptions', make_request=_mk_rm_subs)
# The stream name is URL-quoted into the path rather than sent as a field.
Client._register('get_subscribers', method='GET',
                 computed_url=lambda request: 'streams/%s/members' % (urllib.quote(request['stream'], safe=''),),
                 make_request=_kwargs_to_dict)
Client._register('render_message', method='GET', url='messages/render')
Client._register('create_user', method='POST', url='users')
| apache-2.0 |
hljyunxi/clearsilver | cdbi/ab_db.py | 9 | 3105 |
import MySQLdb
from hdfhelp import HdfRow, HdfItemList
from odb import *
class ABPeopleTable(Table):
    # One row per person; the primary_* columns point at the preferred
    # place/email/phone rows for quick lookup.
    def _defineRows(self):
        self.d_addColumn("person_id", kInteger, primarykey = 1, autoincrement = 1)
        self.d_addColumn("fullname", kVarString, 255)
        self.d_addColumn("first_name", kVarString, 50)
        self.d_addColumn("last_name", kVarString, 50)
        self.d_addColumn("maiden_name", kVarString, 50)
        self.d_addColumn("title", kVarString, 50)
        self.d_addColumn("dob", kVarString, 14)  # date
        self.d_addColumn("note", kVarString, 255)
        # Foreign key into ab_places.
        self.d_addColumn("primary_place_id", kInteger,
                         relations = [('ab_places', 'place_id')])
        self.d_addColumn("primary_email_id", kInteger)
        self.d_addColumn("primary_phone_id", kInteger)
class ABPlaceTable(Table):
    # Postal addresses; valid_from/valid_to bound the period the address
    # applies to.
    def _defineRows(self):
        self.d_addColumn("place_id", kInteger, primarykey = 1, autoincrement = 1)
        self.d_addColumn("person_id", kInteger, default=0)
        self.d_addColumn("address", kVarString, 255)
        self.d_addColumn("city", kVarString, 255)
        self.d_addColumn("state", kFixedString, 2)
        self.d_addColumn("zip", kVarString, 15)
        self.d_addColumn("country", kVarString, 100)
        self.d_addColumn("valid_from", kVarString, 14)  # date
        self.d_addColumn("valid_to", kVarString, 14)  # date
        self.d_addColumn("note", kVarString, 255)
class ABEmailTable(Table):
    # Email addresses, keyed by the address itself (no surrogate id).
    def _defineRows(self):
        self.d_addColumn("email", kVarString, 255, primarykey = 1)
        self.d_addColumn("person_id", kInteger, default=0)
        self.d_addColumn("last_received", kInteger)  # timestamp
        self.d_addColumn("valid_from", kVarString, 14)  # date
        self.d_addColumn("valid_to", kVarString, 14)  # date
        self.d_addColumn("note", kVarString, 255)
class ABPhoneTable(Table):
    # Phone numbers, typed by a single-character enum (default 'o' = other).
    def _defineRows(self):
        self.d_addColumn("phone_id", kInteger, primarykey = 1, autoincrement = 1)
        self.d_addColumn("person_id", kInteger, default=0)
        self.d_addColumn("place_id", kInteger, default=0)
        self.d_addColumn("phone_number", kVarString, 20)
        self.d_addColumn("phone_type", kFixedString, 1, default='o',
                         enum_values = { 'm' : 'mobile',
                                         'h' : 'home',
                                         'b' : 'business',
                                         'p' : 'pager',
                                         'f' : 'fax',
                                         'o' : 'other'})
        self.d_addColumn("valid_from", kVarString, 14)  # date
        self.d_addColumn("valid_to", kVarString, 14)  # date
        self.d_addColumn("note", kVarString, 255)
class DB(Database):
    """Address-book database: wires each ab_* table to its schema class."""

    def __init__(self, db):
        Database.__init__(self, db)
        for alias, table_name, table_class in (
                ("people", "ab_people", ABPeopleTable),
                ("places", "ab_places", ABPlaceTable),
                ("email", "ab_email", ABEmailTable),
                ("phone", "ab_phone", ABPhoneTable)):
            self.addTable(alias, table_name, table_class)

    def defaultRowClass(self):
        # Rows render directly into HDF datasets.
        return HdfRow

    def defaultRowListClass(self):
        return HdfItemList
def connect(host = 'localhost'):
    # Open a MySQL connection and wrap it in the address-book DB object.
    # NOTE(review): user/password/database are hard-coded here -- shipping
    # credentials in source is unsafe; move them to configuration.
    db = MySQLdb.connect(host = host, user='blong', passwd='qwerty', db='blong')
    return DB(db)
| bsd-2-clause |
tedder/ansible | lib/ansible/modules/storage/netapp/na_ontap_volume_clone.py | 22 | 6867 | #!/usr/bin/python
# (c) 2018-2019, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: na_ontap_volume_clone
short_description: NetApp ONTAP manage volume clones.
extends_documentation_fragment:
- netapp.na_ontap
version_added: '2.6'
author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
description:
- Create NetApp ONTAP volume clones.
- A FlexClone License is required to use this module
options:
state:
description:
- Whether volume clone should be created.
choices: ['present']
default: 'present'
parent_volume:
description:
- The parent volume of the volume clone being created.
required: true
volume:
description:
- The name of the volume clone being created.
required: true
vserver:
description:
- Vserver in which the volume clone should be created.
required: true
parent_snapshot:
description:
- Parent snapshot in which volume clone is created off.
parent_vserver:
description:
- Vserver of parent volume in which clone is created off.
qos_policy_group_name:
description:
- The qos-policy-group-name which should be set for volume clone.
space_reserve:
description:
- The space_reserve setting which should be used for the volume clone.
choices: ['volume', 'none']
volume_type:
description:
- The volume-type setting which should be used for the volume clone.
choices: ['rw', 'dp']
junction_path:
version_added: '2.8'
description:
- Junction path of the volume.
'''
EXAMPLES = """
- name: create volume clone
na_ontap_volume_clone:
state: present
username: "{{ netapp username }}"
password: "{{ netapp password }}"
hostname: "{{ netapp hostname }}"
vserver: vs_hack
parent_volume: normal_volume
volume: clone_volume_7
space_reserve: none
parent_snapshot: backup1
junction_path: /clone_volume_7
"""
RETURN = """
"""
from ansible.module_utils.basic import AnsibleModule
import ansible.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppOntapVolumeClone(object):
    """
    Creates a volume clone
    """

    def __init__(self):
        """
        Initialize the NetAppOntapVolumeClone class

        Builds the Ansible argument spec, reads the play parameters into
        attributes, and opens the ZAPI connection for the target vserver.
        """
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, choices=['present'], default='present'),
            parent_volume=dict(required=True, type='str'),
            volume=dict(required=True, type='str'),
            vserver=dict(required=True, type='str'),
            parent_snapshot=dict(required=False, type='str', default=None),
            parent_vserver=dict(required=False, type='str', default=None),
            qos_policy_group_name=dict(required=False, type='str', default=None),
            space_reserve=dict(required=False, choices=['volume', 'none'], default=None),
            volume_type=dict(required=False, choices=['rw', 'dp']),
            junction_path=dict(required=False, type='str', default=None)
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        parameters = self.module.params

        # set up state variables
        self.state = parameters['state']
        self.parent_snapshot = parameters['parent_snapshot']
        self.parent_volume = parameters['parent_volume']
        self.parent_vserver = parameters['parent_vserver']
        self.qos_policy_group_name = parameters['qos_policy_group_name']
        self.space_reserve = parameters['space_reserve']
        self.volume = parameters['volume']
        self.volume_type = parameters['volume_type']
        self.vserver = parameters['vserver']
        self.junction_path = parameters['junction_path']

        if HAS_NETAPP_LIB is False:
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.vserver)
        return

    def create_volume_clone(self):
        """
        Creates a new volume clone

        Optional ZAPI children are attached only when the caller supplied
        the corresponding parameter.
        """
        clone_obj = netapp_utils.zapi.NaElement('volume-clone-create')
        clone_obj.add_new_child("parent-volume", self.parent_volume)
        clone_obj.add_new_child("volume", self.volume)
        if self.qos_policy_group_name:
            clone_obj.add_new_child("qos-policy-group-name", self.qos_policy_group_name)
        if self.space_reserve:
            clone_obj.add_new_child("space-reserve", self.space_reserve)
        if self.parent_snapshot:
            clone_obj.add_new_child("parent-snapshot", self.parent_snapshot)
        if self.parent_vserver:
            clone_obj.add_new_child("parent-vserver", self.parent_vserver)
        if self.volume_type:
            clone_obj.add_new_child("volume-type", self.volume_type)
        if self.junction_path:
            clone_obj.add_new_child("junction-path", self.junction_path)
        self.server.invoke_successfully(clone_obj, True)

    def does_volume_clone_exists(self):
        # Returns False when the clone is absent, True when it exists with
        # the expected parent, and fails the module when a clone of the same
        # name exists under a different parent.
        clone_obj = netapp_utils.zapi.NaElement('volume-clone-get')
        clone_obj.add_new_child("volume", self.volume)
        try:
            results = self.server.invoke_successfully(clone_obj, True)
        except netapp_utils.zapi.NaApiError:
            # ZAPI errors here are treated as "clone does not exist".
            return False
        attributes = results.get_child_by_name('attributes')
        info = attributes.get_child_by_name('volume-clone-info')
        parent_volume = info.get_child_content('parent-volume')
        if parent_volume == self.parent_volume:
            return True
        self.module.fail_json(msg="Error clone %s already exists for parent %s" % (self.volume, parent_volume))

    def apply(self):
        """
        Run Module based on play book

        Idempotent: only creates the clone when it does not already exist,
        and honors Ansible check mode.
        """
        changed = False
        netapp_utils.ems_log_event("na_ontap_volume_clone", self.server)
        existing_volume_clone = self.does_volume_clone_exists()
        if existing_volume_clone is False:  # create clone
            changed = True
        if changed:
            if self.module.check_mode:
                pass
            else:
                self.create_volume_clone()

        self.module.exit_json(changed=changed)
def main():
    """
    Creates the NetApp Ontap Volume Clone object and runs the correct play task
    """
    obj = NetAppOntapVolumeClone()
    obj.apply()
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
Senseg/Py4A | python3-alpha/python3-src/Lib/unittest/util.py | 794 | 4157 | """Various utility functions."""
from collections import namedtuple, OrderedDict
__unittest = True
_MAX_LENGTH = 80
def safe_repr(obj, short=False):
    """Return repr(obj) without ever raising, truncating when *short* is set.

    A broken user __repr__ must not break failure reporting, so fall back
    to the default object repr on any exception.
    """
    try:
        text = repr(obj)
    except Exception:
        text = object.__repr__(obj)
    if short and len(text) >= _MAX_LENGTH:
        return text[:_MAX_LENGTH] + ' [truncated]...'
    return text
def strclass(cls):
    """Return the fully qualified 'module.ClassName' string for *cls*."""
    return "{0}.{1}".format(cls.__module__, cls.__name__)
def sorted_list_difference(expected, actual):
    """Finds elements in only one or the other of two, sorted input lists.
    Returns a two-element tuple of lists. The first list contains those
    elements in the "expected" list but not in the "actual" list, and the
    second contains those elements in the "actual" list but not in the
    "expected" list. Duplicate elements in either input list are ignored.
    """
    # Classic merge-style walk over two sorted lists.  Termination is
    # deliberately EAFP: running an index off the end raises IndexError,
    # which flushes the remainder of the other list and breaks the loop.
    i = j = 0
    missing = []
    unexpected = []
    while True:
        try:
            e = expected[i]
            a = actual[j]
            if e < a:
                # e only in expected; skip its duplicates too.
                missing.append(e)
                i += 1
                while expected[i] == e:
                    i += 1
            elif e > a:
                # a only in actual; skip its duplicates too.
                unexpected.append(a)
                j += 1
                while actual[j] == a:
                    j += 1
            else:
                # Equal element: consume duplicates on both sides.  The
                # finally clause guarantees j still advances even when the
                # expected-side duplicate scan overruns and raises.
                i += 1
                try:
                    while expected[i] == e:
                        i += 1
                finally:
                    j += 1
                    while actual[j] == a:
                        j += 1
        except IndexError:
            missing.extend(expected[i:])
            unexpected.extend(actual[j:])
            break
    return missing, unexpected
def unorderable_list_difference(expected, actual):
    """Same behavior as sorted_list_difference but
    for lists of unorderable items (like dicts).
    As it does a linear search per item (remove) it
    has O(n*n) performance.  Note both input lists are mutated (consumed).
    """
    missing = []
    # Drain *expected* from the tail; whatever cannot be matched against
    # *actual* was expected but never seen.
    while expected:
        candidate = expected.pop()
        if candidate in actual:
            actual.remove(candidate)
        else:
            missing.append(candidate)
    # anything left in actual is unexpected
    return missing, actual
def three_way_cmp(x, y):
    """Return -1 if x < y, 0 if x == y and 1 if x > y"""
    if x < y:
        return -1
    return 1 if x > y else 0
_Mismatch = namedtuple('Mismatch', 'actual expected value')
def _count_diff_all_purpose(actual, expected):
    'Returns list of (cnt_act, cnt_exp, elem) triples where the counts differ'
    # elements need not be hashable, so counting is done by repeated
    # equality scans; counted slots are overwritten with a sentinel.
    left = list(actual)
    right = list(expected)
    CONSUMED = object()
    diffs = []
    for pos, item in enumerate(left):
        if item is CONSUMED:
            continue
        left_count = 0
        for k in range(pos, len(left)):
            if left[k] == item:
                left_count += 1
                left[k] = CONSUMED
        right_count = 0
        for k, other in enumerate(right):
            if other == item:
                right_count += 1
                right[k] = CONSUMED
        if left_count != right_count:
            diffs.append(_Mismatch(left_count, right_count, item))
    # Whatever survives in *right* never occurred in *actual* at all.
    for pos, item in enumerate(right):
        if item is CONSUMED:
            continue
        right_count = 0
        for k in range(pos, len(right)):
            if right[k] == item:
                right_count += 1
                right[k] = CONSUMED
        diffs.append(_Mismatch(0, right_count, item))
    return diffs
def _ordered_count(iterable):
    'Return dict of element counts, in the order they were first seen'
    counts = OrderedDict()
    for value in iterable:
        if value in counts:
            counts[value] += 1
        else:
            counts[value] = 1
    return counts
def _count_diff_hashable(actual, expected):
    'Returns list of (cnt_act, cnt_exp, elem) triples where the counts differ'
    # elements must be hashable
    actual_counts = _ordered_count(actual)
    expected_counts = _ordered_count(expected)
    diffs = []
    # First pass: everything seen in *actual* whose count disagrees.
    for value, got in actual_counts.items():
        wanted = expected_counts.get(value, 0)
        if got != wanted:
            diffs.append(_Mismatch(got, wanted, value))
    # Second pass: values that only ever appeared in *expected*.
    for value, wanted in expected_counts.items():
        if value not in actual_counts:
            diffs.append(_Mismatch(0, wanted, value))
    return diffs
| apache-2.0 |
ShoRit/shipping-costs-sample | v2/lib/python2.7/site-packages/packaging/requirements.py | 140 | 4271 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import string
import re
from pyparsing import stringStart, stringEnd, originalTextFor, ParseException
from pyparsing import ZeroOrMore, Word, Optional, Regex, Combine
from pyparsing import Literal as L # noqa
from six.moves.urllib import parse as urlparse
from .markers import MARKER_EXPR, Marker
from .specifiers import LegacySpecifier, Specifier, SpecifierSet
class InvalidRequirement(ValueError):
    """
    An invalid requirement was found, users should refer to PEP 508.

    Raised by ``Requirement`` when a requirement string fails to parse
    or carries a malformed URL.
    """
# -- pyparsing grammar for PEP 508 requirement strings -------------------
# Punctuation tokens (all suppressed from the parse results).
ALPHANUM = Word(string.ascii_letters + string.digits)
LBRACKET = L("[").suppress()
RBRACKET = L("]").suppress()
LPAREN = L("(").suppress()
RPAREN = L(")").suppress()
COMMA = L(",").suppress()
SEMICOLON = L(";").suppress()
AT = L("@").suppress()
PUNCTUATION = Word("-_.")
# A project name: alphanumeric runs optionally joined by -, _ or .
IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM)
IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END))
NAME = IDENTIFIER("name")
EXTRA = IDENTIFIER
# A direct-reference URL: any run of non-space characters after '@'.
URI = Regex(r'[^ ]+')("url")
URL = (AT + URI)
# Optional extras list: name[extra1,extra2]
EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA)
EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras")
# Version specifiers: a single clause may be PEP 440 or legacy style.
VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE)
VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE)
VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY
VERSION_MANY = Combine(VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE),
                       joinString=",", adjacent=False)("_raw_spec")
# Parentheses around the specifier set are accepted but optional.
_VERSION_SPEC = Optional(((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY))
_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or '')
VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier")
VERSION_SPEC.setParseAction(lambda s, l, t: t[1])
# Environment marker: everything after ';', handed to Marker() verbatim.
MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker")
MARKER_EXPR.setParseAction(
    lambda s, l, t: Marker(s[t._original_start:t._original_end])
)
MARKER_SEPERATOR = SEMICOLON
MARKER = MARKER_SEPERATOR + MARKER_EXPR
VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER)
URL_AND_MARKER = URL + Optional(MARKER)
# A requirement is either "name[extras] @ url ; marker" or
# "name[extras] specifiers ; marker".
NAMED_REQUIREMENT = \
    NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER)
REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd
class Requirement(object):
    """Parse a requirement.
    Parse a given requirement string into its parts, such as name, specifier,
    URL, and extras. Raises InvalidRequirement on a badly-formed requirement
    string.

    Attributes after construction:
    name       -- the project name (str)
    url        -- the direct-reference URL, or None
    extras     -- set of extra names (possibly empty)
    specifier  -- a SpecifierSet built from the version clauses
    marker     -- a Marker instance, or None
    """
    # TODO: Can we test whether something is contained within a requirement?
    #       If so how do we do that? Do we need to test against the _name_ of
    #       the thing as well as the version? What about the markers?
    # TODO: Can we normalize the name and extra name?
    def __init__(self, requirement_string):
        try:
            req = REQUIREMENT.parseString(requirement_string)
        except ParseException as e:
            # Include up to 8 characters around the failure location.
            raise InvalidRequirement(
                "Invalid requirement, parse error at \"{0!r}\"".format(
                    requirement_string[e.loc:e.loc + 8]))
        self.name = req.name
        if req.url:
            # NOTE(review): the second clause of this condition is logically
            # redundant (it implies the first); a URL is accepted only when
            # it has both a scheme and a netloc -- confirm intent.
            parsed_url = urlparse.urlparse(req.url)
            if not (parsed_url.scheme and parsed_url.netloc) or (
                    not parsed_url.scheme and not parsed_url.netloc):
                raise InvalidRequirement("Invalid URL given")
            self.url = req.url
        else:
            self.url = None
        self.extras = set(req.extras.asList() if req.extras else [])
        self.specifier = SpecifierSet(req.specifier)
        self.marker = req.marker if req.marker else None
    def __str__(self):
        # Reassemble a canonical PEP 508 string: name[extras]spec @ url ; marker
        parts = [self.name]
        if self.extras:
            parts.append("[{0}]".format(",".join(sorted(self.extras))))
        if self.specifier:
            parts.append(str(self.specifier))
        if self.url:
            parts.append("@ {0}".format(self.url))
        if self.marker:
            parts.append("; {0}".format(self.marker))
        return "".join(parts)
    def __repr__(self):
        return "<Requirement({0!r})>".format(str(self))
| apache-2.0 |
taliax/easybuild-easyblocks | easybuild/easyblocks/p/psi.py | 12 | 7596 | ##
# Copyright 2013-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing PSI, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
@author: Ward Poelmans (Ghent University)
"""
from distutils.version import LooseVersion
import os
import shutil
import tempfile
import easybuild.tools.environment as env
from easybuild.easyblocks.generic.cmakemake import CMakeMake
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.framework.easyconfig import BUILD
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.modules import get_software_root
class EB_PSI(CMakeMake):
    """
    Support for building and installing PSI

    PSI <= 4.0b5 builds with autotools; newer versions build with CMake, so
    configure_step dispatches on the version being installed.  Note this
    easyblock uses Python 2 syntax (``except OSError, err``).
    """
    def __init__(self, *args, **kwargs):
        """Initialize class variables custom to PSI."""
        super(EB_PSI, self).__init__(*args, **kwargs)
        # Filled in by configure_step; also consumed by install_step.
        self.psi_srcdir = None
        self.install_psi_objdir = None
        self.install_psi_srcdir = None
    @staticmethod
    def extra_options():
        """Extra easyconfig parameters specific to PSI."""
        extra_vars = {
            # always include running PSI unit tests (takes about 2h or less)
            'runtest': ["tests TESTFLAGS='-u -q'", "Run tests included with PSI, without interruption.", BUILD],
        }
        return CMakeMake.extra_options(extra_vars)
    def configure_step(self):
        """
        Configure build outside of source directory.

        Raises EasyBuildError when the build dir cannot be prepared or when
        required dependency modules (Python, Boost) are not loaded.
        """
        try:
            objdir = os.path.join(self.builddir, 'obj')
            os.makedirs(objdir)
            os.chdir(objdir)
        except OSError, err:
            raise EasyBuildError("Failed to prepare for configuration of PSI build: %s", err)
        env.setvar('F77FLAGS', os.getenv('F90FLAGS'))
        # In order to create new plugins with PSI, it needs to know the location of the source
        # and the obj dir after install. These env vars give that information to the configure script.
        self.psi_srcdir = os.path.basename(self.cfg['start_dir'].rstrip(os.sep))
        self.install_psi_objdir = os.path.join(self.installdir, 'obj')
        self.install_psi_srcdir = os.path.join(self.installdir, self.psi_srcdir)
        env.setvar('PSI_OBJ_INSTALL_DIR', self.install_psi_objdir)
        env.setvar('PSI_SRC_INSTALL_DIR', self.install_psi_srcdir)
        # explicitely specify Python binary to use
        pythonroot = get_software_root('Python')
        if not pythonroot:
            raise EasyBuildError("Python module not loaded.")
        # Use EB Boost
        boostroot = get_software_root('Boost')
        if not boostroot:
            raise EasyBuildError("Boost module not loaded.")
        # pre 4.0b5, they were using autotools, on newer it's CMake
        if LooseVersion(self.version) <= LooseVersion("4.0b5"):
            env.setvar('PYTHON', os.path.join(pythonroot, 'bin', 'python'))
            env.setvar('USE_SYSTEM_BOOST', 'TRUE')
            if self.toolchain.options.get('usempi', None):
                # PSI doesn't require a Fortran compiler itself, but may require it to link to BLAS/LAPACK correctly
                # we should always specify the sequential Fortran compiler,
                # to avoid problems with -lmpi vs -lmpi_mt during linking
                fcompvar = 'F77_SEQ'
            else:
                fcompvar = 'F77'
            # update configure options
            # using multi-threaded BLAS/LAPACK is important for performance,
            # cfr. http://sirius.chem.vt.edu/psi4manual/latest/installfile.html#sec-install-iii
            opt_vars = [
                ('cc', 'CC'),
                ('cxx', 'CXX'),
                ('fc', fcompvar),
                ('libdirs', 'LDFLAGS'),
                ('blas', 'LIBBLAS_MT'),
                ('lapack', 'LIBLAPACK_MT'),
            ]
            for (opt, var) in opt_vars:
                self.cfg.update('configopts', "--with-%s='%s'" % (opt, os.getenv(var)))
            # -DMPICH_IGNORE_CXX_SEEK dances around problem with order of stdio.h and mpi.h headers
            # both define SEEK_SET, this makes the one for MPI be ignored
            self.cfg.update('configopts', "--with-opt='%s -DMPICH_IGNORE_CXX_SEEK'" % os.getenv('CFLAGS'))
            # specify location of Boost
            self.cfg.update('configopts', "--with-boost=%s" % boostroot)
            # enable support for plugins
            self.cfg.update('configopts', "--with-plugins")
            ConfigureMake.configure_step(self, cmd_prefix=self.cfg['start_dir'])
        else:
            self.cfg['configopts'] += "-DPYTHON_INTERPRETER=%s " % os.path.join(pythonroot, 'bin', 'python')
            self.cfg['configopts'] += "-DCMAKE_BUILD_TYPE=Release "
            if self.toolchain.options.get('usempi', None):
                self.cfg['configopts'] += "-DENABLE_MPI=ON "
            if get_software_root('impi'):
                self.cfg['configopts'] += "-DENABLE_CSR=ON -DBLAS_TYPE=MKL "
            CMakeMake.configure_step(self, srcdir=self.cfg['start_dir'])
    def install_step(self):
        """Custom install procedure for PSI."""
        super(EB_PSI, self).install_step()
        # the obj and unpacked sources must remain available for working with plugins
        try:
            for subdir in ['obj', self.psi_srcdir]:
                # copy symlinks as symlinks to work around broken symlinks
                shutil.copytree(os.path.join(self.builddir, subdir), os.path.join(self.installdir, subdir), symlinks=True)
        except OSError, err:
            raise EasyBuildError("Failed to copy obj and unpacked sources to install dir: %s", err)
    def test_step(self):
        """
        Run the testsuite of PSI4

        Uses a throwaway scratch directory (PSI_SCRATCH) that is removed
        again afterwards.
        """
        testdir = tempfile.mkdtemp()
        env.setvar('PSI_SCRATCH', testdir)
        super(EB_PSI, self).test_step()
        try:
            shutil.rmtree(testdir)
        except OSError, err:
            raise EasyBuildError("Failed to remove test directory %s: %s", testdir, err)
    def sanity_check_step(self):
        """Custom sanity check for PSI."""
        # e.g. bin/psi4 for version 4.x
        custom_paths = {
            'files': ['bin/psi%s' % self.version.split('.')[0]],
            'dirs': ['include', 'share/psi'],
        }
        super(EB_PSI, self).sanity_check_step(custom_paths=custom_paths)
    def make_module_extra(self):
        """Custom variables for PSI module."""
        txt = super(EB_PSI, self).make_module_extra()
        txt += self.module_generator.set_environment('PSI4DATADIR', os.path.join(self.installdir, 'share', 'psi'))
        return txt
| gpl-2.0 |
loongson-community/EFI-MIPS | ToolKit/cmds/python/Lib/encodings/iso8859_4.py | 15 | 4105 | """ Python Character Mapping Codec generated from '8859-4.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Thin charmap codec: both directions delegate to the C-level charmap
    # helpers using the module-level map tables for ISO 8859-4.
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    # Inherits the charmap-based encode() from Codec; no extra state needed.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Inherits the charmap-based decode() from Codec; no extra state needed.
    pass
### encodings module API
def getregentry():
    # Registry hook used by the encodings package: returns the
    # (encoder, decoder, stream reader, stream writer) tuple for this codec.
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x00a1: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK
0x00a2: 0x0138, # LATIN SMALL LETTER KRA
0x00a3: 0x0156, # LATIN CAPITAL LETTER R WITH CEDILLA
0x00a5: 0x0128, # LATIN CAPITAL LETTER I WITH TILDE
0x00a6: 0x013b, # LATIN CAPITAL LETTER L WITH CEDILLA
0x00a9: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
0x00aa: 0x0112, # LATIN CAPITAL LETTER E WITH MACRON
0x00ab: 0x0122, # LATIN CAPITAL LETTER G WITH CEDILLA
0x00ac: 0x0166, # LATIN CAPITAL LETTER T WITH STROKE
0x00ae: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON
0x00b1: 0x0105, # LATIN SMALL LETTER A WITH OGONEK
0x00b2: 0x02db, # OGONEK
0x00b3: 0x0157, # LATIN SMALL LETTER R WITH CEDILLA
0x00b5: 0x0129, # LATIN SMALL LETTER I WITH TILDE
0x00b6: 0x013c, # LATIN SMALL LETTER L WITH CEDILLA
0x00b7: 0x02c7, # CARON
0x00b9: 0x0161, # LATIN SMALL LETTER S WITH CARON
0x00ba: 0x0113, # LATIN SMALL LETTER E WITH MACRON
0x00bb: 0x0123, # LATIN SMALL LETTER G WITH CEDILLA
0x00bc: 0x0167, # LATIN SMALL LETTER T WITH STROKE
0x00bd: 0x014a, # LATIN CAPITAL LETTER ENG
0x00be: 0x017e, # LATIN SMALL LETTER Z WITH CARON
0x00bf: 0x014b, # LATIN SMALL LETTER ENG
0x00c0: 0x0100, # LATIN CAPITAL LETTER A WITH MACRON
0x00c7: 0x012e, # LATIN CAPITAL LETTER I WITH OGONEK
0x00c8: 0x010c, # LATIN CAPITAL LETTER C WITH CARON
0x00ca: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK
0x00cc: 0x0116, # LATIN CAPITAL LETTER E WITH DOT ABOVE
0x00cf: 0x012a, # LATIN CAPITAL LETTER I WITH MACRON
0x00d0: 0x0110, # LATIN CAPITAL LETTER D WITH STROKE
0x00d1: 0x0145, # LATIN CAPITAL LETTER N WITH CEDILLA
0x00d2: 0x014c, # LATIN CAPITAL LETTER O WITH MACRON
0x00d3: 0x0136, # LATIN CAPITAL LETTER K WITH CEDILLA
0x00d9: 0x0172, # LATIN CAPITAL LETTER U WITH OGONEK
0x00dd: 0x0168, # LATIN CAPITAL LETTER U WITH TILDE
0x00de: 0x016a, # LATIN CAPITAL LETTER U WITH MACRON
0x00e0: 0x0101, # LATIN SMALL LETTER A WITH MACRON
0x00e7: 0x012f, # LATIN SMALL LETTER I WITH OGONEK
0x00e8: 0x010d, # LATIN SMALL LETTER C WITH CARON
0x00ea: 0x0119, # LATIN SMALL LETTER E WITH OGONEK
0x00ec: 0x0117, # LATIN SMALL LETTER E WITH DOT ABOVE
0x00ef: 0x012b, # LATIN SMALL LETTER I WITH MACRON
0x00f0: 0x0111, # LATIN SMALL LETTER D WITH STROKE
0x00f1: 0x0146, # LATIN SMALL LETTER N WITH CEDILLA
0x00f2: 0x014d, # LATIN SMALL LETTER O WITH MACRON
0x00f3: 0x0137, # LATIN SMALL LETTER K WITH CEDILLA
0x00f9: 0x0173, # LATIN SMALL LETTER U WITH OGONEK
0x00fd: 0x0169, # LATIN SMALL LETTER U WITH TILDE
0x00fe: 0x016b, # LATIN SMALL LETTER U WITH MACRON
0x00ff: 0x02d9, # DOT ABOVE
})
### Encoding Map
encoding_map = codecs.make_encoding_map(decoding_map)
| bsd-3-clause |
courtneypresto/googletest | test/gtest_xml_output_unittest.py | 1815 | 14580 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module"""
__author__ = 'eefacm@gmail.com (Sean Mcafee)'
import datetime
import errno
import os
import re
import sys
from xml.dom import minidom, Node
import gtest_test_utils
import gtest_xml_test_utils
GTEST_FILTER_FLAG = '--gtest_filter'
GTEST_LIST_TESTS_FLAG = '--gtest_list_tests'
GTEST_OUTPUT_FLAG = "--gtest_output"
GTEST_DEFAULT_OUTPUT_FILE = "test_detail.xml"
GTEST_PROGRAM_NAME = "gtest_xml_output_unittest_"
SUPPORTS_STACK_TRACES = False
if SUPPORTS_STACK_TRACES:
STACK_TRACE_TEMPLATE = '\nStack trace:\n*'
else:
STACK_TRACE_TEMPLATE = ''
EXPECTED_NON_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="23" failures="4" disabled="2" errors="0" time="*" timestamp="*" name="AllTests" ad_hoc_property="42">
<testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="SuccessfulTest"/>
</testsuite>
<testsuite name="FailedTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="Fails" status="run" time="*" classname="FailedTest">
<failure message="gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="MixedResultTest" tests="3" failures="1" disabled="1" errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="MixedResultTest"/>
<testcase name="Fails" status="run" time="*" classname="MixedResultTest">
<failure message="gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1%(stack)s]]></failure>
<failure message="gtest_xml_output_unittest_.cc:*
Value of: 3
Expected: 2" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 3
Expected: 2%(stack)s]]></failure>
</testcase>
<testcase name="DISABLED_test" status="notrun" time="*" classname="MixedResultTest"/>
</testsuite>
<testsuite name="XmlQuotingTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="OutputsCData" status="run" time="*" classname="XmlQuotingTest">
<failure message="gtest_xml_output_unittest_.cc:*
Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]></top>" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]>]]><![CDATA[</top>%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="InvalidCharactersTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="InvalidCharactersInMessage" status="run" time="*" classname="InvalidCharactersTest">
<failure message="gtest_xml_output_unittest_.cc:*
Failed
Invalid characters in brackets []" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
Invalid characters in brackets []%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="DisabledTest" tests="1" failures="0" disabled="1" errors="0" time="*">
<testcase name="DISABLED_test_not_run" status="notrun" time="*" classname="DisabledTest"/>
</testsuite>
<testsuite name="PropertyRecordingTest" tests="4" failures="0" disabled="0" errors="0" time="*" SetUpTestCase="yes" TearDownTestCase="aye">
<testcase name="OneProperty" status="run" time="*" classname="PropertyRecordingTest" key_1="1"/>
<testcase name="IntValuedProperty" status="run" time="*" classname="PropertyRecordingTest" key_int="1"/>
<testcase name="ThreeProperties" status="run" time="*" classname="PropertyRecordingTest" key_1="1" key_2="2" key_3="3"/>
<testcase name="TwoValuesForOneKeyUsesLastValue" status="run" time="*" classname="PropertyRecordingTest" key_1="2"/>
</testsuite>
<testsuite name="NoFixtureTest" tests="3" failures="0" disabled="0" errors="0" time="*">
<testcase name="RecordProperty" status="run" time="*" classname="NoFixtureTest" key="1"/>
<testcase name="ExternalUtilityThatCallsRecordIntValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_int="1"/>
<testcase name="ExternalUtilityThatCallsRecordStringValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_string="1"/>
</testsuite>
<testsuite name="Single/ValueParamTest" tests="4" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasValueParamAttribute/0" value_param="33" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="HasValueParamAttribute/1" value_param="42" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="AnotherTestThatHasValueParamAttribute/0" value_param="33" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="AnotherTestThatHasValueParamAttribute/1" value_param="42" status="run" time="*" classname="Single/ValueParamTest" />
</testsuite>
<testsuite name="TypedTest/0" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="TypedTest/0" />
</testsuite>
<testsuite name="TypedTest/1" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="TypedTest/1" />
</testsuite>
<testsuite name="Single/TypeParameterizedTestCase/0" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="Single/TypeParameterizedTestCase/0" />
</testsuite>
<testsuite name="Single/TypeParameterizedTestCase/1" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="Single/TypeParameterizedTestCase/1" />
</testsuite>
</testsuites>""" % {'stack': STACK_TRACE_TEMPLATE}
EXPECTED_FILTERED_TEST_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*"
timestamp="*" name="AllTests" ad_hoc_property="42">
<testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0"
errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="SuccessfulTest"/>
</testsuite>
</testsuites>"""
EXPECTED_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="0" failures="0" disabled="0" errors="0" time="*"
timestamp="*" name="AllTests">
</testsuites>"""
GTEST_PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath(GTEST_PROGRAM_NAME)
SUPPORTS_TYPED_TESTS = 'TypedTest' in gtest_test_utils.Subprocess(
[GTEST_PROGRAM_PATH, GTEST_LIST_TESTS_FLAG], capture_stderr=False).output
class GTestXMLOutputUnitTest(gtest_xml_test_utils.GTestXMLTestCase):
"""
Unit test for Google Test's XML output functionality.
"""
# This test currently breaks on platforms that do not support typed and
# type-parameterized tests, so we don't run it under them.
if SUPPORTS_TYPED_TESTS:
def testNonEmptyXmlOutput(self):
"""
Runs a test program that generates a non-empty XML output, and
tests that the XML output is expected.
"""
self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_NON_EMPTY_XML, 1)
def testEmptyXmlOutput(self):
"""Verifies XML output for a Google Test binary without actual tests.
Runs a test program that generates an empty XML output, and
tests that the XML output is expected.
"""
self._TestXmlOutput('gtest_no_test_unittest', EXPECTED_EMPTY_XML, 0)
def testTimestampValue(self):
"""Checks whether the timestamp attribute in the XML output is valid.
Runs a test program that generates an empty XML output, and checks if
the timestamp attribute in the testsuites tag is valid.
"""
actual = self._GetXmlOutput('gtest_no_test_unittest', [], 0)
date_time_str = actual.documentElement.getAttributeNode('timestamp').value
# datetime.strptime() is only available in Python 2.5+ so we have to
# parse the expected datetime manually.
match = re.match(r'(\d+)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)', date_time_str)
self.assertTrue(
re.match,
'XML datettime string %s has incorrect format' % date_time_str)
date_time_from_xml = datetime.datetime(
year=int(match.group(1)), month=int(match.group(2)),
day=int(match.group(3)), hour=int(match.group(4)),
minute=int(match.group(5)), second=int(match.group(6)))
time_delta = abs(datetime.datetime.now() - date_time_from_xml)
# timestamp value should be near the current local time
self.assertTrue(time_delta < datetime.timedelta(seconds=600),
'time_delta is %s' % time_delta)
actual.unlink()
def testDefaultOutputFile(self):
"""
Confirms that Google Test produces an XML output file with the expected
default name if no name is explicitly specified.
"""
output_file = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_DEFAULT_OUTPUT_FILE)
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(
'gtest_no_test_unittest')
try:
os.remove(output_file)
except OSError, e:
if e.errno != errno.ENOENT:
raise
p = gtest_test_utils.Subprocess(
[gtest_prog_path, '%s=xml' % GTEST_OUTPUT_FLAG],
working_dir=gtest_test_utils.GetTempDir())
self.assert_(p.exited)
self.assertEquals(0, p.exit_code)
self.assert_(os.path.isfile(output_file))
def testSuppressedXmlOutput(self):
"""
Tests that no XML file is generated if the default XML listener is
shut down before RUN_ALL_TESTS is invoked.
"""
xml_path = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_PROGRAM_NAME + 'out.xml')
if os.path.isfile(xml_path):
os.remove(xml_path)
command = [GTEST_PROGRAM_PATH,
'%s=xml:%s' % (GTEST_OUTPUT_FLAG, xml_path),
'--shut_down_xml']
p = gtest_test_utils.Subprocess(command)
if p.terminated_by_signal:
# p.signal is avalable only if p.terminated_by_signal is True.
self.assertFalse(
p.terminated_by_signal,
'%s was killed by signal %d' % (GTEST_PROGRAM_NAME, p.signal))
else:
self.assert_(p.exited)
self.assertEquals(1, p.exit_code,
"'%s' exited with code %s, which doesn't match "
'the expected exit code %s.'
% (command, p.exit_code, 1))
self.assert_(not os.path.isfile(xml_path))
def testFilteredTestXmlOutput(self):
"""Verifies XML output when a filter is applied.
Runs a test program that executes only some tests and verifies that
non-selected tests do not show up in the XML output.
"""
self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_FILTERED_TEST_XML, 0,
extra_args=['%s=SuccessfulTest.*' % GTEST_FILTER_FLAG])
def _GetXmlOutput(self, gtest_prog_name, extra_args, expected_exit_code):
"""
Returns the xml output generated by running the program gtest_prog_name.
Furthermore, the program's exit code must be expected_exit_code.
"""
xml_path = os.path.join(gtest_test_utils.GetTempDir(),
gtest_prog_name + 'out.xml')
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(gtest_prog_name)
command = ([gtest_prog_path, '%s=xml:%s' % (GTEST_OUTPUT_FLAG, xml_path)] +
extra_args)
p = gtest_test_utils.Subprocess(command)
if p.terminated_by_signal:
self.assert_(False,
'%s was killed by signal %d' % (gtest_prog_name, p.signal))
else:
self.assert_(p.exited)
self.assertEquals(expected_exit_code, p.exit_code,
"'%s' exited with code %s, which doesn't match "
'the expected exit code %s.'
% (command, p.exit_code, expected_exit_code))
actual = minidom.parse(xml_path)
return actual
def _TestXmlOutput(self, gtest_prog_name, expected_xml,
expected_exit_code, extra_args=None):
"""
Asserts that the XML document generated by running the program
gtest_prog_name matches expected_xml, a string containing another
XML document. Furthermore, the program's exit code must be
expected_exit_code.
"""
actual = self._GetXmlOutput(gtest_prog_name, extra_args or [],
expected_exit_code)
expected = minidom.parseString(expected_xml)
self.NormalizeXml(actual.documentElement)
self.AssertEquivalentNodes(expected.documentElement,
actual.documentElement)
expected.unlink()
actual.unlink()
if __name__ == '__main__':
  # Keep stack traces short so the expected XML templates stay stable.
  os.environ['GTEST_STACK_TRACE_DEPTH'] = '1'
  gtest_test_utils.Main()
| bsd-3-clause |
GdZ/scriptfile | software/googleAppEngine/google/appengine/_internal/django/core/management/commands/startproject.py | 23 | 1734 | from google.appengine._internal.django.core.management.base import copy_helper, CommandError, LabelCommand
from google.appengine._internal.django.utils.importlib import import_module
import os
import re
from random import choice
class Command(LabelCommand):
    help = "Creates a Django project directory structure for the given project name in the current directory."
    args = "[projectname]"
    label = 'project name'
    requires_model_validation = False
    # Can't import settings during this command, because they haven't
    # necessarily been created.
    can_import_settings = False
    def handle_label(self, project_name, **options):
        """Create the project skeleton and seed SECRET_KEY in its settings.py.

        Raises CommandError when *project_name* would shadow an importable
        Python module.
        """
        # Determine the project_name a bit naively -- by looking at the name of
        # the parent directory.
        directory = os.getcwd()
        # Check that the project_name cannot be imported.
        try:
            import_module(project_name)
        except ImportError:
            pass
        else:
            raise CommandError("%r conflicts with the name of an existing Python module and cannot be used as a project name. Please try another name." % project_name)
        copy_helper(self.style, 'project', project_name, directory)
        # Create a random SECRET_KEY hash, and put it in the main settings.
        main_settings_file = os.path.join(directory, project_name, 'settings.py')
        # NOTE(review): random.choice is not a CSPRNG; later Django versions
        # use random.SystemRandom for SECRET_KEY generation.
        secret_key = ''.join([choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in range(50)])
        # Read then rewrite settings.py, closing both handles even on error
        # (the original version leaked the read handle).
        fp = open(main_settings_file, 'r')
        try:
            settings_contents = fp.read()
        finally:
            fp.close()
        settings_contents = re.sub(r"(?<=SECRET_KEY = ')'", secret_key + "'", settings_contents)
        fp = open(main_settings_file, 'w')
        try:
            fp.write(settings_contents)
        finally:
            fp.close()
| mit |
edcast-inc/edx-platform-edcast | openedx/core/djangoapps/user_api/migrations/0004_auto__add_userorgtag__add_unique_userorgtag_user_org_key__chg_field_us.py | 114 | 7274 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Add the ``UserOrgTag`` model and retype ``UserCourseTag.course_id``.

    South schema migration: creates the ``user_api_userorgtag`` table with a
    (user, org, key) unique constraint and a matching composite index, and
    changes ``UserCourseTag.course_id`` to a ``CourseKeyField``.
    """

    def forwards(self, orm):
        """Apply the migration."""
        # Adding model 'UserOrgTag'
        db.create_table('user_api_userorgtag', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('created', self.gf('model_utils.fields.AutoCreatedField')(default=datetime.datetime.now)),
            ('modified', self.gf('model_utils.fields.AutoLastModifiedField')(default=datetime.datetime.now)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='+', to=orm['auth.User'])),
            ('key', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
            ('org', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
            ('value', self.gf('django.db.models.fields.TextField')()),
        ))
        db.send_create_signal('user_api', ['UserOrgTag'])

        # Adding unique constraint on 'UserOrgTag', fields ['user', 'org', 'key']
        db.create_unique('user_api_userorgtag', ['user_id', 'org', 'key'])

        # Create a composite index of user_id, org, and key.
        # NOTE(review): many backends already index the unique constraint
        # above, so this may be redundant -- kept as originally generated,
        # since applied migrations should not be edited retroactively.
        db.create_index('user_api_userorgtag', ['user_id', 'org', 'key'])

        # Changing field 'UserCourseTag.course_id'
        db.alter_column('user_api_usercoursetag', 'course_id', self.gf('xmodule_django.models.CourseKeyField')(max_length=255))

    def backwards(self, orm):
        """Revert the migration (mirror of ``forwards`` in reverse order)."""
        # Delete the composite index of user_id, org, and key.
        db.delete_index('user_api_userorgtag', ['user_id', 'org', 'key'])

        # Removing unique constraint on 'UserOrgTag', fields ['user', 'org', 'key']
        db.delete_unique('user_api_userorgtag', ['user_id', 'org', 'key'])

        # Deleting model 'UserOrgTag'
        db.delete_table('user_api_userorgtag')

        # Changing field 'UserCourseTag.course_id'
        db.alter_column('user_api_usercoursetag', 'course_id', self.gf('django.db.models.fields.CharField')(max_length=255))

    # Frozen ORM snapshot used by South to build the ``orm`` object handed to
    # forwards()/backwards(). Auto-generated -- do not hand-edit.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'user_api.usercoursetag': {
            'Meta': {'unique_together': "(('user', 'course_id', 'key'),)", 'object_name': 'UserCourseTag'},
            'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['auth.User']"}),
            'value': ('django.db.models.fields.TextField', [], {})
        },
        'user_api.userorgtag': {
            'Meta': {'unique_together': "(('user', 'org', 'key'),)", 'object_name': 'UserOrgTag'},
            'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
            'org': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['auth.User']"}),
            'value': ('django.db.models.fields.TextField', [], {})
        },
        'user_api.userpreference': {
            'Meta': {'unique_together': "(('user', 'key'),)", 'object_name': 'UserPreference'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'preferences'", 'to': "orm['auth.User']"}),
            'value': ('django.db.models.fields.TextField', [], {})
        }
    }

    complete_apps = ['user_api']
| agpl-3.0 |
hmpf/nav | python/nav/metrics/data.py | 2 | 8012 | #
# Copyright (C) 2013 Uninett AS
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 3 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details. You should have received a copy of the GNU General Public License
# along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
"""Retrieval and calculations on raw numbers from Graphite metrics"""
import codecs
from datetime import datetime
import json
import logging
from django.utils.six.moves.urllib.parse import urlencode, urljoin
from django.utils.six.moves.urllib.request import Request, urlopen
from django.utils.six.moves.urllib.error import HTTPError, URLError
from nav.metrics import CONFIG, errors
from nav.metrics.templates import (metric_path_for_packet_loss,
metric_path_for_roundtrip_time)
_logger = logging.getLogger(__name__)
def get_metric_average(target, start="-5min", end="now", ignore_unknown=True):
    """Calculates the average value of a metric over a given period of time

    :param target: A metric path string or a list of multiple metric paths
    :param start: A start time specification that Graphite will accept.
    :param end: An end time specification that Graphite will accept.
    :param ignore_unknown: Ignore unknown values when calculating the average.
                           Unless True, any unknown data in the series will
                           result in an average value of None.
    :returns: A dict of {target: average_value} items. Targets that weren't
              found in Graphite will not be present in the dict.

    """
    start_time = datetime.now()
    data = get_metric_data(target, start, end)
    result = {}
    # Each response element is a dict with 'target' (metric name) and
    # 'datapoints' keys. Named `series` to avoid shadowing the `target`
    # argument (the original loop reused the parameter name).
    for series in data:
        dpoints = [d[0] for d in series['datapoints']
                   if not (ignore_unknown and d[0] is None)]
        if dpoints:
            if None in dpoints:
                # Unknown values present and not ignored: average is undefined.
                avg = None
            else:
                avg = sum(dpoints) / len(dpoints)
            result[series['target']] = avg
    _logger.debug('Got metric average for %s targets in %s seconds',
                  len(data), datetime.now() - start_time)
    return result
def get_metric_max(target, start="-5min", end="now"):
    """Returns the maximum value of each target metric over a period of time.

    :param target: A metric path string or a list of multiple metric paths
    :param start: A start time specification that Graphite will accept.
    :param end: An end time specification that Graphite will accept.
    :returns: A dict of {target: maximum_value} items. Targets that weren't
              found in Graphite will not be present in the dict.
    """
    data = get_metric_data(target, start, end)
    result = {}
    # `series` avoids shadowing the `target` argument, as in
    # get_metric_average().
    for series in data:
        # Unknown values (None) are always ignored here.
        dpoints = [d[0] for d in series['datapoints'] if d[0] is not None]
        if dpoints:
            # The comprehension above already removed every None, so the
            # original `if None in dpoints` branch was dead code; max() is
            # safe to call directly.
            result[series['target']] = max(dpoints)
    return result
def get_metric_data(target, start="-5min", end="now"):
    """
    Retrieves raw datapoints from a graphite target for a given period of time.

    :param target: A metric path string or a list of multiple metric paths
    :param start: A start time specification that Graphite will accept.
    :param end: An end time specification that Graphite will accept.

    :returns: A raw, response from Graphite. Normally a list of dicts that
              represent the names and datapoints of each matched target,
              like so::

                  [{'target': 'x', 'datapoints': [(value, timestamp), ...]}]

    :raises errors.GraphiteUnreachableError: on HTTP or connection errors.
    """
    if not target:
        return []  # no point in wasting time on http requests for no data

    base = CONFIG.get("graphiteweb", "base")
    url = urljoin(base, "/render/")

    # What does Graphite accept of formats? Lets check if the parameters are
    # datetime objects and try to force a format then
    if isinstance(start, datetime):
        start = start.strftime('%H:%M%Y%m%d')
    if isinstance(end, datetime):
        end = end.strftime('%H:%M%Y%m%d')

    query = {
        'target': target,
        'from': start,
        'until': end,
        'format': 'json',
    }
    # doseq=True: a list of targets becomes repeated 'target' parameters.
    query = urlencode(query, True)
    _logger.debug("get_metric_data%r", (target, start, end))

    # Supplying a data argument makes urlopen() issue a POST request.
    req = Request(url, data=query.encode('utf-8'))
    try:
        response = urlopen(req)
        json_data = json.load(codecs.getreader('utf-8')(response))
        _logger.debug("get_metric_data: returning %d results", len(json_data))
        return json_data
    except HTTPError as err:
        # NOTE(review): this branch fires for any HTTP error status, not only
        # 500, although the log message says "500".
        _logger.error("Got a 500 error from graphite-web when fetching %s"
                      "with data %s", err.url, query)
        _logger.error("Graphite output: %s", err.fp.read())
        raise errors.GraphiteUnreachableError(
            "{0} is unreachable".format(base), err)
    except URLError as err:
        raise errors.GraphiteUnreachableError(
            "{0} is unreachable".format(base), err)
    except ValueError:
        # response could not be decoded
        return []
    finally:
        try:
            # `response` is unbound if urlopen() itself raised -- hence the
            # NameError guard instead of a plain close().
            response.close()
        except NameError:
            pass
# Time frames accepted by get_netboxes_availability().
DEFAULT_TIME_FRAMES = ('day', 'week', 'month')
# Data sources accepted by get_netboxes_availability().
DEFAULT_DATA_SOURCES = ('availability', 'response_time')
# Maps a data source name to the function that builds its Graphite metric path.
METRIC_PATH_LOOKUP = {
    'availability': metric_path_for_packet_loss,
    'response_time': metric_path_for_roundtrip_time
}
def get_netboxes_availability(netboxes, data_sources=DEFAULT_DATA_SOURCES,
                              time_frames=DEFAULT_TIME_FRAMES,
                              start_time=None, end_time=None):
    """Calculates and returns an availability data structure for a list of
    netboxes.

    If ``start_time`` is given, results cover that explicit interval;
    otherwise one result is produced per entry in ``time_frames``.

    :type netboxes: list[Netbox] | QuerySet[Netbox]
    :type data_sources: list[str]
    :type time_frames: list[str]
    """
    if not netboxes:
        return {}

    assert all(x in DEFAULT_TIME_FRAMES for x in time_frames)
    assert all(x in DEFAULT_DATA_SOURCES for x in data_sources)

    # Build one entry per netbox, mapping each requested data source to its
    # Graphite metric path, while collecting every path for a single batched
    # metric query.
    result = {}
    targets = []
    for netbox in netboxes:
        entry = {}
        for source in data_sources:
            path = METRIC_PATH_LOOKUP[source](netbox.sysname)
            targets.append(path)
            entry[source] = {'data_source': path}
        result[netbox.id] = entry

    if start_time:
        populate_for_interval(result, targets, netboxes, start_time, end_time)
    else:
        populate_for_time_frame(result, targets, netboxes, time_frames)

    return result
def populate_for_interval(result, targets, netboxes, start_time, end_time):
    """Populate results based on a time interval"""
    averages = get_metric_average(targets, start=start_time, end=end_time)
    for netbox in netboxes:
        entry = result[netbox.id]

        # Availability: convert packet loss ratio into a percentage.
        if 'availability' in entry:
            loss = averages.get(entry['availability']['data_source'])
            entry['availability'] = (
                100 - (loss * 100) if loss is not None else loss)

        # Response time is reported as-is.
        if 'response_time' in entry:
            entry['response_time'] = averages.get(
                entry['response_time']['data_source'])
def populate_for_time_frame(result, targets, netboxes, time_frames):
    """Populate results based on a list of time frames"""
    for frame in time_frames:
        # One batched query per time frame, e.g. start="-1day".
        averages = get_metric_average(targets, start="-1%s" % frame)
        for netbox in netboxes:
            entry = result[netbox.id]

            # Availability: convert packet loss ratio into a percentage.
            if 'availability' in entry:
                loss = averages.get(entry['availability']['data_source'])
                if loss is not None:
                    loss = 100 - (loss * 100)
                entry['availability'][frame] = loss

            # Response time is reported as-is.
            if 'response_time' in entry:
                entry['response_time'][frame] = averages.get(
                    entry['response_time']['data_source'])
| gpl-3.0 |
annarev/tensorflow | tensorflow/python/keras/layers/advanced_activations_test.py | 2 | 5240 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for advanced activation layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
@keras_parameterized.run_all_keras_modes
class AdvancedActivationsTest(keras_parameterized.TestCase):
    """Layer-level tests for Keras advanced activation layers."""

    def test_leaky_relu(self):
        # Cover zero, positive and negative slopes.
        for alpha in [0., .5, -1.]:
            testing_utils.layer_test(keras.layers.LeakyReLU,
                                     kwargs={'alpha': alpha},
                                     input_shape=(2, 3, 4),
                                     supports_masking=True)

    def test_prelu(self):
        testing_utils.layer_test(keras.layers.PReLU, kwargs={},
                                 input_shape=(2, 3, 4),
                                 supports_masking=True)

    def test_prelu_share(self):
        # Alphas shared along axis 1.
        testing_utils.layer_test(keras.layers.PReLU,
                                 kwargs={'shared_axes': 1},
                                 input_shape=(2, 3, 4),
                                 supports_masking=True)

    def test_elu(self):
        for alpha in [0., .5, -1.]:
            testing_utils.layer_test(keras.layers.ELU,
                                     kwargs={'alpha': alpha},
                                     input_shape=(2, 3, 4),
                                     supports_masking=True)

    def test_thresholded_relu(self):
        testing_utils.layer_test(keras.layers.ThresholdedReLU,
                                 kwargs={'theta': 0.5},
                                 input_shape=(2, 3, 4),
                                 supports_masking=True)

    def test_softmax(self):
        testing_utils.layer_test(keras.layers.Softmax,
                                 kwargs={'axis': 1},
                                 input_shape=(2, 3, 4),
                                 supports_masking=True)

    def test_relu(self):
        testing_utils.layer_test(keras.layers.ReLU,
                                 kwargs={'max_value': 10},
                                 input_shape=(2, 3, 4),
                                 supports_masking=True)
        x = keras.backend.ones((3, 4))
        if not context.executing_eagerly():
            # Test that we use `leaky_relu` when appropriate in graph mode.
            self.assertTrue(
                'LeakyRelu' in keras.layers.ReLU(negative_slope=0.2)(x).name)
            # Test that we use `relu` when appropriate in graph mode.
            self.assertTrue('Relu' in keras.layers.ReLU()(x).name)
            # Test that we use `relu6` when appropriate in graph mode.
            self.assertTrue('Relu6' in keras.layers.ReLU(max_value=6)(x).name)

    def test_relu_with_invalid_arg(self):
        # Constructor must reject negative max_value and negative_slope.
        with self.assertRaisesRegex(
            ValueError, 'max_value of Relu layer cannot be negative value: -10'):
            testing_utils.layer_test(keras.layers.ReLU,
                                     kwargs={'max_value': -10},
                                     input_shape=(2, 3, 4),
                                     supports_masking=True)
        with self.assertRaisesRegex(
                ValueError,
                'negative_slope of Relu layer cannot be negative value: -2'):
            with self.cached_session():
                testing_utils.layer_test(
                    keras.layers.ReLU,
                    kwargs={'negative_slope': -2},
                    input_shape=(2, 3, 4))

    @keras_parameterized.run_with_all_model_types
    def test_layer_as_activation(self):
        # A layer instance may be passed as another layer's activation.
        layer = keras.layers.Dense(1, activation=keras.layers.ReLU())
        model = testing_utils.get_model_from_layers([layer], input_shape=(10,))
        model.compile(
            'sgd',
            'mse',
            run_eagerly=testing_utils.should_run_eagerly())
        model.fit(np.ones((10, 10)), np.ones((10, 1)), batch_size=2)

    def test_leaky_relu_with_invalid_alpha(self):
        # Test case for GitHub issue 46993.
        with self.assertRaisesRegex(ValueError,
                                    'alpha of leaky Relu layer cannot be None'):
            testing_utils.layer_test(
                keras.layers.LeakyReLU,
                kwargs={'alpha': None},
                input_shape=(2, 3, 4),
                supports_masking=True)

    def test_leaky_elu_with_invalid_alpha(self):
        # Test case for GitHub issue 46993.
        with self.assertRaisesRegex(ValueError,
                                    'alpha of ELU layer cannot be None'):
            testing_utils.layer_test(
                keras.layers.ELU,
                kwargs={'alpha': None},
                input_shape=(2, 3, 4),
                supports_masking=True)
if __name__ == '__main__':
    # Run under the TensorFlow test runner.
    test.main()
| apache-2.0 |
Lujeni/ansible | lib/ansible/modules/network/fortios/fortios_firewall_vip46.py | 13 | 19391 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_vip46
short_description: Configure IPv4 to IPv6 virtual IPs in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify firewall feature and vip46 category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
This attribute was present already in previous version in a deeper level.
It has been moved out to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
firewall_vip46:
description:
- Configure IPv4 to IPv6 virtual IPs.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
arp_reply:
description:
- Enable ARP reply.
type: str
choices:
- disable
- enable
color:
description:
- Color of icon on the GUI.
type: int
comment:
description:
- Comment.
type: str
extip:
description:
- Start-external-IP [-end-external-IP].
type: str
extport:
description:
- External service port.
type: str
id:
description:
- Custom defined id.
type: int
ldb_method:
description:
- Load balance method.
type: str
choices:
- static
- round-robin
- weighted
- least-session
- least-rtt
- first-alive
mappedip:
description:
- Start-mapped-IP [-end mapped-IP].
type: str
mappedport:
description:
- Mapped service port.
type: str
monitor:
description:
- Health monitors.
type: list
suboptions:
name:
description:
- Health monitor name. Source firewall.ldb-monitor.name.
required: true
type: str
name:
description:
- VIP46 name.
required: true
type: str
portforward:
description:
- Enable port forwarding.
type: str
choices:
- disable
- enable
protocol:
description:
- Mapped port protocol.
type: str
choices:
- tcp
- udp
realservers:
description:
- Real servers.
type: list
suboptions:
client_ip:
description:
- Restrict server to a client IP in this range.
type: str
healthcheck:
description:
- Per server health check.
type: str
choices:
- disable
- enable
- vip
holddown_interval:
description:
- Hold down interval.
type: int
id:
description:
- Real server ID.
required: true
type: int
ip:
description:
- Mapped server IPv6.
type: str
max_connections:
description:
- Maximum number of connections allowed to server.
type: int
monitor:
description:
- Health monitors. Source firewall.ldb-monitor.name.
type: str
port:
description:
- Mapped server port.
type: int
status:
description:
- Server administrative status.
type: str
choices:
- active
- standby
- disable
weight:
description:
- weight
type: int
server_type:
description:
- Server type.
type: str
choices:
- http
- tcp
- udp
- ip
src_filter:
description:
- Source IP filter (x.x.x.x/x).
type: list
suboptions:
range:
description:
- Src-filter range.
required: true
type: str
type:
description:
- "VIP type: static NAT or server load balance."
type: str
choices:
- static-nat
- server-load-balance
uuid:
description:
- Universally Unique Identifier (UUID; automatically assigned but can be manually reset).
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure IPv4 to IPv6 virtual IPs.
fortios_firewall_vip46:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
firewall_vip46:
arp_reply: "disable"
color: "4"
comment: "Comment."
extip: "<your_own_value>"
extport: "<your_own_value>"
id: "8"
ldb_method: "static"
mappedip: "<your_own_value>"
mappedport: "<your_own_value>"
monitor:
-
name: "default_name_13 (source firewall.ldb-monitor.name)"
name: "default_name_14"
portforward: "disable"
protocol: "tcp"
realservers:
-
client_ip: "<your_own_value>"
healthcheck: "disable"
holddown_interval: "20"
id: "21"
ip: "<your_own_value>"
max_connections: "23"
monitor: "<your_own_value> (source firewall.ldb-monitor.name)"
port: "25"
status: "active"
weight: "27"
server_type: "http"
src_filter:
-
range: "<your_own_value>"
type: "static-nat"
uuid: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
    """Authenticate against the FortiGate described by ``data``.

    :param data: module parameters containing host, username, password,
                 ssl_verify and the optional https flag.
    :param fos: a FortiOSAPI-like handler to configure and log in with.
    """
    host = data['host']
    username = data['username']
    password = data['password']
    ssl_verify = data['ssl_verify']

    fos.debug('on')
    # HTTPS is the default; it is only disabled when explicitly turned off.
    if data.get('https', True):
        fos.https('on')
    else:
        fos.https('off')

    fos.login(host, username, password, verify=ssl_verify)
def filter_firewall_vip46_data(json):
    """Return a copy of *json* restricted to the known vip46 option keys.

    Keys that are absent or whose value is None are dropped, so only
    user-supplied options are sent to the device.
    """
    option_list = ['arp_reply', 'color', 'comment',
                   'extip', 'extport', 'id',
                   'ldb_method', 'mappedip', 'mappedport',
                   'monitor', 'name', 'portforward',
                   'protocol', 'realservers', 'server_type',
                   'src_filter', 'type', 'uuid']

    return {option: json[option]
            for option in option_list
            if option in json and json[option] is not None}
def underscore_to_hyphen(data):
    """Recursively convert underscores to hyphens in all dict keys.

    FortiOS expects hyphenated attribute names while Ansible argument specs
    use underscores. Returns a converted copy for dicts/lists and the value
    unchanged for scalars.
    """
    if isinstance(data, list):
        # Bug fix: the original did `elem = underscore_to_hyphen(elem)`,
        # which rebinds a local and discards the converted copy, so dicts
        # nested inside lists kept their underscore keys. Rebuild the list
        # from the converted elements instead.
        data = [underscore_to_hyphen(elem) for elem in data]
    elif isinstance(data, dict):
        data = {k.replace('_', '-'): underscore_to_hyphen(v)
                for k, v in data.items()}
    return data
def firewall_vip46(data, fos):
    """Create, update or delete a vip46 object on the device.

    The desired state is taken from the top-level ``state`` parameter when
    set, falling back to the legacy per-section ``state`` key.

    :param data: validated module parameters.
    :param fos: FortiOS API handler.
    :returns: the raw API response dict from ``fos.set``/``fos.delete``.
    """
    vdom = data['vdom']
    if 'state' in data and data['state']:
        state = data['state']
    elif 'state' in data['firewall_vip46'] and data['firewall_vip46']['state']:
        # Bug fix: the original condition tested the truthiness of the whole
        # firewall_vip46 dict rather than its 'state' value.
        state = data['firewall_vip46']['state']
    else:
        state = True
    firewall_vip46_data = data['firewall_vip46']
    filtered_data = underscore_to_hyphen(filter_firewall_vip46_data(firewall_vip46_data))

    if state == "present":
        return fos.set('firewall',
                       'vip46',
                       data=filtered_data,
                       vdom=vdom)

    elif state == "absent":
        return fos.delete('firewall',
                          'vip46',
                          mkey=filtered_data['name'],
                          vdom=vdom)
def is_successful_status(status):
    """Return True when a FortiGate API response indicates success.

    Deleting an object that is already gone (HTTP 404 on DELETE) is also
    treated as success, making delete operations idempotent.
    """
    if status['status'] == "success":
        return True
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_firewall(data, fos):
    """Dispatch to the vip46 handler and normalise its response.

    :returns: (is_error, has_changed, raw_response) tuple for the module.
    """
    if data['firewall_vip46']:
        resp = firewall_vip46(data, fos)

    return (not is_successful_status(resp),
            resp['status'] == "success",
            resp)
def main():
    """Module entry point: validate arguments and apply the configuration."""
    # Argument spec mirroring the DOCUMENTATION block above; the nested
    # 'firewall_vip46' options map 1:1 onto FortiOS vip46 attributes.
    fields = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "state": {"required": False, "type": "str",
                  "choices": ["present", "absent"]},
        "firewall_vip46": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "state": {"required": False, "type": "str",
                          "choices": ["present", "absent"]},
                "arp_reply": {"required": False, "type": "str",
                              "choices": ["disable", "enable"]},
                "color": {"required": False, "type": "int"},
                "comment": {"required": False, "type": "str"},
                "extip": {"required": False, "type": "str"},
                "extport": {"required": False, "type": "str"},
                "id": {"required": False, "type": "int"},
                "ldb_method": {"required": False, "type": "str",
                               "choices": ["static", "round-robin", "weighted",
                                           "least-session", "least-rtt", "first-alive"]},
                "mappedip": {"required": False, "type": "str"},
                "mappedport": {"required": False, "type": "str"},
                "monitor": {"required": False, "type": "list",
                            "options": {
                                "name": {"required": True, "type": "str"}
                            }},
                "name": {"required": True, "type": "str"},
                "portforward": {"required": False, "type": "str",
                                "choices": ["disable", "enable"]},
                "protocol": {"required": False, "type": "str",
                             "choices": ["tcp", "udp"]},
                "realservers": {"required": False, "type": "list",
                                "options": {
                                    "client_ip": {"required": False, "type": "str"},
                                    "healthcheck": {"required": False, "type": "str",
                                                    "choices": ["disable", "enable", "vip"]},
                                    "holddown_interval": {"required": False, "type": "int"},
                                    "id": {"required": True, "type": "int"},
                                    "ip": {"required": False, "type": "str"},
                                    "max_connections": {"required": False, "type": "int"},
                                    "monitor": {"required": False, "type": "str"},
                                    "port": {"required": False, "type": "int"},
                                    "status": {"required": False, "type": "str",
                                               "choices": ["active", "standby", "disable"]},
                                    "weight": {"required": False, "type": "int"}
                                }},
                "server_type": {"required": False, "type": "str",
                                "choices": ["http", "tcp", "udp",
                                            "ip"]},
                "src_filter": {"required": False, "type": "list",
                               "options": {
                                   "range": {"required": True, "type": "str"}
                               }},
                "type": {"required": False, "type": "str",
                         "choices": ["static-nat", "server-load-balance"]},
                "uuid": {"required": False, "type": "str"}
            }
        }
    }

    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)

    # legacy_mode refers to using fortiosapi instead of HTTPAPI
    legacy_mode = 'host' in module.params and module.params['host'] is not None and \
                  'username' in module.params and module.params['username'] is not None and \
                  'password' in module.params and module.params['password'] is not None

    if not legacy_mode:
        # HTTPAPI transport: reuse Ansible's persistent connection socket.
        if module._socket_path:
            connection = Connection(module._socket_path)
            fos = FortiOSHandler(connection)

            is_error, has_changed, result = fortios_firewall(module.params, fos)
        else:
            module.fail_json(**FAIL_SOCKET_MSG)
    else:
        # Legacy transport: talk to the device directly via fortiosapi.
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")

        fos = FortiOSAPI()

        login(module.params, fos)
        is_error, has_changed, result = fortios_firewall(module.params, fos)
        fos.logout()

    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
    # Ansible executes the module file as a script.
    main()
| gpl-3.0 |
aronsky/home-assistant | homeassistant/components/scene/__init__.py | 2 | 3231 | """
Allow users to set and activate scenes.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/scene/
"""
import asyncio
import importlib
import logging
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID, CONF_PLATFORM, SERVICE_TURN_ON)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.state import HASS_DOMAIN
DOMAIN = 'scene'
STATE = 'scening'
STATES = 'states'
def _hass_domain_validator(config):
    """Validate platform in config for homeassistant domain.

    A config without an explicit platform is interpreted as a bare states
    mapping for the built-in homeassistant scene platform.
    """
    if CONF_PLATFORM in config:
        return config
    return {CONF_PLATFORM: HASS_DOMAIN, STATES: config}
def _platform_validator(config):
    """Validate it is a valid platform."""
    try:
        # Import the scene platform named in the config so we can delegate
        # validation to its own PLATFORM_SCHEMA, if it defines one.
        platform = importlib.import_module(
            'homeassistant.components.scene.{}'.format(
                config[CONF_PLATFORM]))
    except ImportError:
        # `from None` hides the ImportError chain from the voluptuous error.
        raise vol.Invalid('Invalid platform specified') from None

    if not hasattr(platform, 'PLATFORM_SCHEMA'):
        # Platform defines no schema of its own; accept the config as-is.
        return config

    return platform.PLATFORM_SCHEMA(config)
# Accept either a bare states mapping (implicit homeassistant platform) or an
# explicit platform config, then delegate to that platform's own schema.
PLATFORM_SCHEMA = vol.Schema(
    vol.All(
        _hass_domain_validator,
        vol.Schema({
            vol.Required(CONF_PLATFORM): str
        }, extra=vol.ALLOW_EXTRA),
        _platform_validator
    ), extra=vol.ALLOW_EXTRA)

# Schema for the scene.turn_on service call.
SCENE_SERVICE_SCHEMA = vol.Schema({
    vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
})
async def async_setup(hass, config):
    """Set up the scenes."""
    logger = logging.getLogger(__name__)
    component = hass.data[DOMAIN] = EntityComponent(logger, DOMAIN, hass)

    await component.async_setup(config)

    async def async_handle_scene_service(service):
        """Handle calls to the switch services."""
        target_scenes = component.async_extract_from_service(service)

        tasks = [scene.async_activate() for scene in target_scenes]
        if tasks:
            # asyncio.wait() requires a non-empty collection of awaitables.
            await asyncio.wait(tasks, loop=hass.loop)

    hass.services.async_register(
        DOMAIN, SERVICE_TURN_ON, async_handle_scene_service,
        schema=SCENE_SERVICE_SCHEMA)

    return True
async def async_setup_entry(hass, entry):
    """Set up a config entry."""
    # Delegate to the EntityComponent created in async_setup.
    scene_component = hass.data[DOMAIN]
    return await scene_component.async_setup_entry(entry)
async def async_unload_entry(hass, entry):
    """Unload a config entry."""
    # Delegate to the EntityComponent created in async_setup.
    scene_component = hass.data[DOMAIN]
    return await scene_component.async_unload_entry(entry)
class Scene(Entity):
    """A scene is a group of entities and the states we want them to be."""

    @property
    def should_poll(self):
        """No polling needed."""
        # Scenes are stateless triggers; there is nothing to poll.
        return False

    @property
    def state(self):
        """Return the state of the scene."""
        # Always the constant 'scening'; scenes do not track a real state.
        return STATE

    def activate(self):
        """Activate scene. Try to get entities into requested state."""
        # Platform implementations must override this.
        raise NotImplementedError()

    def async_activate(self):
        """Activate scene. Try to get entities into requested state.

        This method must be run in the event loop and returns a coroutine.
        """
        # Default implementation schedules the synchronous activate() as a job.
        return self.hass.async_add_job(self.activate)
| apache-2.0 |
horance-liu/tensorflow | tensorflow/python/keras/applications/xception/__init__.py | 74 | 1142 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Xception Keras application."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras._impl.keras.applications.xception import decode_predictions
from tensorflow.python.keras._impl.keras.applications.xception import preprocess_input
from tensorflow.python.keras._impl.keras.applications.xception import Xception
del absolute_import
del division
del print_function
| apache-2.0 |
ice9js/servo | tests/wpt/harness/wptrunner/executors/executorservo.py | 6 | 9372 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import base64
import hashlib
import json
import os
import subprocess
import tempfile
import threading
import urlparse
import uuid
from collections import defaultdict
from mozprocess import ProcessHandler
from .base import (ExecutorException,
Protocol,
RefTestImplementation,
testharness_result_converter,
reftest_result_converter)
from .process import ProcessTestExecutor
from ..browsers.base import browser_command
render_arg = None
def do_delayed_imports():
    """Bind the module-global render_arg lazily.

    The import of ..browsers.servo is deferred to executor construction time
    so merely importing this module does not require servo browser support.
    """
    global render_arg
    from ..browsers.servo import render_arg
# /etc/hosts-style contents mapping every web-platform.test domain (including
# subdomains and the IDN variants) to loopback, so the browser under test
# resolves the test server locally. Written to a temp file by make_hosts_file
# and handed to the browser via the HOST_FILE environment variable.
hosts_text = """127.0.0.1 web-platform.test
127.0.0.1 www.web-platform.test
127.0.0.1 www1.web-platform.test
127.0.0.1 www2.web-platform.test
127.0.0.1 xn--n8j6ds53lwwkrqhv28a.web-platform.test
127.0.0.1 xn--lve-6lad.web-platform.test
"""
def make_hosts_file():
    """Write hosts_text into a new temporary file and return its path.

    The caller is responsible for deleting the file when done.
    """
    fd, path = tempfile.mkstemp()
    with os.fdopen(fd, "w") as hosts_file:
        hosts_file.write(hosts_text)
    return path
class ServoTestharnessExecutor(ProcessTestExecutor):
    """Run testharness.js tests by launching a servo process per test."""

    # Converts the JSON emitted by the harness into wptrunner result objects.
    convert_result = testharness_result_converter

    def __init__(self, browser, server_config, timeout_multiplier=1, debug_info=None,
                 pause_after_test=False):
        do_delayed_imports()
        ProcessTestExecutor.__init__(self, browser, server_config,
                                     timeout_multiplier=timeout_multiplier,
                                     debug_info=debug_info)
        self.pause_after_test = pause_after_test
        # Parsed JSON result from the browser; set by on_output.
        self.result_data = None
        # threading.Event signalling a result (or process exit) arrived.
        self.result_flag = None
        self.protocol = Protocol(self, browser)
        # Temp hosts file mapping *.web-platform.test to loopback.
        self.hosts_path = make_hosts_file()

    def teardown(self):
        # Best-effort removal of the temp hosts file.
        try:
            os.unlink(self.hosts_path)
        except OSError:
            pass
        ProcessTestExecutor.teardown(self)

    def do_test(self, test):
        """Launch servo for a single test and collect its result."""
        self.result_data = None
        self.result_flag = threading.Event()
        # -z closes the browser when the test completes; removed below when
        # pause_after_test is requested.
        args = [render_arg(self.browser.render_backend), "--hard-fail", "-u", "Servo/wptrunner",
                "-Z", "replace-surrogates", "-z", self.test_url(test)]
        for stylesheet in self.browser.user_stylesheets:
            args += ["--user-stylesheet", stylesheet]
        for pref, value in test.environment.get('prefs', {}).iteritems():
            args += ["--pref", "%s=%s" % (pref, value)]
        args += self.browser.binary_args
        debug_args, command = browser_command(self.binary, args, self.debug_info)
        self.command = command
        if self.pause_after_test:
            self.command.remove("-z")
        self.command = debug_args + self.command
        env = os.environ.copy()
        env["HOST_FILE"] = self.hosts_path
        env["RUST_BACKTRACE"] = "1"
        if not self.interactive:
            # Non-interactive: capture output so on_output can parse results.
            self.proc = ProcessHandler(self.command,
                                       processOutputLine=[self.on_output],
                                       onFinish=self.on_finish,
                                       env=env,
                                       storeOutput=False)
            self.proc.run()
        else:
            # Interactive (e.g. under a debugger): inherit stdio directly.
            self.proc = subprocess.Popen(self.command, env=env)
        try:
            timeout = test.timeout * self.timeout_multiplier
            # Now wait to get the output we expect, or until we reach the timeout
            if not self.interactive and not self.pause_after_test:
                # Small grace period beyond the harness timeout.
                wait_timeout = timeout + 5
                self.result_flag.wait(wait_timeout)
            else:
                wait_timeout = None
                self.proc.wait()
            proc_is_running = True
            if self.result_flag.is_set():
                if self.result_data is not None:
                    result = self.convert_result(test, self.result_data)
                else:
                    # Flag set by on_finish without a result: browser died.
                    self.proc.wait()
                    result = (test.result_cls("CRASH", None), [])
                    proc_is_running = False
            else:
                result = (test.result_cls("TIMEOUT", None), [])
            if proc_is_running:
                if self.pause_after_test:
                    self.logger.info("Pausing until the browser exits")
                    self.proc.wait()
                else:
                    self.proc.kill()
        except KeyboardInterrupt:
            self.proc.kill()
            raise
        return result

    def on_output(self, line):
        """Parse one line of browser output, looking for the result marker."""
        prefix = "ALERT: RESULT: "
        line = line.decode("utf8", "replace")
        if line.startswith(prefix):
            self.result_data = json.loads(line[len(prefix):])
            self.result_flag.set()
        else:
            if self.interactive:
                # Python 2 print statement; this harness targets Python 2.
                print line
            else:
                self.logger.process_output(self.proc.pid,
                                           line,
                                           " ".join(self.command))

    def on_finish(self):
        # Process exited; wake do_test even if no result line was seen.
        self.result_flag.set()
class TempFilename(object):
    """Context manager handing out a fresh, random filename in a directory.

    The named file is not created on entry; on exit it is deleted if the
    caller created it.
    """

    def __init__(self, directory):
        self.directory = directory
        self.path = None

    def __enter__(self):
        # A uuid4 gives a collision-free name without touching the filesystem.
        unique_name = str(uuid.uuid4())
        self.path = os.path.join(self.directory, unique_name)
        return self.path

    def __exit__(self, exc_type, exc_value, traceback):
        try:
            os.unlink(self.path)
        except OSError:
            # The caller may never have created the file; ignore.
            pass
class ServoRefTestExecutor(ProcessTestExecutor):
    """Run reftests by having servo render each page to a screenshot file."""

    convert_result = reftest_result_converter

    def __init__(self, browser, server_config, binary=None, timeout_multiplier=1,
                 screenshot_cache=None, debug_info=None, pause_after_test=False):
        do_delayed_imports()
        ProcessTestExecutor.__init__(self,
                                     browser,
                                     server_config,
                                     timeout_multiplier=timeout_multiplier,
                                     debug_info=debug_info)
        self.protocol = Protocol(self, browser)
        self.screenshot_cache = screenshot_cache
        # Shared reftest comparison logic; calls back into self.screenshot.
        self.implementation = RefTestImplementation(self)
        # Scratch directory for per-test screenshot output files.
        self.tempdir = tempfile.mkdtemp()
        self.hosts_path = make_hosts_file()

    def teardown(self):
        try:
            os.unlink(self.hosts_path)
        except OSError:
            pass
        # Screenshot files are removed per-test by TempFilename, so the
        # directory should be empty here.
        os.rmdir(self.tempdir)
        ProcessTestExecutor.teardown(self)

    def screenshot(self, test, viewport_size, dpi):
        """Render one page to a PNG; return (ok, data-or-status) tuple."""
        full_url = self.test_url(test)
        with TempFilename(self.tempdir) as output_path:
            debug_args, command = browser_command(
                self.binary,
                [render_arg(self.browser.render_backend), "--hard-fail", "--exit",
                 "-u", "Servo/wptrunner", "-Z", "disable-text-aa,load-webfonts-synchronously,replace-surrogates",
                 "--output=%s" % output_path, full_url],
                self.debug_info)
            for stylesheet in self.browser.user_stylesheets:
                command += ["--user-stylesheet", stylesheet]
            for pref in test.environment.get('prefs', {}):
                command += ["--pref", pref]
            if viewport_size:
                command += ["--resolution", viewport_size]
            else:
                # Default reftest viewport.
                command += ["--resolution", "800x600"]
            if dpi:
                command += ["--device-pixel-ratio", dpi]
            self.command = debug_args + command
            env = os.environ.copy()
            env["HOST_FILE"] = self.hosts_path
            env["RUST_BACKTRACE"] = "1"
            if not self.interactive:
                self.proc = ProcessHandler(self.command,
                                           processOutputLine=[self.on_output],
                                           env=env)
                try:
                    self.proc.run()
                    # Grace period beyond the harness timeout.
                    timeout = test.timeout * self.timeout_multiplier + 5
                    rv = self.proc.wait(timeout=timeout)
                except KeyboardInterrupt:
                    self.proc.kill()
                    raise
            else:
                self.proc = subprocess.Popen(self.command,
                                             env=env)
                try:
                    rv = self.proc.wait()
                except KeyboardInterrupt:
                    self.proc.kill()
                    raise
            # rv is None when wait() timed out without the process exiting.
            if rv is None:
                self.proc.kill()
                return False, ("EXTERNAL-TIMEOUT", None)
            if rv != 0 or not os.path.exists(output_path):
                return False, ("CRASH", None)
            with open(output_path) as f:
                # Might need to strip variable headers or something here
                data = f.read()
            return True, base64.b64encode(data)

    def do_test(self, test):
        """Run a single reftest via the shared RefTestImplementation."""
        result = self.implementation.run_test(test)
        return self.convert_result(test, result)

    def on_output(self, line):
        """Forward browser output to the console or the structured logger."""
        line = line.decode("utf8", "replace")
        if self.interactive:
            # Python 2 print statement; this harness targets Python 2.
            print line
        else:
            self.logger.process_output(self.proc.pid,
                                       line,
                                       " ".join(self.command))
def on_output(self, line):
line = line.decode("utf8", "replace")
if self.interactive:
print line
else:
self.logger.process_output(self.proc.pid,
line,
" ".join(self.command))
| mpl-2.0 |
javachengwc/hue | desktop/core/ext-py/pycrypto-2.6.1/lib/Crypto/SelfTest/Signature/test_pkcs1_pss.py | 113 | 20598 | # -*- coding: utf-8 -*-
#
# SelfTest/Signature/test_pkcs1_pss.py: Self-test for PKCS#1 PSS signatures
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
from __future__ import nested_scopes
__revision__ = "$Id$"
import unittest
from Crypto.PublicKey import RSA
from Crypto import Random
from Crypto.SelfTest.st_common import list_test_cases, a2b_hex, b2a_hex
from Crypto.Hash import *
from Crypto.Signature import PKCS1_PSS as PKCS
from Crypto.Util.py3compat import *
def isStr(s):
    """Return 1 if s can be concatenated onto a string, else 0."""
    try:
        '' + s
    except TypeError:
        return 0
    return 1
def rws(t):
    """Remove white spaces, tabs, and new lines from a string"""
    return t.replace('\t', '').replace('\n', '').replace(' ', '')
def t2b(t):
    """Convert a text string with bytes in hex form to a byte string"""
    compact = b(rws(t))
    if len(compact) % 2 != 0:
        raise ValueError("Even number of characters expected")
    return a2b_hex(compact)
# Helper class to count how many bytes have been requested
# from the key's private RNG, w/o counting those used for blinding
class MyKey:
    """RSA-key wrapper that counts bytes requested from the key's RNG.

    Bytes drawn for blinding are not counted, because blinding calls the
    wrapped key's RNG directly rather than going through _randfunc here.
    """

    def __init__(self, key):
        self._key = key
        self.n = key.n
        self.asked = 0

    def _randfunc(self, N):
        # Tally the request before delegating to the real RNG.
        self.asked += N
        return self._key._randfunc(N)

    def sign(self, message):
        return self._key.sign(message)

    def has_private(self):
        return self._key.has_private()

    def decrypt(self, ciphertext):
        return self._key.decrypt(ciphertext)

    def verify(self, message, signature):
        return self._key.verify(message, signature)

    def encrypt(self, message, params):
        return self._key.encrypt(message, params)
class PKCS1_PSS_Tests(unittest.TestCase):
# List of tuples with test data for PKCS#1 PSS
# Each tuple is made up by:
# Item #0: dictionary with RSA key component, or key to import
# Item #1: data to hash and sign
# Item #2: signature of the data #1, done with the key #0,
# and salt #3 after hashing it with #4
# Item #3: salt
# Item #4: hash object generator
_testData = (
#
# From in pss-vect.txt to be found in
# ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1-vec.zip
#
(
# Private key
{
'n':'''a2 ba 40 ee 07 e3 b2 bd 2f 02 ce 22 7f 36 a1 95
02 44 86 e4 9c 19 cb 41 bb bd fb ba 98 b2 2b 0e
57 7c 2e ea ff a2 0d 88 3a 76 e6 5e 39 4c 69 d4
b3 c0 5a 1e 8f ad da 27 ed b2 a4 2b c0 00 fe 88
8b 9b 32 c2 2d 15 ad d0 cd 76 b3 e7 93 6e 19 95
5b 22 0d d1 7d 4e a9 04 b1 ec 10 2b 2e 4d e7 75
12 22 aa 99 15 10 24 c7 cb 41 cc 5e a2 1d 00 ee
b4 1f 7c 80 08 34 d2 c6 e0 6b ce 3b ce 7e a9 a5''',
'e':'''01 00 01''',
# In the test vector, only p and q were given...
# d is computed offline as e^{-1} mod (p-1)(q-1)
'd':'''50e2c3e38d886110288dfc68a9533e7e12e27d2aa56
d2cdb3fb6efa990bcff29e1d2987fb711962860e7391b1ce01
ebadb9e812d2fbdfaf25df4ae26110a6d7a26f0b810f54875e
17dd5c9fb6d641761245b81e79f8c88f0e55a6dcd5f133abd3
5f8f4ec80adf1bf86277a582894cb6ebcd2162f1c7534f1f49
47b129151b71'''
},
# Data to sign
'''85 9e ef 2f d7 8a ca 00 30 8b dc 47 11 93 bf 55
bf 9d 78 db 8f 8a 67 2b 48 46 34 f3 c9 c2 6e 64
78 ae 10 26 0f e0 dd 8c 08 2e 53 a5 29 3a f2 17
3c d5 0c 6d 5d 35 4f eb f7 8b 26 02 1c 25 c0 27
12 e7 8c d4 69 4c 9f 46 97 77 e4 51 e7 f8 e9 e0
4c d3 73 9c 6b bf ed ae 48 7f b5 56 44 e9 ca 74
ff 77 a5 3c b7 29 80 2f 6e d4 a5 ff a8 ba 15 98
90 fc''',
# Signature
'''8d aa 62 7d 3d e7 59 5d 63 05 6c 7e c6 59 e5 44
06 f1 06 10 12 8b aa e8 21 c8 b2 a0 f3 93 6d 54
dc 3b dc e4 66 89 f6 b7 95 1b b1 8e 84 05 42 76
97 18 d5 71 5d 21 0d 85 ef bb 59 61 92 03 2c 42
be 4c 29 97 2c 85 62 75 eb 6d 5a 45 f0 5f 51 87
6f c6 74 3d ed dd 28 ca ec 9b b3 0e a9 9e 02 c3
48 82 69 60 4f e4 97 f7 4c cd 7c 7f ca 16 71 89
71 23 cb d3 0d ef 5d 54 a2 b5 53 6a d9 0a 74 7e''',
# Salt
'''e3 b5 d5 d0 02 c1 bc e5 0c 2b 65 ef 88 a1 88 d8
3b ce 7e 61''',
# Hash algorithm
SHA
),
#
# Example 1.1 to be found in
# ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1-vec.zip
#
(
# Private key
{
'n':'''a5 6e 4a 0e 70 10 17 58 9a 51 87 dc 7e a8 41 d1
56 f2 ec 0e 36 ad 52 a4 4d fe b1 e6 1f 7a d9 91
d8 c5 10 56 ff ed b1 62 b4 c0 f2 83 a1 2a 88 a3
94 df f5 26 ab 72 91 cb b3 07 ce ab fc e0 b1 df
d5 cd 95 08 09 6d 5b 2b 8b 6d f5 d6 71 ef 63 77
c0 92 1c b2 3c 27 0a 70 e2 59 8e 6f f8 9d 19 f1
05 ac c2 d3 f0 cb 35 f2 92 80 e1 38 6b 6f 64 c4
ef 22 e1 e1 f2 0d 0c e8 cf fb 22 49 bd 9a 21 37''',
'e':'''01 00 01''',
'd':'''33 a5 04 2a 90 b2 7d 4f 54 51 ca 9b bb d0 b4 47
71 a1 01 af 88 43 40 ae f9 88 5f 2a 4b be 92 e8
94 a7 24 ac 3c 56 8c 8f 97 85 3a d0 7c 02 66 c8
c6 a3 ca 09 29 f1 e8 f1 12 31 88 44 29 fc 4d 9a
e5 5f ee 89 6a 10 ce 70 7c 3e d7 e7 34 e4 47 27
a3 95 74 50 1a 53 26 83 10 9c 2a ba ca ba 28 3c
31 b4 bd 2f 53 c3 ee 37 e3 52 ce e3 4f 9e 50 3b
d8 0c 06 22 ad 79 c6 dc ee 88 35 47 c6 a3 b3 25'''
},
# Message
'''cd c8 7d a2 23 d7 86 df 3b 45 e0 bb bc 72 13 26
d1 ee 2a f8 06 cc 31 54 75 cc 6f 0d 9c 66 e1 b6
23 71 d4 5c e2 39 2e 1a c9 28 44 c3 10 10 2f 15
6a 0d 8d 52 c1 f4 c4 0b a3 aa 65 09 57 86 cb 76
97 57 a6 56 3b a9 58 fe d0 bc c9 84 e8 b5 17 a3
d5 f5 15 b2 3b 8a 41 e7 4a a8 67 69 3f 90 df b0
61 a6 e8 6d fa ae e6 44 72 c0 0e 5f 20 94 57 29
cb eb e7 7f 06 ce 78 e0 8f 40 98 fb a4 1f 9d 61
93 c0 31 7e 8b 60 d4 b6 08 4a cb 42 d2 9e 38 08
a3 bc 37 2d 85 e3 31 17 0f cb f7 cc 72 d0 b7 1c
29 66 48 b3 a4 d1 0f 41 62 95 d0 80 7a a6 25 ca
b2 74 4f d9 ea 8f d2 23 c4 25 37 02 98 28 bd 16
be 02 54 6f 13 0f d2 e3 3b 93 6d 26 76 e0 8a ed
1b 73 31 8b 75 0a 01 67 d0''',
# Signature
'''90 74 30 8f b5 98 e9 70 1b 22 94 38 8e 52 f9 71
fa ac 2b 60 a5 14 5a f1 85 df 52 87 b5 ed 28 87
e5 7c e7 fd 44 dc 86 34 e4 07 c8 e0 e4 36 0b c2
26 f3 ec 22 7f 9d 9e 54 63 8e 8d 31 f5 05 12 15
df 6e bb 9c 2f 95 79 aa 77 59 8a 38 f9 14 b5 b9
c1 bd 83 c4 e2 f9 f3 82 a0 d0 aa 35 42 ff ee 65
98 4a 60 1b c6 9e b2 8d eb 27 dc a1 2c 82 c2 d4
c3 f6 6c d5 00 f1 ff 2b 99 4d 8a 4e 30 cb b3 3c''',
# Salt
'''de e9 59 c7 e0 64 11 36 14 20 ff 80 18 5e d5 7f
3e 67 76 af''',
# Hash
SHA
),
#
# Example 1.2 to be found in
# ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1-vec.zip
#
(
# Private key
{
'n':'''a5 6e 4a 0e 70 10 17 58 9a 51 87 dc 7e a8 41 d1
56 f2 ec 0e 36 ad 52 a4 4d fe b1 e6 1f 7a d9 91
d8 c5 10 56 ff ed b1 62 b4 c0 f2 83 a1 2a 88 a3
94 df f5 26 ab 72 91 cb b3 07 ce ab fc e0 b1 df
d5 cd 95 08 09 6d 5b 2b 8b 6d f5 d6 71 ef 63 77
c0 92 1c b2 3c 27 0a 70 e2 59 8e 6f f8 9d 19 f1
05 ac c2 d3 f0 cb 35 f2 92 80 e1 38 6b 6f 64 c4
ef 22 e1 e1 f2 0d 0c e8 cf fb 22 49 bd 9a 21 37''',
'e':'''01 00 01''',
'd':'''33 a5 04 2a 90 b2 7d 4f 54 51 ca 9b bb d0 b4 47
71 a1 01 af 88 43 40 ae f9 88 5f 2a 4b be 92 e8
94 a7 24 ac 3c 56 8c 8f 97 85 3a d0 7c 02 66 c8
c6 a3 ca 09 29 f1 e8 f1 12 31 88 44 29 fc 4d 9a
e5 5f ee 89 6a 10 ce 70 7c 3e d7 e7 34 e4 47 27
a3 95 74 50 1a 53 26 83 10 9c 2a ba ca ba 28 3c
31 b4 bd 2f 53 c3 ee 37 e3 52 ce e3 4f 9e 50 3b
d8 0c 06 22 ad 79 c6 dc ee 88 35 47 c6 a3 b3 25'''
},
# Message
'''85 13 84 cd fe 81 9c 22 ed 6c 4c cb 30 da eb 5c
f0 59 bc 8e 11 66 b7 e3 53 0c 4c 23 3e 2b 5f 8f
71 a1 cc a5 82 d4 3e cc 72 b1 bc a1 6d fc 70 13
22 6b 9e''',
# Signature
'''3e f7 f4 6e 83 1b f9 2b 32 27 41 42 a5 85 ff ce
fb dc a7 b3 2a e9 0d 10 fb 0f 0c 72 99 84 f0 4e
f2 9a 9d f0 78 07 75 ce 43 73 9b 97 83 83 90 db
0a 55 05 e6 3d e9 27 02 8d 9d 29 b2 19 ca 2c 45
17 83 25 58 a5 5d 69 4a 6d 25 b9 da b6 60 03 c4
cc cd 90 78 02 19 3b e5 17 0d 26 14 7d 37 b9 35
90 24 1b e5 1c 25 05 5f 47 ef 62 75 2c fb e2 14
18 fa fe 98 c2 2c 4d 4d 47 72 4f db 56 69 e8 43''',
# Salt
'''ef 28 69 fa 40 c3 46 cb 18 3d ab 3d 7b ff c9 8f
d5 6d f4 2d''',
# Hash
SHA
),
#
# Example 2.1 to be found in
# ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1-vec.zip
#
(
# Private key
{
'n':'''01 d4 0c 1b cf 97 a6 8a e7 cd bd 8a 7b f3 e3 4f
a1 9d cc a4 ef 75 a4 74 54 37 5f 94 51 4d 88 fe
d0 06 fb 82 9f 84 19 ff 87 d6 31 5d a6 8a 1f f3
a0 93 8e 9a bb 34 64 01 1c 30 3a d9 91 99 cf 0c
7c 7a 8b 47 7d ce 82 9e 88 44 f6 25 b1 15 e5 e9
c4 a5 9c f8 f8 11 3b 68 34 33 6a 2f d2 68 9b 47
2c bb 5e 5c ab e6 74 35 0c 59 b6 c1 7e 17 68 74
fb 42 f8 fc 3d 17 6a 01 7e dc 61 fd 32 6c 4b 33
c9''',
'e':'''01 00 01''',
'd':'''02 7d 14 7e 46 73 05 73 77 fd 1e a2 01 56 57 72
17 6a 7d c3 83 58 d3 76 04 56 85 a2 e7 87 c2 3c
15 57 6b c1 6b 9f 44 44 02 d6 bf c5 d9 8a 3e 88
ea 13 ef 67 c3 53 ec a0 c0 dd ba 92 55 bd 7b 8b
b5 0a 64 4a fd fd 1d d5 16 95 b2 52 d2 2e 73 18
d1 b6 68 7a 1c 10 ff 75 54 5f 3d b0 fe 60 2d 5f
2b 7f 29 4e 36 01 ea b7 b9 d1 ce cd 76 7f 64 69
2e 3e 53 6c a2 84 6c b0 c2 dd 48 6a 39 fa 75 b1'''
},
# Message
'''da ba 03 20 66 26 3f ae db 65 98 48 11 52 78 a5
2c 44 fa a3 a7 6f 37 51 5e d3 36 32 10 72 c4 0a
9d 9b 53 bc 05 01 40 78 ad f5 20 87 51 46 aa e7
0f f0 60 22 6d cb 7b 1f 1f c2 7e 93 60''',
# Signature
'''01 4c 5b a5 33 83 28 cc c6 e7 a9 0b f1 c0 ab 3f
d6 06 ff 47 96 d3 c1 2e 4b 63 9e d9 13 6a 5f ec
6c 16 d8 88 4b dd 99 cf dc 52 14 56 b0 74 2b 73
68 68 cf 90 de 09 9a db 8d 5f fd 1d ef f3 9b a4
00 7a b7 46 ce fd b2 2d 7d f0 e2 25 f5 46 27 dc
65 46 61 31 72 1b 90 af 44 53 63 a8 35 8b 9f 60
76 42 f7 8f ab 0a b0 f4 3b 71 68 d6 4b ae 70 d8
82 78 48 d8 ef 1e 42 1c 57 54 dd f4 2c 25 89 b5
b3''',
# Salt
'''57 bf 16 0b cb 02 bb 1d c7 28 0c f0 45 85 30 b7
d2 83 2f f7''',
SHA
),
#
# Example 8.1 to be found in
# ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1-vec.zip
#
(
# Private key
{
'n':'''49 53 70 a1 fb 18 54 3c 16 d3 63 1e 31 63 25 5d
f6 2b e6 ee e8 90 d5 f2 55 09 e4 f7 78 a8 ea 6f
bb bc df 85 df f6 4e 0d 97 20 03 ab 36 81 fb ba
6d d4 1f d5 41 82 9b 2e 58 2d e9 f2 a4 a4 e0 a2
d0 90 0b ef 47 53 db 3c ee 0e e0 6c 7d fa e8 b1
d5 3b 59 53 21 8f 9c ce ea 69 5b 08 66 8e de aa
dc ed 94 63 b1 d7 90 d5 eb f2 7e 91 15 b4 6c ad
4d 9a 2b 8e fa b0 56 1b 08 10 34 47 39 ad a0 73
3f''',
'e':'''01 00 01''',
'd':'''6c 66 ff e9 89 80 c3 8f cd ea b5 15 98 98 83 61
65 f4 b4 b8 17 c4 f6 a8 d4 86 ee 4e a9 13 0f e9
b9 09 2b d1 36 d1 84 f9 5f 50 4a 60 7e ac 56 58
46 d2 fd d6 59 7a 89 67 c7 39 6e f9 5a 6e ee bb
45 78 a6 43 96 6d ca 4d 8e e3 de 84 2d e6 32 79
c6 18 15 9c 1a b5 4a 89 43 7b 6a 61 20 e4 93 0a
fb 52 a4 ba 6c ed 8a 49 47 ac 64 b3 0a 34 97 cb
e7 01 c2 d6 26 6d 51 72 19 ad 0e c6 d3 47 db e9'''
},
# Message
'''81 33 2f 4b e6 29 48 41 5e a1 d8 99 79 2e ea cf
6c 6e 1d b1 da 8b e1 3b 5c ea 41 db 2f ed 46 70
92 e1 ff 39 89 14 c7 14 25 97 75 f5 95 f8 54 7f
73 56 92 a5 75 e6 92 3a f7 8f 22 c6 99 7d db 90
fb 6f 72 d7 bb 0d d5 74 4a 31 de cd 3d c3 68 58
49 83 6e d3 4a ec 59 63 04 ad 11 84 3c 4f 88 48
9f 20 97 35 f5 fb 7f da f7 ce c8 ad dc 58 18 16
8f 88 0a cb f4 90 d5 10 05 b7 a8 e8 4e 43 e5 42
87 97 75 71 dd 99 ee a4 b1 61 eb 2d f1 f5 10 8f
12 a4 14 2a 83 32 2e db 05 a7 54 87 a3 43 5c 9a
78 ce 53 ed 93 bc 55 08 57 d7 a9 fb''',
# Signature
'''02 62 ac 25 4b fa 77 f3 c1 ac a2 2c 51 79 f8 f0
40 42 2b 3c 5b af d4 0a 8f 21 cf 0f a5 a6 67 cc
d5 99 3d 42 db af b4 09 c5 20 e2 5f ce 2b 1e e1
e7 16 57 7f 1e fa 17 f3 da 28 05 2f 40 f0 41 9b
23 10 6d 78 45 aa f0 11 25 b6 98 e7 a4 df e9 2d
39 67 bb 00 c4 d0 d3 5b a3 55 2a b9 a8 b3 ee f0
7c 7f ec db c5 42 4a c4 db 1e 20 cb 37 d0 b2 74
47 69 94 0e a9 07 e1 7f bb ca 67 3b 20 52 23 80
c5''',
# Salt
'''1d 65 49 1d 79 c8 64 b3 73 00 9b e6 f6 f2 46 7b
ac 4c 78 fa''',
SHA
)
)
def testSign1(self):
    """Check sign() against the RSASSA-PSS known-answer vectors."""
    for i in range(len(self._testData)):
        # Build the key
        comps = [ long(rws(self._testData[i][0][x]),16) for x in ('n','e','d') ]
        key = MyKey(RSA.construct(comps))
        # Hash function
        h = self._testData[i][4].new()
        # Data to sign
        h.update(t2b(self._testData[i][1]))
        # Salt
        test_salt = t2b(self._testData[i][3])
        # Force the vector's fixed salt to be used instead of random bytes,
        # so the signature is deterministic and comparable.
        key._randfunc = lambda N: test_salt
        # The real test
        signer = PKCS.new(key)
        self.failUnless(signer.can_sign())
        s = signer.sign(h)
        self.assertEqual(s, t2b(self._testData[i][2]))
def testVerify1(self):
    """Check verify() against the same vectors using the public key only."""
    for i in range(len(self._testData)):
        # Build the key (public part only: n and e)
        comps = [ long(rws(self._testData[i][0][x]),16) for x in ('n','e') ]
        key = MyKey(RSA.construct(comps))
        # Hash function
        h = self._testData[i][4].new()
        # Data to sign
        h.update(t2b(self._testData[i][1]))
        # Salt
        test_salt = t2b(self._testData[i][3])
        # The real test
        key._randfunc = lambda N: test_salt
        verifier = PKCS.new(key)
        # A public-only key must not be able to sign.
        self.failIf(verifier.can_sign())
        result = verifier.verify(h, t2b(self._testData[i][2]))
        self.failUnless(result)
def testSignVerify(self):
    """Round-trip sign/verify with a fresh key, checking RNG/MGF usage."""
    h = SHA.new()
    h.update(b('blah blah blah'))
    rng = Random.new().read
    key = MyKey(RSA.generate(1024,rng))
    # Helper function to monitor what's requested from the MGF
    global mgfcalls
    def newMGF(seed,maskLen):
        global mgfcalls
        mgfcalls += 1
        return bchr(0x00)*maskLen
    # Verify that PSS is friendly to all ciphers
    for hashmod in (MD2,MD5,SHA,SHA224,SHA256,SHA384,RIPEMD):
        h = hashmod.new()
        h.update(b('blah blah blah'))
        # Verify that sign() asks for as many random bytes
        # as the hash output size (the default salt length)
        key.asked = 0
        signer = PKCS.new(key)
        s = signer.sign(h)
        self.failUnless(signer.verify(h, s))
        self.assertEqual(key.asked, h.digest_size)
    h = SHA.new()
    h.update(b('blah blah blah'))
    # Verify that sign() uses a different salt length
    for sLen in (0,3,21):
        key.asked = 0
        signer = PKCS.new(key, saltLen=sLen)
        s = signer.sign(h)
        self.assertEqual(key.asked, sLen)
        self.failUnless(signer.verify(h, s))
    # Verify that sign() uses the custom MGF
    mgfcalls = 0
    signer = PKCS.new(key, newMGF)
    s = signer.sign(h)
    self.assertEqual(mgfcalls, 1)
    self.failUnless(signer.verify(h, s))
    # Verify that sign() does not call the RNG
    # when salt length is 0, even when a new MGF is provided
    key.asked = 0
    mgfcalls = 0
    signer = PKCS.new(key, newMGF, 0)
    s = signer.sign(h)
    self.assertEqual(key.asked,0)
    self.assertEqual(mgfcalls, 1)
    self.failUnless(signer.verify(h, s))
def get_tests(config={}):
    """Return a fresh list of the test cases defined in this module."""
    # `config` is unused but kept for interface compatibility with the
    # other SelfTest modules.
    return list(list_test_cases(PKCS1_PSS_Tests))
if __name__ == '__main__':
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4
| apache-2.0 |
girving/tensorflow | tensorflow/python/kernel_tests/edit_distance_op_test.py | 139 | 8145 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.kernels.edit_distance_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
def ConstantOf(x):
    """Wrap x in a TF constant, coercing non-string arrays to int64."""
    arr = np.asarray(x)
    # dtype chars 'S' (bytes) and 'U' (unicode) are kept as-is; every other
    # dtype is normalized to int64, which the edit-distance op expects.
    if arr.dtype.char not in "SU":
        arr = np.asarray(arr, dtype=np.int64)
    return constant_op.constant(arr)
class EditDistanceTest(test.TestCase):
def _testEditDistanceST(self,
                        hypothesis_st,
                        truth_st,
                        normalize,
                        expected_output,
                        expected_shape,
                        expected_err_re=None):
    """Run edit_distance on one SparseTensor pair and check the result.

    Callers invoke this inside a test_session (see _testEditDistance), so
    eval() below has a default session available. If expected_err_re is
    given, the op is expected to fail with a matching error instead.
    """
    edit_distance = array_ops.edit_distance(
        hypothesis=hypothesis_st, truth=truth_st, normalize=normalize)
    if expected_err_re is None:
        # Static shape inference must already agree before evaluation.
        self.assertEqual(edit_distance.get_shape(), expected_shape)
        output = edit_distance.eval()
        self.assertAllClose(output, expected_output)
    else:
        with self.assertRaisesOpError(expected_err_re):
            edit_distance.eval()
def _testEditDistance(self,
                      hypothesis,
                      truth,
                      normalize,
                      expected_output,
                      expected_err_re=None):
    """Check edit_distance for both SparseTensorValue and SparseTensor inputs.

    hypothesis and truth are (indices, values, dense_shape) tuples; the
    expected result shape is derived from the two dense shapes.
    """
    # Shape inference figures out the shape from the shape variables
    # Explicit tuple() needed since zip returns an iterator in Python 3.
    # The last dimension (sequence length) is dropped from the output shape.
    expected_shape = [
        max(h, t) for h, t in tuple(zip(hypothesis[2], truth[2]))[:-1]
    ]
    # SparseTensorValue inputs.
    with ops.Graph().as_default() as g, self.test_session(g):
        # hypothesis and truth are (index, value, shape) tuples
        self._testEditDistanceST(
            hypothesis_st=sparse_tensor.SparseTensorValue(
                *[ConstantOf(x) for x in hypothesis]),
            truth_st=sparse_tensor.SparseTensorValue(
                *[ConstantOf(x) for x in truth]),
            normalize=normalize,
            expected_output=expected_output,
            expected_shape=expected_shape,
            expected_err_re=expected_err_re)
    # SparseTensor inputs.
    with ops.Graph().as_default() as g, self.test_session(g):
        # hypothesis and truth are (index, value, shape) tuples
        self._testEditDistanceST(
            hypothesis_st=sparse_tensor.SparseTensor(
                *[ConstantOf(x) for x in hypothesis]),
            truth_st=sparse_tensor.SparseTensor(*[ConstantOf(x) for x in truth]),
            normalize=normalize,
            expected_output=expected_output,
            expected_shape=expected_shape,
            expected_err_re=expected_err_re)
def testEditDistanceNormalized(self):
hypothesis_indices = [[0, 0], [0, 1], [1, 0], [1, 1]]
hypothesis_values = [0, 1, 1, -1]
hypothesis_shape = [2, 2]
truth_indices = [[0, 0], [1, 0], [1, 1]]
truth_values = [0, 1, 1]
truth_shape = [2, 2]
expected_output = [1.0, 0.5]
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=True,
expected_output=expected_output)
def testEditDistanceUnnormalized(self):
hypothesis_indices = [[0, 0], [1, 0], [1, 1]]
hypothesis_values = [10, 10, 11]
hypothesis_shape = [2, 2]
truth_indices = [[0, 0], [0, 1], [1, 0], [1, 1]]
truth_values = [1, 2, 1, -1]
truth_shape = [2, 3]
expected_output = [2.0, 2.0]
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=False,
expected_output=expected_output)
def testEditDistanceProperDistance(self):
# In this case, the values are individual characters stored in the
# SparseTensor (type DT_STRING)
hypothesis_indices = ([[0, i] for i, _ in enumerate("algorithm")] +
[[1, i] for i, _ in enumerate("altruistic")])
hypothesis_values = [x for x in "algorithm"] + [x for x in "altruistic"]
hypothesis_shape = [2, 11]
truth_indices = ([[0, i] for i, _ in enumerate("altruistic")] +
[[1, i] for i, _ in enumerate("algorithm")])
truth_values = [x for x in "altruistic"] + [x for x in "algorithm"]
truth_shape = [2, 11]
expected_unnormalized = [6.0, 6.0]
expected_normalized = [6.0 / len("altruistic"), 6.0 / len("algorithm")]
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=False,
expected_output=expected_unnormalized)
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=True,
expected_output=expected_normalized)
def testEditDistance3D(self):
hypothesis_indices = [[0, 0, 0], [1, 0, 0]]
hypothesis_values = [0, 1]
hypothesis_shape = [2, 1, 1]
truth_indices = [[0, 1, 0], [1, 0, 0], [1, 1, 0]]
truth_values = [0, 1, 1]
truth_shape = [2, 2, 1]
expected_output = [
[np.inf, 1.0], # (0,0): no truth, (0,1): no hypothesis
[0.0, 1.0]
] # (1,0): match, (1,1): no hypothesis
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=True,
expected_output=expected_output)
def testEditDistanceZeroLengthHypothesis(self):
hypothesis_indices = np.empty((0, 2), dtype=np.int64)
hypothesis_values = []
hypothesis_shape = [1, 0]
truth_indices = [[0, 0]]
truth_values = [0]
truth_shape = [1, 1]
expected_output = [1.0]
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=True,
expected_output=expected_output)
def testEditDistanceZeroLengthTruth(self):
hypothesis_indices = [[0, 0]]
hypothesis_values = [0]
hypothesis_shape = [1, 1]
truth_indices = np.empty((0, 2), dtype=np.int64)
truth_values = []
truth_shape = [1, 0]
expected_output = [np.inf] # Normalized, loss is 1/0 = inf
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=True,
expected_output=expected_output)
def testEditDistanceZeroLengthHypothesisAndTruth(self):
hypothesis_indices = np.empty((0, 2), dtype=np.int64)
hypothesis_values = []
hypothesis_shape = [1, 0]
truth_indices = np.empty((0, 2), dtype=np.int64)
truth_values = []
truth_shape = [1, 0]
expected_output = [0] # Normalized is 0 because of exact match
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=True,
expected_output=expected_output)
if __name__ == "__main__":
test.main()
| apache-2.0 |
FireWRT/OpenWrt-Firefly-Libraries | staging_dir/host/lib/scons-2.3.1/SCons/Tool/rmic.py | 8 | 4446 | """SCons.Tool.rmic
Tool-specific initialization for rmic.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/rmic.py 2014/03/02 14:18:15 garyo"
import os.path
import SCons.Action
import SCons.Builder
import SCons.Node.FS
import SCons.Util
def emit_rmic_classes(target, source, env):
    """Create and return lists of Java RMI stub and skeleton
    class files to be created from a set of class files.

    For each source .class file, computes its dotted Java class name
    (relative to the class directory, with the class suffix stripped) and
    derives the _Stub (and, for JDK 1.4, _Skel) target file nodes.
    """
    class_suffix = env.get('JAVACLASSSUFFIX', '.class')
    classdir = env.get('JAVACLASSDIR')

    if not classdir:
        try:
            s = source[0]
        except IndexError:
            classdir = '.'
        else:
            try:
                # Set by the javac builder when the .class was compiled.
                classdir = s.attributes.java_classdir
            except AttributeError:
                classdir = '.'
    classdir = env.Dir(classdir).rdir()
    if str(classdir) == '.':
        c_ = None
    else:
        c_ = str(classdir) + os.sep

    slist = []
    for src in source:
        try:
            classname = src.attributes.java_classname
        except AttributeError:
            classname = str(src)
            if c_ and classname[:len(c_)] == c_:
                classname = classname[len(c_):]
            # BUGFIX: the slices were swapped (comparing the prefix-trimmed
            # name against the suffix), so '.class' was never stripped.
            if class_suffix and classname[-len(class_suffix):] == class_suffix:
                classname = classname[:-len(class_suffix)]
        s = src.rfile()
        s.attributes.java_classdir = classdir
        s.attributes.java_classname = classname
        slist.append(s)

    stub_suffixes = ['_Stub']
    if env.get('JAVAVERSION') == '1.4':
        stub_suffixes.append('_Skel')

    tlist = []
    for s in source:
        for suff in stub_suffixes:
            fname = s.attributes.java_classname.replace('.', os.sep) + \
                    suff + class_suffix
            t = target[0].File(fname)
            t.attributes.java_lookupdir = target[0]
            tlist.append(t)

    return tlist, source
# Shared Action: $RMICCOM is the command line; $RMICCOMSTR, if set, is the
# message printed in its place.
RMICAction = SCons.Action.Action('$RMICCOM', '$RMICCOMSTR')

# Builder producing RMI stub/skeleton .class files in a target directory;
# emit_rmic_classes computes the concrete target file nodes.
RMICBuilder = SCons.Builder.Builder(action = RMICAction,
                     emitter = emit_rmic_classes,
                     src_suffix = '$JAVACLASSSUFFIX',
                     target_factory = SCons.Node.FS.Dir,
                     source_factory = SCons.Node.FS.File)
def generate(env):
    """Add Builders and construction variables for rmic to an Environment."""
    # Register the builder and its default construction variables.
    env['BUILDERS']['RMIC'] = RMICBuilder

    env['RMIC'] = 'rmic'
    env['RMICFLAGS'] = SCons.Util.CLVar('')
    # -d: output directory for generated stubs; -classpath: where the
    # compiled classes live (both taken from node attributes set by the
    # emitter above).
    env['RMICCOM'] = '$RMIC $RMICFLAGS -d ${TARGET.attributes.java_lookupdir} -classpath ${SOURCE.attributes.java_classdir} ${SOURCES.attributes.java_classname}'
    env['JAVACLASSSUFFIX'] = '.class'
def exists(env):
    """Report the rmic tool as available without probing for it.

    As reported by Jan Nijtmans in issue #2730, the simple
        return env.Detect('rmic')
    doesn't always work during initialization.  For now, we stop trying to
    detect an executable (analogous to the javac Builder).
    TODO: Come up with a proper detect() routine...and enable it.
    """
    return 1
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-2.0 |
Dklotz-Circle/security_monkey | security_monkey/watchers/rds_security_group.py | 14 | 4926 | # Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.watchers.rds_security_group
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Patrick Kelley <pkelley@netflix.com> @monkeysecurity
"""
from security_monkey.watcher import Watcher
from security_monkey.watcher import ChangeItem
from security_monkey.constants import TROUBLE_REGIONS
from security_monkey.exceptions import BotoConnectionIssue
from security_monkey import app
from boto.rds import regions
class RDSSecurityGroup(Watcher):
    """Watcher that snapshots RDS DB security groups across accounts/regions."""

    # Identifiers used by the security_monkey framework for storage/display.
    index = 'rds'
    i_am_singular = 'RDS Security Group'
    i_am_plural = 'RDS Security Groups'

    def __init__(self, accounts=None, debug=False):
        super(RDSSecurityGroup, self).__init__(accounts=accounts, debug=debug)

    def slurp(self):
        """
        :returns: item_list - list of RDS Security Groups.
        :returns: exception_map - A dict where the keys are a tuple containing the
            location of the exception and the value is the actual exception
        """
        self.prep_for_slurp()
        item_list = []
        exception_map = {}
        from security_monkey.common.sts_connect import connect
        for account in self.accounts:
            for region in regions():
                app.logger.debug("Checking {}/{}/{}".format(self.index, account, region.name))
                sgs = []
                try:
                    rds = connect(account, 'rds', region=region)
                    # Page through all DB security groups via the marker token.
                    marker = None
                    while True:
                        response = self.wrap_aws_rate_limited_call(
                            rds.get_all_dbsecurity_groups,
                            marker=marker
                        )
                        sgs.extend(response)
                        if response.marker:
                            marker = response.marker
                        else:
                            break
                except Exception as e:
                    # Record (not raise) connection failures, except for
                    # regions known to be unavailable/flaky.
                    if region.name not in TROUBLE_REGIONS:
                        exc = BotoConnectionIssue(str(e), self.index, account, region.name)
                        self.slurp_exception((self.index, account, region.name), exc, exception_map)
                    continue
                app.logger.debug("Found {} {}".format(len(sgs), self.i_am_plural))
                for sg in sgs:
                    if self.check_ignore_list(sg.name):
                        continue
                    name = sg.name
                    vpc_id = None
                    # VPC-scoped groups get the VPC id appended to the name.
                    if hasattr(sg, 'VpcId'):
                        vpc_id = sg.VpcId
                        name = "{} (in {})".format(sg.name, vpc_id)
                    item_config = {
                        "name": sg.name,
                        "description": sg.description,
                        "owner_id": sg.owner_id,
                        "region": region.name,
                        "ec2_groups": [],
                        "ip_ranges": [],
                        "vpc_id": vpc_id
                    }
                    for ipr in sg.ip_ranges:
                        ipr_config = {
                            "cidr_ip": ipr.cidr_ip,
                            "status": ipr.status,
                        }
                        item_config["ip_ranges"].append(ipr_config)
                    # Sorted so diffs between successive runs are stable.
                    # NOTE(review): sorting a list of dicts relies on Python 2
                    # comparison semantics — confirm before any py3 port.
                    item_config["ip_ranges"] = sorted(item_config["ip_ranges"])
                    for ec2_sg in sg.ec2_groups:
                        ec2sg_config = {
                            "name": ec2_sg.name,
                            "owner_id": ec2_sg.owner_id,
                            "Status": ec2_sg.Status,
                        }
                        item_config["ec2_groups"].append(ec2sg_config)
                    item_config["ec2_groups"] = sorted(item_config["ec2_groups"])
                    item = RDSSecurityGroupItem(region=region.name, account=account, name=name, config=item_config)
                    item_list.append(item)
        return item_list, exception_map
class RDSSecurityGroupItem(ChangeItem):
    """One RDS security group snapshot tracked by security_monkey."""

    # BUGFIX: the default for ``config`` was a mutable ``{}`` shared across
    # all calls; use None as the sentinel and build a fresh dict per call.
    def __init__(self, region=None, account=None, name=None, config=None):
        super(RDSSecurityGroupItem, self).__init__(
            index=RDSSecurityGroup.index,
            region=region,
            account=account,
            name=name,
            new_config=config if config is not None else {})
| apache-2.0 |
pythondigest/pythondigest | digest/forms.py | 1 | 3789 | # -*- encoding: utf-8 -*-
from ckeditor.widgets import CKEditorWidget, json_encode
from django import forms
from django.contrib import admin
from django.contrib.admin import widgets
from django.contrib.admin.options import get_ul_class
from django.forms import ChoiceField, ModelForm
from django.template.loader import render_to_string
from django.utils.encoding import force_text
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
try:
# Django >=1.7
from django.forms.utils import flatatt
except ImportError:
# Django <1.7
from django.forms.util import flatatt
from digest.models import Item
# Moderation workflow states for an Item: (stored value, display label).
ITEM_STATUS_CHOICES = (('queue', 'В очередь'),
                       ('moderated', 'Отмодерировано'),)
class GlavRedWidget(CKEditorWidget):
    """CKEditor widget rendered through a project-local template.

    Identical to ``CKEditorWidget`` except that it renders
    ``custom_widget/ckeditor_widget.html`` instead of the stock template.
    """

    def render(self, name, value, attrs=None):
        # Normalize missing values so the template always gets a string.
        if value is None:
            value = ''
        final_attrs = self.build_attrs(attrs, name=name)
        self._set_config()
        # Plugin triples must be plain text before JSON encoding.
        external_plugin_resources = [
            [force_text(a), force_text(b), force_text(c)]
            for a, b, c in self.external_plugin_resources]
        return mark_safe(
            render_to_string('custom_widget/ckeditor_widget.html', {
                'final_attrs': flatatt(final_attrs),
                'value': conditional_escape(force_text(value)),
                'id': final_attrs['id'],
                'config': json_encode(self.config),
                'external_plugin_resources': json_encode(
                    external_plugin_resources)
            }))
class ItemStatusForm(ModelForm):
    """Admin form for Item showing the moderation status as radio buttons."""

    # Render the status as horizontal admin radio buttons instead of the
    # default select widget.
    status = ChoiceField(label='Статус',
                         widget=widgets.AdminRadioSelect(
                             attrs={'class': get_ul_class(admin.HORIZONTAL)}),
                         choices=ITEM_STATUS_CHOICES)

    class Meta:
        model = Item
        fields = '__all__'
        widgets = {
            'description': GlavRedWidget,
        }
# Values treated as "empty" by HoneypotField.clean().
EMPTY_VALUES = (None, '')
class HoneypotWidget(forms.TextInput):
    """Text input hidden from humans but typically filled in by spam bots."""

    is_hidden = True

    def __init__(self, attrs=None, html_comment=False, *args, **kwargs):
        # html_comment=True wraps the rendered input in an HTML comment.
        self.html_comment = html_comment
        super(HoneypotWidget, self).__init__(attrs, *args, **kwargs)
        # Hide via inline style unless the caller hides it with a CSS class.
        if 'class' not in self.attrs:
            self.attrs['style'] = 'display:none'

    def render(self, *args, **kwargs):
        html = super(HoneypotWidget, self).render(*args, **kwargs)
        if self.html_comment:
            html = '<!-- %s -->' % html
        return html
class HoneypotField(forms.Field):
    """Anti-spam field: submissions that change its value are rejected."""

    widget = HoneypotWidget

    def clean(self, value):
        # Accept only an untouched field: both initial and submitted value
        # empty, or the submitted value identical to the initial one.
        if self.initial in EMPTY_VALUES and value in EMPTY_VALUES or value == self.initial:
            return value
        raise forms.ValidationError('Anti-spam field changed in value.')
class AddNewsForm(forms.ModelForm):
    """Public "suggest a news item" form with a honeypot anti-spam field."""

    # Bait field for bots; humans never see it (hidden by HoneypotWidget).
    name = HoneypotField()

    class Meta:
        model = Item
        fields = ('link', 'section', 'title', 'language', 'description',)

    def __init__(self, *args, **kwargs):
        kwargs['initial'] = {
            'section': 6
        }  # The section with pk=6 is preselected on the form
        super(AddNewsForm, self).__init__(*args, **kwargs)
        # Bootstrap styling for the visible widgets.
        self.fields['title'].widget.attrs = {
            'class': 'form-control small',
        }
        self.fields['title'].required = False
        self.fields['link'].widget.attrs = {
            'class': 'form-control small',
        }
        self.fields['language'].widget.attrs = {
            'class': 'form-control',
        }
        self.fields['description'].widget.attrs = {
            'class': 'form-control',
        }
        self.fields['section'].widget.attrs = {
            'class': 'form-control',
        }
| mit |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/django_extensions/management/commands/mail_debug.py | 27 | 2924 | import asyncore
import sys
from logging import getLogger
from optparse import make_option
from smtpd import SMTPServer
from django.core.management.base import BaseCommand, CommandError
from django_extensions.management.utils import setup_logger, signalcommand
logger = getLogger(__name__)
class ExtensionDebuggingServer(SMTPServer):
    """Duplication of smtpd.DebuggingServer, but using logging instead of print."""

    # Do something with the gathered message
    def process_message(self, peer, mailfrom, rcpttos, data):
        """Output will be sent to the module logger at INFO level."""
        inheaders = 1
        lines = data.split('\n')
        logger.info('---------- MESSAGE FOLLOWS ----------')
        for line in lines:
            # headers first
            if inheaders and not line:
                # First blank line ends the headers; log the sending peer.
                logger.info('X-Peer: %s' % peer[0])
                inheaders = 0
            logger.info(line)
        logger.info('------------ END MESSAGE ------------')
class Command(BaseCommand):
    """Management command: run a local SMTP server that logs incoming mail."""

    option_list = BaseCommand.option_list + (
        make_option('--output', dest='output_file', default=None,
                    help='Specifies an output file to send a copy of all messages (not flushed immediately).'),
        make_option('--use-settings', dest='use_settings',
                    action='store_true', default=False,
                    help='Uses EMAIL_HOST and HOST_PORT from Django settings.'),
    )
    help = "Starts a test mail server for development."
    args = '[optional port number or ippaddr:port]'

    requires_system_checks = False

    @signalcommand
    def handle(self, addrport='', *args, **options):
        # Address resolution order: CLI argument ('addr:port' or 'port'),
        # then Django settings (--use-settings), then 127.0.0.1:1025.
        if args:
            raise CommandError('Usage is mail_debug %s' % self.args)
        if not addrport:
            if options.get('use_settings', False):
                from django.conf import settings
                addr = getattr(settings, 'EMAIL_HOST', '')
                port = str(getattr(settings, 'EMAIL_PORT', '1025'))
            else:
                addr = ''
                port = '1025'
        else:
            try:
                addr, port = addrport.split(':')
            except ValueError:
                addr, port = '', addrport
        if not addr:
            addr = '127.0.0.1'
        if not port.isdigit():
            raise CommandError("%r is not a valid port number." % port)
        else:
            port = int(port)
        # Add console handler
        setup_logger(logger, stream=self.stdout, filename=options.get('output_file', None))

        def inner_run():
            quit_command = (sys.platform == 'win32') and 'CTRL-BREAK' or 'CONTROL-C'
            print("Now accepting mail at %s:%s -- use %s to quit" % (addr, port, quit_command))
            ExtensionDebuggingServer((addr, port), None)
            # Blocks here, dispatching SMTP traffic until interrupted.
            asyncore.loop()
        try:
            inner_run()
        except KeyboardInterrupt:
            pass
| agpl-3.0 |
rksaxena/hacker_ramp | find_overlap.py | 1 | 8043 | import tags
import difflib
import json
vogue_data = {'foundation': ['Mousse Foundation'], 'chain': ['chain'], 'corset': ['closet'], 'tops': ['jersey top'], 'misc': ['everyday heavy duty makeup', 'minimum', 'gallery', 'place', 'heart-wrenching classic', 'blends', 'midway', 'high level', 'fashionista', 'heart-wrenching classic', 'skin types', 'skin types', 'faux fur bomber', 'everyday heavy duty makeup', 'everyday', 'off-shoulder blouses', 'style', 'acute aversion', 'gold accents', 'bell-bottoms', 'cabana', 'makes', 'faux fur bomber', 'long lasting', 'early 19th century', 'tones', 'Bollywood', 'erstwhile flower childs', 'bohemian sartorial sensibility', 'off-shoulder blouses', 'celebrity-inspired summer', 'cool evening winds', 'animal prints', 'perfect canvas', 'make-up free look', 'fashion accessory', 'no-make up', 'great coverage', 'collarbone steal', 'womens', 'thinking ahead', 'faux fur bomber', 'leather', 'light', 'popularity charts', 'Amazon India Fashion Week', 'larger-than-life persona', 'bohemian sartorial sensibility', 'pure style', 'animal prints', 'Rose Ivory', 'Rose Honey', 'round'], 'briefs': ['brief comebacks', 'brief comebacks'], 'source': 'vogue', 'trousers': ['trousers'], 'necklace': ['favourite statement necklace', 'favourite statement necklace'], 'earrings': ['stud earrings'], 'skirts': ['midi skirts', 'midi skirts', 'midi skirts']}
zara_data = {'coats': ['tribal linen coat', 'openwork coat'], 'sweaters': ['short sleeve sweater'], 'sunglasses': ['aviator sunglasses'], 'shorts': ['paisley print bermuda shorts', 'micro polka dot textured weave bermuda shorts', 'guipure lace bermuda shorts', 'bird print flowing bermuda shorts'], 'sweatshirts': ['sweatshirt', 'raglan sleeve sweatshirt', 'raglan sleeve sweatshirt'], 'tops': ['street top', 'frayed peplum top', 'guipure lace top'], 'dungarees': ['vintage fade denim dungarees', 'white dungarees with rips'], 'misc': ['tribal jacquard scarf', 'culottes', 'leather platform slides', 'blouse with lace trim', 'bandana print silk style scarf', 'blouse with open back', 'tie-dye jacquard culottes', 'tie-dye hand embroidered poncho', 'striped blouse'], 'source': 'zara', 't-shirt': ['striped back and pocket t-shirt', 'striped back and pocket t-shirt', 'striped fabric seamed t-shirt', 't-shirt with zip on sleeves', 'printed t-shirt', 'flower print t-shirt', 'textured t-shirt', 'creased texture t-shirt', 'textured t-shirt', 'short sleeve t-shirt with oversized pocket', 'short sleeve t-shirt with oversized pocket', 't-shirt with zip on sleeves', 'short sleeve t-shirt with oversized pocket', 'short sleeve t-shirt with oversized pocket', 'oversized linen t-shirt'], 'bag': ['studs and chain cross body bag'], 'blazers': ['textured weave suit blazer', 'paisley print blazer', 'micro polka dot textured weave blazer'], 'skirts': ['short tribal skirt', 'wrap skirt', 'tie-dye midi skirt', 'lace tube skirt'], 'shirts': ['plain twill shirt', 'striped indigo shirt', 'bull denim shirt', 'bull denim shirt', 'faded indigo striped shirt', 'poplin shirt', 'stretch shirt with mandarin collar', 'striped shirt', 'horizontal stripe shirt', 'stretch shirt with mandarin collar', 'poplin shirt with contrasting collar', 'short sleeve nautical print polo shirt', 'short sleeve nautical print polo shirt', 'poplin shirt', 'oversized studio shirt', 'denim shirt dress', 'asymmetric hem shirt'], 
'jackets': ['bomber style jacket', 'patch bomber jacket', 'contrast blue jacket', 'long embroidered bomber jacket', 'roll-up sleeve jacket', 'bleach wash denim jacket', 'guipure lace bomber jacket', 'sequin patchwork jacket', 'jacket with asymmetric back'], 'trousers': ['textured weave suit trousers', 'bleach effect skinny trousers', 'darted trousers with cord', 'darted trousers with cord', 'darted trousers with cord', 'high waist skinny trousers', 'cropped trousers with front pleat', 'mid-rise biker trousers', 'straight leg flowing trousers', 'mid-rise power stretch trousers'], 'necklace': ['triple choker necklace'], 'sandals': ['crossover metallic sandals', 'leather strap sandals', 'casual roman sandals', 'flat metallic leather sandals', 'leather sandals with buckle'], 'hat': ['straw hat'], 'dresses': ['multicoloured striped dress', 'denim dress', 'lace midi dress', 'lace tube dress', 'guipure lace tube dress'], 'jumpsuit': ['faded denim jumpsuit', 'short jumpsuit', 'short jumpsuit', 'short jumpsuit']}
def get_article_type(line):
    """Classify a free-text product description into a known article type.

    Strategy, in order: exact match on the last word, fuzzy match on the
    last word (ratio > 0.85), then best fuzzy match over all words
    (ratio > 0.8).  Falls back to 'misc'.
    """
    tag_map = tags.create_set_article_types()
    words = line.split()
    if not words:
        # Empty/whitespace-only input previously crashed with IndexError.
        return 'misc'
    last_word = words[-1]
    if last_word in tag_map:
        return last_word
    # Fuzzy match on the trailing word (handles plurals/typos).
    for tag in tag_map:
        if difflib.SequenceMatcher(None, tag, last_word).ratio() > 0.85:
            return tag
    # Best fuzzy match over every word.  (Renamed the loop variable that
    # shadowed the builtin ``str``.)
    best_tag = ''
    best_ratio = 0
    for word in words:
        for tag in tag_map:
            edit_distance = difflib.SequenceMatcher(None, tag, word).ratio()
            if edit_distance > 0.8 and edit_distance > best_ratio:
                best_tag = tag
                best_ratio = edit_distance
    if best_tag:
        return best_tag
    return 'misc'
def count_duplicates_within_source(json):
    """Collapse duplicate item names per category, tagging each with its source.

    Input: {'source': <tag>, <category>: [item, item, ...], ...}.
    Output: {<category>: {<item>: {'source': <tag>, 'count': <n>}}}.

    BUGFIX: the previous implementation deduplicated with a single set shared
    across all categories, so an item appearing under two categories raised
    KeyError; counting is now per (category, item).  The input dict is no
    longer mutated (it previously lost its 'source' key via pop()).
    NOTE: the parameter name ``json`` shadows the module-level json import;
    kept for caller compatibility.
    """
    source = json['source']
    new_map = dict()
    for k, v in json.items():
        if k == 'source':
            continue
        new_map[k] = dict()
        for string in v:
            if string in new_map[k]:
                new_map[k][string]['count'] += 1
            else:
                new_map[k][string] = {
                    'source': source,
                    'count': 1
                }
    return new_map
def map_article_types(data, source):
    """Re-bucket scraped item descriptions by detected article type.

    ``data`` maps arbitrary categories to lists of item descriptions plus a
    'source' key.  Each description is re-classified by exact/fuzzy matching
    its words against the known article types; unmatched items land in
    'misc'.  The result carries the same 'source' tag.

    NOTE: the ``source`` parameter was always overwritten by
    ``data['source']``; that behavior is kept.  The input dict is no longer
    mutated (it previously lost its 'source' key), and the local that
    shadowed the builtin ``map`` was renamed.
    """
    result = {'misc': []}
    source = data['source']
    tag_map = tags.create_set_article_types()
    for k, v in data.items():
        if k == 'source':
            continue
        for line in v:
            words = line.split()
            last_word = words[-1]
            # 1) Exact match on the trailing word.
            if last_word in tag_map:
                result.setdefault(last_word, []).append(line)
                continue
            # 2) Fuzzy match on the trailing word.
            matched = False
            for tag in tag_map:
                if difflib.SequenceMatcher(None, tag, last_word).ratio() > 0.85:
                    result.setdefault(tag, []).append(line)
                    matched = True
                    break
            if matched:
                continue
            # 3) Best fuzzy match across every word of the description.
            best_ratio = 0
            best_tag = ''
            for word in words:
                for tag in tag_map:
                    ratio = difflib.SequenceMatcher(None, tag, word).ratio()
                    if ratio > 0.8 and ratio > best_ratio:
                        best_tag = tag
                        best_ratio = ratio
            if best_tag:
                result.setdefault(best_tag, []).append(line)
                continue
            result['misc'].append(line)
    result['source'] = source
    return result
def merge_dicts(dict1, dict2):
    """Merge two per-category item maps after de-duplicating each source.

    Each input is a dict of category -> list of item names plus a 'source'
    key; the result maps category -> {item: {'source', 'count'}}.
    NOTE: Python 2 only (``iteritems``).  The merge is a shallow copy plus
    per-category update, so dict2's entries overwrite same-named items.
    """
    #map1 = map_article_types(dict1, 'vogue')
    #map2 = map_article_types(dict2, 'zara')
    map1 = dict1
    map2 = dict2
    # print json.dumps(map1)
    # print json.dumps(map2)
    cleaned_map1 = count_duplicates_within_source(map1)
    cleaned_map2 = count_duplicates_within_source(map2)
    # print cleaned_map2
    # print cleaned_map1
    merged_map = cleaned_map1.copy()
    for k, v in cleaned_map2.iteritems():
        if k in merged_map:
            merged_map[k].update(v)
        else:
            merged_map[k] = v
    return merged_map
def match_ratio():
print difflib.SequenceMatcher(None, 'tent', 'attention').ratio()
# print merge_dicts(vogue_data, zara_data) | apache-2.0 |
janocat/odoo | addons/account_asset/wizard/__init__.py | 445 | 1122 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_asset_change_duration
import wizard_asset_compute
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Dzerard/cms | public/js/three/utils/exporters/blender/2.66/scripts/addons/io_mesh_threejs/export_threejs.py | 5 | 74848 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
"""
Blender exporter for Three.js (ASCII JSON format).
TODO
- binary format
"""
import bpy
import mathutils
import shutil
import os
import os.path
import math
import operator
import random
# #####################################################
# Configuration
# #####################################################
DEFAULTS = {
"bgcolor" : [0, 0, 0],
"bgalpha" : 1.0,
"position" : [0, 0, 0],
"rotation" : [-math.pi/2, 0, 0],
"scale" : [1, 1, 1],
"camera" :
{
"name" : "default_camera",
"type" : "PerspectiveCamera",
"near" : 1,
"far" : 10000,
"fov" : 60,
"aspect": 1.333,
"position" : [0, 0, 10],
"target" : [0, 0, 0]
},
"light" :
{
"name" : "default_light",
"type" : "DirectionalLight",
"direction" : [0, 1, 1],
"color" : [1, 1, 1],
"intensity" : 0.8
}
}
# Rotation of -90 degrees about X as a 4x4 matrix (Blender is Z-up,
# three.js is Y-up).
ROTATE_X_PI2 = mathutils.Quaternion((1.0, 0.0, 0.0), math.radians(-90.0)).to_matrix().to_4x4()

# default colors for debugging (each material gets one distinct color):
# white, red, green, blue, yellow, cyan, magenta
COLORS = [0xeeeeee, 0xee0000, 0x00ee00, 0x0000ee, 0xeeee00, 0x00eeee, 0xee00ee]

# skinning
# Maximum number of bone influences exported per vertex.
MAX_INFLUENCES = 2
# #####################################################
# Templates - scene
# #####################################################
TEMPLATE_SCENE_ASCII = """\
{
"metadata" :
{
"formatVersion" : 3.2,
"type" : "scene",
"sourceFile" : "%(fname)s",
"generatedBy" : "Blender 2.66 Exporter",
"objects" : %(nobjects)s,
"geometries" : %(ngeometries)s,
"materials" : %(nmaterials)s,
"textures" : %(ntextures)s
},
"urlBaseType" : %(basetype)s,
%(sections)s
"transform" :
{
"position" : %(position)s,
"rotation" : %(rotation)s,
"scale" : %(scale)s
},
"defaults" :
{
"bgcolor" : %(bgcolor)s,
"bgalpha" : %(bgalpha)f,
"camera" : %(defcamera)s
}
}
"""
TEMPLATE_SECTION = """
"%s" :
{
%s
},
"""
TEMPLATE_OBJECT = """\
%(object_id)s : {
"geometry" : %(geometry_id)s,
"groups" : [ %(group_id)s ],
"material" : %(material_id)s,
"position" : %(position)s,
"rotation" : %(rotation)s,
"quaternion": %(quaternion)s,
"scale" : %(scale)s,
"visible" : %(visible)s,
"castShadow" : %(castShadow)s,
"receiveShadow" : %(receiveShadow)s,
"doubleSided" : %(doubleSided)s
}"""
TEMPLATE_EMPTY = """\
%(object_id)s : {
"groups" : [ %(group_id)s ],
"position" : %(position)s,
"rotation" : %(rotation)s,
"quaternion": %(quaternion)s,
"scale" : %(scale)s
}"""
TEMPLATE_GEOMETRY_LINK = """\
%(geometry_id)s : {
"type" : "ascii",
"url" : %(model_file)s
}"""
TEMPLATE_GEOMETRY_EMBED = """\
%(geometry_id)s : {
"type" : "embedded",
"id" : %(embed_id)s
}"""
TEMPLATE_TEXTURE = """\
%(texture_id)s : {
"url": %(texture_file)s%(extras)s
}"""
TEMPLATE_MATERIAL_SCENE = """\
%(material_id)s : {
"type": %(type)s,
"parameters": { %(parameters)s }
}"""
TEMPLATE_CAMERA_PERSPECTIVE = """\
%(camera_id)s : {
"type" : "PerspectiveCamera",
"fov" : %(fov)f,
"aspect": %(aspect)f,
"near" : %(near)f,
"far" : %(far)f,
"position": %(position)s,
"target" : %(target)s
}"""
TEMPLATE_CAMERA_ORTHO = """\
%(camera_id)s : {
"type" : "OrthographicCamera",
"left" : %(left)f,
"right" : %(right)f,
"top" : %(top)f,
"bottom": %(bottom)f,
"near" : %(near)f,
"far" : %(far)f,
"position": %(position)s,
"target" : %(target)s
}"""
TEMPLATE_LIGHT_DIRECTIONAL = """\
%(light_id)s : {
"type" : "DirectionalLight",
"direction" : %(direction)s,
"color" : %(color)d,
"intensity" : %(intensity).2f
}"""
TEMPLATE_LIGHT_POINT = """\
%(light_id)s : {
"type" : "PointLight",
"position" : %(position)s,
"color" : %(color)d,
"intensity" : %(intensity).3f
}"""
TEMPLATE_VEC4 = '[ %g, %g, %g, %g ]'
TEMPLATE_VEC3 = '[ %g, %g, %g ]'
TEMPLATE_VEC2 = '[ %g, %g ]'
TEMPLATE_STRING = '"%s"'
TEMPLATE_HEX = "0x%06x"
# #####################################################
# Templates - model
# #####################################################
TEMPLATE_FILE_ASCII = """\
{
"metadata" :
{
"formatVersion" : 3.1,
"generatedBy" : "Blender 2.66 Exporter",
"vertices" : %(nvertex)d,
"faces" : %(nface)d,
"normals" : %(nnormal)d,
"colors" : %(ncolor)d,
"uvs" : [%(nuvs)s],
"materials" : %(nmaterial)d,
"morphTargets" : %(nmorphTarget)d,
"bones" : %(nbone)d
},
%(model)s
}
"""
TEMPLATE_MODEL_ASCII = """\
"scale" : %(scale)f,
"materials" : [%(materials)s],
"vertices" : [%(vertices)s],
"morphTargets" : [%(morphTargets)s],
"normals" : [%(normals)s],
"colors" : [%(colors)s],
"uvs" : [%(uvs)s],
"faces" : [%(faces)s],
"bones" : [%(bones)s],
"skinIndices" : [%(indices)s],
"skinWeights" : [%(weights)s],
"animations" : [%(animations)s]
"""
TEMPLATE_VERTEX = "%g,%g,%g"
TEMPLATE_VERTEX_TRUNCATE = "%d,%d,%d"
TEMPLATE_N = "%g,%g,%g"
TEMPLATE_UV = "%g,%g"
TEMPLATE_C = "%d"
# #####################################################
# Utils
# #####################################################
def veckey3(x, y, z):
    """Quantize three components to 6 decimals, producing a hashable dict key."""
    return tuple(round(component, 6) for component in (x, y, z))
def veckey3d(v):
    """Quantized dict key for a vector-like object exposing .x/.y/.z."""
    components = (v.x, v.y, v.z)
    return veckey3(*components)
def veckey2d(v):
    """Quantize an indexable 2-component vector to 6 decimals for dict keys."""
    u, w = v[0], v[1]
    return round(u, 6), round(w, 6)
def get_faces(obj):
    """Return tessellated faces when the mesh has them (newer Blender API),
    otherwise fall back to the legacy ``faces`` attribute."""
    return obj.tessfaces if hasattr(obj, "tessfaces") else obj.faces
def get_normal_indices(v, normals, mesh):
    """Map each vertex index in *v* to its registered global normal index."""
    verts = mesh.vertices
    return [normals[veckey3d(verts[i].normal)] for i in v]
def get_uv_indices(face_index, uvs, mesh, layer_index):
    """Map each UV corner of the given tessface to its global UV index."""
    layer_data = mesh.tessface_uv_textures[layer_index].data
    return [uvs[veckey2d(coord)] for coord in layer_data[face_index].uv]
def get_color_indices(face_index, colors, mesh):
    """Map the face's four corner colors to their registered global indices."""
    corner = mesh.tessface_vertex_colors.active.data[face_index]
    quad = (corner.color1, corner.color2, corner.color3, corner.color4)
    return [colors[hexcolor(c)] for c in quad]
def rgb2int(rgb):
    """Pack a float RGB triple in [0, 1] into one 0xRRGGBB integer.

    (Same packing as hexcolor(); kept separate for API compatibility.)
    """
    r = int(rgb[0] * 255)
    g = int(rgb[1] * 255)
    b = int(rgb[2] * 255)
    return (r << 16) + (g << 8) + b
# #####################################################
# Utils - files
# #####################################################
def write_file(fname, content):
    """Write *content* to *fname*, truncating any existing file.

    Uses a context manager so the handle is closed even when the write
    raises (the original leaked the file object on error).
    """
    with open(fname, "w") as out:
        out.write(content)
def ensure_folder_exist(foldername):
    """Create folder (with whole path) if it doesn't exist yet.

    EAFP: the previous os.access() pre-check was racy (TOCTOU) and attempted
    creation of folders that exist but lack some permission bit.
    """
    try:
        os.makedirs(foldername)
    except OSError:
        # Already exists (or lost a creation race); anything else — e.g. the
        # path exists but is not a directory — is re-raised.
        if not os.path.isdir(foldername):
            raise
def ensure_extension(filepath, extension):
    """Append *extension* unless *filepath* already ends with it.

    The check is case-insensitive on the path (callers pass a lowercase
    extension); the original casing of *filepath* is preserved.
    """
    if filepath.lower().endswith(extension):
        return filepath
    return filepath + extension
def generate_mesh_filename(meshname, filepath):
    """Insert *meshname* before the extension of the normalized *filepath*."""
    base, ext = os.path.splitext(os.path.normpath(filepath))
    return base + "." + meshname + ext
# #####################################################
# Utils - alignment
# #####################################################
def bbox(vertices):
    """Compute the axis-aligned bounding box of a vertex array.

    Returns {'x': [min, max], 'y': [min, max], 'z': [min, max]};
    all zeros for an empty array.
    """
    if not vertices:
        return { 'x':[0,0], 'y':[0,0], 'z':[0,0] }
    xs = [v.co.x for v in vertices]
    ys = [v.co.y for v in vertices]
    zs = [v.co.z for v in vertices]
    return { 'x': [min(xs), max(xs)],
             'y': [min(ys), max(ys)],
             'z': [min(zs), max(zs)] }
def translate(vertices, t):
    """Translate every vertex in-place by the 3-component vector *t*."""
    dx, dy, dz = t[0], t[1], t[2]
    for vertex in vertices:
        vertex.co.x += dx
        vertex.co.y += dy
        vertex.co.z += dz
def center(vertices):
    """Center model (middle of bounding box) in-place; return the applied offset."""
    bb = bbox(vertices)
    offset = [-(bb['x'][0] + (bb['x'][1] - bb['x'][0]) / 2.0),
              -(bb['y'][0] + (bb['y'][1] - bb['y'][0]) / 2.0),
              -(bb['z'][0] + (bb['z'][1] - bb['z'][0]) / 2.0)]
    translate(vertices, offset)
    return offset
def top(vertices):
    """Align top of the model with the floor (Y-axis) and center around X/Z.

    Mutates the vertices in-place and returns the applied offset.
    """
    bb = bbox(vertices)
    mid_x = bb['x'][0] + (bb['x'][1] - bb['x'][0]) / 2.0
    top_y = bb['y'][1]
    mid_z = bb['z'][0] + (bb['z'][1] - bb['z'][0]) / 2.0
    offset = [-mid_x, -top_y, -mid_z]
    translate(vertices, offset)
    return offset
def bottom(vertices):
    """Align bottom of the model with the floor (Y-axis) and center around X/Z.

    Mutates the vertices in-place and returns the applied offset.
    """
    bb = bbox(vertices)
    mid_x = bb['x'][0] + (bb['x'][1] - bb['x'][0]) / 2.0
    bottom_y = bb['y'][0]
    mid_z = bb['z'][0] + (bb['z'][1] - bb['z'][0]) / 2.0
    offset = [-mid_x, -bottom_y, -mid_z]
    translate(vertices, offset)
    return offset
# #####################################################
# Elements rendering
# #####################################################
def hexcolor(c):
    """Pack a float RGB triple in [0, 1] into a single 0xRRGGBB integer."""
    r = int(c[0] * 255)
    g = int(c[1] * 255)
    b = int(c[2] * 255)
    return (r << 16) + (g << 8) + b
def generate_vertices(vertices, option_vertices_truncate, option_vertices):
    """Serialize all vertices as one comma-joined string ('' when disabled)."""
    if not option_vertices:
        return ""
    chunks = [generate_vertex(v, option_vertices_truncate) for v in vertices]
    return ",".join(chunks)
def generate_vertex(v, option_vertices_truncate):
    """Serialize one vertex position; integer-truncated when requested."""
    template = TEMPLATE_VERTEX_TRUNCATE if option_vertices_truncate else TEMPLATE_VERTEX
    return template % (v.co.x, v.co.y, v.co.z)
def generate_normal(n):
    """Serialize a normal 3-tuple using the '%g,%g,%g' template."""
    x, y, z = n[0], n[1], n[2]
    return TEMPLATE_N % (x, y, z)
def generate_vertex_color(c):
    """Serialize one packed integer vertex color as a decimal string."""
    return TEMPLATE_C % (c,)
def generate_uv(uv):
    """Serialize one UV pair using the '%g,%g' template."""
    u, w = uv[0], uv[1]
    return TEMPLATE_UV % (u, w)
# #####################################################
# Model exporter - faces
# #####################################################
def setBit(value, position, on):
    """Return *value* with the bit at *position* set (on=True) or cleared."""
    mask = 1 << position
    return (value | mask) if on else (value & ~mask)
def generate_faces(normals, uv_layers, colors, meshes, option_normals, option_colors, option_uv_coords, option_materials, option_faces):
    """Serialize the faces of all meshes into the three.js face stream.

    Returns (face_string, face_count).  Vertex and material indices are
    offset per mesh so the concatenated buffers line up.
    """
    if not option_faces:
        return "", 0

    vertex_offset = 0
    material_offset = 0

    chunks = []
    for mesh, object in meshes:
        vertexUV = len(mesh.uv_textures) > 0
        vertexColors = len(mesh.vertex_colors) > 0

        mesh_colors = option_colors and vertexColors
        mesh_uvs = option_uv_coords and vertexUV

        # BUGFIX: these branches previously assigned never-used names
        # (mesh_extract_uvs / mesh_extract_colors), so a missing active
        # layer did not disable UV/color export and could crash the
        # per-face lookups below.
        if vertexUV:
            active_uv_layer = mesh.uv_textures.active
            if not active_uv_layer:
                mesh_uvs = False

        if vertexColors:
            active_col_layer = mesh.vertex_colors.active
            if not active_col_layer:
                mesh_colors = False

        for i, f in enumerate(get_faces(mesh)):
            face = generate_face(f, i, normals, uv_layers, colors, mesh, option_normals, mesh_colors, mesh_uvs, option_materials, vertex_offset, material_offset)
            chunks.append(face)

        vertex_offset += len(mesh.vertices)

        # Meshes without materials still consume one material slot.
        material_count = len(mesh.materials)
        if material_count == 0:
            material_count = 1

        material_offset += material_count

    return ",".join(chunks), len(chunks)
def generate_face(f, faceIndex, normals, uv_layers, colors, mesh, option_normals, option_colors, option_uv_coords, option_materials, vertex_offset, material_offset):
    """Serialize one face into the three.js bitmask-prefixed index list.

    The leading faceType value flags which optional index groups follow
    (quad, material, UVs, normals, colors); the groups are then appended in
    the fixed order expected by THREE.JSONLoader.
    """
    isTriangle = ( len(f.vertices) == 3 )

    if isTriangle:
        nVertices = 3
    else:
        nVertices = 4

    hasMaterial = option_materials

    hasFaceUvs = False # not supported in Blender
    hasFaceVertexUvs = option_uv_coords

    hasFaceNormals = False # don't export any face normals (as they are computed in engine)
    hasFaceVertexNormals = option_normals

    hasFaceColors = False # not supported in Blender
    hasFaceVertexColors = option_colors

    # Bit layout of the faceType flags (must match JSONLoader).
    faceType = 0
    faceType = setBit(faceType, 0, not isTriangle)
    faceType = setBit(faceType, 1, hasMaterial)
    faceType = setBit(faceType, 2, hasFaceUvs)
    faceType = setBit(faceType, 3, hasFaceVertexUvs)
    faceType = setBit(faceType, 4, hasFaceNormals)
    faceType = setBit(faceType, 5, hasFaceVertexNormals)
    faceType = setBit(faceType, 6, hasFaceColors)
    faceType = setBit(faceType, 7, hasFaceVertexColors)

    faceData = []

    # order is important, must match order in JSONLoader

    # face type
    # vertex indices
    # material index
    # face uvs index
    # face vertex uvs indices
    # face color index
    # face vertex colors indices

    faceData.append(faceType)

    # must clamp in case on polygons bigger than quads
    for i in range(nVertices):
        index = f.vertices[i] + vertex_offset
        faceData.append(index)

    if hasMaterial:
        index = f.material_index + material_offset
        faceData.append( index )

    if hasFaceVertexUvs:
        # One group of per-corner UV indices for every UV layer.
        for layer_index, uvs in enumerate(uv_layers):
            uv = get_uv_indices(faceIndex, uvs, mesh, layer_index)
            for i in range(nVertices):
                index = uv[i]
                faceData.append(index)

    if hasFaceVertexNormals:
        n = get_normal_indices(f.vertices, normals, mesh)
        for i in range(nVertices):
            index = n[i]
            faceData.append(index)

    if hasFaceVertexColors:
        c = get_color_indices(faceIndex, colors, mesh)
        for i in range(nVertices):
            index = c[i]
            faceData.append(index)

    return ",".join( map(str, faceData) )
# #####################################################
# Model exporter - normals
# #####################################################
def extract_vertex_normals(mesh, normals, count):
    """Register each unique vertex normal of *mesh* in the shared dict.

    *normals* maps veckey3d keys to sequential indices; *count* is the next
    free index.  Returns the updated count so callers can chain meshes.
    """
    for face in get_faces(mesh):
        for vertex_index in face.vertices:
            key = veckey3d(mesh.vertices[vertex_index].normal)
            if key not in normals:
                normals[key] = count
                count += 1
    return count
def generate_normals(normals, option_normals):
    """Serialize the registered normals ordered by their assigned index.

    Returns an empty string when normal export is disabled.
    """
    if not option_normals:
        return ""
    ordered = [key for key, _ in sorted(normals.items(), key=operator.itemgetter(1))]
    return ",".join(generate_normal(key) for key in ordered)
# #####################################################
# Model exporter - vertex colors
# #####################################################
def extract_vertex_colors(mesh, colors, count):
    """Register each unique face-vertex color of *mesh* in the shared dict.

    *colors* maps hexcolor keys to sequential indices; returns the updated
    running index so callers can chain meshes.
    """
    color_layer = mesh.tessface_vertex_colors.active.data
    for face_index, face in enumerate(get_faces(mesh)):
        face_data = color_layer[face_index]
        for col in (face_data.color1, face_data.color2, face_data.color3, face_data.color4):
            key = hexcolor(col)
            if key not in colors:
                colors[key] = count
                count += 1
    return count
def generate_vertex_colors(colors, option_colors):
    """Serialize the registered vertex colors ordered by assigned index.

    Returns an empty string when color export is disabled.
    """
    if not option_colors:
        return ""
    ordered = [key for key, _ in sorted(colors.items(), key=operator.itemgetter(1))]
    return ",".join(generate_vertex_color(key) for key in ordered)
# #####################################################
# Model exporter - UVs
# #####################################################
def extract_uvs(mesh, uv_layers, counts):
    """Accumulate unique UV coordinates of *mesh* into *uv_layers*.

    uv_layers is a list of {uv-key: index} dicts, one per UV texture layer;
    counts holds the running index counter for each layer.  Both are
    extended in place so several meshes share one global UV index space.
    Returns the updated counts list.
    """
    for index, layer in enumerate(mesh.tessface_uv_textures):
        # Grow the per-layer bookkeeping lazily the first time a layer index
        # is seen across all meshes.
        if len(uv_layers) <= index:
            uvs = {}
            count = 0
            uv_layers.append(uvs)
            counts.append(count)
        else:
            uvs = uv_layers[index]
            count = counts[index]
        uv_layer = layer.data
        for face_index, face in enumerate(get_faces(mesh)):
            for uv_index, uv in enumerate(uv_layer[face_index].uv):
                key = veckey2d(uv)
                if key not in uvs:
                    uvs[key] = count
                    count += 1
        counts[index] = count
    return counts
def generate_uvs(uv_layers, option_uv_coords):
    """Serialize every UV layer as a bracketed chunk, comma-joined.

    Returns the literal "[]" when UV export is disabled.
    """
    if not option_uv_coords:
        return "[]"
    layers = []
    for uvs in uv_layers:
        ordered = [key for key, _ in sorted(uvs.items(), key=operator.itemgetter(1))]
        layers.append(",".join(generate_uv(key) for key in ordered))
    return ",".join("[%s]" % layer for layer in layers)
# ##############################################################################
# Model exporter - armature
# (only the first armature will exported)
# ##############################################################################
def get_armature():
    """Return (first armature datablock, its scene object) or (None, None).

    Warnings are printed when either part is missing.
    """
    if not bpy.data.armatures:
        print("Warning: no armatures in the scene")
        return None, None
    armature = bpy.data.armatures[0]
    # Someone please figure out a proper way to get the armature node
    for candidate in bpy.data.objects:
        if candidate.type == 'ARMATURE':
            return armature, candidate
    print("Warning: no node of type 'ARMATURE' in the scene")
    return None, None
# ##############################################################################
# Model exporter - bones
# (only the first armature will exported)
# ##############################################################################
def generate_bones(option_bones, flipyz):
    """Serialize the first armature's bones into JSON hierarchy entries.

    Each bone stores its parent index (-1 for roots) and its rest position
    relative to the parent, transformed into world space.  Returns
    (bones_string, bone_count); ("", 0) when disabled or no armature exists.
    """
    if not option_bones:
        return "", 0
    armature, armatureObject = get_armature()
    if armature is None or armatureObject is None:
        return "", 0
    hierarchy = []
    TEMPLATE_BONE = '{"parent":%d,"name":"%s","pos":[%g,%g,%g],"rotq":[0,0,0,1]}'
    for bone in armature.bones:
        if bone.parent is None:
            bonePos = bone.head_local
            boneIndex = -1
        else:
            bonePos = bone.head_local - bone.parent.head_local
            # Map the parent bone to its index; bone names are unique within
            # an armature, so the first match is the only match.
            boneIndex = 0
            for i, parent in enumerate(armature.bones):
                if parent.name == bone.parent.name:
                    boneIndex = i
                    break
        bonePosWorld = armatureObject.matrix_world * bonePos
        if flipyz:
            joint = TEMPLATE_BONE % (boneIndex, bone.name, bonePosWorld.x, bonePosWorld.z, -bonePosWorld.y)
        else:
            joint = TEMPLATE_BONE % (boneIndex, bone.name, bonePosWorld.x, bonePosWorld.y, bonePosWorld.z)
        hierarchy.append(joint)
    return ",".join(hierarchy), len(armature.bones)
# ##############################################################################
# Model exporter - skin indices and weights
# ##############################################################################
def generate_indices_and_weights(meshes, option_skinning):
    """Build the comma-separated skin index / weight streams for all meshes.

    For every vertex the MAX_INFLUENCES strongest vertex-group influences
    are mapped to armature bone indices; missing influences are padded with
    0/0 so each vertex contributes exactly MAX_INFLUENCES pairs.  Returns
    (indices_string, weights_string) — both empty when skinning is disabled
    or the scene has no armature.
    """
    if not option_skinning or len(bpy.data.armatures) == 0:
        return "", ""
    indices = []
    weights = []
    armature, armatureObject = get_armature()
    for mesh, object in meshes:
        i = 0
        mesh_index = -1
        # find the original object
        # NOTE(review): matches by mesh name or object identity, then
        # re-indexes into bpy.data.objects — assumes the enumeration order is
        # stable between the two scans; confirm.
        for obj in bpy.data.objects:
            if obj.name == mesh.name or obj == object:
                mesh_index = i
            i += 1
        if mesh_index == -1:
            print("generate_indices: couldn't find object for mesh", mesh.name)
            continue
        object = bpy.data.objects[mesh_index]
        for vertex in mesh.vertices:
            # sort bones by influence
            bone_array = []
            for group in vertex.groups:
                index = group.group
                weight = group.weight
                bone_array.append( (index, weight) )
            bone_array.sort(key = operator.itemgetter(1), reverse=True)
            # select first N bones
            for i in range(MAX_INFLUENCES):
                if i < len(bone_array):
                    bone_proxy = bone_array[i]
                    found = 0
                    index = bone_proxy[0]
                    weight = bone_proxy[1]
                    # Translate the vertex-group name into the bone index.
                    for j, bone in enumerate(armature.bones):
                        if object.vertex_groups[index].name == bone.name:
                            indices.append('%d' % j)
                            weights.append('%g' % weight)
                            found = 1
                            break
                    if found != 1:
                        indices.append('0')
                        weights.append('0')
                else:
                    # Pad vertices that have fewer than MAX_INFLUENCES groups.
                    indices.append('0')
                    weights.append('0')
    indices_string = ",".join(indices)
    weights_string = ",".join(weights)
    return indices_string, weights_string
# ##############################################################################
# Model exporter - skeletal animation
# (only the first action will exported)
# ##############################################################################
def generate_animation(option_animation_skeletal, option_frame_step, flipyz, action_index):
    """Serialize one skeletal animation (bpy.data.actions[action_index]).

    Samples every bone at option_frame_step intervals, emitting full
    keyframes (pos+rot+scl) at the first and last frames and sparse pos/rot
    keys in between.  Returns the animation JSON body as a string; an empty
    string when skeletal export is disabled or no action/armature exists.
    """
    # Single check (the original tested len(bpy.data.actions) == 0 twice).
    if not option_animation_skeletal or len(bpy.data.actions) == 0:
        return ""
    # TODO: Add scaling influences
    action = bpy.data.actions[action_index]
    armature, armatureObject = get_armature()
    if armature is None or armatureObject is None:
        # Must return a plain string: generate_all_animations() concatenates
        # the result (the original returned a ("", 0) tuple here, which
        # would raise TypeError in the caller).
        return ""
    armatureMat = armatureObject.matrix_world
    l,r,s = armatureMat.decompose()
    armatureRotMat = r.to_matrix()
    parents = []
    parent_index = -1
    fps = bpy.data.scenes[0].render.fps
    end_frame = action.frame_range[1]
    start_frame = action.frame_range[0]
    frame_length = end_frame - start_frame
    TEMPLATE_KEYFRAME_FULL = '{"time":%g,"pos":[%g,%g,%g],"rot":[%g,%g,%g,%g],"scl":[1,1,1]}'
    TEMPLATE_KEYFRAME = '{"time":%g,"pos":[%g,%g,%g],"rot":[%g,%g,%g,%g]}'
    TEMPLATE_KEYFRAME_POS = '{"time":%g,"pos":[%g,%g,%g]}'
    TEMPLATE_KEYFRAME_ROT = '{"time":%g,"rot":[%g,%g,%g,%g]}'
    for hierarchy in armature.bones:
        keys = []
        for frame in range(int(start_frame), int(end_frame / option_frame_step) + 1):
            pos, pchange = position(hierarchy, frame * option_frame_step, action, armatureMat)
            rot, rchange = rotation(hierarchy, frame * option_frame_step, action, armatureRotMat)
            if flipyz:
                px, py, pz = pos.x, pos.z, -pos.y
                rx, ry, rz, rw = rot.x, rot.z, -rot.y, rot.w
            else:
                px, py, pz = pos.x, pos.y, pos.z
                rx, ry, rz, rw = rot.x, rot.y, rot.z, rot.w
            # START-FRAME: needs pos, rot and scl attributes (required frame)
            if frame == int(start_frame):
                time = (frame * option_frame_step - start_frame) / fps
                keyframe = TEMPLATE_KEYFRAME_FULL % (time, px, py, pz, rx, ry, rz, rw)
                keys.append(keyframe)
            # END-FRAME: needs pos, rot and scl attributes with animation length (required frame)
            elif frame == int(end_frame / option_frame_step):
                time = frame_length / fps
                keyframe = TEMPLATE_KEYFRAME_FULL % (time, px, py, pz, rx, ry, rz, rw)
                keys.append(keyframe)
            # MIDDLE-FRAME: needs only one of the attributes, can be an empty frame (optional frame)
            elif pchange == True or rchange == True:
                time = (frame * option_frame_step - start_frame) / fps
                if pchange == True and rchange == True:
                    keyframe = TEMPLATE_KEYFRAME % (time, px, py, pz, rx, ry, rz, rw)
                elif pchange == True:
                    keyframe = TEMPLATE_KEYFRAME_POS % (time, px, py, pz)
                elif rchange == True:
                    keyframe = TEMPLATE_KEYFRAME_ROT % (time, rx, ry, rz, rw)
                keys.append(keyframe)
        keys_string = ",".join(keys)
        # NOTE(review): parent_index simply counts bones in iteration order,
        # which presumes a chain-like hierarchy — confirm against rigs with
        # branching bone trees.
        parent = '{"parent":%d,"keys":[%s]}' % (parent_index, keys_string)
        parent_index += 1
        parents.append(parent)
    hierarchy_string = ",".join(parents)
    animation_string = '"name":"%s","fps":%d,"length":%g,"hierarchy":[%s]' % (action.name, fps, (frame_length / fps), hierarchy_string)
    return animation_string
def generate_all_animations(option_animation_skeletal, option_frame_step, flipyz):
    """Serialize every action as a brace-wrapped animation chunk.

    Chunks are separated by ", \\n"; returns "" when skeletal export is off
    or there are no actions.
    """
    if not option_animation_skeletal:
        return ""
    chunks = []
    for action_index in range(len(bpy.data.actions)):
        body = generate_animation(option_animation_skeletal, option_frame_step, flipyz, action_index)
        chunks.append("{" + body + "}")
    return ", \n".join(chunks)
def handle_position_channel(channel, frame, position):
    """Apply one location f-curve to *position* at *frame*.

    The channel's array_index selects the x/y/z component; the component is
    always overwritten with the evaluated curve value.  Returns True only
    when a keyframe sits exactly on *frame*.
    """
    axis = channel.array_index
    if axis not in (0, 1, 2):
        return False
    keyed = any(keyframe.co[0] == frame for keyframe in channel.keyframe_points)
    value = channel.evaluate(frame)
    if axis == 0:
        position.x = value
    elif axis == 1:
        position.y = value
    else:
        position.z = value
    return keyed
def position(bone, frame, action, armatureMatrix):
    """Evaluate a bone's keyframed location at *frame* in armature space.

    Returns (vector, changed) where changed is True when any location
    channel has a keyframe exactly on *frame*.
    """
    position = mathutils.Vector((0,0,0))
    change = False
    ngroups = len(action.groups)
    if ngroups > 0:
        # Channels grouped per bone: locate this bone's group by name.
        # NOTE(review): silently falls back to group 0 when the bone has no
        # group (rotation() skips instead) — confirm this asymmetry is
        # intended.
        index = 0
        for i in range(ngroups):
            if action.groups[i].name == bone.name:
                index = i
        for channel in action.groups[index].channels:
            if "location" in channel.data_path:
                hasChanged = handle_position_channel(channel, frame, position)
                change = change or hasChanged
    else:
        # Animation stored as raw f-curves: match by quoted bone name in the
        # data path.
        bone_label = '"%s"' % bone.name
        for channel in action.fcurves:
            data_path = channel.data_path
            if bone_label in data_path and "location" in data_path:
                hasChanged = handle_position_channel(channel, frame, position)
                change = change or hasChanged
    # Convert from bone-local to parent-relative space (pre-2.80 mathutils
    # overloads * for vector/matrix products).
    position = position * bone.matrix_local.inverted()
    if bone.parent == None:
        position.x += bone.head.x
        position.y += bone.head.y
        position.z += bone.head.z
    else:
        parent = bone.parent
        parentInvertedLocalMatrix = parent.matrix_local.inverted()
        parentHeadTailDiff = parent.tail_local - parent.head_local
        position.x += (bone.head * parentInvertedLocalMatrix).x + parentHeadTailDiff.x
        position.y += (bone.head * parentInvertedLocalMatrix).y + parentHeadTailDiff.y
        position.z += (bone.head * parentInvertedLocalMatrix).z + parentHeadTailDiff.z
    return armatureMatrix*position, change
def handle_rotation_channel(channel, frame, rotation):
    """Apply one quaternion f-curve to *rotation* at *frame*.

    Blender stores quaternion f-curves in WXYZ order, so array_index 0 maps
    to w.  The selected component is always overwritten with the evaluated
    value; returns True only when a keyframe sits exactly on *frame*.
    """
    axis = channel.array_index
    if axis not in (0, 1, 2, 3):
        return False
    keyed = any(keyframe.co[0] == frame for keyframe in channel.keyframe_points)
    value = channel.evaluate(frame)
    if axis == 0:
        rotation.w = value
    elif axis == 1:
        rotation.x = value
    elif axis == 2:
        rotation.y = value
    else:
        rotation.z = value
    return keyed
def rotation(bone, frame, action, armatureMatrix):
    """Evaluate a bone's keyframed quaternion rotation at *frame*.

    Returns (rotation, change): rotation is a 4-component vector (x, y, z,
    w) with its xyz part transformed into armature space; change is True
    when any quaternion channel has a keyframe exactly on *frame*.
    """
    # TODO: calculate rotation also from rotation_euler channels
    rotation = mathutils.Vector((0,0,0,1))
    change = False
    ngroups = len(action.groups)
    # animation grouped by bones
    if ngroups > 0:
        index = -1
        for i in range(ngroups):
            if action.groups[i].name == bone.name:
                index = i
        # Unlike position(), bones without a matching group are skipped.
        if index > -1:
            for channel in action.groups[index].channels:
                if "quaternion" in channel.data_path:
                    hasChanged = handle_rotation_channel(channel, frame, rotation)
                    change = change or hasChanged
    # animation in raw fcurves
    else:
        bone_label = '"%s"' % bone.name
        for channel in action.fcurves:
            data_path = channel.data_path
            if bone_label in data_path and "quaternion" in data_path:
                hasChanged = handle_rotation_channel(channel, frame, rotation)
                change = change or hasChanged
    # Only the xyz part is rotated into armature space; w is left as-is.
    rot3 = rotation.to_3d()
    rotation.xyz = rot3 * bone.matrix_local.inverted()
    rotation.xyz = armatureMatrix * rotation.xyz
    return rotation, change
# #####################################################
# Model exporter - materials
# #####################################################
def generate_color(i):
    """Return a debug color for material index *i*.

    Indices below len(COLORS) map to the hardcoded palette so ordering is
    well defined; beyond that a random color is drawn (callers must seed the
    random number generator for deterministic output).
    """
    if i < len(COLORS):
        return COLORS[i]
    return int(0xffffff * random.random())
def generate_mtl(materials):
    """Generate dummy materials keyed by name.

    Each entry carries only debug name/index/color fields plus a disabled
    vertexColors flag; real properties are merged in later.
    """
    mtl = {}
    for name, index in materials.items():
        mtl[name] = {
            "DbgName": name,
            "DbgIndex": index,
            "DbgColor": generate_color(index),
            "vertexColors": False
        }
    return mtl
def value2string(v):
    """Serialize a material property value into a JSON-ish literal.

    Strings are double-quoted unless they already look like a hex literal
    ("0x..."), booleans become lowercase true/false, lists are serialized
    recursively; everything else falls back to str().
    """
    # isinstance instead of type(v) == T (idiomatic, accepts subclasses).
    if isinstance(v, str) and not v.startswith("0x"):
        return '"%s"' % v
    if isinstance(v, bool):
        return str(v).lower()
    if isinstance(v, list):
        return "[%s]" % (", ".join(value2string(x) for x in v))
    return str(v)
def generate_materials(mtl, materials, draw_type):
    """Generate JS array of materials objects.

    Serializes each entry of *mtl* (adding debug name/index/color fields in
    place) and returns (joined_string, material_count).  Output is ordered
    by the index stored in *materials* so face material indices line up.
    """
    mtl_array = []
    for m in mtl:
        index = materials[m]
        # add debug information
        # materials should be sorted according to how
        # they appeared in OBJ file (for the first time)
        # this index is identifier used in face definitions
        mtl[m]['DbgName'] = m
        mtl[m]['DbgIndex'] = index
        mtl[m]['DbgColor'] = generate_color(index)
        # Bounds/wire display objects export as red wireframe materials.
        if draw_type in [ "BOUNDS", "WIRE" ]:
            mtl[m]['wireframe'] = True
            mtl[m]['DbgColor'] = 0xff0000
        mtl_raw = ",\n".join(['\t\t"%s" : %s' % (n, value2string(v)) for n,v in sorted(mtl[m].items())])
        mtl_string = "\t{\n%s\n\t}" % mtl_raw
        mtl_array.append([index, mtl_string])
    return ",\n\n".join([m for i,m in sorted(mtl_array)]), len(mtl_array)
def extract_materials(mesh, scene, option_colors, option_copy_textures, filepath):
    """Extract a {name: properties} dict for every material on *mesh*.

    Collects colors (pre-multiplied by their intensity factors),
    transparency, specular hardness, texture maps (copying image files next
    to *filepath* when requested) and the custom THREE_* settings.
    (The unused local `world = scene.world` from the original was removed;
    *scene* is kept for interface compatibility.)
    """
    materials = {}
    for m in mesh.materials:
        if m:
            materials[m.name] = {}
            material = materials[m.name]
            material['colorDiffuse'] = [m.diffuse_intensity * m.diffuse_color[0],
                                        m.diffuse_intensity * m.diffuse_color[1],
                                        m.diffuse_intensity * m.diffuse_color[2]]
            material['colorSpecular'] = [m.specular_intensity * m.specular_color[0],
                                        m.specular_intensity * m.specular_color[1],
                                        m.specular_intensity * m.specular_color[2]]
            material['colorAmbient'] = [m.ambient * material['colorDiffuse'][0],
                                        m.ambient * material['colorDiffuse'][1],
                                        m.ambient * material['colorDiffuse'][2]]
            material['transparency'] = m.alpha
            # not sure about mapping values to Blinn-Phong shader
            # Blender uses INT from [1, 511] with default 0
            # http://www.blender.org/documentation/blender_python_api_2_54_0/bpy.types.Material.html#bpy.types.Material.specular_hardness
            material["specularCoef"] = m.specular_hardness
            textures = guess_material_textures(m)
            handle_texture('diffuse', textures, material, filepath, option_copy_textures)
            handle_texture('light', textures, material, filepath, option_copy_textures)
            handle_texture('normal', textures, material, filepath, option_copy_textures)
            handle_texture('specular', textures, material, filepath, option_copy_textures)
            handle_texture('bump', textures, material, filepath, option_copy_textures)
            material["vertexColors"] = m.THREE_useVertexColors and option_colors
            # can't really use this reliably to tell apart Phong from Lambert
            # as Blender defaults to non-zero specular color
            #if m.specular_intensity > 0.0 and (m.specular_color[0] > 0 or m.specular_color[1] > 0 or m.specular_color[2] > 0):
            #    material['shading'] = "Phong"
            #else:
            #    material['shading'] = "Lambert"
            # Normal-mapped materials must be Phong to get the normal shader.
            if textures['normal']:
                material['shading'] = "Phong"
            else:
                material['shading'] = m.THREE_materialType
            material['blending'] = m.THREE_blendingType
            material['depthWrite'] = m.THREE_depthWrite
            material['depthTest'] = m.THREE_depthTest
            material['transparent'] = m.use_transparency
    return materials
def generate_materials_string(mesh, scene, option_colors, draw_type, option_copy_textures, filepath, offset):
    """Build the JSON materials array for one mesh.

    Material slots are numbered starting at *offset* so several meshes share
    one global material index space; empty slots get dummy names and an
    empty mesh gets a single 'default' material.  Returns the result of
    generate_materials(): (materials_string, count).
    """
    random.seed(42) # to get well defined color order for debug materials
    materials = {}
    if mesh.materials:
        for slot_index, m in enumerate(mesh.materials):
            mat_id = slot_index + offset
            if m:
                materials[m.name] = mat_id
            else:
                materials["undefined_dummy_%0d" % mat_id] = mat_id
    if not materials:
        materials = { 'default': 0 }
    # default dummy materials
    mtl = generate_mtl(materials)
    # extract real materials from the mesh
    mtl.update(extract_materials(mesh, scene, option_colors, option_copy_textures, filepath))
    return generate_materials(mtl, materials, draw_type)
def handle_texture(id, textures, material, filepath, option_copy_textures):
    """Fill map/repeat/wrap entries on *material* for texture kind *id*.

    id is one of 'diffuse', 'light', 'normal', 'specular', 'bump'; does
    nothing when that texture kind is absent.  Optionally copies the image
    file next to the export path.
    """
    if textures[id]:
        texName = 'map%s' % id.capitalize()
        repeatName = 'map%sRepeat' % id.capitalize()
        wrapName = 'map%sWrap' % id.capitalize()
        slot = textures[id]['slot']
        texture = textures[id]['texture']
        image = texture.image
        fname = extract_texture_filename(image)
        material[texName] = fname
        if option_copy_textures:
            save_image(image, fname, filepath)
        # Only non-default tiling settings are written out.
        if texture.repeat_x != 1 or texture.repeat_y != 1:
            material[repeatName] = [texture.repeat_x, texture.repeat_y]
        if texture.extension == "REPEAT":
            wrap_x = "repeat"
            wrap_y = "repeat"
            if texture.use_mirror_x:
                wrap_x = "mirror"
            if texture.use_mirror_y:
                wrap_y = "mirror"
            material[wrapName] = [wrap_x, wrap_y]
        if slot.use_map_normal:
            if slot.normal_factor != 1.0:
                if id == "bump":
                    material['mapBumpScale'] = slot.normal_factor
                else:
                    material['mapNormalFactor'] = slot.normal_factor
# #####################################################
# ASCII model generator
# #####################################################
def generate_ascii_model(meshes, morphs,
                        scene,
                        option_vertices,
                        option_vertices_truncate,
                        option_faces,
                        option_normals,
                        option_uv_coords,
                        option_materials,
                        option_colors,
                        option_bones,
                        option_skinning,
                        align_model,
                        flipyz,
                        option_scale,
                        option_copy_textures,
                        filepath,
                        option_animation_morph,
                        option_animation_skeletal,
                        option_frame_step):
    """Assemble the complete ASCII three.js model from pre-extracted meshes.

    Merges vertices/normals/colors/UVs/materials of all meshes into shared
    index spaces, optionally aligns the model, and adds morph targets,
    bones, skinning data and skeletal animations.  Returns
    (file_text, model_text).
    """
    vertices = []
    vertex_offset = 0
    vertex_offsets = []
    nnormal = 0
    normals = {}
    ncolor = 0
    colors = {}
    nuvs = []
    uv_layers = []
    nmaterial = 0
    materials = []
    for mesh, object in meshes:
        vertexUV = len(mesh.uv_textures) > 0
        vertexColors = len(mesh.vertex_colors) > 0
        mesh_extract_colors = option_colors and vertexColors
        mesh_extract_uvs = option_uv_coords and vertexUV
        # Only extract UVs/colors when the mesh actually has an active layer.
        if vertexUV:
            active_uv_layer = mesh.uv_textures.active
            if not active_uv_layer:
                mesh_extract_uvs = False
        if vertexColors:
            active_col_layer = mesh.vertex_colors.active
            if not active_col_layer:
                mesh_extract_colors = False
        # NOTE(review): vertex_offset is advanced by len(vertices) *before*
        # the extend, so the recorded offsets lag one mesh behind; the list
        # appears unused downstream here (generate_faces takes the raw
        # meshes) — confirm before relying on vertex_offsets.
        vertex_offsets.append(vertex_offset)
        vertex_offset += len(vertices)
        vertices.extend(mesh.vertices[:])
        if option_normals:
            nnormal = extract_vertex_normals(mesh, normals, nnormal)
        if mesh_extract_colors:
            ncolor = extract_vertex_colors(mesh, colors, ncolor)
        if mesh_extract_uvs:
            nuvs = extract_uvs(mesh, uv_layers, nuvs)
        if option_materials:
            mesh_materials, nmaterial = generate_materials_string(mesh, scene, mesh_extract_colors, object.draw_type, option_copy_textures, filepath, nmaterial)
            materials.append(mesh_materials)
    morphTargets_string = ""
    nmorphTarget = 0
    if option_animation_morph:
        chunks = []
        for i, morphVertices in enumerate(morphs):
            morphTarget = '{ "name": "%s_%06d", "vertices": [%s] }' % ("animation", i, morphVertices)
            chunks.append(morphTarget)
        morphTargets_string = ",\n\t".join(chunks)
        nmorphTarget = len(morphs)
    # Optional model alignment: 1 = center, 2 = bottom, 3 = top.
    if align_model == 1:
        center(vertices)
    elif align_model == 2:
        bottom(vertices)
    elif align_model == 3:
        top(vertices)
    faces_string, nfaces = generate_faces(normals, uv_layers, colors, meshes, option_normals, option_colors, option_uv_coords, option_materials, option_faces)
    bones_string, nbone = generate_bones(option_bones, flipyz)
    indices_string, weights_string = generate_indices_and_weights(meshes, option_skinning)
    materials_string = ",\n\n".join(materials)
    model_string = TEMPLATE_MODEL_ASCII % {
        "scale" : option_scale,
        "uvs" : generate_uvs(uv_layers, option_uv_coords),
        "normals" : generate_normals(normals, option_normals),
        "colors" : generate_vertex_colors(colors, option_colors),
        "materials" : materials_string,
        "vertices" : generate_vertices(vertices, option_vertices_truncate, option_vertices),
        "faces" : faces_string,
        "morphTargets" : morphTargets_string,
        "bones" : bones_string,
        "indices" : indices_string,
        "weights" : weights_string,
        "animations" : generate_all_animations(option_animation_skeletal, option_frame_step, flipyz)
    }
    text = TEMPLATE_FILE_ASCII % {
        "nvertex" : len(vertices),
        "nface" : nfaces,
        "nuvs" : ",".join("%d" % n for n in nuvs),
        "nnormal" : nnormal,
        "ncolor" : ncolor,
        "nmaterial" : nmaterial,
        "nmorphTarget": nmorphTarget,
        "nbone" : nbone,
        "model" : model_string
    }
    return text, model_string
# #####################################################
# Model exporter - export single mesh
# #####################################################
def extract_meshes(objects, scene, export_single_model, option_scale, flipyz):
    """Convert exportable MESH objects into temporary tessellated meshes.

    Applies modifiers, optionally bakes the object's world transform (and a
    Y/Z flip) for single-model export, and scales the result.  Returns a
    list of [mesh, original_object] pairs; callers must free the temporary
    meshes via bpy.data.meshes.remove().
    """
    meshes = []
    for object in objects:
        if object.type == "MESH" and object.THREE_exportGeometry:
            # collapse modifiers into mesh
            mesh = object.to_mesh(scene, True, 'RENDER')
            if not mesh:
                raise Exception("Error, could not get mesh data from object [%s]" % object.name)
            # preserve original name
            mesh.name = object.name
            if export_single_model:
                if flipyz:
                    # that's what Blender's native export_obj.py does to flip YZ
                    X_ROT = mathutils.Matrix.Rotation(-math.pi/2, 4, 'X')
                    mesh.transform(X_ROT * object.matrix_world)
                else:
                    mesh.transform(object.matrix_world)
            mesh.update(calc_tessface=True)
            mesh.calc_normals()
            mesh.calc_tessface()
            mesh.transform(mathutils.Matrix.Scale(option_scale, 4))
            meshes.append([mesh, object])
    return meshes
def generate_mesh_string(objects, scene,
                        option_vertices,
                        option_vertices_truncate,
                        option_faces,
                        option_normals,
                        option_uv_coords,
                        option_materials,
                        option_colors,
                        option_bones,
                        option_skinning,
                        align_model,
                        flipyz,
                        option_scale,
                        export_single_model,
                        option_copy_textures,
                        filepath,
                        option_animation_morph,
                        option_animation_skeletal,
                        option_frame_step):
    """Build the full ASCII model text (plus bare model chunk) for *objects*.

    When morph animation is enabled the scene is stepped through its frame
    range and every sampled frame contributes one morph target; the first
    frame's alignment offset is reused so all targets stay registered.
    Temporary meshes are freed before returning (file_text, model_text).
    """
    meshes = extract_meshes(objects, scene, export_single_model, option_scale, flipyz)
    morphs = []
    if option_animation_morph:
        original_frame = scene.frame_current # save animation state
        scene_frames = range(scene.frame_start, scene.frame_end + 1, option_frame_step)
        for index, frame in enumerate(scene_frames):
            scene.frame_set(frame, 0.0)
            anim_meshes = extract_meshes(objects, scene, export_single_model, option_scale, flipyz)
            frame_vertices = []
            for mesh, object in anim_meshes:
                frame_vertices.extend(mesh.vertices[:])
            # Compute the alignment offset once (first frame) and reuse it so
            # every morph target is shifted consistently.
            if index == 0:
                if align_model == 1:
                    offset = center(frame_vertices)
                elif align_model == 2:
                    offset = bottom(frame_vertices)
                elif align_model == 3:
                    offset = top(frame_vertices)
                else:
                    offset = False
            else:
                if offset:
                    translate(frame_vertices, offset)
            morphVertices = generate_vertices(frame_vertices, option_vertices_truncate, option_vertices)
            morphs.append(morphVertices)
            # remove temp meshes
            for mesh, object in anim_meshes:
                bpy.data.meshes.remove(mesh)
        scene.frame_set(original_frame, 0.0) # restore animation state
    text, model_string = generate_ascii_model(meshes, morphs,
                                scene,
                                option_vertices,
                                option_vertices_truncate,
                                option_faces,
                                option_normals,
                                option_uv_coords,
                                option_materials,
                                option_colors,
                                option_bones,
                                option_skinning,
                                align_model,
                                flipyz,
                                option_scale,
                                option_copy_textures,
                                filepath,
                                option_animation_morph,
                                option_animation_skeletal,
                                option_frame_step)
    # remove temp meshes
    for mesh, object in meshes:
        bpy.data.meshes.remove(mesh)
    return text, model_string
def export_mesh(objects,
                scene, filepath,
                option_vertices,
                option_vertices_truncate,
                option_faces,
                option_normals,
                option_uv_coords,
                option_materials,
                option_colors,
                option_bones,
                option_skinning,
                align_model,
                flipyz,
                option_scale,
                export_single_model,
                option_copy_textures,
                option_animation_morph,
                option_animation_skeletal,
                option_frame_step):
    """Export single mesh.

    Thin wrapper: generates the ASCII model text via generate_mesh_string()
    and writes it to *filepath*.
    """
    text, model_string = generate_mesh_string(objects,
                                scene,
                                option_vertices,
                                option_vertices_truncate,
                                option_faces,
                                option_normals,
                                option_uv_coords,
                                option_materials,
                                option_colors,
                                option_bones,
                                option_skinning,
                                align_model,
                                flipyz,
                                option_scale,
                                export_single_model,
                                option_copy_textures,
                                filepath,
                                option_animation_morph,
                                option_animation_skeletal,
                                option_frame_step)
    write_file(filepath, text)
    print("writing", filepath, "done")
# #####################################################
# Scene exporter - render elements
# #####################################################
def generate_quat(quat):
    """Format a quaternion via TEMPLATE_VEC4 in x, y, z, w order."""
    components = (quat.x, quat.y, quat.z, quat.w)
    return TEMPLATE_VEC4 % components
def generate_vec4(vec):
    """Format the first four components of *vec* via TEMPLATE_VEC4."""
    x, y, z, w = vec[0], vec[1], vec[2], vec[3]
    return TEMPLATE_VEC4 % (x, y, z, w)
def generate_vec3(vec):
    """Format the first three components of *vec* via TEMPLATE_VEC3."""
    x, y, z = vec[0], vec[1], vec[2]
    return TEMPLATE_VEC3 % (x, y, z)
def generate_vec2(vec):
    """Format the first two components of *vec* via TEMPLATE_VEC2."""
    u, v = vec[0], vec[1]
    return TEMPLATE_VEC2 % (u, v)
def generate_hex(number):
    """Render *number* through the shared hex template."""
    return TEMPLATE_HEX % number
def generate_string(s):
    """Quote *s* using the shared string template."""
    return TEMPLATE_STRING % s
def generate_string_list(src_list):
    """Quote every item of *src_list* and join them with ', '."""
    quoted = [generate_string(item) for item in src_list]
    return ", ".join(quoted)
def generate_section(label, content):
    """Wrap *content* in a labelled section using TEMPLATE_SECTION."""
    return TEMPLATE_SECTION % (label, content)
def get_mesh_filename(mesh):
    """Derive the exported .js filename from the mesh's data-block name."""
    return "%s.js" % sanitize(mesh["data"]["name"])
def generate_material_id_list(materials):
    """Collect the name of every material (slot) in *materials*."""
    return [material.name for material in materials]
def generate_group_id_list(obj):
    """List the names of every Blender group that contains *obj*."""
    return [group.name for group in bpy.data.groups if obj.name in group.objects]
def generate_bool_property(property):
    """Render a Python truth value as a JSON boolean literal."""
    return "true" if property else "false"
# #####################################################
# Scene exporter - objects
# #####################################################
def generate_objects(data):
    """Serialize the scene-graph objects into JSON chunks.

    MESH objects flagged for geometry export become TEMPLATE_OBJECT entries
    referencing geometry/material/group ids; EMPTY objects (and meshes with
    geometry export disabled) become transform-only TEMPLATE_EMPTY entries.
    Returns (joined_string, chunk_count).
    """
    chunks = []
    for obj in data["objects"]:
        if obj.type == "MESH" and obj.THREE_exportGeometry:
            object_id = obj.name
            #if len(obj.modifiers) > 0:
            #    geo_name = obj.name
            #else:
            geo_name = obj.data.name
            geometry_id = "geo_%s" % geo_name
            material_ids = generate_material_id_list(obj.material_slots)
            group_ids = generate_group_id_list(obj)
            # Optional Y-up conversion baked into the world matrix.
            if data["flipyz"]:
                matrix_world = ROTATE_X_PI2 * obj.matrix_world
            else:
                matrix_world = obj.matrix_world
            position, quaternion, scale = matrix_world.decompose()
            rotation = quaternion.to_euler("ZYX")
            # use empty material string for multi-material objects
            # this will trigger use of MeshFaceMaterial in SceneLoader
            material_string = '""'
            if len(material_ids) == 1:
                material_string = generate_string_list(material_ids)
            group_string = ""
            if len(group_ids) > 0:
                group_string = generate_string_list(group_ids)
            castShadow = obj.THREE_castShadow
            receiveShadow = obj.THREE_receiveShadow
            doubleSided = obj.THREE_doubleSided
            visible = True
            geometry_string = generate_string(geometry_id)
            object_string = TEMPLATE_OBJECT % {
                "object_id" : generate_string(object_id),
                "geometry_id" : geometry_string,
                "group_id" : group_string,
                "material_id" : material_string,
                "position" : generate_vec3(position),
                "rotation" : generate_vec3(rotation),
                "quaternion" : generate_quat(quaternion),
                "scale" : generate_vec3(scale),
                "castShadow" : generate_bool_property(castShadow),
                "receiveShadow" : generate_bool_property(receiveShadow),
                "doubleSided" : generate_bool_property(doubleSided),
                "visible" : generate_bool_property(visible)
            }
            chunks.append(object_string)
        elif obj.type == "EMPTY" or (obj.type == "MESH" and not obj.THREE_exportGeometry):
            object_id = obj.name
            group_ids = generate_group_id_list(obj)
            if data["flipyz"]:
                matrix_world = ROTATE_X_PI2 * obj.matrix_world
            else:
                matrix_world = obj.matrix_world
            position, quaternion, scale = matrix_world.decompose()
            rotation = quaternion.to_euler("ZYX")
            group_string = ""
            if len(group_ids) > 0:
                group_string = generate_string_list(group_ids)
            object_string = TEMPLATE_EMPTY % {
                "object_id" : generate_string(object_id),
                "group_id" : group_string,
                "position" : generate_vec3(position),
                "rotation" : generate_vec3(rotation),
                "quaternion" : generate_quat(quaternion),
                "scale" : generate_vec3(scale)
            }
            chunks.append(object_string)
    return ",\n\n".join(chunks), len(chunks)
# #####################################################
# Scene exporter - geometries
# #####################################################
def generate_geometries(data):
    """Emit one geometry declaration per unique exported mesh datablock.

    Geometries are either embedded ("emb_" id) or linked to a per-mesh
    model file, de-duplicated by mesh data name.  Returns
    (joined_string, count).
    """
    chunks = []
    geo_set = set()
    for obj in data["objects"]:
        if obj.type == "MESH" and obj.THREE_exportGeometry:
            #if len(obj.modifiers) > 0:
            #    name = obj.name
            #else:
            name = obj.data.name
            if name not in geo_set:
                geometry_id = "geo_%s" % name
                if data["embed_meshes"]:
                    embed_id = "emb_%s" % name
                    geometry_string = TEMPLATE_GEOMETRY_EMBED % {
                        "geometry_id" : generate_string(geometry_id),
                        "embed_id" : generate_string(embed_id)
                    }
                else:
                    model_filename = os.path.basename(generate_mesh_filename(name, data["filepath"]))
                    geometry_string = TEMPLATE_GEOMETRY_LINK % {
                        "geometry_id" : generate_string(geometry_id),
                        "model_file" : generate_string(model_filename)
                    }
                chunks.append(geometry_string)
                geo_set.add(name)
    return ",\n\n".join(chunks), len(chunks)
# #####################################################
# Scene exporter - textures
# #####################################################
def generate_textures_scene(data):
    """Serialize every image texture in the blend file for the scene export.

    Optionally copies the image files next to the exported scene and records
    non-default repeat/wrap settings.  Returns (joined_string, count).
    """
    chunks = []
    # TODO: extract just textures actually used by some objects in the scene
    for texture in bpy.data.textures:
        if texture.type == 'IMAGE' and texture.image:
            img = texture.image
            texture_id = img.name
            texture_file = extract_texture_filename(img)
            if data["copy_textures"]:
                save_image(img, texture_file, data["filepath"])
            extras = ""
            # Only non-default tiling settings are written out.
            if texture.repeat_x != 1 or texture.repeat_y != 1:
                extras += ',\n "repeat": [%g, %g]' % (texture.repeat_x, texture.repeat_y)
            if texture.extension == "REPEAT":
                wrap_x = "repeat"
                wrap_y = "repeat"
                if texture.use_mirror_x:
                    wrap_x = "mirror"
                if texture.use_mirror_y:
                    wrap_y = "mirror"
                extras += ',\n "wrap": ["%s", "%s"]' % (wrap_x, wrap_y)
            texture_string = TEMPLATE_TEXTURE % {
                "texture_id" : generate_string(texture_id),
                "texture_file" : generate_string(texture_file),
                "extras" : extras
            }
            chunks.append(texture_string)
    return ",\n\n".join(chunks), len(chunks)
def extract_texture_filename(image):
    """Return the basename of the image's absolute, normalized file path."""
    absolute = os.path.normpath(bpy.path.abspath(image.filepath))
    return os.path.basename(absolute)
def save_image(img, name, fpath):
    """Store *img* under *name* in the directory of the export file *fpath*.

    Packed images are unpacked via save_render; on-disk images are copied.
    """
    dst_dir = os.path.dirname(fpath)
    dst_path = os.path.join(dst_dir, name)
    ensure_folder_exist(dst_dir)
    if img.packed_file:
        img.save_render(dst_path)
    else:
        src_path = bpy.path.abspath(img.filepath)
        # Copy to the explicit destination path so the file really ends up
        # under *name* (the original copied into dst_dir keeping the source
        # basename) and skip the copy when source and destination coincide
        # (shutil.copy raises SameFileError in that case).
        if os.path.normpath(src_path) != os.path.normpath(dst_path):
            shutil.copy(src_path, dst_path)
# #####################################################
# Scene exporter - materials
# #####################################################
def extract_material_data(m, option_colors):
    """Collect exportable properties of Blender material *m* (scene export).

    Returns a dict with colors (pre-multiplied by their intensity factors),
    transparency, specular hardness, texture map names/factors and the
    custom THREE_* settings.  Vertex colors are enabled only when
    *option_colors* is set.  (The unused local `world` from the original
    was removed.)
    """
    material = { 'name': m.name }
    material['colorDiffuse'] = [m.diffuse_intensity * m.diffuse_color[0],
                                m.diffuse_intensity * m.diffuse_color[1],
                                m.diffuse_intensity * m.diffuse_color[2]]
    material['colorSpecular'] = [m.specular_intensity * m.specular_color[0],
                                 m.specular_intensity * m.specular_color[1],
                                 m.specular_intensity * m.specular_color[2]]
    material['colorAmbient'] = [m.ambient * material['colorDiffuse'][0],
                                m.ambient * material['colorDiffuse'][1],
                                m.ambient * material['colorDiffuse'][2]]
    material['transparency'] = m.alpha
    # not sure about mapping values to Blinn-Phong shader
    # Blender uses INT from [1,511] with default 0
    # http://www.blender.org/documentation/blender_python_api_2_54_0/bpy.types.Material.html#bpy.types.Material.specular_hardness
    material["specularCoef"] = m.specular_hardness
    material["vertexColors"] = m.THREE_useVertexColors and option_colors
    # Defaults so every map key exists even when a texture kind is absent.
    material['mapDiffuse'] = ""
    material['mapLight'] = ""
    material['mapSpecular'] = ""
    material['mapNormal'] = ""
    material['mapBump'] = ""
    material['mapNormalFactor'] = 1.0
    material['mapBumpScale'] = 1.0
    textures = guess_material_textures(m)
    if textures['diffuse']:
        material['mapDiffuse'] = textures['diffuse']['texture'].image.name
    if textures['light']:
        material['mapLight'] = textures['light']['texture'].image.name
    if textures['specular']:
        material['mapSpecular'] = textures['specular']['texture'].image.name
    if textures['normal']:
        material['mapNormal'] = textures['normal']['texture'].image.name
        if textures['normal']['slot'].use_map_normal:
            material['mapNormalFactor'] = textures['normal']['slot'].normal_factor
    if textures['bump']:
        material['mapBump'] = textures['bump']['texture'].image.name
        # Bug fix: consult the bump texture's own slot; the original read
        # textures['normal'] here (copy-paste), raising TypeError whenever a
        # bump map existed without a normal map and using the wrong factor
        # otherwise.
        if textures['bump']['slot'].use_map_normal:
            material['mapBumpScale'] = textures['bump']['slot'].normal_factor
    material['shading'] = m.THREE_materialType
    material['blending'] = m.THREE_blendingType
    material['depthWrite'] = m.THREE_depthWrite
    material['depthTest'] = m.THREE_depthTest
    material['transparent'] = m.use_transparency
    return material
def guess_material_textures(material):
    """Classify the image textures attached to *material* for export.

    Scans the material's texture slots in order and returns a dict with the
    keys 'diffuse', 'light', 'normal', 'specular' and 'bump', each mapping
    to either None or ``{"texture": <texture>, "slot": <slot>}``.

    Only one texture of each kind survives, since three.js materials can't
    handle more. Classification mirrors the Blender UI:
      - normal map:  texture flagged use_normal_map
                     (Textures => Image Sampling => Normal Map)
      - bump map:    slot influences geometry normal (slot.use_map_normal)
      - specular:    slot influences specular intensity or hardness
      - otherwise:   first non-MULTIPLY slot becomes the diffuse map; any
                     later plain slot is assumed to be a light map
    """
    textures = {
        'diffuse': None,
        'light': None,
        'normal': None,
        'specular': None,
        'bump': None
    }
    # Idiomatic direct iteration instead of range(len(...)) indexing.
    for slot in material.texture_slots:
        if not slot:
            continue
        texture = slot.texture
        if not (slot.use and texture and texture.type == 'IMAGE'):
            continue
        if texture.use_normal_map:
            textures['normal'] = {"texture": texture, "slot": slot}
        elif slot.use_map_normal:
            textures['bump'] = {"texture": texture, "slot": slot}
        elif slot.use_map_specular or slot.use_map_hardness:
            textures['specular'] = {"texture": texture, "slot": slot}
        elif not textures['diffuse'] and slot.blend_type != 'MULTIPLY':
            textures['diffuse'] = {"texture": texture, "slot": slot}
        else:
            textures['light'] = {"texture": texture, "slot": slot}
        # Stop early only once every kind has been found (as the original
        # did); until then, later slots of a kind overwrite earlier ones.
        if all(textures.values()):
            break
    return textures
def generate_material_string(material):
    """Render one extracted material dict into scene-JSON text.

    *material* is the dict built by extract_material_data(); the result is
    TEMPLATE_MATERIAL_SCENE with the material id, three.js material type
    and a comma-separated parameter list filled in.
    """
    material_id = material["name"]
    # default to Lambert
    shading = material.get("shading", "Lambert")
    # normal and bump mapped materials must use Phong
    # to get all required parameters for normal shader
    if material['mapNormal'] or material['mapBump']:
        shading = "Phong"
    type_map = {
        "Lambert": "MeshLambertMaterial",
        "Phong": "MeshPhongMaterial"
    }
    material_type = type_map.get(shading, "MeshBasicMaterial")
    parameters = '"color": %d' % rgb2int(material["colorDiffuse"])
    parameters += ', "opacity": %.2g' % material["transparency"]
    # ambient/specular/shininess only make sense for the Phong model
    if shading == "Phong":
        parameters += ', "ambient": %d' % rgb2int(material["colorAmbient"])
        parameters += ', "specular": %d' % rgb2int(material["colorSpecular"])
        parameters += ', "shininess": %.1g' % material["specularCoef"]
    # texture map names; "" means no map of that kind
    colorMap = material['mapDiffuse']
    lightMap = material['mapLight']
    specularMap = material['mapSpecular']
    normalMap = material['mapNormal']
    bumpMap = material['mapBump']
    normalMapFactor = material['mapNormalFactor']
    bumpMapScale = material['mapBumpScale']
    if colorMap:
        parameters += ', "map": %s' % generate_string(colorMap)
    if lightMap:
        parameters += ', "lightMap": %s' % generate_string(lightMap)
    if specularMap:
        parameters += ', "specularMap": %s' % generate_string(specularMap)
    if normalMap:
        parameters += ', "normalMap": %s' % generate_string(normalMap)
    if bumpMap:
        parameters += ', "bumpMap": %s' % generate_string(bumpMap)
    # emit the factors only when they differ from the default of 1.0
    if normalMapFactor != 1.0:
        parameters += ', "normalMapFactor": %g' % normalMapFactor
    if bumpMapScale != 1.0:
        parameters += ', "bumpMapScale": %g' % bumpMapScale
    if material['vertexColors']:
        parameters += ', "vertexColors": "vertex"'
    if material['transparent']:
        parameters += ', "transparent": true'
    parameters += ', "blending": "%s"' % material['blending']
    if not material['depthWrite']:
        parameters += ', "depthWrite": false'
    if not material['depthTest']:
        parameters += ', "depthTest": false'
    material_string = TEMPLATE_MATERIAL_SCENE % {
        "material_id": generate_string(material_id),
        "type": generate_string(material_type),
        "parameters": parameters
    }
    return material_string
def generate_materials_scene(data):
    """Serialize every material in the blend file that is actually used.

    Returns (joined_material_strings, material_count).
    """
    def _is_used(mat):
        # A material kept alive by a "fake user" always shows users >= 1,
        # so in that case require one extra, real user.
        threshold = 2 if mat.use_fake_user else 1
        return mat.users >= threshold

    chunks = [
        generate_material_string(extract_material_data(m, data["use_colors"]))
        for m in bpy.data.materials
        if _is_used(m)
    ]
    return ",\n\n".join(chunks), len(chunks)
# #####################################################
# Scene exporter - cameras
# #####################################################
def generate_cameras(data):
    """Serialize cameras for the scene document.

    If camera export is enabled and no camera object is selected, the
    DEFAULTS["camera"] entry is emitted; otherwise every selected camera
    object is exported. Returns (joined_chunks, count).
    """
    chunks = []
    if data["use_cameras"]:
        cams = bpy.data.objects
        cams = [ob for ob in cams if (ob.type == 'CAMERA' and ob.select)]
        if not cams:
            camera = DEFAULTS["camera"]
            if camera["type"] == "PerspectiveCamera":
                camera_string = TEMPLATE_CAMERA_PERSPECTIVE % {
                    "camera_id": generate_string(camera["name"]),
                    "fov": camera["fov"],
                    "aspect": camera["aspect"],
                    "near": camera["near"],
                    "far": camera["far"],
                    "position": generate_vec3(camera["position"]),
                    "target": generate_vec3(camera["target"])
                }
            elif camera["type"] == "OrthographicCamera":
                camera_string = TEMPLATE_CAMERA_ORTHO % {
                    "camera_id": generate_string(camera["name"]),
                    "left": camera["left"],
                    "right": camera["right"],
                    "top": camera["top"],
                    "bottom": camera["bottom"],
                    "near": camera["near"],
                    "far": camera["far"],
                    "position": generate_vec3(camera["position"]),
                    "target": generate_vec3(camera["target"])
                }
            # NOTE(review): if DEFAULTS["camera"]["type"] is neither of the
            # two known types, camera_string is unbound here (NameError) —
            # presumed unreachable with the shipped DEFAULTS; confirm.
            chunks.append(camera_string)
        else:
            for cameraobj in cams:
                camera = bpy.data.cameras[cameraobj.name]
                # TODO:
                #   Support more than perspective camera
                #   Calculate a target/lookat
                #   Get correct aspect ratio
                if camera.id_data.type == "PERSP":
                    camera_string = TEMPLATE_CAMERA_PERSPECTIVE % {
                        "camera_id": generate_string(camera.name),
                        # radians -> degrees (3.14 is a coarse pi)
                        "fov": (camera.angle / 3.14) * 180.0,
                        "aspect": 1.333,
                        "near": camera.clip_start,
                        "far": camera.clip_end,
                        # Y is negated to match the target coordinate system
                        "position": generate_vec3([cameraobj.location[0], -cameraobj.location[1], cameraobj.location[2]]),
                        "target": generate_vec3([0, 0, 0])
                    }
                    chunks.append(camera_string)
    return ",\n\n".join(chunks), len(chunks)
# #####################################################
# Scene exporter - lights
# #####################################################
def generate_lights(data):
    """Serialize scene lights; falls back to DEFAULTS["light"] when the
    caller provided none. Returns (joined_chunks, count)."""
    chunks = []
    if data["use_lights"]:
        lights = data.get("lights", [])
        if not lights:
            lights.append(DEFAULTS["light"])
        for light in lights:
            if light["type"] == "DirectionalLight":
                light_string = TEMPLATE_LIGHT_DIRECTIONAL % {
                    "light_id": generate_string(light["name"]),
                    "direction": generate_vec3(light["direction"]),
                    "color": rgb2int(light["color"]),
                    "intensity": light["intensity"]
                }
            elif light["type"] == "PointLight":
                light_string = TEMPLATE_LIGHT_POINT % {
                    "light_id": generate_string(light["name"]),
                    "position": generate_vec3(light["position"]),
                    "color": rgb2int(light["color"]),
                    "intensity": light["intensity"]
                }
            # NOTE(review): a light of any other type would leave
            # light_string unbound (NameError) — confirm only these two
            # types can appear in data["lights"].
            chunks.append(light_string)
    return ",\n\n".join(chunks), len(chunks)
# #####################################################
# Scene exporter - embedded meshes
# #####################################################
def generate_embeds(data):
    """Return the embedded-mesh section body, or "" when embedding is off.

    Each entry of data["embeds"] becomes a '"emb_<name>": {<body>}' chunk;
    chunks are joined with ",\\n\\n".
    """
    if not data["embed_meshes"]:
        return ""
    embeds = data["embeds"]
    return ",\n\n".join('"emb_%s": {%s}' % (name, embeds[name])
                        for name in embeds)
# #####################################################
# Scene exporter - generate ASCII scene
# #####################################################
def generate_ascii_scene(data):
    """Assemble the complete ASCII scene document.

    Calls the per-section generators, folds lights and cameras into the
    "objects" section, and fills TEMPLATE_SCENE_ASCII with the resulting
    sections plus scene-level defaults.
    """
    objects, nobjects = generate_objects(data)
    geometries, ngeometries = generate_geometries(data)
    textures, ntextures = generate_textures_scene(data)
    materials, nmaterials = generate_materials_scene(data)
    lights, nlights = generate_lights(data)
    cameras, ncameras = generate_cameras(data)
    embeds = generate_embeds(data)
    # lights and cameras are appended to the objects section and counted
    # as objects
    if nlights > 0:
        if nobjects > 0:
            objects = objects + ",\n\n" + lights
        else:
            objects = lights
        nobjects += nlights
    if ncameras > 0:
        if nobjects > 0:
            objects = objects + ",\n\n" + cameras
        else:
            objects = cameras
        nobjects += ncameras
    # URL base type: "relativeToHTML" or "relativeToScene"
    basetype = "relativeTo"
    if data["base_html"]:
        basetype += "HTML"
    else:
        basetype += "Scene"
    sections = [
        ["objects", objects],
        ["geometries", geometries],
        ["textures", textures],
        ["materials", materials],
        ["embeds", embeds]
    ]
    # skip empty sections entirely
    chunks = []
    for label, content in sections:
        if content:
            chunks.append(generate_section(label, content))
    sections_string = "\n".join(chunks)
    # default camera: first selected camera object, else a placeholder name
    default_camera = ""
    if data["use_cameras"]:
        cams = [ob for ob in bpy.data.objects if (ob.type == 'CAMERA' and ob.select)]
        if not cams:
            default_camera = "default_camera"
        else:
            default_camera = cams[0].name
    parameters = {
        "fname": data["source_file"],
        "sections": sections_string,
        "bgcolor": generate_vec3(DEFAULTS["bgcolor"]),
        "bgalpha": DEFAULTS["bgalpha"],
        "defcamera": generate_string(default_camera),
        "nobjects": nobjects,
        "ngeometries": ngeometries,
        "ntextures": ntextures,
        "basetype": generate_string(basetype),
        "nmaterials": nmaterials,
        "position": generate_vec3(DEFAULTS["position"]),
        "rotation": generate_vec3(DEFAULTS["rotation"]),
        "scale": generate_vec3(DEFAULTS["scale"])
    }
    text = TEMPLATE_SCENE_ASCII % parameters
    return text
def export_scene(scene, filepath, flipyz, option_colors, option_lights, option_cameras, option_embed_meshes, embeds, option_url_base_html, option_copy_textures):
    """Generate the ASCII scene document for *scene* and write it to
    *filepath*.

    *embeds* is the name -> model-string mapping produced by the caller
    when meshes are embedded; the remaining flags are threaded into the
    generator pipeline via the ``data`` dict.
    """
    source_file = os.path.basename(bpy.data.filepath)
    # objects are contained in scene and linked groups
    objects = []
    # get scene objects
    sceneobjects = scene.objects
    for obj in sceneobjects:
        objects.append(obj)
    scene_text = ""
    data = {
        "scene": scene,
        "objects": objects,
        "embeds": embeds,
        "source_file": source_file,
        "filepath": filepath,
        "flipyz": flipyz,
        "use_colors": option_colors,
        "use_lights": option_lights,
        "use_cameras": option_cameras,
        "embed_meshes": option_embed_meshes,
        "base_html": option_url_base_html,
        "copy_textures": option_copy_textures
    }
    scene_text += generate_ascii_scene(data)
    write_file(filepath, scene_text)
# #####################################################
# Main
# #####################################################
def save(operator, context, filepath = "",
         option_flip_yz = True,
         option_vertices = True,
         option_vertices_truncate = False,
         option_faces = True,
         option_normals = True,
         option_uv_coords = True,
         option_materials = True,
         option_colors = True,
         option_bones = True,
         option_skinning = True,
         align_model = 0,
         option_export_scene = False,
         option_lights = False,
         option_cameras = False,
         option_scale = 1.0,
         option_embed_meshes = True,
         option_url_base_html = False,
         option_copy_textures = False,
         option_animation_morph = False,
         option_animation_skeletal = False,
         option_frame_step = 1,
         option_all_meshes = True):
    """Operator entry point for the exporter.

    In scene mode (option_export_scene) every exportable mesh is written
    once — either embedded into the scene document or into its own file —
    followed by the scene document itself; otherwise the selected (or all)
    meshes are exported as a single model file. Returns {'FINISHED'}.
    """
    #print("URL TYPE", option_url_base_html)
    filepath = ensure_extension(filepath, '.js')
    scene = context.scene
    # leave edit mode so mesh data is up to date
    if scene.objects.active:
        bpy.ops.object.mode_set(mode='OBJECT')
    if option_all_meshes:
        sceneobjects = scene.objects
    else:
        sceneobjects = context.selected_objects
    # objects are contained in scene and linked groups
    objects = []
    # get scene objects
    for obj in sceneobjects:
        objects.append(obj)
    if option_export_scene:
        # geo_set tracks geometry names already exported so shared
        # geometry is only written once
        geo_set = set()
        embeds = {}
        for object in objects:
            if object.type == "MESH" and object.THREE_exportGeometry:
                # create extra copy of geometry with applied modifiers
                # (if they exist)
                #if len(object.modifiers) > 0:
                #    name = object.name
                # otherwise can share geometry
                #else:
                name = object.data.name
                if name not in geo_set:
                    if option_embed_meshes:
                        text, model_string = generate_mesh_string([object], scene,
                                                                  option_vertices,
                                                                  option_vertices_truncate,
                                                                  option_faces,
                                                                  option_normals,
                                                                  option_uv_coords,
                                                                  option_materials,
                                                                  option_colors,
                                                                  option_bones,
                                                                  option_skinning,
                                                                  False,  # align_model
                                                                  option_flip_yz,
                                                                  option_scale,
                                                                  False,  # export_single_model
                                                                  False,  # option_copy_textures
                                                                  filepath,
                                                                  option_animation_morph,
                                                                  option_animation_skeletal,
                                                                  option_frame_step)
                        embeds[object.data.name] = model_string
                    else:
                        # one .js file per geometry, next to the scene file
                        fname = generate_mesh_filename(name, filepath)
                        export_mesh([object], scene,
                                    fname,
                                    option_vertices,
                                    option_vertices_truncate,
                                    option_faces,
                                    option_normals,
                                    option_uv_coords,
                                    option_materials,
                                    option_colors,
                                    option_bones,
                                    option_skinning,
                                    False,  # align_model
                                    option_flip_yz,
                                    option_scale,
                                    False,  # export_single_model
                                    option_copy_textures,
                                    option_animation_morph,
                                    option_animation_skeletal,
                                    option_frame_step)
                    geo_set.add(name)
        export_scene(scene, filepath,
                     option_flip_yz,
                     option_colors,
                     option_lights,
                     option_cameras,
                     option_embed_meshes,
                     embeds,
                     option_url_base_html,
                     option_copy_textures)
    else:
        # single-model export of everything collected above
        export_mesh(objects, scene, filepath,
                    option_vertices,
                    option_vertices_truncate,
                    option_faces,
                    option_normals,
                    option_uv_coords,
                    option_materials,
                    option_colors,
                    option_bones,
                    option_skinning,
                    align_model,
                    option_flip_yz,
                    option_scale,
                    True,  # export_single_model
                    option_copy_textures,
                    option_animation_morph,
                    option_animation_skeletal,
                    option_frame_step)
    return {'FINISHED'}
| bsd-3-clause |
aio-libs/aiomysql | tests/test_cursor.py | 1 | 9444 | import asyncio
import pytest
from aiomysql import ProgrammingError, Cursor, InterfaceError
async def _prepare(conn):
    """Test fixture helper: (re)create table ``tbl`` with three rows and an
    empty table ``tbl2`` on *conn*, then commit."""
    cur = await conn.cursor()
    await cur.execute("DROP TABLE IF EXISTS tbl;")
    await cur.execute("""CREATE TABLE tbl (
        id MEDIUMINT NOT NULL AUTO_INCREMENT,
        name VARCHAR(255) NOT NULL,
        PRIMARY KEY (id));""")
    for i in [(1, 'a'), (2, 'b'), (3, 'c')]:
        await cur.execute("INSERT INTO tbl VALUES(%s, %s)", i)
    await cur.execute("DROP TABLE IF EXISTS tbl2")
    await cur.execute("""CREATE TABLE tbl2
        (id int, name varchar(255))""")
    await conn.commit()
async def _prepare_procedure(conn):
    """Test fixture helper: (re)create stored procedure ``myinc`` that
    selects its argument plus one."""
    cur = await conn.cursor()
    await cur.execute("DROP PROCEDURE IF EXISTS myinc;")
    await cur.execute("""CREATE PROCEDURE myinc(p1 INT)
    BEGIN
        SELECT p1 + 1;
    END
    """)
    await conn.commit()
@pytest.mark.run_loop
async def test_description(connection_creator):
    """cursor.description must describe the result columns of a SELECT and
    reset to None after a resultless (DDL) statement."""
    conn = await connection_creator()
    await _prepare(conn)
    cur = await conn.cursor()
    assert cur.description is None
    await cur.execute('SELECT * from tbl;')
    assert len(cur.description) == 2, \
        'cursor.description describes too many columns'
    assert len(cur.description[0]) == 7, \
        'cursor.description[x] tuples must have 7 elements'
    assert cur.description[0][0].lower() == 'id', \
        'cursor.description[x][0] must return column name'
    assert cur.description[1][0].lower() == 'name', \
        'cursor.description[x][0] must return column name'
    # Make sure self.description gets reset, cursor should be
    # set to None in case of none resulting queries like DDL
    await cur.execute('DROP TABLE IF EXISTS foobar;')
    assert cur.description is None
@pytest.mark.run_loop
async def test_cursor_properties(connection_creator):
    """Basic cursor attributes: back-reference to the connection, the no-op
    set*sizes() DB-API methods, and echo propagated from the connection."""
    conn = await connection_creator()
    cur = await conn.cursor()
    assert cur.connection is conn
    cur.setinputsizes()
    cur.setoutputsizes()
    assert cur.echo == conn.echo
@pytest.mark.run_loop
async def test_scroll_relative(connection_creator):
    """scroll() defaults to relative mode: skipping one row lands on the
    second fixture row."""
    conn = await connection_creator()
    await _prepare(conn)
    cur = await conn.cursor()
    await cur.execute('SELECT * FROM tbl;')
    await cur.scroll(1)
    ret = await cur.fetchone()
    assert (2, 'b') == ret
@pytest.mark.run_loop
async def test_scroll_absolute(connection_creator):
    """scroll(n, mode='absolute') positions on row index n (0-based)."""
    conn = await connection_creator()
    await _prepare(conn)
    cur = await conn.cursor()
    await cur.execute('SELECT * FROM tbl;')
    await cur.scroll(2, mode='absolute')
    ret = await cur.fetchone()
    assert (3, 'c') == ret
@pytest.mark.run_loop
async def test_scroll_errors(connection_creator):
    """scroll() raises ProgrammingError both before any execute() and for an
    unknown mode string."""
    conn = await connection_creator()
    cur = await conn.cursor()
    # no query executed yet -> nothing to scroll over
    with pytest.raises(ProgrammingError):
        await cur.scroll(2, mode='absolute')
    cur = await conn.cursor()
    await cur.execute('SELECT * FROM tbl;')
    with pytest.raises(ProgrammingError):
        await cur.scroll(2, mode='not_valid_mode')
@pytest.mark.run_loop
async def test_scroll_index_error(connection_creator):
    """Scrolling past the end of the result set raises IndexError."""
    conn = await connection_creator()
    await _prepare(conn)
    cur = await conn.cursor()
    await cur.execute('SELECT * FROM tbl;')
    with pytest.raises(IndexError):
        await cur.scroll(1000)
@pytest.mark.run_loop
async def test_close(connection_creator):
    """close() marks the cursor closed, further execute() fails, and closing
    twice is harmless."""
    conn = await connection_creator()
    cur = await conn.cursor()
    await cur.close()
    assert cur.closed is True
    with pytest.raises(ProgrammingError):
        await cur.execute('SELECT 1')
    # try to close for second time
    await cur.close()
@pytest.mark.run_loop
async def test_arraysize(connection_creator):
    """arraysize defaults to 1 and is a plain writable attribute."""
    conn = await connection_creator()
    cur = await conn.cursor()
    assert 1 == cur.arraysize
    cur.arraysize = 10
    assert 10 == cur.arraysize
@pytest.mark.run_loop
async def test_rows(connection_creator):
    """rowcount/rownumber track the result set; lastrowid is None for a
    SELECT and set after an INSERT into an AUTO_INCREMENT table."""
    conn = await connection_creator()
    await _prepare(conn)
    cur = await conn.cursor()
    await cur.execute('SELECT * from tbl')
    assert 3 == cur.rowcount
    assert 0 == cur.rownumber
    await cur.fetchone()
    assert 1 == cur.rownumber
    assert cur.lastrowid is None
    await cur.execute('INSERT INTO tbl VALUES (%s, %s)', (4, 'd'))
    assert 0 != cur.lastrowid
    await conn.commit()
@pytest.mark.run_loop
async def test_callproc(connection_creator):
    """callproc() runs a stored procedure and its result is fetchable; a
    closed cursor refuses callproc()."""
    conn = await connection_creator()
    await _prepare_procedure(conn)
    cur = await conn.cursor()
    await cur.callproc('myinc', [1])
    ret = await cur.fetchone()
    assert (2,) == ret
    await cur.close()
    with pytest.raises(ProgrammingError):
        await cur.callproc('myinc', [1])
    conn.close()
@pytest.mark.run_loop
async def test_fetchone_no_result(connection_creator):
    # test a fetchone() with no rows
    """fetchone() after a statement that produces no result set returns
    None."""
    conn = await connection_creator()
    c = await conn.cursor()
    await c.execute("create table test_nr (b varchar(32))")
    try:
        data = "pymysql"
        await c.execute("insert into test_nr (b) values (%s)", (data,))
        r = await c.fetchone()
        assert r is None
    finally:
        # always drop the scratch table, even if the assertions fail
        await c.execute("drop table test_nr")
@pytest.mark.run_loop
async def test_fetchmany_no_result(connection_creator):
    """fetchmany() after a resultless (DDL) statement returns an empty
    list."""
    conn = await connection_creator()
    cur = await conn.cursor()
    await cur.execute('DROP TABLE IF EXISTS foobar;')
    r = await cur.fetchmany()
    assert [] == r
@pytest.mark.run_loop
async def test_fetchall_no_result(connection_creator):
    """fetchall() after a resultless (DDL) statement returns an empty
    list."""
    conn = await connection_creator()
    cur = await conn.cursor()
    await cur.execute('DROP TABLE IF EXISTS foobar;')
    r = await cur.fetchall()
    assert [] == r
@pytest.mark.run_loop
async def test_fetchall_with_scroll(connection_creator):
    """fetchall() after scroll(1) returns only the remaining rows."""
    conn = await connection_creator()
    await _prepare(conn)
    cur = await conn.cursor()
    await cur.execute('SELECT * FROM tbl;')
    await cur.scroll(1)
    ret = await cur.fetchall()
    assert ((2, 'b'), (3, 'c')) == ret
@pytest.mark.run_loop
async def test_aggregates(connection_creator):
    """ test aggregate functions """
    conn = await connection_creator()
    c = await conn.cursor()
    try:
        await c.execute('create table test_aggregates (i integer)')
        for i in range(0, 10):
            await c.execute(
                'insert into test_aggregates (i) values (%s)', (i,))
        # SUM() on the server must match the same sum computed locally
        await c.execute('select sum(i) from test_aggregates')
        r, = await c.fetchone()
        assert sum(range(0, 10)) == r
    finally:
        # always drop the scratch table, even if the assertions fail
        await c.execute('drop table test_aggregates')
@pytest.mark.run_loop
async def test_single_tuple(connection_creator):
    """ test a single tuple """
    # A one-element tuple used with "IN %s" must be escaped correctly.
    conn = await connection_creator()
    c = await conn.cursor()
    try:
        await c.execute(
            "create table mystuff (id integer primary key)")
        await c.execute("insert into mystuff (id) values (1)")
        await c.execute("insert into mystuff (id) values (2)")
        await c.execute("select id from mystuff where id in %s", ((1,),))
        r = await c.fetchall()
        assert [(1,)] == list(r)
    finally:
        # always drop the scratch table, even if the assertions fail
        await c.execute("drop table mystuff")
@pytest.mark.run_loop
async def test_executemany(connection_creator):
    """executemany() returns the total affected row count; with an empty
    argument sequence it returns None."""
    conn = await connection_creator()
    await _prepare(conn)
    cur = await conn.cursor()
    assert cur.description is None
    args = [1, 2, 3]
    row_count = await cur.executemany(
        'SELECT * FROM tbl WHERE id = %s;', args)
    assert row_count == 3
    r = await cur.fetchall()
    # TODO: if this right behaviour
    # (only the last SELECT's result set remains fetchable)
    assert ((3, 'c'),) == r
    # calling execute many without args
    row_count = await cur.executemany('SELECT 1;', ())
    assert row_count is None
@pytest.mark.run_loop
async def test_custom_cursor(connection_creator):
    """conn.cursor() accepts a Cursor subclass and returns an instance of
    it that works like a normal cursor."""
    class MyCursor(Cursor):
        pass
    conn = await connection_creator()
    cur = await conn.cursor(MyCursor)
    assert isinstance(cur, MyCursor)
    await cur.execute("SELECT 42;")
    (r, ) = await cur.fetchone()
    assert r == 42
@pytest.mark.run_loop
async def test_custom_cursor_not_cursor_subclass(connection_creator):
    """conn.cursor() rejects classes that are not Cursor subclasses."""
    class MyCursor2:
        pass
    conn = await connection_creator()
    with pytest.raises(TypeError):
        await conn.cursor(MyCursor2)
@pytest.mark.run_loop
async def test_morgify(connection_creator):
    # NOTE: function name has a typo; it exercises Cursor.mogrify(), which
    # returns the query with parameters interpolated (kept for backward
    # compatibility with existing test selections).
    conn = await connection_creator()
    cur = await conn.cursor()
    pairs = [(1, 'a'), (2, 'b'), (3, 'c')]
    sql = "INSERT INTO tbl VALUES(%s, %s)"
    results = [cur.mogrify(sql, p) for p in pairs]
    expected = ["INSERT INTO tbl VALUES(1, 'a')",
                "INSERT INTO tbl VALUES(2, 'b')",
                "INSERT INTO tbl VALUES(3, 'c')"]
    assert results == expected
@pytest.mark.run_loop
async def test_execute_cancel(connection_creator):
    """Cancelling an in-flight execute() leaves the connection unusable:
    creating another cursor raises InterfaceError."""
    conn = await connection_creator()
    cur = await conn.cursor()
    # Cancel a cursor in the middle of execution, before it could
    # read even the first packet (SLEEP assures the timings)
    task = asyncio.ensure_future(cur.execute(
        "SELECT 1 as id, SLEEP(0.1) as xxx"))
    await asyncio.sleep(0.05)
    task.cancel()
    try:
        await task
    except asyncio.CancelledError:
        pass
    with pytest.raises(InterfaceError):
        await conn.cursor()
| mit |
benthomasson/ansible | lib/ansible/utils/module_docs_fragments/backup.py | 427 | 1071 | # Copyright (c) 2015 Ansible, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
    """Shared documentation fragment for the common ``backup`` module
    option (pulled in via ``extends_documentation_fragment``)."""
    # Standard documentation fragment
    DOCUMENTATION = '''
options:
  backup:
    description:
      - Create a backup file including the timestamp information so you can get
        the original file back if you somehow clobbered it incorrectly.
    required: false
    choices: [ "yes", "no" ]
    default: "no"
'''
| gpl-3.0 |
davipeterlini/routeflow_tcc | rfserver/rfserver.py | 2 | 21060 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
import sys
import logging
import binascii
import threading
import time
import argparse
from bson.binary import Binary
import rflib.ipc.Ipc as Ipc
import rflib.ipc.MongoIpc as MongoIpc
from rflib.ipc.RFProtocol import *
from rflib.defs import *
from rflib.types.Match import *
from rflib.types.Action import *
from rflib.types.Option import *
from rftable import *
# Register actions: outcome codes used when registering a VM or datapath port.
REGISTER_IDLE = 0        # store the port and wait for its counterpart/config
REGISTER_ASSOCIATED = 1  # port matched its configured counterpart
REGISTER_ISL = 2         # port belongs to an inter-switch link (ISL)
class RFServer(Ipc.IpcMessageProcessor):
    def __init__(self, configfile, islconffile):
        """Load configuration, set up logging and start both IPC channels.

        configfile / islconffile are paths to the port-mapping and
        inter-switch-link configuration files.
        """
        self.rftable = RFTable()
        self.isltable = RFISLTable()
        self.config = RFConfig(configfile)
        self.islconf = RFISLConf(islconffile)
        # Logging
        self.log = logging.getLogger("rfserver")
        self.log.setLevel(logging.INFO)
        ch = logging.StreamHandler()
        ch.setLevel(logging.INFO)
        ch.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
        self.log.addHandler(ch)
        #self.ipc = MongoIPC.MongoIPCMessageService(MONGO_ADDRESS,
        #                                           MONGO_DB_NAME,
        #                                           RFSERVER_ID,
        #                                           threading.Thread,
        #                                           time.sleep)
        # Two IPC channels: one towards the RFClient VMs, one towards the
        # RFProxy controller applications.
        self.ipc_rfclient = MongoIpc.MongoIpc(RFSERVER_ID, RFCLIENT_RFSERVER_CHANNEL)
        self.ipc_rfproxy = MongoIpc.MongoIpc(RFSERVER_ID, RFSERVER_RFPROXY_CHANNEL)
        # Client messages are handled on a separate thread; the proxy
        # channel is listened on here (presumably blocking — verify in
        # MongoIpc before relying on code placed after this call).
        self.ipc_rfclient.parallel_listen(self)
        self.ipc_rfproxy.listen(self)
def process(self, msg):
type_ = msg.get_type()
if type_ == PORT_REGISTER:
self.register_vm_port(msg.get_vm_id(), msg.get_vm_port(),
msg.get_hwaddress())
elif type_ == ROUTE_MOD:
self.register_route_mod(msg)
elif type_ == DATAPATH_PORT_REGISTER:
self.register_dp_port(msg.get_ct_id(),
msg.get_dp_id(),
msg.get_dp_port())
elif type_ == DATAPATH_DOWN:
self.set_dp_down(msg.get_ct_id(), msg.get_dp_id())
elif type_ == VIRTUAL_PLANE_MAP:
self.map_port(msg.get_vm_id(), msg.get_vm_port(),
msg.get_vs_id(), msg.get_vs_port())
# Port register methods
    def register_vm_port(self, vm_id, vm_port, eth_addr):
        """Handle a PORT_REGISTER message from an RFClient VM port.

        If configuration maps this VM port to an already-registered idle
        datapath port, associate the two and ask the client for its port
        mapping; otherwise store the VM port as idle until its datapath
        counterpart shows up.
        """
        action = None
        config_entry = self.config.get_config_for_vm_port(vm_id, vm_port)
        if config_entry is None:
            # Register idle VM awaiting for configuration
            action = REGISTER_IDLE
        else:
            entry = self.rftable.get_entry_by_dp_port(config_entry.ct_id,
                                                      config_entry.dp_id,
                                                      config_entry.dp_port)
            # If there's no entry, we have no DP, register VM as idle
            if entry is None:
                action = REGISTER_IDLE
            # If there's an idle DP entry matching configuration, associate
            elif entry.get_status() == RFENTRY_IDLE_DP_PORT:
                action = REGISTER_ASSOCIATED
        # Apply action
        if action == REGISTER_IDLE:
            self.rftable.set_entry(RFEntry(vm_id=vm_id, vm_port=vm_port,
                                           eth_addr=eth_addr))
            self.log.info("Registering client port as idle (vm_id=%s, "
                          "vm_port=%i, eth_addr=%s)" % (format_id(vm_id),
                                                        vm_port, eth_addr))
        elif action == REGISTER_ASSOCIATED:
            entry.associate(vm_id, vm_port, eth_addr=eth_addr)
            self.rftable.set_entry(entry)
            self.config_vm_port(vm_id, vm_port)
            self.log.info("Registering client port and associating to "
                          "datapath port (vm_id=%s, vm_port=%i, "
                          "eth_addr = %s, dp_id=%s, dp_port=%s)"
                          % (format_id(vm_id), vm_port, eth_addr,
                             format_id(entry.dp_id), entry.dp_port))
def config_vm_port(self, vm_id, vm_port):
message = PortConfig(vm_id=vm_id, vm_port=vm_port, operation_id=0)
message.set_from(RFSERVER_ID)
message.set_to(str(vm_id))
self.ipc_rfclient.send(message)
self.log.info("Asking client for mapping message for port "
"(vm_id=%s, vm_port=%i)" % (format_id(vm_id), vm_port))
# Handle RouteMod messages (type ROUTE_MOD)
#
# Takes a RouteMod, replaces its VM id,port with the associated DP id,port
# and sends to the corresponding controller
def register_route_mod(self, rm):
vm_id = rm.get_id()
# Find the output action
for i, action in enumerate(rm.actions):
if action['type'] is RFAT_OUTPUT:
# Put the action in an action object for easy modification
action_output = Action.from_dict(action)
vm_port = action_output.get_value()
# Find the (vmid, vm_port), (dpid, dpport) pair
entry = self.rftable.get_entry_by_vm_port(vm_id, vm_port)
# If we can't find an associated datapath for this RouteMod,
# drop it.
if entry is None or entry.get_status() == RFENTRY_IDLE_VM_PORT:
self.log.info("Received RouteMod destined for unknown "
"datapath - Dropping (vm_id=%s)" %
(format_id(vm_id)))
return
# Replace the VM id,port with the Datapath id.port
rm.set_id(int(entry.dp_id))
if rm.get_mod() is RMT_DELETE:
# When deleting a route, we don't need an output action.
rm.actions.remove(action)
else:
# Replace the VM port with the datapath port
action_output.set_value(entry.dp_port)
rm.actions[i] = action_output.to_dict()
entries = self.rftable.get_entries(dp_id=entry.dp_id,
ct_id=entry.ct_id)
entries.extend(self.isltable.get_entries(dp_id=entry.dp_id,
ct_id=entry.ct_id))
rm.add_option(Option.CT_ID(entry.ct_id))
self._send_rm_with_matches(rm, entry.dp_port, entries)
remote_dps = self.isltable.get_entries(rem_ct=entry.ct_id,
rem_id=entry.dp_id)
for r in remote_dps:
if r.get_status() == RFISL_ACTIVE:
rm.set_options(rm.get_options()[:-1])
rm.add_option(Option.CT_ID(r.ct_id))
rm.set_id(int(r.dp_id))
rm.set_actions(None)
rm.add_action(Action.SET_ETH_SRC(r.eth_addr))
rm.add_action(Action.SET_ETH_DST(r.rem_eth_addr))
rm.add_action(Action.OUTPUT(r.dp_port))
entries = self.rftable.get_entries(dp_id=r.dp_id,
ct_id=r.ct_id)
self._send_rm_with_matches(rm, r.dp_port, entries)
return
# If no output action is found, don't forward the routemod.
self.log.info("Received RouteMod with no Output Port - Dropping "
"(vm_id=%s)" % (format_id(vm_id)))
    def _send_rm_with_matches(self, rm, out_port, entries):
        """Send one copy of *rm* per active entry whose port differs from
        *out_port*, each with a (eth_addr, in_port) match pair appended.

        The two matches added for an entry are popped again before the next
        iteration so *rm* is left with its original match list.
        """
        # send entries matching external ports
        for entry in entries:
            if out_port != entry.dp_port:
                if entry.get_status() == RFENTRY_ACTIVE or \
                   entry.get_status() == RFISL_ACTIVE:
                    rm.add_match(Match.ETHERNET(entry.eth_addr))
                    rm.add_match(Match.IN_PORT(entry.dp_port))
                    rm.set_from(RFSERVER_ID)
                    rm.set_to(str(entry.ct_id))
                    self.ipc_rfproxy.send(rm)
                    # drop the two matches added above
                    rm.set_matches(rm.get_matches()[:-2])
# DatapathPortRegister methods
    # DatapathPortRegister methods
    def register_dp_port(self, ct_id, dp_id, dp_port):
        """Handle a DATAPATH_PORT_REGISTER message from a controller.

        First pushes the default flow configuration to the datapath (and
        stops there for the RFVS virtual switch). Then either associates
        the port with a waiting idle VM port, registers it as an ISL
        endpoint, or stores it as idle.
        """
        stop = self.config_dp(ct_id, dp_id)
        if stop:
            # RFVS ports are not mapped to VM ports
            return
        # The logic down here is pretty much the same as register_vm_port
        action = None
        config_entry = self.config.get_config_for_dp_port(ct_id, dp_id,
                                                          dp_port)
        if config_entry is None:
            islconfs = self.islconf.get_entries_by_port(ct_id, dp_id, dp_port)
            if islconfs:
                action = REGISTER_ISL
            else:
                # Register idle DP awaiting for configuration
                action = REGISTER_IDLE
        else:
            entry = self.rftable.get_entry_by_vm_port(config_entry.vm_id,
                                                      config_entry.vm_port)
            # If there's no entry, we have no VM, register DP as idle
            if entry is None:
                action = REGISTER_IDLE
            # If there's an idle VM entry matching configuration, associate
            elif entry.get_status() == RFENTRY_IDLE_VM_PORT:
                action = REGISTER_ASSOCIATED
        # Apply action
        if action == REGISTER_IDLE:
            self.rftable.set_entry(RFEntry(ct_id=ct_id, dp_id=dp_id,
                                           dp_port=dp_port))
            self.log.info("Registering datapath port as idle (dp_id=%s, "
                          "dp_port=%i)" % (format_id(dp_id), dp_port))
        elif action == REGISTER_ASSOCIATED:
            entry.associate(dp_id, dp_port, ct_id)
            self.rftable.set_entry(entry)
            self.config_vm_port(entry.vm_id, entry.vm_port)
            self.log.info("Registering datapath port and associating to "
                          "client port (dp_id=%s, dp_port=%i, vm_id=%s, "
                          "vm_port=%s)" % (format_id(dp_id), dp_port,
                                           format_id(entry.vm_id),
                                           entry.vm_port))
        elif action == REGISTER_ISL:
            self._register_islconf(islconfs, ct_id, dp_id, dp_port)
    def _register_islconf(self, c_entries, ct_id, dp_id, dp_port):
        """Register (ct_id, dp_id, dp_port) as an inter-switch-link endpoint.

        For every ISL configuration entry touching this port, either create
        an idle ISL table entry (the other end has not registered yet) or
        associate with the waiting end and create/complete the mirrored
        entry for the remote side.
        """
        for conf in c_entries:
            entry = None
            eth_addr = None
            # Pick the side of the configured link that is NOT this port,
            # and remember the MAC address of this side.
            if conf.rem_id != dp_id or conf.rem_ct != ct_id:
                entry = self.isltable.get_entry_by_addr(conf.rem_ct,
                                                        conf.rem_id,
                                                        conf.rem_port,
                                                        conf.rem_eth_addr)
                eth_addr = conf.eth_addr
            else:
                entry = self.isltable.get_entry_by_addr(conf.ct_id,
                                                        conf.dp_id,
                                                        conf.dp_port,
                                                        conf.eth_addr)
                eth_addr = conf.rem_eth_addr
            if entry is None:
                # Other end not seen yet: register this end as idle.
                n_entry = RFISLEntry(vm_id=conf.vm_id, ct_id=ct_id,
                                     dp_id=dp_id, dp_port=dp_port,
                                     eth_addr=eth_addr)
                self.isltable.set_entry(n_entry)
                self.log.info("Registering ISL port as idle "
                              "(dp_id=%s, dp_port=%i, eth_addr=%s)" %
                              (format_id(dp_id), dp_port, eth_addr))
            elif entry.get_status() == RFISL_IDLE_DP_PORT:
                # Other end is waiting: associate both directions.
                entry.associate(ct_id, dp_id, dp_port, eth_addr)
                self.isltable.set_entry(entry)
                n_entry = self.isltable.get_entry_by_remote(entry.ct_id,
                                                            entry.dp_id,
                                                            entry.dp_port,
                                                            entry.eth_addr)
                if n_entry is None:
                    # Create the mirrored entry for this side of the link.
                    n_entry = RFISLEntry(vm_id=entry.vm_id, ct_id=ct_id,
                                         dp_id=dp_id, dp_port=dp_port,
                                         eth_addr=entry.rem_eth_addr,
                                         rem_ct=entry.ct_id,
                                         rem_id=entry.dp_id,
                                         rem_port=entry.dp_port,
                                         rem_eth_addr=entry.eth_addr)
                    self.isltable.set_entry(n_entry)
                else:
                    n_entry.associate(ct_id, dp_id, dp_port, eth_addr)
                    self.isltable.set_entry(n_entry)
                self.log.info("Registering ISL port and associating to "
                              "remote ISL port (ct_id=%s, dp_id=%s, "
                              "dp_port=%s, rem_ct=%s, rem_id=%s, "
                              "rem_port=%s)" % (ct_id, format_id(dp_id),
                                                dp_port, entry.ct_id,
                                                format_id(entry.dp_id),
                                                entry.dp_port))
    def send_datapath_config_message(self, ct_id, dp_id, operation_id):
        """Send one default-flow RouteMod for *operation_id* to datapath
        *dp_id* via controller *ct_id*.

        DC_CLEAR_FLOW_TABLE wipes the table, DC_DROP_ALL installs a
        lowest-priority drop rule, and every other DC_* operation installs
        a high-priority match that sends the protocol's traffic to the
        controller.
        """
        rm = RouteMod(RMT_ADD, dp_id)
        if operation_id == DC_CLEAR_FLOW_TABLE:
            rm.set_mod(RMT_DELETE)
            rm.add_option(Option.PRIORITY(PRIORITY_LOWEST))
        elif operation_id == DC_DROP_ALL:
            rm.add_option(Option.PRIORITY(PRIORITY_LOWEST + PRIORITY_BAND))
            # No action specifies discard
            pass
        else:
            rm.add_option(Option.PRIORITY(PRIORITY_HIGH))
            if operation_id == DC_RIPV2:
                rm.add_match(Match.ETHERTYPE(ETHERTYPE_IP))
                rm.add_match(Match.NW_PROTO(IPPROTO_UDP))
                rm.add_match(Match.IPV4(IPADDR_RIPv2, IPV4_MASK_EXACT))
            elif operation_id == DC_OSPF:
                rm.add_match(Match.ETHERTYPE(ETHERTYPE_IP))
                rm.add_match(Match.NW_PROTO(IPPROTO_OSPF))
            elif operation_id == DC_ARP:
                rm.add_match(Match.ETHERTYPE(ETHERTYPE_ARP))
            elif operation_id == DC_ICMP:
                rm.add_match(Match.ETHERTYPE(ETHERTYPE_IP))
                rm.add_match(Match.NW_PROTO(IPPROTO_ICMP))
            elif operation_id == DC_ICMPV6:
                rm.add_match(Match.ETHERTYPE(ETHERTYPE_IPV6))
                rm.add_match(Match.NW_PROTO(IPPROTO_ICMPV6))
            elif operation_id == DC_BGP_PASSIVE:
                rm.add_match(Match.ETHERTYPE(ETHERTYPE_IP))
                rm.add_match(Match.NW_PROTO(IPPROTO_TCP))
                rm.add_match(Match.TP_DST(TPORT_BGP))
            elif operation_id == DC_BGP_ACTIVE:
                rm.add_match(Match.ETHERTYPE(ETHERTYPE_IP))
                rm.add_match(Match.NW_PROTO(IPPROTO_TCP))
                rm.add_match(Match.TP_SRC(TPORT_BGP))
            elif operation_id == DC_LDP_PASSIVE:
                rm.add_match(Match.ETHERTYPE(ETHERTYPE_IP))
                rm.add_match(Match.NW_PROTO(IPPROTO_TCP))
                rm.add_match(Match.TP_DST(TPORT_LDP))
            elif operation_id == DC_LDP_ACTIVE:
                rm.add_match(Match.ETHERTYPE(ETHERTYPE_IP))
                rm.add_match(Match.NW_PROTO(IPPROTO_TCP))
                rm.add_match(Match.TP_SRC(TPORT_LDP))
            elif operation_id == DC_VM_INFO:
                rm.add_match(Match.ETHERTYPE(RF_ETH_PROTO))
            # All non-clear/drop operations punt the matched traffic to the
            # controller.
            rm.add_action(Action.CONTROLLER())
        rm.add_option(Option.CT_ID(ct_id))
        rm.set_from(RFSERVER_ID)
        rm.set_to(str(ct_id))
        self.ipc_rfproxy.send(rm)
def config_dp(self, ct_id, dp_id):
    """Install the default flow entries on a newly reported datapath.

    RFVS datapaths get a single catch-all entry (DC_ALL); normal
    registered switches get their table cleared followed by one default
    entry per supported control-plane protocol.

    Returns:
        True if dp_id is the RFVS (virtual switch), False otherwise.
    """
    if is_rfvs(dp_id):
        # TODO: support more than one OVS
        self.send_datapath_config_message(ct_id, dp_id, DC_ALL)
        self.log.info("Configuring RFVS (dp_id=%s)" % format_id(dp_id))
    elif self.rftable.is_dp_registered(ct_id, dp_id) or \
            self.isltable.is_dp_registered(ct_id, dp_id):
        # Configure a normal switch. Clear the tables and install default
        # flows.
        self.send_datapath_config_message(ct_id, dp_id,
                                          DC_CLEAR_FLOW_TABLE)
        # TODO: enforce order: clear should always be executed first
        self.send_datapath_config_message(ct_id, dp_id, DC_DROP_ALL)
        self.send_datapath_config_message(ct_id, dp_id, DC_OSPF)
        self.send_datapath_config_message(ct_id, dp_id, DC_BGP_PASSIVE)
        self.send_datapath_config_message(ct_id, dp_id, DC_BGP_ACTIVE)
        self.send_datapath_config_message(ct_id, dp_id, DC_RIPV2)
        self.send_datapath_config_message(ct_id, dp_id, DC_ARP)
        self.send_datapath_config_message(ct_id, dp_id, DC_ICMP)
        self.send_datapath_config_message(ct_id, dp_id, DC_ICMPV6)
        self.send_datapath_config_message(ct_id, dp_id, DC_LDP_PASSIVE)
        self.send_datapath_config_message(ct_id, dp_id, DC_LDP_ACTIVE)
        self.log.info("Configuring datapath (dp_id=%s)" % format_id(dp_id))
    # Unregistered non-RFVS datapaths are left untouched.
    return is_rfvs(dp_id)
# DatapathDown methods
def set_dp_down(self, ct_id, dp_id):
    """Mark a whole datapath as down, idling its ports and ISL entries."""
    # Every port registered on this datapath goes down with it.
    for rf_entry in self.rftable.get_dp_entries(ct_id, dp_id):
        self.set_dp_port_down(rf_entry.ct_id, rf_entry.dp_id,
                              rf_entry.dp_port)
    # ISL entries where this datapath is the local side lose their remote.
    for isl_entry in self.isltable.get_dp_entries(ct_id, dp_id):
        isl_entry.make_idle(RFISL_IDLE_REMOTE)
        self.isltable.set_entry(isl_entry)
    # ISL entries where this datapath is the remote side lose their DP port.
    for isl_entry in self.isltable.get_entries(rem_ct=ct_id, rem_id=dp_id):
        isl_entry.make_idle(RFISL_IDLE_DP_PORT)
        self.isltable.set_entry(isl_entry)
    self.log.info("Datapath down (dp_id=%s)" % format_id(dp_id))
def set_dp_port_down(self, ct_id, dp_id, dp_port):
    """Mark one datapath port as down and recycle its associated VM port."""
    entry = self.rftable.get_entry_by_dp_port(ct_id, dp_id, dp_port)
    if entry is not None:
        # If the DP port is registered, delete it and leave only the
        # associated VM port. Reset this VM port so it can be reused.
        vm_id, vm_port = entry.vm_id, entry.vm_port
        entry.make_idle(RFENTRY_IDLE_VM_PORT)
        self.rftable.set_entry(entry)
        if vm_id is not None:
            self.reset_vm_port(vm_id, vm_port)
    # NOTE(review): indentation reconstructed -- the debug log is assumed
    # to fire whether or not the port was registered; confirm upstream.
    self.log.debug("Datapath port down (dp_id=%s, dp_port=%i)" %
                   (format_id(dp_id), dp_port))
def reset_vm_port(self, vm_id, vm_port):
    """Ask the client owning vm_id to reset vm_port so it can be remapped."""
    # Entries that never had a VM attached need no reset.
    if vm_id is None:
        return
    reset_msg = PortConfig(vm_id=vm_id, vm_port=vm_port, operation_id=1)
    reset_msg.set_from(RFSERVER_ID)
    reset_msg.set_to(str(vm_id))
    self.ipc_rfclient.send(reset_msg)
    self.log.info("Resetting client port (vm_id=%s, vm_port=%i)" %
                  (format_id(vm_id), vm_port))
# PortMap methods
def map_port(self, vm_id, vm_port, vs_id, vs_port):
    """Activate a client(VM)/datapath association on a virtual-switch port.

    Called when the proxy reports that vm_port of vm_id appeared on
    (vs_id, vs_port). Only entries already in the ASSOCIATED state are
    activated; the mapping is then pushed to the controller as a
    DataPlaneMap message.
    """
    entry = self.rftable.get_entry_by_vm_port(vm_id, vm_port)
    if entry is not None and entry.get_status() == RFENTRY_ASSOCIATED:
        # If the association is valid, activate it
        entry.activate(vs_id, vs_port)
        self.rftable.set_entry(entry)
        msg = DataPlaneMap(ct_id=entry.ct_id,
                           dp_id=entry.dp_id, dp_port=entry.dp_port,
                           vs_id=vs_id, vs_port=vs_port)
        msg.set_from(RFSERVER_ID)
        msg.set_to(str(entry.ct_id))
        self.ipc_rfproxy.send(msg)
        self.log.info("Mapping client-datapath association "
                      "(vm_id=%s, vm_port=%i, dp_id=%s, "
                      "dp_port=%i, vs_id=%s, vs_port=%i)" %
                      (format_id(entry.vm_id), entry.vm_port,
                       format_id(entry.dp_id), entry.dp_port,
                       format_id(entry.vs_id), entry.vs_port))
if __name__ == "__main__":
    # Command-line entry point: parse the mapping-file arguments and start
    # the (blocking) RFServer main loop.
    description='RFServer co-ordinates RFClient and RFProxy instances, ' \
        'listens for route updates, and configures flow tables'
    epilog='Report bugs to: https://github.com/routeflow/RouteFlow/issues'
    parser = argparse.ArgumentParser(description=description, epilog=epilog)
    parser.add_argument('configfile',
                        help='VM-VS-DP mapping configuration file')
    parser.add_argument('-i', '--islconfig',
                        help='ISL mapping configuration file')
    args = parser.parse_args()
    try:
        RFServer(args.configfile, args.islconfig)
    except IOError:
        # Either the config file or the ISL file could not be opened.
        sys.exit("Error opening file: {}".format(args.configfile))
| apache-2.0 |
vipul-sharma20/oh-mainline | vendor/packages/gdata/tests/gdata_tests/apps_test.py | 128 | 22040 | #!/usr/bin/python
#
# Copyright (C) 2007 SIOS Technology, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'tmatsuo@sios.com (Takashi MATSUO)'
import unittest
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import atom
import gdata
from gdata import test_data
import gdata.apps
class AppsEmailListRecipientFeedTest(unittest.TestCase):
  """Parses an EmailListRecipientFeed from canned XML and checks its fields.

  Uses the modern unittest assertion methods (assertEqual, assertIsInstance,
  assertIsNone/assertIsNotNone) instead of the deprecated assertEquals and
  assert_ aliases, which were removed in Python 3.12.
  """

  def setUp(self):
    self.rcpt_feed = gdata.apps.EmailListRecipientFeedFromString(
        test_data.EMAIL_LIST_RECIPIENT_FEED)

  def testEmailListRecipientEntryCount(self):
    """Count EmailListRecipient entries in EmailListRecipientFeed"""
    self.assertEqual(len(self.rcpt_feed.entry), 2)

  def testLinkFinderFindsHtmlLink(self):
    """Tests the return value of GetXXXLink() methods"""
    self.assertIsNotNone(self.rcpt_feed.GetSelfLink())
    self.assertIsNotNone(self.rcpt_feed.GetNextLink())
    self.assertIsNone(self.rcpt_feed.GetEditLink())
    self.assertIsNone(self.rcpt_feed.GetHtmlLink())

  def testStartItem(self):
    """Tests the existence of <openSearch:startIndex> in
    EmailListRecipientFeed and verifies the value"""
    self.assertIsInstance(
        self.rcpt_feed.start_index, gdata.StartIndex,
        "EmailListRecipient feed <openSearch:startIndex> element must be " +
        "an instance of gdata.OpenSearch: %s" % self.rcpt_feed.start_index)
    self.assertEqual(self.rcpt_feed.start_index.text, "1")

  def testEmailListRecipientEntries(self):
    """Tests the existence of <atom:entry> in EmailListRecipientFeed
    and simply verifies the value"""
    for a_entry in self.rcpt_feed.entry:
      self.assertIsInstance(
          a_entry, gdata.apps.EmailListRecipientEntry,
          "EmailListRecipient Feed <atom:entry> must be an instance of " +
          "apps.EmailListRecipientEntry: %s" % a_entry)
    self.assertEqual(self.rcpt_feed.entry[0].who.email, "joe@example.com")
    self.assertEqual(self.rcpt_feed.entry[1].who.email, "susan@example.com")
class AppsEmailListFeedTest(unittest.TestCase):
  """Parses an EmailListFeed from canned XML and checks its fields.

  Uses modern unittest assertions in place of the deprecated assertEquals
  and assert_ aliases (removed in Python 3.12).
  """

  def setUp(self):
    self.list_feed = gdata.apps.EmailListFeedFromString(
        test_data.EMAIL_LIST_FEED)

  def testEmailListEntryCount(self):
    """Count EmailList entries in EmailListFeed"""
    self.assertEqual(len(self.list_feed.entry), 2)

  def testLinkFinderFindsHtmlLink(self):
    """Tests the return value of GetXXXLink() methods"""
    self.assertIsNotNone(self.list_feed.GetSelfLink())
    self.assertIsNotNone(self.list_feed.GetNextLink())
    self.assertIsNone(self.list_feed.GetEditLink())
    self.assertIsNone(self.list_feed.GetHtmlLink())

  def testStartItem(self):
    """Tests the existence of <openSearch:startIndex> in EmailListFeed
    and verifies the value"""
    self.assertIsInstance(
        self.list_feed.start_index, gdata.StartIndex,
        "EmailList feed <openSearch:startIndex> element must be an instance " +
        "of gdata.OpenSearch: %s" % self.list_feed.start_index)
    self.assertEqual(self.list_feed.start_index.text, "1")

  def testUserEntries(self):
    """Tests the existence of <atom:entry> in EmailListFeed and simply
    verifies the value"""
    for a_entry in self.list_feed.entry:
      self.assertIsInstance(
          a_entry, gdata.apps.EmailListEntry,
          "EmailList Feed <atom:entry> must be an instance of " +
          "apps.EmailListEntry: %s" % a_entry)
    self.assertEqual(self.list_feed.entry[0].email_list.name, "us-sales")
    self.assertEqual(self.list_feed.entry[1].email_list.name, "us-eng")
class AppsUserFeedTest(unittest.TestCase):
  """Parses a UserFeed from canned XML and checks its fields.

  Uses modern unittest assertions in place of the deprecated assertEquals
  and assert_ aliases (removed in Python 3.12).
  """

  def setUp(self):
    self.user_feed = gdata.apps.UserFeedFromString(test_data.USER_FEED)

  def testUserEntryCount(self):
    """Count User entries in UserFeed"""
    self.assertEqual(len(self.user_feed.entry), 2)

  def testLinkFinderFindsHtmlLink(self):
    """Tests the return value of GetXXXLink() methods"""
    self.assertIsNotNone(self.user_feed.GetSelfLink())
    self.assertIsNotNone(self.user_feed.GetNextLink())
    self.assertIsNone(self.user_feed.GetEditLink())
    self.assertIsNone(self.user_feed.GetHtmlLink())

  def testStartItem(self):
    """Tests the existence of <openSearch:startIndex> in UserFeed and
    verifies the value"""
    self.assertIsInstance(
        self.user_feed.start_index, gdata.StartIndex,
        "User feed <openSearch:startIndex> element must be an instance " +
        "of gdata.OpenSearch: %s" % self.user_feed.start_index)
    self.assertEqual(self.user_feed.start_index.text, "1")

  def testUserEntries(self):
    """Tests the existence of <atom:entry> in UserFeed and simply
    verifies the value"""
    for a_entry in self.user_feed.entry:
      self.assertIsInstance(
          a_entry, gdata.apps.UserEntry,
          "User Feed <atom:entry> must be an instance of " +
          "apps.UserEntry: %s" % a_entry)
    self.assertEqual(self.user_feed.entry[0].login.user_name, "TestUser")
    self.assertEqual(self.user_feed.entry[0].who.email,
                     "TestUser@example.com")
    self.assertEqual(self.user_feed.entry[1].login.user_name, "JohnSmith")
    self.assertEqual(self.user_feed.entry[1].who.email,
                     "JohnSmith@example.com")
class AppsNicknameFeedTest(unittest.TestCase):
  """Parses a NicknameFeed from canned XML and checks its fields.

  Uses modern unittest assertions in place of the deprecated assertEquals
  and assert_ aliases (removed in Python 3.12).
  """

  def setUp(self):
    self.nick_feed = gdata.apps.NicknameFeedFromString(test_data.NICK_FEED)

  def testNicknameEntryCount(self):
    """Count Nickname entries in NicknameFeed"""
    self.assertEqual(len(self.nick_feed.entry), 2)

  def testId(self):
    """Tests the existence of <atom:id> in NicknameFeed and verifies
    the value"""
    self.assertIsInstance(
        self.nick_feed.id, atom.Id,
        "Nickname feed <atom:id> element must be an instance of " +
        "atom.Id: %s" % self.nick_feed.id)
    self.assertEqual(
        self.nick_feed.id.text,
        "http://apps-apis.google.com/a/feeds/example.com/nickname/2.0")

  def testStartItem(self):
    """Tests the existence of <openSearch:startIndex> in NicknameFeed
    and verifies the value"""
    self.assertIsInstance(
        self.nick_feed.start_index, gdata.StartIndex,
        "Nickname feed <openSearch:startIndex> element must be an instance " +
        "of gdata.OpenSearch: %s" % self.nick_feed.start_index)
    self.assertEqual(self.nick_feed.start_index.text, "1")

  def testItemsPerPage(self):
    """Tests the existence of <openSearch:itemsPerPage> in
    NicknameFeed and verifies the value"""
    self.assertIsInstance(
        self.nick_feed.items_per_page, gdata.ItemsPerPage,
        "Nickname feed <openSearch:itemsPerPage> element must be an " +
        "instance of gdata.ItemsPerPage: %s" % self.nick_feed.items_per_page)
    self.assertEqual(self.nick_feed.items_per_page.text, "2")

  def testLinkFinderFindsHtmlLink(self):
    """Tests the return value of GetXXXLink() methods"""
    self.assertIsNotNone(self.nick_feed.GetSelfLink())
    self.assertIsNone(self.nick_feed.GetEditLink())
    self.assertIsNone(self.nick_feed.GetHtmlLink())

  def testNicknameEntries(self):
    """Tests the existence of <atom:entry> in NicknameFeed and simply
    verifies the value"""
    for a_entry in self.nick_feed.entry:
      self.assertIsInstance(
          a_entry, gdata.apps.NicknameEntry,
          "Nickname Feed <atom:entry> must be an instance of " +
          "apps.NicknameEntry: %s" % a_entry)
    self.assertEqual(self.nick_feed.entry[0].nickname.name, "Foo")
    self.assertEqual(self.nick_feed.entry[1].nickname.name, "Bar")
class AppsEmailListRecipientEntryTest(unittest.TestCase):
  """Parses an EmailListRecipientEntry from canned XML and checks its fields.

  Uses modern unittest assertions in place of the deprecated assertEquals
  and assert_ aliases (removed in Python 3.12).
  """

  def setUp(self):
    self.rcpt_entry = gdata.apps.EmailListRecipientEntryFromString(
        test_data.EMAIL_LIST_RECIPIENT_ENTRY)

  def testId(self):
    """Tests the existence of <atom:id> in EmailListRecipientEntry and
    verifies the value"""
    self.assertIsInstance(
        self.rcpt_entry.id, atom.Id,
        "EmailListRecipient entry <atom:id> element must be an instance of " +
        "atom.Id: %s" %
        self.rcpt_entry.id)
    self.assertEqual(
        self.rcpt_entry.id.text,
        'https://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales/' +
        'recipient/TestUser%40example.com')

  def testUpdated(self):
    """Tests the existence of <atom:updated> in
    EmailListRecipientEntry and verifies the value"""
    self.assertIsInstance(
        self.rcpt_entry.updated, atom.Updated,
        "EmailListRecipient entry <atom:updated> element must be an instance " +
        "of atom.Updated: %s" % self.rcpt_entry.updated)
    self.assertEqual(self.rcpt_entry.updated.text,
                     '1970-01-01T00:00:00.000Z')

  def testCategory(self):
    """Tests the existence of <atom:category> in
    EmailListRecipientEntry and verifies the value"""
    for a_category in self.rcpt_entry.category:
      self.assertIsInstance(
          a_category, atom.Category,
          "EmailListRecipient entry <atom:category> element must be an " +
          "instance of atom.Category: %s" % a_category)
      self.assertEqual(a_category.scheme,
                       "http://schemas.google.com/g/2005#kind")
      self.assertEqual(a_category.term,
                       "http://schemas.google.com/apps/2006#" +
                       "emailList.recipient")

  def testTitle(self):
    """Tests the existence of <atom:title> in EmailListRecipientEntry
    and verifies the value"""
    self.assertIsInstance(
        self.rcpt_entry.title, atom.Title,
        "EmailListRecipient entry <atom:title> element must be an instance of " +
        "atom.Title: %s" % self.rcpt_entry.title)
    self.assertEqual(self.rcpt_entry.title.text, 'TestUser')

  def testLinkFinderFindsHtmlLink(self):
    """Tests the return value of GetXXXLink() methods"""
    self.assertIsNotNone(self.rcpt_entry.GetSelfLink())
    self.assertIsNotNone(self.rcpt_entry.GetEditLink())
    self.assertIsNone(self.rcpt_entry.GetHtmlLink())

  def testWho(self):
    """Tests the existence of a <gdata:who> in EmailListRecipientEntry
    and verifies the value"""
    self.assertIsInstance(
        self.rcpt_entry.who, gdata.apps.Who,
        "EmailListRecipient entry <gdata:who> must be an instance of " +
        "apps.Who: %s" % self.rcpt_entry.who)
    self.assertEqual(self.rcpt_entry.who.email, 'TestUser@example.com')
class AppsEmailListEntryTest(unittest.TestCase):
  """Parses an EmailListEntry from canned XML and checks its fields.

  Uses modern unittest assertions in place of the deprecated assertEquals
  and assert_ aliases (removed in Python 3.12).
  """

  def setUp(self):
    self.list_entry = gdata.apps.EmailListEntryFromString(
        test_data.EMAIL_LIST_ENTRY)

  def testId(self):
    """Tests the existence of <atom:id> in EmailListEntry and verifies
    the value"""
    self.assertIsInstance(
        self.list_entry.id, atom.Id,
        "EmailList entry <atom:id> element must be an instance of atom.Id: %s" %
        self.list_entry.id)
    self.assertEqual(
        self.list_entry.id.text,
        'https://apps-apis.google.com/a/feeds/example.com/emailList/2.0/testlist')

  def testUpdated(self):
    """Tests the existence of <atom:updated> in EmailListEntry and
    verifies the value"""
    self.assertIsInstance(
        self.list_entry.updated, atom.Updated,
        "EmailList entry <atom:updated> element must be an instance of " +
        "atom.Updated: %s" % self.list_entry.updated)
    self.assertEqual(self.list_entry.updated.text,
                     '1970-01-01T00:00:00.000Z')

  def testCategory(self):
    """Tests the existence of <atom:category> in EmailListEntry and
    verifies the value"""
    for a_category in self.list_entry.category:
      self.assertIsInstance(
          a_category, atom.Category,
          "EmailList entry <atom:category> element must be an instance " +
          "of atom.Category: %s" % a_category)
      self.assertEqual(a_category.scheme,
                       "http://schemas.google.com/g/2005#kind")
      self.assertEqual(a_category.term,
                       "http://schemas.google.com/apps/2006#emailList")

  def testTitle(self):
    """Tests the existence of <atom:title> in EmailListEntry and verifies
    the value"""
    self.assertIsInstance(
        self.list_entry.title, atom.Title,
        "EmailList entry <atom:title> element must be an instance of " +
        "atom.Title: %s" % self.list_entry.title)
    self.assertEqual(self.list_entry.title.text, 'testlist')

  def testLinkFinderFindsHtmlLink(self):
    """Tests the return value of GetXXXLink() methods"""
    self.assertIsNotNone(self.list_entry.GetSelfLink())
    self.assertIsNotNone(self.list_entry.GetEditLink())
    self.assertIsNone(self.list_entry.GetHtmlLink())

  def testEmailList(self):
    """Tests the existence of a <apps:emailList> in EmailListEntry and
    verifies the value"""
    self.assertIsInstance(
        self.list_entry.email_list, gdata.apps.EmailList,
        "EmailList entry <apps:emailList> must be an instance of " +
        "apps.EmailList: %s" % self.list_entry.email_list)
    self.assertEqual(self.list_entry.email_list.name, 'testlist')

  def testFeedLink(self):
    """Test the existence of a <gdata:feedLink> in EmailListEntry and
    verifies the value"""
    for an_feed_link in self.list_entry.feed_link:
      self.assertIsInstance(
          an_feed_link, gdata.FeedLink,
          "EmailList entry <gdata:feedLink> must be an instance of " +
          "gdata.FeedLink: %s" % an_feed_link)
    self.assertEqual(self.list_entry.feed_link[0].rel,
                     'http://schemas.google.com/apps/2006#' +
                     'emailList.recipients')
    self.assertEqual(self.list_entry.feed_link[0].href,
                     'http://apps-apis.google.com/a/feeds/example.com/emailList/' +
                     '2.0/testlist/recipient/')
class AppsNicknameEntryTest(unittest.TestCase):
  """Parses a NicknameEntry from canned XML and checks its fields.

  Uses modern unittest assertions in place of the deprecated assertEquals
  and assert_ aliases (removed in Python 3.12).
  """

  def setUp(self):
    self.nick_entry = gdata.apps.NicknameEntryFromString(test_data.NICK_ENTRY)

  def testId(self):
    """Tests the existence of <atom:id> in NicknameEntry and verifies
    the value"""
    self.assertIsInstance(
        self.nick_entry.id, atom.Id,
        "Nickname entry <atom:id> element must be an instance of atom.Id: %s" %
        self.nick_entry.id)
    self.assertEqual(
        self.nick_entry.id.text,
        'https://apps-apis.google.com/a/feeds/example.com/nickname/2.0/Foo')

  def testCategory(self):
    """Tests the existence of <atom:category> in NicknameEntry and
    verifies the value"""
    for a_category in self.nick_entry.category:
      self.assertIsInstance(
          a_category, atom.Category,
          "Nickname entry <atom:category> element must be an instance " +
          "of atom.Category: %s" % a_category)
      self.assertEqual(a_category.scheme,
                       "http://schemas.google.com/g/2005#kind")
      self.assertEqual(a_category.term,
                       "http://schemas.google.com/apps/2006#nickname")

  def testTitle(self):
    """Tests the existence of <atom:title> in NicknameEntry and
    verifies the value"""
    self.assertIsInstance(
        self.nick_entry.title, atom.Title,
        "Nickname entry <atom:title> element must be an instance " +
        "of atom.Title: %s" % self.nick_entry.title)
    self.assertEqual(self.nick_entry.title.text, "Foo")

  def testLogin(self):
    """Tests the existence of <apps:login> in NicknameEntry and
    verifies the value"""
    self.assertIsInstance(
        self.nick_entry.login, gdata.apps.Login,
        "Nickname entry <apps:login> element must be an instance " +
        "of apps.Login: %s" % self.nick_entry.login)
    self.assertEqual(self.nick_entry.login.user_name, "TestUser")

  def testNickname(self):
    """Tests the existence of <apps:nickname> in NicknameEntry and
    verifies the value"""
    self.assertIsInstance(
        self.nick_entry.nickname, gdata.apps.Nickname,
        "Nickname entry <apps:nickname> element must be an instance " +
        "of apps.Nickname: %s" % self.nick_entry.nickname)
    self.assertEqual(self.nick_entry.nickname.name, "Foo")

  def testLinkFinderFindsHtmlLink(self):
    """Tests the return value of GetXXXLink() methods"""
    self.assertIsNotNone(self.nick_entry.GetSelfLink())
    self.assertIsNotNone(self.nick_entry.GetEditLink())
    self.assertIsNone(self.nick_entry.GetHtmlLink())
class AppsUserEntryTest(unittest.TestCase):
  """Parses a UserEntry from canned XML, checks its fields and round-trips
  modifications through serialization.

  Uses modern unittest assertions in place of the deprecated assertEquals
  and assert_ aliases (removed in Python 3.12).
  """

  def setUp(self):
    self.user_entry = gdata.apps.UserEntryFromString(test_data.USER_ENTRY)

  def testId(self):
    """Tests the existence of <atom:id> in UserEntry and verifies the
    value"""
    self.assertIsInstance(
        self.user_entry.id, atom.Id,
        "User entry <atom:id> element must be an instance of atom.Id: %s" %
        self.user_entry.id)
    self.assertEqual(
        self.user_entry.id.text,
        'https://apps-apis.google.com/a/feeds/example.com/user/2.0/TestUser')

  def testUpdated(self):
    """Tests the existence of <atom:updated> in UserEntry and verifies
    the value"""
    self.assertIsInstance(
        self.user_entry.updated, atom.Updated,
        "User entry <atom:updated> element must be an instance of " +
        "atom.Updated: %s" % self.user_entry.updated)
    self.assertEqual(self.user_entry.updated.text,
                     '1970-01-01T00:00:00.000Z')

  def testCategory(self):
    """Tests the existence of <atom:category> in UserEntry and
    verifies the value"""
    for a_category in self.user_entry.category:
      self.assertIsInstance(
          a_category, atom.Category,
          "User entry <atom:category> element must be an instance " +
          "of atom.Category: %s" % a_category)
      self.assertEqual(a_category.scheme,
                       "http://schemas.google.com/g/2005#kind")
      self.assertEqual(a_category.term,
                       "http://schemas.google.com/apps/2006#user")

  def testTitle(self):
    """Tests the existence of <atom:title> in UserEntry and verifies
    the value"""
    self.assertIsInstance(
        self.user_entry.title, atom.Title,
        "User entry <atom:title> element must be an instance of atom.Title: %s" %
        self.user_entry.title)
    self.assertEqual(self.user_entry.title.text, 'TestUser')

  def testLinkFinderFindsHtmlLink(self):
    """Tests the return value of GetXXXLink() methods"""
    self.assertIsNotNone(self.user_entry.GetSelfLink())
    self.assertIsNotNone(self.user_entry.GetEditLink())
    self.assertIsNone(self.user_entry.GetHtmlLink())

  def testLogin(self):
    """Tests the existence of <apps:login> in UserEntry and verifies
    the value"""
    self.assertIsInstance(
        self.user_entry.login, gdata.apps.Login,
        "User entry <apps:login> element must be an instance of apps.Login: %s"
        % self.user_entry.login)
    self.assertEqual(self.user_entry.login.user_name, 'TestUser')
    self.assertEqual(self.user_entry.login.password, 'password')
    self.assertEqual(self.user_entry.login.suspended, 'false')
    self.assertEqual(self.user_entry.login.ip_whitelisted, 'false')
    self.assertEqual(self.user_entry.login.hash_function_name, 'SHA-1')

  def testName(self):
    """Tests the existence of <apps:name> in UserEntry and verifies
    the value"""
    self.assertIsInstance(
        self.user_entry.name, gdata.apps.Name,
        "User entry <apps:name> element must be an instance of apps.Name: %s"
        % self.user_entry.name)
    self.assertEqual(self.user_entry.name.family_name, 'Test')
    self.assertEqual(self.user_entry.name.given_name, 'User')

  def testQuota(self):
    """Tests the existence of <apps:quota> in UserEntry and verifies
    the value"""
    self.assertIsInstance(
        self.user_entry.quota, gdata.apps.Quota,
        "User entry <apps:quota> element must be an instance of apps.Quota: %s"
        % self.user_entry.quota)
    self.assertEqual(self.user_entry.quota.limit, '1024')

  def testFeedLink(self):
    """Test the existence of a <gdata:feedLink> in UserEntry and
    verifies the value"""
    for an_feed_link in self.user_entry.feed_link:
      self.assertIsInstance(
          an_feed_link, gdata.FeedLink,
          "User entry <gdata:feedLink> must be an instance of gdata.FeedLink" +
          ": %s" % an_feed_link)
    self.assertEqual(self.user_entry.feed_link[0].rel,
                     'http://schemas.google.com/apps/2006#user.nicknames')
    self.assertEqual(self.user_entry.feed_link[0].href,
                     'https://apps-apis.google.com/a/feeds/example.com/nickname/' +
                     '2.0?username=Test-3121')
    self.assertEqual(self.user_entry.feed_link[1].rel,
                     'http://schemas.google.com/apps/2006#user.emailLists')
    self.assertEqual(self.user_entry.feed_link[1].href,
                     'https://apps-apis.google.com/a/feeds/example.com/emailList/' +
                     '2.0?recipient=testlist@example.com')

  def testUpdate(self):
    """Tests for modifing attributes of UserEntry"""
    # Mutate the entry, serialize it, re-parse, and confirm the changes
    # survived the round trip.
    self.user_entry.name.family_name = 'ModifiedFamilyName'
    self.user_entry.name.given_name = 'ModifiedGivenName'
    self.user_entry.quota.limit = '2048'
    self.user_entry.login.password = 'ModifiedPassword'
    self.user_entry.login.suspended = 'true'
    modified = gdata.apps.UserEntryFromString(self.user_entry.ToString())
    self.assertEqual(modified.name.family_name, 'ModifiedFamilyName')
    self.assertEqual(modified.name.given_name, 'ModifiedGivenName')
    self.assertEqual(modified.quota.limit, '2048')
    self.assertEqual(modified.login.password, 'ModifiedPassword')
    self.assertEqual(modified.login.suspended, 'true')
if __name__ == '__main__':
  # Run every TestCase defined in this module.
  unittest.main()
| agpl-3.0 |
MDAnalysis/mdanalysis | package/MDAnalysis/analysis/__init__.py | 1 | 1421 | # -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
# Public analysis submodules exported by this package. Fix: 'distances'
# was listed twice, which made wildcard imports attempt a duplicate and
# misrepresented the module list; the second occurrence is removed.
__all__ = [
    'align',
    'base',
    'contacts',
    'density',
    'distances',
    'diffusionmap',
    'dihedrals',
    'gnm',
    'hbonds',
    'helix_analysis',
    'hole2',
    'hydrogenbonds',
    'leaflet',
    'lineardensity',
    'msd',
    'nuclinfo',
    'polymer',
    'pca',
    'psa',
    'rdf',
    'rms',
    'waterdynamics',
]
| gpl-2.0 |
sjperkins/tensorflow | tensorflow/python/framework/errors_impl.py | 59 | 14885 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exception types for TensorFlow errors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import traceback
import warnings
from tensorflow.core.lib.core import error_codes_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.util import compat
class OpError(Exception):
  """A generic error that is raised when TensorFlow execution fails.

  Whenever possible, the session will raise a more specific subclass
  of `OpError` from the `tf.errors` module.
  """

  def __init__(self, node_def, op, message, error_code):
    """Creates a new `OpError` indicating that a particular op failed.

    Args:
      node_def: The `node_def_pb2.NodeDef` proto representing the op that
        failed, if known; otherwise None.
      op: The `ops.Operation` that failed, if known; otherwise None.
      message: The message string describing the failure.
      error_code: The `error_codes_pb2.Code` describing the error.
    """
    super(OpError, self).__init__()
    self._message = message
    self._node_def = node_def
    self._op = op
    self._error_code = error_code

  @property
  def message(self):
    """The error message that describes the error."""
    return self._message

  @property
  def op(self):
    """The operation that failed, if known.

    *N.B.* If the failed op was synthesized at runtime, e.g. a `Send`
    or `Recv` op, there will be no corresponding
    @{tf.Operation}
    object. In that case, this will return `None`, and you should
    instead use the @{tf.OpError.node_def} to
    discover information about the op.

    Returns:
      The `Operation` that failed, or None.
    """
    return self._op

  @property
  def error_code(self):
    """The integer error code that describes the error."""
    return self._error_code

  @property
  def node_def(self):
    """The `NodeDef` proto representing the op that failed."""
    return self._node_def

  def __str__(self):
    # Render the message plus the (possibly chained) creation tracebacks of
    # the failing op, eliding lines repeated between successive tracebacks.
    if self._op is not None:
      output = ["%s\n\nCaused by op %r, defined at:\n" % (self.message,
                                                          self._op.name,)]
      curr_traceback_list = traceback.format_list(self._op.traceback)
      output.extend(curr_traceback_list)
      # pylint: disable=protected-access
      original_op = self._op._original_op
      # pylint: enable=protected-access
      while original_op is not None:
        output.append(
            "\n...which was originally created as op %r, defined at:\n"
            % (original_op.name,))
        prev_traceback_list = curr_traceback_list
        curr_traceback_list = traceback.format_list(original_op.traceback)
        # Attempt to elide large common subsequences of the subsequent
        # stack traces.
        #
        # TODO(mrry): Consider computing the actual longest common subsequence.
        is_eliding = False
        elide_count = 0
        last_elided_line = None
        for line, line_in_prev in zip(curr_traceback_list, prev_traceback_list):
          if line == line_in_prev:
            # First common line is emitted; subsequent ones are counted.
            if is_eliding:
              elide_count += 1
              last_elided_line = line
            else:
              output.append(line)
              is_eliding = True
              elide_count = 0
          else:
            if is_eliding:
              if elide_count > 0:
                output.extend(
                    ["[elided %d identical lines from previous traceback]\n"
                     % (elide_count - 1,), last_elided_line])
              is_eliding = False
            # NOTE(review): extend() with a str appends one element per
            # character; harmless because the pieces are later ''.join-ed,
            # but append(line) would be clearer.
            output.extend(line)
        # pylint: disable=protected-access
        original_op = original_op._original_op
        # pylint: enable=protected-access
      output.append("\n%s (see above for traceback): %s\n" %
                    (type(self).__name__, self.message))
      return "".join(output)
    else:
      # No op attached (e.g. error raised outside graph execution).
      return self.message
# Module-level aliases for the canonical error codes defined in
# tensorflow/core/lib/core/error_codes.proto, so callers can reference
# e.g. `errors.NOT_FOUND` directly.
OK = error_codes_pb2.OK
CANCELLED = error_codes_pb2.CANCELLED
UNKNOWN = error_codes_pb2.UNKNOWN
INVALID_ARGUMENT = error_codes_pb2.INVALID_ARGUMENT
DEADLINE_EXCEEDED = error_codes_pb2.DEADLINE_EXCEEDED
NOT_FOUND = error_codes_pb2.NOT_FOUND
ALREADY_EXISTS = error_codes_pb2.ALREADY_EXISTS
PERMISSION_DENIED = error_codes_pb2.PERMISSION_DENIED
UNAUTHENTICATED = error_codes_pb2.UNAUTHENTICATED
RESOURCE_EXHAUSTED = error_codes_pb2.RESOURCE_EXHAUSTED
FAILED_PRECONDITION = error_codes_pb2.FAILED_PRECONDITION
ABORTED = error_codes_pb2.ABORTED
OUT_OF_RANGE = error_codes_pb2.OUT_OF_RANGE
UNIMPLEMENTED = error_codes_pb2.UNIMPLEMENTED
INTERNAL = error_codes_pb2.INTERNAL
UNAVAILABLE = error_codes_pb2.UNAVAILABLE
DATA_LOSS = error_codes_pb2.DATA_LOSS
# pylint: disable=line-too-long
class CancelledError(OpError):
  """Raised when an operation or step is cancelled.

  For example, a long-running operation (e.g.
  @{tf.QueueBase.enqueue} may be
  cancelled by running another operation (e.g.
  @{tf.QueueBase.close},
  or by @{tf.Session.close}.
  A step that is running such a long-running operation will fail by raising
  `CancelledError`.

  @@__init__
  """

  def __init__(self, node_def, op, message):
    """Creates a `CancelledError`."""
    # Carries the CANCELLED code from error_codes.proto.
    super(CancelledError, self).__init__(node_def, op, message, CANCELLED)
# pylint: enable=line-too-long
class UnknownError(OpError):
  """Unknown error.

  An example of where this error may be returned is if a Status value
  received from another address space belongs to an error-space that
  is not known to this address space. Also errors raised by APIs that
  do not return enough error information may be converted to this
  error.

  @@__init__
  """

  def __init__(self, node_def, op, message, error_code=UNKNOWN):
    """Creates an `UnknownError`."""
    # Unlike its siblings, this constructor accepts an explicit error code
    # so unrecognized codes can still be surfaced as UnknownError.
    super(UnknownError, self).__init__(node_def, op, message, error_code)
class InvalidArgumentError(OpError):
  """Raised when an operation receives an invalid argument.

  This may occur, for example, if an operation receives an input
  tensor that has an invalid value or shape. For example, the
  @{tf.matmul} op will raise this
  error if it receives an input that is not a matrix, and the
  @{tf.reshape} op will raise
  this error if the new shape does not match the number of elements in the input
  tensor.

  @@__init__
  """

  def __init__(self, node_def, op, message):
    """Creates an `InvalidArgumentError`."""
    super(InvalidArgumentError, self).__init__(node_def, op, message,
                                               INVALID_ARGUMENT)
class DeadlineExceededError(OpError):
  """Raised when a deadline expires before an operation could complete.

  This exception is not currently used.

  @@__init__
  """

  def __init__(self, node_def, op, message):
    """Creates a `DeadlineExceededError`."""
    super(DeadlineExceededError, self).__init__(node_def, op, message,
                                                DEADLINE_EXCEEDED)
class NotFoundError(OpError):
  """Raised when a requested entity (e.g., a file or directory) was not found.

  For example, running the
  @{tf.WholeFileReader.read}
  operation could raise `NotFoundError` if it receives the name of a file that
  does not exist.

  @@__init__
  """

  def __init__(self, node_def, op, message):
    """Creates a `NotFoundError`."""
    super(NotFoundError, self).__init__(node_def, op, message, NOT_FOUND)
class AlreadyExistsError(OpError):
    """Raised when an entity that we attempted to create already exists.

    For example, running an operation that saves a file
    (e.g. @{tf.train.Saver.save})
    could potentially raise this exception if an explicit filename for an
    existing file was passed.

    @@__init__
    """

    def __init__(self, node_def, op, message):
        """Creates an `AlreadyExistsError` with code `ALREADY_EXISTS`."""
        super(AlreadyExistsError, self).__init__(node_def, op, message,
                                                 ALREADY_EXISTS)
class PermissionDeniedError(OpError):
    """Raised when the caller does not have permission to run an operation.

    For example, running the
    @{tf.WholeFileReader.read}
    operation could raise `PermissionDeniedError` if it receives the name of a
    file for which the user does not have the read file permission.

    @@__init__
    """

    def __init__(self, node_def, op, message):
        """Creates a `PermissionDeniedError` with code `PERMISSION_DENIED`."""
        super(PermissionDeniedError, self).__init__(node_def, op, message,
                                                    PERMISSION_DENIED)
class UnauthenticatedError(OpError):
    """The request does not have valid authentication credentials.

    This exception is not currently used.

    @@__init__
    """

    def __init__(self, node_def, op, message):
        """Creates an `UnauthenticatedError` with code `UNAUTHENTICATED`."""
        super(UnauthenticatedError, self).__init__(node_def, op, message,
                                                   UNAUTHENTICATED)
class ResourceExhaustedError(OpError):
    """Some resource has been exhausted.

    For example, this error might be raised if a per-user quota is
    exhausted, or perhaps the entire file system is out of space.

    @@__init__
    """

    def __init__(self, node_def, op, message):
        """Creates a `ResourceExhaustedError` with code `RESOURCE_EXHAUSTED`."""
        super(ResourceExhaustedError, self).__init__(node_def, op, message,
                                                     RESOURCE_EXHAUSTED)
class FailedPreconditionError(OpError):
    """Operation was rejected because the system is not in a state to execute it.

    This exception is most commonly raised when running an operation
    that reads a @{tf.Variable}
    before it has been initialized.

    @@__init__
    """

    def __init__(self, node_def, op, message):
        """Creates a `FailedPreconditionError` with code `FAILED_PRECONDITION`."""
        super(FailedPreconditionError, self).__init__(node_def, op, message,
                                                      FAILED_PRECONDITION)
class AbortedError(OpError):
    """The operation was aborted, typically due to a concurrent action.

    For example, running a
    @{tf.QueueBase.enqueue}
    operation may raise `AbortedError` if a
    @{tf.QueueBase.close} operation
    previously ran.

    @@__init__
    """

    def __init__(self, node_def, op, message):
        """Creates an `AbortedError` with code `ABORTED`."""
        super(AbortedError, self).__init__(node_def, op, message, ABORTED)
class OutOfRangeError(OpError):
    """Raised when an operation iterates past the valid input range.

    This exception is raised in "end-of-file" conditions, such as when a
    @{tf.QueueBase.dequeue}
    operation is blocked on an empty queue, and a
    @{tf.QueueBase.close}
    operation executes.

    @@__init__
    """

    def __init__(self, node_def, op, message):
        """Creates an `OutOfRangeError` with code `OUT_OF_RANGE`."""
        super(OutOfRangeError, self).__init__(node_def, op, message,
                                              OUT_OF_RANGE)
class UnimplementedError(OpError):
    """Raised when an operation has not been implemented.

    Some operations may raise this error when passed otherwise-valid
    arguments that it does not currently support. For example, running
    the @{tf.nn.max_pool} operation
    would raise this error if pooling was requested on the batch dimension,
    because this is not yet supported.

    @@__init__
    """

    def __init__(self, node_def, op, message):
        """Creates an `UnimplementedError` with code `UNIMPLEMENTED`."""
        super(UnimplementedError, self).__init__(node_def, op, message,
                                                 UNIMPLEMENTED)
class InternalError(OpError):
    """Raised when the system experiences an internal error.

    This exception is raised when some invariant expected by the runtime
    has been broken. Catching this exception is not recommended.

    @@__init__
    """

    def __init__(self, node_def, op, message):
        """Creates an `InternalError` with code `INTERNAL`."""
        super(InternalError, self).__init__(node_def, op, message, INTERNAL)
class UnavailableError(OpError):
    """Raised when the runtime is currently unavailable.

    This exception is not currently used.

    @@__init__
    """

    def __init__(self, node_def, op, message):
        """Creates an `UnavailableError` with code `UNAVAILABLE`."""
        super(UnavailableError, self).__init__(node_def, op, message,
                                               UNAVAILABLE)
class DataLossError(OpError):
    """Raised when unrecoverable data loss or corruption is encountered.

    For example, this may be raised by running a
    @{tf.WholeFileReader.read}
    operation, if the file is truncated while it is being read.

    @@__init__
    """

    def __init__(self, node_def, op, message):
        """Creates a `DataLossError` with code `DATA_LOSS`."""
        super(DataLossError, self).__init__(node_def, op, message, DATA_LOSS)
# Maps each canonical TF status code to its dedicated OpError subclass.
# Used by exception_type_from_error_code()/_make_specific_exception().
_CODE_TO_EXCEPTION_CLASS = {
    CANCELLED: CancelledError,
    UNKNOWN: UnknownError,
    INVALID_ARGUMENT: InvalidArgumentError,
    DEADLINE_EXCEEDED: DeadlineExceededError,
    NOT_FOUND: NotFoundError,
    ALREADY_EXISTS: AlreadyExistsError,
    PERMISSION_DENIED: PermissionDeniedError,
    UNAUTHENTICATED: UnauthenticatedError,
    RESOURCE_EXHAUSTED: ResourceExhaustedError,
    FAILED_PRECONDITION: FailedPreconditionError,
    ABORTED: AbortedError,
    OUT_OF_RANGE: OutOfRangeError,
    UNIMPLEMENTED: UnimplementedError,
    INTERNAL: InternalError,
    UNAVAILABLE: UnavailableError,
    DATA_LOSS: DataLossError,
}
# Inverse of _CODE_TO_EXCEPTION_CLASS, used by error_code_from_exception_type().
# A dict comprehension is the idiomatic (and equivalent) form of the original
# generator expression fed into dict().
_EXCEPTION_CLASS_TO_CODE = {
    class_: code for (code, class_) in _CODE_TO_EXCEPTION_CLASS.items()}
def exception_type_from_error_code(error_code):
    """Returns the `OpError` subclass for `error_code`; raises KeyError if unknown."""
    return _CODE_TO_EXCEPTION_CLASS[error_code]
def error_code_from_exception_type(cls):
    """Returns the canonical status code for an `OpError` subclass `cls`."""
    return _EXCEPTION_CLASS_TO_CODE[cls]
def _make_specific_exception(node_def, op, message, error_code):
    """Builds the `OpError` subclass instance matching `error_code`.

    Falls back to `UnknownError` (after emitting a warning) when `error_code`
    is not one of the canonical codes in `_CODE_TO_EXCEPTION_CLASS`.
    """
    # Keep the try body narrow: only the table lookup may legitimately raise
    # KeyError. In the original, a KeyError escaping the exception
    # constructor itself would have been misreported as an unknown code.
    try:
        exc_type = exception_type_from_error_code(error_code)
    except KeyError:
        warnings.warn("Unknown error code: %d" % error_code)
        return UnknownError(node_def, op, message, error_code)
    return exc_type(node_def, op, message)
@contextlib.contextmanager
def raise_exception_on_not_ok_status():
    """Context manager that yields a fresh `TF_Status` and raises on failure.

    On normal exit, a non-OK status is converted into the matching `OpError`
    subclass via `_make_specific_exception`. The underlying C status object
    is always freed, even if the body raises.
    """
    status = pywrap_tensorflow.TF_NewStatus()
    try:
        yield status
        # Fetch the code once; the original called TF_GetCode twice, i.e. two
        # crossings into the C layer for every failing status.
        error_code = pywrap_tensorflow.TF_GetCode(status)
        if error_code != 0:
            raise _make_specific_exception(
                None, None,
                compat.as_text(pywrap_tensorflow.TF_Message(status)),
                error_code)
    finally:
        pywrap_tensorflow.TF_DeleteStatus(status)
| apache-2.0 |
MphasisWyde/eWamSublimeAdaptor | src/third-party/requests/packages/chardet/sjisprober.py | 1777 | 3764 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import SJISDistributionAnalysis
from .jpcntx import SJISContextAnalysis
from .mbcssm import SJISSMModel
from . import constants
class SJISProber(MultiByteCharSetProber):
    # Shift-JIS charset prober: combines a byte-level coding state machine
    # with a character-distribution analyser and a Japanese context analyser;
    # overall confidence is the max of the two analysers.
    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(SJISSMModel)
        self._mDistributionAnalyzer = SJISDistributionAnalysis()
        self._mContextAnalyzer = SJISContextAnalysis()
        self.reset()

    def reset(self):
        MultiByteCharSetProber.reset(self)
        self._mContextAnalyzer.reset()

    def get_charset_name(self):
        # Delegated: the context analyser decides the exact charset label.
        return self._mContextAnalyzer.get_charset_name()

    def feed(self, aBuf):
        # Feeds a chunk of bytes through the state machine; on each completed
        # character, forwards a 2-byte window to both analysers. The exact
        # slice bounds below are inherited from the C++ original — presumably
        # they select the last full character plus context; do not "simplify".
        aLen = len(aBuf)
        for i in range(0, aLen):
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == constants.eError:
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    # First byte of this chunk: stitch together with the last
                    # byte carried over from the previous chunk.
                    self._mLastChar[1] = aBuf[0]
                    self._mContextAnalyzer.feed(self._mLastChar[2 - charLen:],
                                                charLen)
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    self._mContextAnalyzer.feed(aBuf[i + 1 - charLen:i + 3
                                                     - charLen], charLen)
                    self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
                                                     charLen)
        # Remember the final byte for cross-chunk character reassembly.
        self._mLastChar[0] = aBuf[aLen - 1]
        if self.get_state() == constants.eDetecting:
            # Shortcut: declare a match early once enough data has been seen
            # and confidence clears the threshold.
            if (self._mContextAnalyzer.got_enough_data() and
                    (self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
                self._mState = constants.eFoundIt
        return self.get_state()

    def get_confidence(self):
        contxtCf = self._mContextAnalyzer.get_confidence()
        distribCf = self._mDistributionAnalyzer.get_confidence()
        return max(contxtCf, distribCf)
| mit |
redebian/documentation | django/conf/__init__.py | 146 | 6707 | """
Settings and configuration for Django.
Values will be read from the module specified by the DJANGO_SETTINGS_MODULE environment
variable, and then from django.conf.global_settings; see the global settings file for
a list of all possible variables.
"""
import os
import re
import time # Needed for Windows
import warnings
from django.conf import global_settings
from django.utils.functional import LazyObject
from django.utils import importlib
# Name of the environment variable that points Django at the settings module.
ENVIRONMENT_VARIABLE = "DJANGO_SETTINGS_MODULE"
class LazySettings(LazyObject):
    """
    A lazy proxy for either global Django settings or a custom settings object.
    The user can manually configure settings prior to using them. Otherwise,
    Django uses the settings module pointed to by DJANGO_SETTINGS_MODULE.
    """
    def _setup(self):
        """
        Load the settings module pointed to by the environment variable. This
        is used the first time we need any settings at all, if the user has not
        previously configured the settings manually.
        """
        try:
            settings_module = os.environ[ENVIRONMENT_VARIABLE]
            if not settings_module:  # If it's set but is an empty string.
                raise KeyError
        except KeyError:
            # NOTE: This is arguably an EnvironmentError, but that causes
            # problems with Python's interactive help.
            raise ImportError("Settings cannot be imported, because environment variable %s is undefined." % ENVIRONMENT_VARIABLE)

        self._wrapped = Settings(settings_module)

    def configure(self, default_settings=global_settings, **options):
        """
        Called to manually configure the settings. The 'default_settings'
        parameter sets where to retrieve any unspecified values from (its
        argument must support attribute access (__getattr__)).
        """
        # Identity check, not equality: `!= None` would invoke __ne__/__eq__
        # on whatever object is currently wrapped, which is both slower and
        # potentially wrong for objects with custom comparison behavior.
        if self._wrapped is not None:
            raise RuntimeError('Settings already configured.')
        holder = UserSettingsHolder(default_settings)
        for name, value in options.items():
            setattr(holder, name, value)
        self._wrapped = holder

    def configured(self):
        """
        Returns True if the settings have already been configured.
        """
        return bool(self._wrapped)
    configured = property(configured)
class BaseSettings(object):
    """
    Common logic shared by module-backed and user-configured settings.
    """
    def __setattr__(self, name, value):
        # URL-like settings are expected to carry a trailing slash; emit a
        # pending-deprecation warning when a non-empty value lacks one.
        is_url_setting = name in ("MEDIA_URL", "STATIC_URL")
        if is_url_setting and value and not value.endswith('/'):
            warnings.warn('If set, %s must end with a slash' % name,
                          PendingDeprecationWarning)
        object.__setattr__(self, name, value)
class Settings(BaseSettings):
    # Settings backed by an importable settings module (the value of
    # DJANGO_SETTINGS_MODULE). NOTE: this file is Python 2 syntax
    # (`except ImportError, e:`); do not run under Python 3.
    def __init__(self, settings_module):
        # update this dict from global settings (but only for ALL_CAPS settings)
        for setting in dir(global_settings):
            if setting == setting.upper():
                setattr(self, setting, getattr(global_settings, setting))

        # store the settings module in case someone later cares
        self.SETTINGS_MODULE = settings_module

        try:
            mod = importlib.import_module(self.SETTINGS_MODULE)
        except ImportError, e:
            raise ImportError("Could not import settings '%s' (Is it on sys.path?): %s" % (self.SETTINGS_MODULE, e))

        # Settings that should be converted into tuples if they're mistakenly entered
        # as strings.
        tuple_settings = ("INSTALLED_APPS", "TEMPLATE_DIRS")

        for setting in dir(mod):
            if setting == setting.upper():
                setting_value = getattr(mod, setting)
                if setting in tuple_settings and type(setting_value) == str:
                    setting_value = (setting_value,)  # In case the user forgot the comma.
                setattr(self, setting, setting_value)

        # Expand entries in INSTALLED_APPS like "django.contrib.*" to a list
        # of all those apps.
        new_installed_apps = []
        for app in self.INSTALLED_APPS:
            if app.endswith('.*'):
                app_mod = importlib.import_module(app[:-2])
                appdir = os.path.dirname(app_mod.__file__)
                app_subdirs = os.listdir(appdir)
                app_subdirs.sort()
                name_pattern = re.compile(r'[a-zA-Z]\w*')
                for d in app_subdirs:
                    # Only package-like subdirectories count as apps.
                    if name_pattern.match(d) and os.path.isdir(os.path.join(appdir, d)):
                        new_installed_apps.append('%s.%s' % (app[:-2], d))
            else:
                new_installed_apps.append(app)
        self.INSTALLED_APPS = new_installed_apps

        if hasattr(time, 'tzset') and self.TIME_ZONE:
            # When we can, attempt to validate the timezone. If we can't find
            # this file, no check happens and it's harmless.
            zoneinfo_root = '/usr/share/zoneinfo'
            if (os.path.exists(zoneinfo_root) and not
                    os.path.exists(os.path.join(zoneinfo_root, *(self.TIME_ZONE.split('/'))))):
                raise ValueError("Incorrect timezone setting: %s" % self.TIME_ZONE)
            # Move the time zone info into os.environ. See ticket #2315 for why
            # we don't do this unconditionally (breaks Windows).
            os.environ['TZ'] = self.TIME_ZONE
            time.tzset()

        # Settings are configured, so we can set up the logger if required
        if self.LOGGING_CONFIG:
            # First find the logging configuration function ...
            logging_config_path, logging_config_func_name = self.LOGGING_CONFIG.rsplit('.', 1)
            logging_config_module = importlib.import_module(logging_config_path)
            logging_config_func = getattr(logging_config_module, logging_config_func_name)

            # ... then invoke it with the logging settings
            logging_config_func(self.LOGGING)
class UserSettingsHolder(BaseSettings):
    """
    Holder for user configured settings.
    """
    # SETTINGS_MODULE doesn't make much sense in the manually configured
    # (standalone) case.
    SETTINGS_MODULE = None

    def __init__(self, default_settings):
        """
        Requests for configuration variables not in this class are satisfied
        from the module specified in default_settings (if possible).
        """
        self.default_settings = default_settings

    def __getattr__(self, name):
        # Only called for attributes NOT set on this instance: fall back to
        # the default settings object.
        return getattr(self.default_settings, name)

    def __dir__(self):
        # Python 2: dict.keys() returns a list, so `+` concatenation works.
        return self.__dict__.keys() + dir(self.default_settings)

    # For Python < 2.6:
    __members__ = property(lambda self: self.__dir__())
# Module-level singleton through which all Django settings access flows.
settings = LazySettings()
| bsd-3-clause |
bootandy/sqlalchemy | examples/dogpile_caching/advanced.py | 30 | 2950 | """advanced.py
Illustrate usage of Query combined with the FromCache option,
including front-end loading, cache invalidation and collection caching.
"""
from .environment import Session
from .model import Person, cache_address_bits
from .caching_query import FromCache, RelationshipCache
def load_name_range(start, end, invalidate=False):
    """Load Person objects whose names fall in a numeric range.

    ``start``/``end`` are integers; the range covers "person <start>" through
    "person <end>". Results are cached in the "default" region under the
    "name_range" key, and each ``Person.addresses`` collection is cached
    separately (so lazy loads of the collection hit the cache too). Pass
    ``invalidate=True`` to drop the cached entries for the current criterion
    before loading.
    """
    name_lo = "person %.2d" % start
    name_hi = "person %.2d" % end

    query = Session.query(Person)
    query = query.filter(Person.name.between(name_lo, name_hi))
    query = query.options(cache_address_bits)
    query = query.options(FromCache("default", "name_range"))

    # Cache the "addresses" collection on its own: every lazy load of
    # Person.addresses is then served from the cache.
    query = query.options(RelationshipCache(Person.addresses, "default"))

    # Alternative tuning: joinedload(Person.addresses) would emit one bigger
    # SQL statement and cache person + addresses together as a single, larger
    # value per person instead of two separate ones.
    # query = query.options(joinedload(Person.addresses))

    # On request, invalidate the cached entries for the current criterion.
    if invalidate:
        query.invalidate()

    return query.all()
# Example driver: each call may hit the dogpile cache instead of the DB.
print("two through twelve, possibly from cache:\n")
print(", ".join([p.name for p in load_name_range(2, 12)]))

print("\ntwenty five through forty, possibly from cache:\n")
print(", ".join([p.name for p in load_name_range(25, 40)]))

# loading them again, no SQL is emitted
print("\ntwo through twelve, from the cache:\n")
print(", ".join([p.name for p in load_name_range(2, 12)]))

# but with invalidate, they are
print("\ntwenty five through forty, invalidate first:\n")
print(", ".join([p.name for p in load_name_range(25, 40, True)]))

# illustrate the address loading from either cache/already
# on the Person
print("\n\nPeople plus addresses, two through twelve, addresses possibly from cache")
for p in load_name_range(2, 12):
    print(p.format_full())

# illustrate the address loading from either cache/already
# on the Person
print("\n\nPeople plus addresses, two through twelve, addresses from cache")
for p in load_name_range(2, 12):
    print(p.format_full())

print("\n\nIf this was the first run of advanced.py, try "\
    "a second run.  Only one SQL statement will be emitted.")
| mit |
MrMinimal64/timezonefinder | build_n_install.py | 1 | 1317 | import os
import sys
# Build configuration for the release script below.
PACKAGE = 'timezonefinder'
VERSION_FILE = 'VERSION'
VIRT_ENVS = ['APIenv']
# Shell prefix that activates a conda environment before running a command.
VIRT_ENV_COMMAND = (
    '. ~/miniconda3/etc/profile.d/conda.sh; conda activate {virt_env}; '
)
# The python versions (as wheel tags) this package builds wheels for.
PY_VERSION_IDS = ['36', '37', '38']
PYTHON_TAG = '.'.join('py%s' % version_id for version_id in PY_VERSION_IDS)
if __name__ == "__main__":
    print('building now:')

    # sdist + wheel build and per-environment install; publishing follows
    # https://packaging.python.org/tutorials/packaging-projects/ (twine).
    # Start from a clean slate so stale artifacts do not leak into the build.
    # TODO do not remove dist in the future
    for stale_folder in ('build', 'dist'):
        os.system('rm -r -f %s' % stale_folder)

    os.system(f"python setup.py sdist bdist_wheel --python-tag {PYTHON_TAG}")

    # Install the freshly built package into every configured virtualenv.
    for env_name in VIRT_ENVS:
        activate_prefix = VIRT_ENV_COMMAND.format(virt_env=env_name)
        os.system(f'{activate_prefix} python setup.py install')

    # Drop the build directory again once installation is done.
    os.system('rm -r -f build')
| mit |
tchellomello/home-assistant | homeassistant/components/iota/__init__.py | 10 | 1974 | """Support for IOTA wallets."""
from datetime import timedelta
import logging
from iota import Iota
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import load_platform
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)

# Configuration keys accepted under the `iota:` section of configuration.yaml.
CONF_IRI = "iri"
CONF_TESTNET = "testnet"
CONF_WALLET_NAME = "name"
CONF_WALLET_SEED = "seed"
CONF_WALLETS = "wallets"

DOMAIN = "iota"

# Platforms set up by this component.
IOTA_PLATFORMS = ["sensor"]

# Polling cadence for wallet state.
SCAN_INTERVAL = timedelta(minutes=10)
# Schema for a single wallet entry: a display name plus its IOTA seed.
WALLET_CONFIG = vol.Schema(
    {
        vol.Required(CONF_WALLET_NAME): cv.string,
        vol.Required(CONF_WALLET_SEED): cv.string,
    }
)

# Top-level component schema: IRI node URL, optional testnet flag, and at
# least the list of wallets to track.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Required(CONF_IRI): cv.string,
                vol.Optional(CONF_TESTNET, default=False): cv.boolean,
                vol.Required(CONF_WALLETS): vol.All(cv.ensure_list, [WALLET_CONFIG]),
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)
def setup(hass, config):
    """Set up the IOTA component by loading each of its platforms."""
    domain_config = config[DOMAIN]
    for platform_name in IOTA_PLATFORMS:
        load_platform(hass, platform_name, DOMAIN, domain_config, config)
    return True
class IotaDevice(Entity):
    """Representation of an IOTA wallet device."""

    def __init__(self, name, seed, iri, is_testnet=False):
        """Initialise the IOTA device from its wallet name, seed and node URL."""
        self._name = name
        self._seed = seed
        self.iri = iri
        self.is_testnet = is_testnet

    @property
    def name(self):
        """Return the default name of the device."""
        return self._name

    @property
    def device_state_attributes(self):
        """Return the state attributes of the device."""
        return {CONF_WALLET_NAME: self._name}

    @property
    def api(self):
        """Construct API object for interaction with the IRI node."""
        return Iota(adapter=self.iri, seed=self._seed)
| apache-2.0 |
Sendoushi/servo | tests/wpt/web-platform-tests/tools/py/testing/path/test_svnauth.py | 163 | 16079 | import py
import svntestbase
from py.path import SvnAuth
import time
import sys
# Path to the svn executable, or None when svn is not installed (tests skip).
svnbin = py.path.local.sysfind('svn')
def make_repo_auth(repo, userdata):
    """Write svnserve auth configuration into an svn repository.

    ``userdata`` maps user names to ``(password, readwrite)`` tuples, where
    ``readwrite`` is either ``'r'`` or ``'rw'``; these populate the repo's
    ``passwd`` and ``authz`` files respectively.
    """
    confdir = py.path.local(repo).join('conf')
    confdir.join('svnserve.conf').write('''\
[general]
anon-access = none
password-db = passwd
authz-db = authz
realm = TestRepo
''')
    authz_lines = ['[/]\n']
    passwd_lines = ['[users]\n']
    for username, (password, readwrite) in userdata.items():
        authz_lines.append('%s = %s\n' % (username, readwrite))
        passwd_lines.append('%s = %s\n' % (username, password))
    confdir.join('authz').write(''.join(authz_lines))
    confdir.join('passwd').write(''.join(passwd_lines))
def serve_bg(repopath):
    # Starts svnserve in the background for `repopath`, trying ports
    # 10000-10009; returns (port, pid) or raises IOError if none worked.
    pidfile = py.path.local(repopath).join('pid')
    port = 10000
    e = None
    while port < 10010:
        cmd = 'svnserve -d -T --listen-port=%d --pid-file=%s -r %s' % (
            port, pidfile, repopath)
        print(cmd)
        try:
            py.process.cmdexec(cmd)
        except py.process.cmdexec.Error:
            # Capture via sys.exc_info() so `e` survives the except block
            # (Python 3 unbinds the `as` name at block exit).
            e = sys.exc_info()[1]
        else:
            # XXX we assume here that the pid file gets written somewhere, I
            # guess this should be relatively safe... (I hope, at least?)
            counter = pid = 0
            while counter < 10:
                counter += 1
                try:
                    pid = pidfile.read()
                except py.error.ENOENT:
                    pass
                if pid:
                    break
                # Poll until svnserve has written its pid file.
                time.sleep(0.2)
            return port, int(pid)
        port += 1
    raise IOError('could not start svnserve: %s' % (e,))
class TestSvnAuth(object):
    """Unit tests for SvnAuth construction and command-line option rendering."""

    def test_basic(self):
        auth = SvnAuth('foo', 'bar')
        assert auth.username == 'foo'
        assert auth.password == 'bar'
        assert str(auth)

    def test_makecmdoptions_uname_pw_makestr(self):
        expected = '--username="foo" --password="bar"'
        assert SvnAuth('foo', 'bar').makecmdoptions() == expected

    def test_makecmdoptions_quote_escape(self):
        auth = SvnAuth('fo"o', '"ba\'r"')
        expected = '--username="fo\\"o" --password="\\"ba\'r\\""'
        assert auth.makecmdoptions() == expected

    def test_makecmdoptions_no_cache_auth(self):
        options = SvnAuth('foo', 'bar', cache_auth=False).makecmdoptions()
        assert options == ('--username="foo" --password="bar" '
                           '--no-auth-cache')

    def test_makecmdoptions_no_interactive(self):
        options = SvnAuth('foo', 'bar', interactive=False).makecmdoptions()
        assert options == ('--username="foo" --password="bar" '
                           '--non-interactive')

    def test_makecmdoptions_no_interactive_no_cache_auth(self):
        auth = SvnAuth('foo', 'bar', cache_auth=False, interactive=False)
        assert auth.makecmdoptions() == ('--username="foo" --password="bar" '
                                         '--no-auth-cache --non-interactive')
class svnwc_no_svn(py.path.svnwc):
    # Test double: records svn command invocations instead of executing them.
    def __new__(cls, *args, **kwargs):
        # py.path objects are constructed via __new__, so the recording list
        # must be attached here rather than in __init__.
        self = super(svnwc_no_svn, cls).__new__(cls, *args, **kwargs)
        self.commands = []
        return self

    def _svn(self, *args):
        # Capture the would-be svn invocation for later assertions.
        self.commands.append(args)
class TestSvnWCAuth(object):
    """Check that svnwc operations forward auth options to the svn CLI."""

    # The option string every recorded command must end with.
    def setup_method(self, meth):
        if not svnbin:
            py.test.skip("svn binary required")
        self.auth = SvnAuth('user', 'pass', cache_auth=False)

    def test_checkout(self):
        wc = svnwc_no_svn('foo', auth=self.auth)
        wc.checkout('url')
        expected = '--username="user" --password="pass" --no-auth-cache'
        assert wc.commands[0][-1] == expected

    def test_commit(self):
        wc = svnwc_no_svn('foo', auth=self.auth)
        wc.commit('msg')
        expected = '--username="user" --password="pass" --no-auth-cache'
        assert wc.commands[0][-1] == expected

    def test_checkout_no_cache_auth(self):
        wc = svnwc_no_svn('foo', auth=self.auth)
        wc.checkout('url')
        expected = '--username="user" --password="pass" --no-auth-cache'
        assert wc.commands[0][-1] == expected

    def test_checkout_auth_from_constructor(self):
        wc = svnwc_no_svn('foo', auth=self.auth)
        wc.checkout('url')
        expected = '--username="user" --password="pass" --no-auth-cache'
        assert wc.commands[0][-1] == expected
class svnurl_no_svn(py.path.svnurl):
    # Test double: records svn commands and returns canned output instead of
    # shelling out.
    cmdexec_output = 'test'
    popen_output = 'test'

    def __new__(cls, *args, **kwargs):
        # py.path objects are constructed via __new__, so the recording list
        # must be attached here rather than in __init__.
        self = super(svnurl_no_svn, cls).__new__(cls, *args, **kwargs)
        self.commands = []
        return self

    def _cmdexec(self, cmd):
        self.commands.append(cmd)
        return self.cmdexec_output

    def _popen(self, cmd):
        self.commands.append(cmd)
        return self.popen_output
class TestSvnURLAuth(object):
    """Unit tests: svnurl must propagate its SvnAuth to derived paths and
    include the credential options in every generated svn command line."""

    def setup_method(self, meth):
        self.auth = SvnAuth('foo', 'bar')

    def test_init(self):
        u = svnurl_no_svn('http://foo.bar/svn')
        assert u.auth is None

        u = svnurl_no_svn('http://foo.bar/svn', auth=self.auth)
        assert u.auth is self.auth

    def test_new(self):
        u = svnurl_no_svn('http://foo.bar/svn/foo', auth=self.auth)
        new = u.new(basename='bar')
        assert new.auth is self.auth
        assert new.url == 'http://foo.bar/svn/bar'

    def test_join(self):
        u = svnurl_no_svn('http://foo.bar/svn', auth=self.auth)
        new = u.join('foo')
        assert new.auth is self.auth
        assert new.url == 'http://foo.bar/svn/foo'

    def test_listdir(self):
        u = svnurl_no_svn('http://foo.bar/svn', auth=self.auth)
        u.cmdexec_output = '''\
1717 johnny 1529 Nov 04 14:32 LICENSE.txt
1716 johnny 5352 Nov 04 14:28 README.txt
'''
        paths = u.listdir()
        assert paths[0].auth is self.auth
        assert paths[1].auth is self.auth
        assert paths[0].basename == 'LICENSE.txt'

    def test_info(self):
        u = svnurl_no_svn('http://foo.bar/svn/LICENSE.txt', auth=self.auth)
        def dirpath(self):
            return self
        u.cmdexec_output = '''\
1717 johnny 1529 Nov 04 14:32 LICENSE.txt
1716 johnny 5352 Nov 04 14:28 README.txt
'''
        # Temporarily patch dirpath on the class so info() "lists" this dir.
        org_dp = u.__class__.dirpath
        u.__class__.dirpath = dirpath
        try:
            info = u.info()
        finally:
            # BUG FIX: the original restored on the instance
            # (`u.dirpath = org_dp`), leaving the class attribute patched for
            # every test that runs afterwards; restore on the class instead.
            u.__class__.dirpath = org_dp
        assert info.size == 1529

    def test_open(self):
        u = svnurl_no_svn('http://foo.bar/svn', auth=self.auth)
        foo = u.join('foo')
        foo.check = lambda *args, **kwargs: True
        ret = foo.open()
        assert ret == 'test'
        assert '--username="foo" --password="bar"' in foo.commands[0]

    def test_dirpath(self):
        u = svnurl_no_svn('http://foo.bar/svn/foo', auth=self.auth)
        parent = u.dirpath()
        assert parent.auth is self.auth

    def test_mkdir(self):
        u = svnurl_no_svn('http://foo.bar/svn/qweqwe', auth=self.auth)
        assert not u.commands
        u.mkdir(msg='created dir foo')
        assert u.commands
        assert '--username="foo" --password="bar"' in u.commands[0]

    def test_copy(self):
        u = svnurl_no_svn('http://foo.bar/svn', auth=self.auth)
        u2 = svnurl_no_svn('http://foo.bar/svn2')
        u.copy(u2, 'copied dir')
        assert '--username="foo" --password="bar"' in u.commands[0]

    def test_rename(self):
        u = svnurl_no_svn('http://foo.bar/svn/foo', auth=self.auth)
        u.rename('http://foo.bar/svn/bar', 'moved foo to bar')
        assert '--username="foo" --password="bar"' in u.commands[0]

    def test_remove(self):
        u = svnurl_no_svn('http://foo.bar/svn/foo', auth=self.auth)
        u.remove(msg='removing foo')
        assert '--username="foo" --password="bar"' in u.commands[0]

    def test_export(self):
        u = svnurl_no_svn('http://foo.bar/svn', auth=self.auth)
        target = py.path.local('/foo')
        u.export(target)
        assert '--username="foo" --password="bar"' in u.commands[0]

    def test_log(self):
        u = svnurl_no_svn('http://foo.bar/svn/foo', auth=self.auth)
        u.popen_output = py.io.TextIO(py.builtin._totext('''\
<?xml version="1.0"?>
<log>
<logentry revision="51381">
<author>guido</author>
<date>2008-02-11T12:12:18.476481Z</date>
<msg>Creating branch to work on auth support for py.path.svn*.
</msg>
</logentry>
</log>
''', 'ascii'))
        u.check = lambda *args, **kwargs: True
        ret = u.log(10, 20, verbose=True)
        assert '--username="foo" --password="bar"' in u.commands[0]
        assert len(ret) == 1
        assert int(ret[0].rev) == 51381
        assert ret[0].author == 'guido'

    def test_propget(self):
        u = svnurl_no_svn('http://foo.bar/svn', auth=self.auth)
        u.propget('foo')
        assert '--username="foo" --password="bar"' in u.commands[0]
def pytest_funcarg__setup(request):
    # Legacy pytest funcarg hook: provides a live svnserve-backed repository.
    return Setup(request)
class Setup:
    # Fixture helper: creates a throwaway svn repository with auth config and
    # starts svnserve on it; the server is killed via a finalizer.
    def __init__(self, request):
        if not svnbin:
            py.test.skip("svn binary required")
        if not request.config.option.runslowtests:
            py.test.skip('use --runslowtests to run these tests')

        tmpdir = request.getfuncargvalue("tmpdir")
        repodir = tmpdir.join("repo")
        py.process.cmdexec('svnadmin create %s' % repodir)
        if sys.platform == 'win32':
            # file:// URLs on Windows need a leading slash and forward slashes.
            repodir = '/' + str(repodir).replace('\\', '/')
        self.repo = py.path.svnurl("file://%s" % repodir)
        if py.std.sys.platform == 'win32':
            # remove trailing slash...
            repodir = repodir[1:]
        self.repopath = py.path.local(repodir)
        self.temppath = tmpdir.mkdir("temppath")
        self.auth = SvnAuth('johnny', 'foo', cache_auth=False,
                            interactive=False)
        make_repo_auth(self.repopath, {'johnny': ('foo', 'rw')})
        self.port, self.pid = serve_bg(self.repopath.dirpath())
        # XXX caching is too global
        py.path.svnurl._lsnorevcache._dict.clear()
        request.addfinalizer(lambda: py.process.kill(self.pid))
class TestSvnWCAuthFunctional:
def test_checkout_constructor_arg(self, setup):
wc = py.path.svnwc(setup.temppath, auth=setup.auth)
wc.checkout(
'svn://localhost:%s/%s' % (setup.port, setup.repopath.basename))
assert wc.join('.svn').check()
def test_checkout_function_arg(self, setup):
wc = py.path.svnwc(setup.temppath, auth=setup.auth)
wc.checkout(
'svn://localhost:%s/%s' % (setup.port, setup.repopath.basename))
assert wc.join('.svn').check()
def test_checkout_failing_non_interactive(self, setup):
auth = SvnAuth('johnny', 'bar', cache_auth=False,
interactive=False)
wc = py.path.svnwc(setup.temppath, auth)
py.test.raises(Exception,
("wc.checkout('svn://localhost:%(port)s/%(repopath)s')" %
setup.__dict__))
def test_log(self, setup):
wc = py.path.svnwc(setup.temppath, setup.auth)
wc.checkout(
'svn://localhost:%s/%s' % (setup.port, setup.repopath.basename))
foo = wc.ensure('foo.txt')
wc.commit('added foo.txt')
log = foo.log()
assert len(log) == 1
assert log[0].msg == 'added foo.txt'
def test_switch(self, setup):
wc = py.path.svnwc(setup.temppath, auth=setup.auth)
svnurl = 'svn://localhost:%s/%s' % (setup.port, setup.repopath.basename)
wc.checkout(svnurl)
wc.ensure('foo', dir=True).ensure('foo.txt').write('foo')
wc.commit('added foo dir with foo.txt file')
wc.ensure('bar', dir=True)
wc.commit('added bar dir')
bar = wc.join('bar')
bar.switch(svnurl + '/foo')
assert bar.join('foo.txt')
def test_update(self, setup):
wc1 = py.path.svnwc(setup.temppath.ensure('wc1', dir=True),
auth=setup.auth)
wc2 = py.path.svnwc(setup.temppath.ensure('wc2', dir=True),
auth=setup.auth)
wc1.checkout(
'svn://localhost:%s/%s' % (setup.port, setup.repopath.basename))
wc2.checkout(
'svn://localhost:%s/%s' % (setup.port, setup.repopath.basename))
wc1.ensure('foo', dir=True)
wc1.commit('added foo dir')
wc2.update()
assert wc2.join('foo').check()
auth = SvnAuth('unknown', 'unknown', interactive=False)
wc2.auth = auth
py.test.raises(Exception, 'wc2.update()')
def test_lock_unlock_status(self, setup):
port = setup.port
wc = py.path.svnwc(setup.temppath, auth=setup.auth)
wc.checkout(
'svn://localhost:%s/%s' % (port, setup.repopath.basename,))
wc.ensure('foo', file=True)
wc.commit('added foo file')
foo = wc.join('foo')
foo.lock()
status = foo.status()
assert status.locked
foo.unlock()
status = foo.status()
assert not status.locked
auth = SvnAuth('unknown', 'unknown', interactive=False)
foo.auth = auth
py.test.raises(Exception, 'foo.lock()')
py.test.raises(Exception, 'foo.unlock()')
    def test_diff(self, setup):
        # diff() of a locally modified file shows the change; after commit
        # the working-copy diff is empty unless an older revision is
        # requested explicitly via rev=.
        port = setup.port
        wc = py.path.svnwc(setup.temppath, auth=setup.auth)
        wc.checkout(
            'svn://localhost:%s/%s' % (port, setup.repopath.basename,))
        wc.ensure('foo', file=True)
        wc.commit('added foo file')
        wc.update()
        rev = int(wc.status().rev)
        foo = wc.join('foo')
        foo.write('bar')
        diff = foo.diff()
        assert '\n+bar\n' in diff
        foo.commit('added some content')
        diff = foo.diff()
        assert not diff
        diff = foo.diff(rev=rev)
        assert '\n+bar\n' in diff
        auth = SvnAuth('unknown', 'unknown', interactive=False)
        foo.auth = auth
        py.test.raises(Exception, 'foo.diff(rev=rev)')
class TestSvnURLAuthFunctional:
    # Functional tests for py.path.svnurl against a live svnserve instance
    # (provided by the `setup` fixture).  Each operation is repeated with
    # bogus, non-interactive credentials to verify it then raises.

    def test_listdir(self, setup):
        port = setup.port
        u = py.path.svnurl(
            'svn://localhost:%s/%s' % (port, setup.repopath.basename),
            auth=setup.auth)
        u.ensure('foo')
        paths = u.listdir()
        assert len(paths) == 1
        # Child paths must inherit the auth object of their parent URL.
        assert paths[0].auth is setup.auth
        auth = SvnAuth('foo', 'bar', interactive=False)
        u = py.path.svnurl(
            'svn://localhost:%s/%s' % (port, setup.repopath.basename),
            auth=auth)
        py.test.raises(Exception, 'u.listdir()')

    def test_copy(self, setup):
        port = setup.port
        u = py.path.svnurl(
            'svn://localhost:%s/%s' % (port, setup.repopath.basename),
            auth=setup.auth)
        foo = u.mkdir('foo')
        assert foo.check()
        bar = u.join('bar')
        foo.copy(bar)
        assert bar.check()
        assert bar.auth is setup.auth
        auth = SvnAuth('foo', 'bar', interactive=False)
        u = py.path.svnurl(
            'svn://localhost:%s/%s' % (port, setup.repopath.basename),
            auth=auth)
        foo = u.join('foo')
        bar = u.join('bar')
        py.test.raises(Exception, 'foo.copy(bar)')

    def test_write_read(self, setup):
        port = setup.port
        u = py.path.svnurl(
            'svn://localhost:%s/%s' % (port, setup.repopath.basename),
            auth=setup.auth)
        foo = u.ensure('foo')
        fp = foo.open()
        try:
            data = fp.read()
        finally:
            fp.close()
        # ensure() created an empty file.
        assert data == ''
        auth = SvnAuth('foo', 'bar', interactive=False)
        u = py.path.svnurl(
            'svn://localhost:%s/%s' % (port, setup.repopath.basename),
            auth=auth)
        foo = u.join('foo')
        py.test.raises(Exception, 'foo.open()')

        # XXX rinse, repeat... :|
| mpl-2.0 |
paweljasinski/ironpython3 | Src/Scripts/test_parrot.py | 3 | 2303 | #####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# ironpy@microsoft.com. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
""" This provides a more convenient harness for running this
benchmark and collecting separate timings for each component.
"""
from time import clock
import sys, nt
sys.path.append([nt.environ[x] for x in nt.environ.keys() if x.lower() == "dlr_root"][0] + "\\Languages\\IronPython\\External\\parrotbench")
# Python 2 / IronPython code.  Skip entirely on 64-bit IronPython,
# which is affected by CodePlex issue 18518.
if sys.platform=="cli":
    import System
    is_cli64 = System.IntPtr.Size == 8
    if is_cli64:
        print "CodePlex 18518"
        sys.exit(0)
def test_main(type="short"):
    # Run the parrotbench modules b0..b6 and report per-module timings.
    # `type` selects how many repetitions to run: short/full=1, medium=2,
    # long=4.  (Python 2 code: print statements, time.clock.)
    oldRecursionDepth = sys.getrecursionlimit()
    try:
        # The benchmark recurses deeply; raise the limit for its duration.
        sys.setrecursionlimit(1001)
        t0 = clock()
        import b0
        import b1
        import b2
        import b3
        import b4
        import b5
        import b6
        print 'import time = %.2f' % (clock()-t0)
        tests = [b0,b1,b2,b3,b4,b5,b6]

        # Number of benchmark repetitions for the requested run flavor.
        N = { "short" : 1, "full" : 1, "medium" : 2, "long" : 4 }[type]
        results = {}
        t0 = clock()
        for i in range(N):
            for test in tests:
                ts0 = clock()
                test.main()
                tm = (clock()-ts0)
                results.setdefault(test, []).append(tm)
                print '%.2f sec running %s' % ( tm, test.__name__)
        # Average over the N repetitions, plus the raw samples.
        for test in tests:
            print '%s = %f -- %r' % (test.__name__, sum(results[test])/N, results[test])
        print 'all done in %.2f sec' % (clock()-t0)
    finally:
        # Always restore the interpreter's original recursion limit.
        sys.setrecursionlimit(oldRecursionDepth)
# Optional first CLI argument selects the run flavor (short/full/medium/long).
if __name__=="__main__":
    kind = "short"
    if len(sys.argv) > 1: kind = sys.argv[1]
    test_main(kind)
| apache-2.0 |
tow/sunburnt | sunburnt/strings.py | 5 | 1655 | from __future__ import absolute_import
class SolrString(unicode):
    # Python 2 code: a unicode subclass.  Subclasses are expected to
    # provide a ``chars`` attribute (see RawString / WildcardString below);
    # escape_for_lqs_term() reads self.chars, so SolrString itself is an
    # abstract base in practice -- TODO confirm it is never used directly.
    # The behaviour below is only really relevant for String fields rather
    # than Text fields - most queryparsers will strip these characters out
    # for a text field anyway.
    lucene_special_chars = '+-&|!(){}[]^"~*?: \t\v\\/'

    def escape_for_lqs_term(self):
        # Lucene keywords and the empty string must be quoted to be used
        # as literal terms.
        if self in ["AND", "OR", "NOT", ""]:
            return u'"%s"' % self
        chars = []
        for c in self.chars:
            # Plain characters that are Lucene metacharacters get a
            # backslash escape; SpecialChar markers pass through unescaped.
            if isinstance(c, basestring) and c in self.lucene_special_chars:
                chars.append(u'\%s'%c)
            else:
                chars.append(u'%s'%c)
        return u''.join(chars)
class RawString(SolrString):
    # Every character is literal: chars is the string itself, so all
    # Lucene metacharacters will be escaped by escape_for_lqs_term().
    def __init__(self, s):
        self.chars = self
class WildcardString(SolrString):
    # Interprets backslash escapes: unescaped '*' and '?' become
    # SpecialChar markers (left unescaped by escape_for_lqs_term), while
    # escaped ones remain literal characters.
    def __init__(self, s):
        self.chars = self.get_wildcards(s)

    class SpecialChar(object):
        def __unicode__(self):
            return unicode(self.char)

    class Asterisk(SpecialChar):
        char = u'*'

    class QuestionMark(SpecialChar):
        char = u'?'

    def get_wildcards(self, s):
        """Split `s` into a list of plain characters and SpecialChar markers."""
        backslash = False
        i = 0  # NOTE(review): `i` is incremented but never read -- dead counter.
        chars = []
        for c in s:
            if backslash:
                # Previous char was '\': take this one literally.
                backslash = False
                chars.append(c)
                continue
            i += 1
            if c == u'\\':
                backslash = True
            elif c == u'*':
                chars.append(self.Asterisk())
            elif c == u'?':
                chars.append(self.QuestionMark())
            else:
                chars.append(c)
        # A trailing lone backslash is kept as a literal backslash.
        if backslash:
            chars.append(u'\\')
        return chars
| mit |
nuclearmistake/repo | git_config.py | 3 | 21602 | #
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import contextlib
import errno
import json
import os
import re
import ssl
import subprocess
import sys
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
import time
from pyversion import is_python3
if is_python3():
import urllib.request
import urllib.error
else:
import urllib2
import imp
urllib = imp.new_module('urllib')
urllib.request = urllib2
urllib.error = urllib2
from signal import SIGTERM
from error import GitError, UploadError
import platform_utils
from trace import Trace
if is_python3():
from http.client import HTTPException
else:
from httplib import HTTPException
from git_command import GitCommand
from git_command import ssh_sock
from git_command import terminate_ssh_clients
from git_refs import R_CHANGES, R_HEADS, R_TAGS
# Full 40-hex-digit object id (SHA-1).
ID_RE = re.compile(r'^[0-9a-f]{40}$')

# Maps normalized review base URL -> resolved review URL, shared by all
# Remote objects in this process.
REVIEW_CACHE = dict()
def IsChange(rev):
  """True if `rev` is a Gerrit change ref (starts with R_CHANGES)."""
  return rev.startswith(R_CHANGES)
def IsId(rev):
  """Return a truthy match object if `rev` is a full 40-hex object id."""
  return ID_RE.match(rev)
def IsTag(rev):
  """True if `rev` is a tag ref (starts with R_TAGS)."""
  return rev.startswith(R_TAGS)
def IsImmutable(rev):
  """True if `rev` names something that never moves: a change, id, or tag."""
  return IsChange(rev) or IsId(rev) or IsTag(rev)
def _key(name):
parts = name.split('.')
if len(parts) < 2:
return name.lower()
parts[ 0] = parts[ 0].lower()
parts[-1] = parts[-1].lower()
return '.'.join(parts)
class GitConfig(object):
  """Read/write access to a git configuration file.

  Parsed values are cached in a sidecar JSON file next to the config file
  (see _ReadJson/_SaveJson) so repeated reads do not fork `git config`.
  """

  _ForUser = None

  @classmethod
  def ForUser(cls):
    """Return the (process-wide singleton) config for ~/.gitconfig."""
    if cls._ForUser is None:
      cls._ForUser = cls(configfile = os.path.expanduser('~/.gitconfig'))
    return cls._ForUser

  @classmethod
  def ForRepository(cls, gitdir, defaults=None):
    """Return the config for a repository's `config` file.

    `defaults`, if given, is another GitConfig consulted for keys not
    present in this file.
    """
    return cls(configfile = os.path.join(gitdir, 'config'),
               defaults = defaults)

  def __init__(self, configfile, defaults=None, jsonFile=None):
    self.file = configfile
    self.defaults = defaults
    self._cache_dict = None
    self._section_dict = None
    self._remotes = {}
    self._branches = {}

    # Sidecar JSON cache; derived from the config file name unless given.
    self._json = jsonFile
    if self._json is None:
      self._json = os.path.join(
          os.path.dirname(self.file),
          '.repo_' + os.path.basename(self.file) + '.json')

  def Has(self, name, include_defaults = True):
    """Return true if this configuration file has the key.
    """
    if _key(name) in self._cache:
      return True
    if include_defaults and self.defaults:
      return self.defaults.Has(name, include_defaults = True)
    return False

  def GetBoolean(self, name):
    """Returns a boolean from the configuration file.
       None : The value was not defined, or is not a boolean.
       True : The value was set to true or yes.
       False: The value was set to false or no.
    """
    v = self.GetString(name)
    if v is None:
      return None
    v = v.lower()
    if v in ('true', 'yes'):
      return True
    if v in ('false', 'no'):
      return False
    return None

  def GetString(self, name, all_keys=False):
    """Get the first value for a key, or None if it is not defined.

       This configuration file is used first, if the key is not
       defined or all_keys = True then the defaults are also searched.
    """
    try:
      v = self._cache[_key(name)]
    except KeyError:
      if self.defaults:
        return self.defaults.GetString(name, all_keys = all_keys)
      v = []

    if not all_keys:
      if v:
        return v[0]
      return None

    r = []
    r.extend(v)
    if self.defaults:
      r.extend(self.defaults.GetString(name, all_keys = True))
    return r

  def SetString(self, name, value):
    """Set the value(s) for a key.
       Only this configuration file is modified.

       The supplied value should be either a string,
       or a list of strings (to store multiple values),
       or None to delete the key.
    """
    key = _key(name)

    try:
      old = self._cache[key]
    except KeyError:
      old = []

    if value is None:
      if old:
        del self._cache[key]
        self._do('--unset-all', name)

    elif isinstance(value, list):
      if len(value) == 0:
        self.SetString(name, None)

      elif len(value) == 1:
        self.SetString(name, value[0])

      elif old != value:
        self._cache[key] = list(value)
        self._do('--replace-all', name, value[0])
        for i in range(1, len(value)):
          self._do('--add', name, value[i])

    elif len(old) != 1 or old[0] != value:
      self._cache[key] = [value]
      self._do('--replace-all', name, value)

  def GetRemote(self, name):
    """Get the remote.$name.* configuration values as an object.
    """
    try:
      r = self._remotes[name]
    except KeyError:
      r = Remote(self, name)
      self._remotes[r.name] = r
    return r

  def GetBranch(self, name):
    """Get the branch.$name.* configuration values as an object.
    """
    try:
      b = self._branches[name]
    except KeyError:
      b = Branch(self, name)
      self._branches[b.name] = b
    return b

  def GetSubSections(self, section):
    """List all subsection names matching $section.*.*
    """
    return self._sections.get(section, set())

  def HasSection(self, section, subsection = ''):
    """Does at least one key in section.subsection exist?
    """
    try:
      return subsection in self._sections[section]
    except KeyError:
      return False

  def UrlInsteadOf(self, url):
    """Resolve any url.*.insteadof references.
    """
    for new_url in self.GetSubSections('url'):
      for old_url in self.GetString('url.%s.insteadof' % new_url, True):
        if old_url is not None and url.startswith(old_url):
          return new_url + url[len(old_url):]
    return url

  @property
  def _sections(self):
    # Lazily build a map of section name -> set of subsection names
    # ('' for keys with no subsection).
    d = self._section_dict
    if d is None:
      d = {}
      for name in self._cache.keys():
        p = name.split('.')
        if 2 == len(p):
          section = p[0]
          subsect = ''
        else:
          section = p[0]
          subsect = '.'.join(p[1:-1])
        if section not in d:
          d[section] = set()
        d[section].add(subsect)
      self._section_dict = d
    return d

  @property
  def _cache(self):
    # Lazily populated dict of normalized key -> list of values.
    if self._cache_dict is None:
      self._cache_dict = self._Read()
    return self._cache_dict

  def _Read(self):
    d = self._ReadJson()
    if d is None:
      d = self._ReadGit()
      self._SaveJson(d)
    return d

  def _ReadJson(self):
    # Use the JSON sidecar only when it is strictly newer than the config
    # file; a stale or unreadable sidecar is removed and we fall back to
    # forking `git config`.
    try:
      if os.path.getmtime(self._json) \
      <= os.path.getmtime(self.file):
        platform_utils.remove(self._json)
        return None
    except OSError:
      return None
    try:
      Trace(': parsing %s', self.file)
      fd = open(self._json)
      try:
        return json.load(fd)
      finally:
        fd.close()
    except (IOError, ValueError):
      platform_utils.remove(self._json)
      return None

  def _SaveJson(self, cache):
    # Best-effort: a failure to write the sidecar just loses the cache.
    try:
      fd = open(self._json, 'w')
      try:
        json.dump(cache, fd, indent=2)
      finally:
        fd.close()
    except (IOError, TypeError):
      if os.path.exists(self._json):
        platform_utils.remove(self._json)

  def _ReadGit(self):
    """
    Read configuration data from git.

    This internal method populates the GitConfig cache.

    """
    c = {}
    d = self._do('--null', '--list')
    if d is None:
      return c
    for line in d.decode('utf-8').rstrip('\0').split('\0'):  # pylint: disable=W1401
                                                             # Backslash is not anomalous
      if '\n' in line:
        key, val = line.split('\n', 1)
      else:
        key = line
        val = None

      if key in c:
        c[key].append(val)
      else:
        c[key] = [val]

    return c

  def _do(self, *args):
    command = ['config', '--file', self.file]
    command.extend(args)

    p = GitCommand(None,
                   command,
                   capture_stdout = True,
                   capture_stderr = True)
    if p.Wait() == 0:
      return p.stdout
    else:
      # Fix: the GitError was previously constructed but never raised, so
      # failures from `git config` were silently swallowed and _do()
      # returned None.
      raise GitError('git config %s: %s' % (str(args), p.stderr))
class RefSpec(object):
  """A parsed Git refspec.

  Attributes:
    forced: True if the spec began with '+' (force-update allowed).
    src: left-hand side of the spec (source refs).
    dst: right-hand side of the spec (destination refs).
  """

  @classmethod
  def FromString(cls, rs):
    """Parse a textual refspec such as '+refs/heads/*:refs/remotes/o/*'."""
    lhs, rhs = rs.split(':', 2)
    forced = lhs.startswith('+')
    if forced:
      lhs = lhs[1:]
    return cls(forced, lhs, rhs)

  def __init__(self, forced, lhs, rhs):
    self.forced = forced
    self.src = lhs
    self.dst = rhs

  def SourceMatches(self, rev):
    """True if `rev` is covered by the source side of this spec."""
    src = self.src
    if not src:
      return False
    return rev == src or (src.endswith('/*') and rev.startswith(src[:-1]))

  def DestMatches(self, ref):
    """True if `ref` is covered by the destination side of this spec."""
    dst = self.dst
    if not dst:
      return False
    return ref == dst or (dst.endswith('/*') and ref.startswith(dst[:-1]))

  def MapSource(self, rev):
    """Translate a matching source rev into its destination name."""
    if not self.src.endswith('/*'):
      return self.dst
    # Wildcard: replace the source prefix with the destination prefix.
    return self.dst[:-1] + rev[len(self.src) - 1:]

  def __str__(self):
    parts = ['+'] if self.forced else []
    if self.src:
      parts.append(self.src)
    if self.dst:
      parts.append(':')
      parts.append(self.dst)
    return ''.join(parts)
# State for ssh ControlMaster connection sharing: the master processes we
# spawned, the host[:port] keys a master is known to be running for, and a
# lock guarding both (repo sync runs multiple worker threads).
_master_processes = []
_master_keys = set()
_ssh_master = True
_master_keys_lock = None
def init_ssh():
  """Should be called once at the start of repo to init ssh master handling.

  At the moment, all we do is to create our lock.
  """
  global _master_keys_lock
  assert _master_keys_lock is None, "Should only call init_ssh once"
  _master_keys_lock = _threading.Lock()
def _open_ssh(host, port=None):
  """Ensure an ssh ControlMaster is running for host[:port].

  Returns True if a master is (now) running, False if masters are
  unavailable (disabled, GIT_SSH set, unsupported platform, or spawn
  failure).  Requires init_ssh() to have been called first.
  """
  global _ssh_master

  # Acquire the lock.  This is needed to prevent opening multiple masters for
  # the same host when we're running "repo sync -jN" (for N > 1) _and_ the
  # manifest <remote fetch="ssh://xyz"> specifies a different host from the
  # one that was passed to repo init.
  _master_keys_lock.acquire()
  try:

    # Check to see whether we already think that the master is running; if we
    # think it's already running, return right away.
    if port is not None:
      key = '%s:%s' % (host, port)
    else:
      key = host

    if key in _master_keys:
      return True

    if not _ssh_master \
    or 'GIT_SSH' in os.environ \
    or sys.platform in ('win32', 'cygwin'):
      # failed earlier, or cygwin ssh can't do this
      #
      return False

    # We will make two calls to ssh; this is the common part of both calls.
    command_base = ['ssh',
                    '-o','ControlPath %s' % ssh_sock(),
                    host]
    if port is not None:
      command_base[1:1] = ['-p', str(port)]

    # Since the key wasn't in _master_keys, we think that master isn't running.
    # ...but before actually starting a master, we'll double-check.  This can
    # be important because we can't tell that that 'git@myhost.com' is the same
    # as 'myhost.com' where "User git" is setup in the user's ~/.ssh/config file.
    check_command = command_base + ['-O','check']
    try:
      Trace(': %s', ' '.join(check_command))
      check_process = subprocess.Popen(check_command,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)
      check_process.communicate() # read output, but ignore it...
      isnt_running = check_process.wait()

      if not isnt_running:
        # Our double-check found that the master _was_ in fact running.  Add to
        # the list of keys.
        _master_keys.add(key)
        return True
    except Exception:
      # Ignore exceptions.  We will fall back to the normal command and print
      # to the log there.
      pass

    # Start a new master in the background (-M -N: master mode, no command).
    command = command_base[:1] + \
              ['-M', '-N'] + \
              command_base[1:]
    try:
      Trace(': %s', ' '.join(command))
      p = subprocess.Popen(command)
    except Exception as e:
      _ssh_master = False
      print('\nwarn: cannot enable ssh control master for %s:%s\n%s'
            % (host,port, str(e)), file=sys.stderr)
      return False

    # Give the master a moment to come up; if it died immediately, give up.
    time.sleep(1)
    ssh_died = (p.poll() is not None)
    if ssh_died:
      return False

    _master_processes.append(p)
    _master_keys.add(key)
    return True
  finally:
    _master_keys_lock.release()
def close_ssh():
  """Terminate all ssh clients and master processes started by this run."""
  global _master_keys_lock

  terminate_ssh_clients()

  for p in _master_processes:
    try:
      os.kill(p.pid, SIGTERM)
      p.wait()
    except OSError:
      pass
  del _master_processes[:]
  _master_keys.clear()

  # Remove the per-run control-socket directory, if any.
  d = ssh_sock(create=False)
  if d:
    try:
      os.rmdir(os.path.dirname(d))
    except OSError:
      pass

  # We're done with the lock, so we can delete it.
  _master_keys_lock = None
# scp-style URL ([user@]host:path) and full scheme://[user@]host/ matchers.
URI_SCP = re.compile(r'^([^@:]*@?[^:/]{1,}):')
URI_ALL = re.compile(r'^([a-z][a-z+-]*)://([^@/]*@?[^/]*)/')
def GetSchemeFromUrl(url):
  """Return the scheme of a scheme://... url, or None if it has none."""
  m = URI_ALL.match(url)
  if m:
    return m.group(1)
  return None
@contextlib.contextmanager
def GetUrlCookieFile(url, quiet):
  """Context manager yielding (cookiefile, proxy) for the given url.

  For persistent-http(s) urls the values come from the
  git-remote-persistent-https helper; otherwise the user's global
  http.cookiefile setting is used (with no proxy).
  """
  if url.startswith('persistent-'):
    try:
      p = subprocess.Popen(
          ['git-remote-persistent-https', '-print_config', url],
          stdin=subprocess.PIPE, stdout=subprocess.PIPE,
          stderr=subprocess.PIPE)
      try:
        cookieprefix = 'http.cookiefile='
        proxyprefix = 'http.proxy='
        cookiefile = None
        proxy = None
        for line in p.stdout:
          line = line.strip()
          if line.startswith(cookieprefix):
            cookiefile = line[len(cookieprefix):]
          if line.startswith(proxyprefix):
            proxy = line[len(proxyprefix):]
        # Leave subprocess open, as cookie file may be transient.
        if cookiefile or proxy:
          yield cookiefile, proxy
          return
      finally:
        p.stdin.close()
        if p.wait():
          err_msg = p.stderr.read()
          if ' -print_config' in err_msg:
            pass # Persistent proxy doesn't support -print_config.
          elif not quiet:
            print(err_msg, file=sys.stderr)
    except OSError as e:
      if e.errno == errno.ENOENT:
        pass # No persistent proxy.
      raise
  yield GitConfig.ForUser().GetString('http.cookiefile'), None
def _preconnect(url):
  """Open an ssh control master for ssh-style urls; False otherwise."""
  m = URI_ALL.match(url)
  if m:
    scheme = m.group(1)
    host = m.group(2)
    if ':' in host:
      host, port = host.split(':')
    else:
      port = None
    if scheme in ('ssh', 'git+ssh', 'ssh+git'):
      return _open_ssh(host, port)
    return False

  # scp-style syntax (user@host:path) implies ssh with the default port.
  m = URI_SCP.match(url)
  if m:
    host = m.group(1)
    return _open_ssh(host)

  return False
class Remote(object):
  """Configuration options related to a remote.
  """

  def __init__(self, config, name):
    self._config = config
    self.name = name
    self.url = self._Get('url')
    self.pushUrl = self._Get('pushurl')
    self.review = self._Get('review')
    self.projectname = self._Get('projectname')
    self.fetch = list(map(RefSpec.FromString,
                          self._Get('fetch', all_keys=True)))
    self._review_url = None

  def _InsteadOf(self):
    # Apply the longest matching url.<base>.insteadOf rewrite from the
    # user's global git configuration to self.url.
    globCfg = GitConfig.ForUser()
    urlList = globCfg.GetSubSections('url')
    longest = ""
    longestUrl = ""

    for url in urlList:
      key = "url." + url + ".insteadOf"
      insteadOfList = globCfg.GetString(key, all_keys=True)

      for insteadOf in insteadOfList:
        if self.url.startswith(insteadOf) \
        and len(insteadOf) > len(longest):
          longest = insteadOf
          longestUrl = url

    if len(longest) == 0:
      return self.url

    return self.url.replace(longest, longestUrl, 1)

  def PreConnectFetch(self):
    connectionUrl = self._InsteadOf()
    return _preconnect(connectionUrl)

  def ReviewUrl(self, userEmail, validate_certs):
    """Resolve and cache the review (Gerrit) URL for this remote.

    Probes <review>/ssh_info to decide between ssh:// and http(s) access;
    results are memoized per base URL in the module-level REVIEW_CACHE.
    Raises UploadError if the review server cannot be contacted.
    """
    if self._review_url is None:
      if self.review is None:
        return None

      # Normalize the configured review value into a base URL ending in '/'.
      u = self.review
      if u.startswith('persistent-'):
        u = u[len('persistent-'):]
      if u.split(':')[0] not in ('http', 'https', 'sso', 'ssh'):
        u = 'http://%s' % u
      if u.endswith('/Gerrit'):
        u = u[:len(u) - len('/Gerrit')]
      if u.endswith('/ssh_info'):
        u = u[:len(u) - len('/ssh_info')]
      if not u.endswith('/'):
        u += '/'
      http_url = u

      if u in REVIEW_CACHE:
        self._review_url = REVIEW_CACHE[u]
      elif 'REPO_HOST_PORT_INFO' in os.environ:
        host, port = os.environ['REPO_HOST_PORT_INFO'].split()
        self._review_url = self._SshReviewUrl(userEmail, host, port)
        REVIEW_CACHE[u] = self._review_url
      elif u.startswith('sso:') or u.startswith('ssh:'):
        self._review_url = u  # Assume it's right
        REVIEW_CACHE[u] = self._review_url
      elif 'REPO_IGNORE_SSH_INFO' in os.environ:
        self._review_url = http_url
        REVIEW_CACHE[u] = self._review_url
      else:
        try:
          info_url = u + 'ssh_info'
          if not validate_certs:
            context = ssl._create_unverified_context()
            info = urllib.request.urlopen(info_url, context=context).read()
          else:
            info = urllib.request.urlopen(info_url).read()
          if info == 'NOT_AVAILABLE' or '<' in info:
            # If `info` contains '<', we assume the server gave us some sort
            # of HTML response back, like maybe a login page.
            #
            # Assume HTTP if SSH is not enabled or ssh_info doesn't look right.
            self._review_url = http_url
          else:
            host, port = info.split()
            self._review_url = self._SshReviewUrl(userEmail, host, port)
        except urllib.error.HTTPError as e:
          raise UploadError('%s: %s' % (self.review, str(e)))
        except urllib.error.URLError as e:
          raise UploadError('%s: %s' % (self.review, str(e)))
        except HTTPException as e:
          raise UploadError('%s: %s' % (self.review, e.__class__.__name__))

        REVIEW_CACHE[u] = self._review_url

    return self._review_url + self.projectname

  def _SshReviewUrl(self, userEmail, host, port):
    # Username comes from review.<url>.username, falling back to the
    # local part of the user's email address.
    username = self._config.GetString('review.%s.username' % self.review)
    if username is None:
      username = userEmail.split('@')[0]
    return 'ssh://%s@%s:%s/' % (username, host, port)

  def ToLocal(self, rev):
    """Convert a remote revision string to something we have locally.
    """
    if self.name == '.' or IsId(rev):
      return rev

    if not rev.startswith('refs/'):
      rev = R_HEADS + rev

    for spec in self.fetch:
      if spec.SourceMatches(rev):
        return spec.MapSource(rev)

    if not rev.startswith(R_HEADS):
      return rev

    raise GitError('remote %s does not have %s' % (self.name, rev))

  def WritesTo(self, ref):
    """True if the remote stores to the tracking ref.
    """
    for spec in self.fetch:
      if spec.DestMatches(ref):
        return True
    return False

  def ResetFetch(self, mirror=False):
    """Set the fetch refspec to its default value.
    """
    if mirror:
      dst = 'refs/heads/*'
    else:
      dst = 'refs/remotes/%s/*' % self.name
    self.fetch = [RefSpec(True, 'refs/heads/*', dst)]

  def Save(self):
    """Save this remote to the configuration.
    """
    self._Set('url', self.url)
    # NOTE(review): pushurl is stored with '/' + projectname appended --
    # confirm this fork-specific behavior is intended by callers.
    if self.pushUrl is not None:
      self._Set('pushurl', self.pushUrl + '/' + self.projectname)
    else:
      self._Set('pushurl', self.pushUrl)
    self._Set('review', self.review)
    self._Set('projectname', self.projectname)
    self._Set('fetch', list(map(str, self.fetch)))

  def _Set(self, key, value):
    key = 'remote.%s.%s' % (self.name, key)
    return self._config.SetString(key, value)

  def _Get(self, key, all_keys=False):
    key = 'remote.%s.%s' % (self.name, key)
    return self._config.GetString(key, all_keys = all_keys)
class Branch(object):
  """Configuration options related to a single branch.
  """

  def __init__(self, config, name):
    self._config = config
    self.name = name
    self.merge = self._Get('merge')

    r = self._Get('remote')
    if r:
      self.remote = self._config.GetRemote(r)
    else:
      self.remote = None

  @property
  def LocalMerge(self):
    """Convert the merge spec to a local name.
    """
    if self.remote and self.merge:
      return self.remote.ToLocal(self.merge)
    return None

  def Save(self):
    """Save this branch back into the configuration.
    """
    if self._config.HasSection('branch', self.name):
      if self.remote:
        self._Set('remote', self.remote.name)
      else:
        self._Set('remote', None)
      self._Set('merge', self.merge)

    else:
      # Section does not exist yet: append it to the config file directly
      # instead of forking `git config` once per key.
      fd = open(self._config.file, 'a')
      try:
        fd.write('[branch "%s"]\n' % self.name)
        if self.remote:
          fd.write('\tremote = %s\n' % self.remote.name)
        if self.merge:
          fd.write('\tmerge = %s\n' % self.merge)
      finally:
        fd.close()

  def _Set(self, key, value):
    key = 'branch.%s.%s' % (self.name, key)
    return self._config.SetString(key, value)

  def _Get(self, key, all_keys=False):
    key = 'branch.%s.%s' % (self.name, key)
    return self._config.GetString(key, all_keys = all_keys)
| apache-2.0 |
ligovirgo/seismon | RfPrediction/BLRMS_Prediction/condor_seismic_peaks.py | 1 | 1969 |
import os, sys
import glob
import optparse
import tables
import pandas as pd
import numpy as np
import h5py
def parse_commandline(args=None):
    """
    Parse the options given on the command-line.

    Args:
        args: optional list of argument strings to parse; defaults to
            sys.argv[1:] (the original behavior).

    Returns:
        The populated options object (``opts.ifos`` is a comma-separated
        string of observatory names, default ``'LHO,LLO'``).
    """
    parser = optparse.OptionParser()
    parser.add_option('-i', '--ifos', type=str, default='LHO,LLO',
                      help='GW Observatories: LLO,LHO...')
    # parse_args(None) reads sys.argv, so calls without arguments behave
    # exactly as before; passing a list makes the function testable.
    opts, _ = parser.parse_args(args)
    return opts
# Parse command line
opts = parse_commandline()

# Write a condor DAG (one JOB per row of each observatory's masterlist),
# a shell script with the equivalent direct commands, and the condor.sub
# submit description they all share.
condorDir = './'
logDir = os.path.join(condorDir,'logs')
if not os.path.isdir(logDir):
    os.makedirs(logDir)

condordag = os.path.join(condorDir,'condor.dag')
fid = open(condordag,'w')
condorsh = os.path.join(condorDir,'condor.sh')
fid1 = open(condorsh,'w')

job_number = 0

ifos = opts.ifos.split(",")

for ifo in ifos:
    # One job per row in ./masterlists/<ifo>.dat; only the row index is
    # passed to the worker script, which re-reads the masterlist itself.
    x = np.genfromtxt('./masterlists/{}.dat'.format(ifo))
    for ii,row in enumerate(x):
        fid1.write('python fetch_seismic_peaks.py -i %s -ID %d -blrmsBand 30M_100M -saveResult 1 -saveImage 0\n'%(ifo,ii))
        fid.write('JOB %d condor.sub\n'%(job_number))
        fid.write('RETRY %d 3\n'%(job_number))
        fid.write('VARS %d jobNumber="%d" ifo="%s" id="%d"\n'%(job_number,job_number, ifo, ii))
        fid.write('\n\n')
        job_number = job_number + 1

fid1.close()
fid.close()

# NOTE(review): condor.sh invokes the worker with "-i/-ID" while condor.sub
# uses "-IFO $(ifo) -ID $(id)" -- confirm fetch_seismic_peaks.py accepts
# both option spellings.
fid = open(os.path.join(condorDir,'condor.sub'),'w')
fid.write('executable = ./fetch_seismic_peaks.py\n')
fid.write('output = logs/out.$(jobNumber)\n');
fid.write('error = logs/err.$(jobNumber)\n');
fid.write('arguments = -IFO $(ifo) -ID $(id) -blrmsBand 30M_100M -saveResult 1 -saveImage 0\n')
fid.write('requirements = OpSys == "LINUX"\n');
fid.write('request_memory = 8192\n');
fid.write('request_cpus = 1\n');
fid.write('accounting_group = ligo.dev.o2.burst.allsky.stamp\n');
fid.write('notification = never\n');
fid.write('getenv = true\n');
fid.write('log = /usr1/mcoughlin/seismon.log\n')
fid.write('+MaxHours = 24\n');
fid.write('universe = vanilla\n');
fid.write('queue 1\n');
fid.close()
| gpl-3.0 |
promptworks/keystone | keystone/common/kvs/backends/inmemdb.py | 26 | 1902 | # Copyright 2013 Metacloud, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Keystone In-Memory Dogpile.cache backend implementation.
"""
import copy
from dogpile.cache import api
# Sentinel returned by dogpile.cache for "key not present".
NO_VALUE = api.NO_VALUE
class MemoryBackend(api.CacheBackend):
    """A dogpile.cache backend backed by a plain dictionary.

    There is no size management; entries stay until explicitly removed.
    Dogpile expires items by timestamp and does not delete them from the
    backend.  Values are deep-copied on the way in and out so callers can
    never mutate the cache's internal state.

    E.g.::

        from dogpile.cache import make_region

        region = make_region().configure(
            'keystone.common.kvs.Memory'
        )

    """
    def __init__(self, arguments):
        self._db = {}

    def _isolate_value(self, value):
        # NO_VALUE is a sentinel and passes through untouched; everything
        # else is deep-copied to isolate the stored data.
        if value is NO_VALUE:
            return value
        return copy.deepcopy(value)

    def get(self, key):
        return self._isolate_value(self._db.get(key, NO_VALUE))

    def get_multi(self, keys):
        return [self.get(k) for k in keys]

    def set(self, key, value):
        self._db[key] = self._isolate_value(value)

    def set_multi(self, mapping):
        for k, v in mapping.items():
            self.set(k, v)

    def delete(self, key):
        self._db.pop(key, None)

    def delete_multi(self, keys):
        for k in keys:
            self.delete(k)
| apache-2.0 |
WhireCrow/openwrt-mt7620 | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/encodings/cp863.py | 593 | 34508 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP863.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless encode/decode via the generated cp863 charmap tables
    # (encoding_map / decoding_table are defined later in this module).

    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # Charmap encoding is stateless, so each chunk encodes independently.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # Single-byte charmap decoding is stateless as well.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream support comes entirely from the Codec/StreamWriter bases.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream support comes entirely from the Codec/StreamReader bases.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo used to register this codec with ``codecs``."""
    # The codec is stateless, so one shared instance can supply both the
    # encode and decode entry points.
    codec = Codec()
    return codecs.CodecInfo(
        name='cp863',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00b6, # PILCROW SIGN
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x2017, # DOUBLE LOW LINE
0x008e: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
0x008f: 0x00a7, # SECTION SIGN
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
0x0092: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x0095: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x0098: 0x00a4, # CURRENCY SIGN
0x0099: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00a2, # CENT SIGN
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
0x009e: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00a0: 0x00a6, # BROKEN BAR
0x00a1: 0x00b4, # ACUTE ACCENT
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00a8, # DIAERESIS
0x00a5: 0x00b8, # CEDILLA
0x00a6: 0x00b3, # SUPERSCRIPT THREE
0x00a7: 0x00af, # MACRON
0x00a8: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00a9: 0x2310, # REVERSED NOT SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00be, # VULGAR FRACTION THREE QUARTERS
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00e3: 0x03c0, # GREEK SMALL LETTER PI
0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
0x00ec: 0x221e, # INFINITY
0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00ef: 0x2229, # INTERSECTION
0x00f0: 0x2261, # IDENTICAL TO
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x2320, # TOP HALF INTEGRAL
0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
u'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xc2' # 0x0084 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
u'\xb6' # 0x0086 -> PILCROW SIGN
u'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
u'\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\u2017' # 0x008d -> DOUBLE LOW LINE
u'\xc0' # 0x008e -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xa7' # 0x008f -> SECTION SIGN
u'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xc8' # 0x0091 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xca' # 0x0092 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xcb' # 0x0094 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xcf' # 0x0095 -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
u'\xa4' # 0x0098 -> CURRENCY SIGN
u'\xd4' # 0x0099 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xa2' # 0x009b -> CENT SIGN
u'\xa3' # 0x009c -> POUND SIGN
u'\xd9' # 0x009d -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xdb' # 0x009e -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
u'\xa6' # 0x00a0 -> BROKEN BAR
u'\xb4' # 0x00a1 -> ACUTE ACCENT
u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
u'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
u'\xa8' # 0x00a4 -> DIAERESIS
u'\xb8' # 0x00a5 -> CEDILLA
u'\xb3' # 0x00a6 -> SUPERSCRIPT THREE
u'\xaf' # 0x00a7 -> MACRON
u'\xce' # 0x00a8 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\u2310' # 0x00a9 -> REVERSED NOT SIGN
u'\xac' # 0x00aa -> NOT SIGN
u'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
u'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
u'\xbe' # 0x00ad -> VULGAR FRACTION THREE QUARTERS
u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
u'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
u'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
u'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
u'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
u'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
u'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
u'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
u'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
u'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
u'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
u'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
u'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
u'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
u'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\u258c' # 0x00dd -> LEFT HALF BLOCK
u'\u2590' # 0x00de -> RIGHT HALF BLOCK
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
u'\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
u'\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
u'\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
u'\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
u'\xb5' # 0x00e6 -> MICRO SIGN
u'\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
u'\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
u'\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
u'\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
u'\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
u'\u221e' # 0x00ec -> INFINITY
u'\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
u'\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
u'\u2229' # 0x00ef -> INTERSECTION
u'\u2261' # 0x00f0 -> IDENTICAL TO
u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
u'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
u'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
u'\u2320' # 0x00f4 -> TOP HALF INTEGRAL
u'\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
u'\xf7' # 0x00f6 -> DIVISION SIGN
u'\u2248' # 0x00f7 -> ALMOST EQUAL TO
u'\xb0' # 0x00f8 -> DEGREE SIGN
u'\u2219' # 0x00f9 -> BULLET OPERATOR
u'\xb7' # 0x00fa -> MIDDLE DOT
u'\u221a' # 0x00fb -> SQUARE ROOT
u'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
u'\xb2' # 0x00fd -> SUPERSCRIPT TWO
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
# Build the encoding map by inverting ``decoding_table`` (one character per
# byte 0x00-0xFF).  codecs.charmap_build() produces a fast C-level
# EncodingMap object that codecs.charmap_encode() consumes directly; this is
# how modern gencodec.py output (and CPython's own Lib/encodings/cp863.py)
# defines it.  It maps exactly the same characters to the same bytes as the
# old 250-entry literal dict, while keeping the encode and decode tables
# automatically in sync.
# NOTE: encoding_map is now an EncodingMap object rather than a plain dict;
# within this module only charmap_encode() uses it, which accepts both.
encoding_map = codecs.charmap_build(decoding_table)
| gpl-2.0 |
ptphp/PyLib | src/webpy1/src/fetchtel/fetch99tel.py | 2 | 1894 | # coding=utf-8
import sys,re
import urllib2,urllib,sys,socket,re
from PyQt4.QtCore import *
import urllib,time
from BeautifulSoup import BeautifulSoup
reload(sys)
sys.setdefaultencoding('utf-8') #@UndefinedVariable
class baseSpider(QThread):
    """QThread-based crawler that scrapes agency phone numbers from
    99fang.com list pages and submits each number to a jjr360.com endpoint.

    NOTE(review): several issues worth fixing here:
      * ``start()`` overrides ``QThread.start()``, so calling it runs the
        whole crawl synchronously in the caller's thread; the canonical Qt
        pattern is to override ``run()`` and let ``start()`` spawn the
        worker thread.
      * The suspend loop calls ``self.wait()`` (which waits for the thread
        to *finish*, not for a resume) and then unconditionally returns,
        so the first suspension aborts the crawl instead of pausing it.
      * Python 2 only (``print`` statements, ``urllib2``).
    """
    def __init__(self,parent = None):
        super(baseSpider,self).__init__(parent)
        # Cooperative control flags; written under ``self.mutex`` by
        # stop()/suspend() but read without it in start().
        self.suspended = False
        self.stoped = False
        self.mutex = QMutex()
    def _init(self,s,e):
        # Record the requested [s, e) page range.  NOTE(review): currently
        # unused -- start() iterates the hard-coded range(1, 3) instead.
        self.start_p = s
        self.end_p =e
    def start(self):
        """Crawl the list pages and forward each scraped phone number.

        NOTE(review): because this overrides QThread.start(), no worker
        thread is ever created; the loop below blocks the calling thread.
        """
        with QMutexLocker(self.mutex):
            self.stoped = False
        #for i in range(self.start_p,self.end_p):
        for i in range(1,3):
            # NOTE(review): ``return`` sits inside the while body, so the
            # first time ``suspended`` is observed True the method exits
            # entirely rather than pausing until resumed.
            while self.suspended:
                self.wait()
                return
            if self.stoped:
                return
            url ="http://www.99fang.com/service/agency/a1/?p=%d" % i
            print url
            try:
                r = urllib2.urlopen(url).read()
                soup = BeautifulSoup(r)
                # Phone numbers live in <li><a> nodes inside the
                # "agency-call-box" div of each list page.
                box = soup.find("div",{'class':'agency-call-box'})
                lis = box("li")
                for li in lis:
                    tel = li.a.string
                    print tel
                    # Forward the number to the jjr360 spider endpoint.
                    r =urllib2.urlopen("http://suzhou.jjr360.com/app.php?c=spider&a=index&city=&tel=%s" % tel)
                    print r.read()
            # NOTE(review): bare except silently swallows every error
            # (network failures, missing markup, even KeyboardInterrupt on
            # Python 2) -- narrow this and log the failure.
            except:
                pass
            else:
                #self.emit(SIGNAL("updateTime()"))
                # Throttle: pause one second after each successful page.
                time.sleep(1)
    def stop(self):
        # Request a permanent stop; polled once per page in start().
        with QMutexLocker(self.mutex):
            self.stoped = True
            self.suspended = False
    def suspend(self):
        # Request a pause; see the NOTE in start() -- this currently
        # aborts the crawl rather than pausing it.
        with QMutexLocker(self.mutex):
            self.suspended = True
            self.stoped = False
if __name__ == "__main__":
    # Crawl pages 1-3 (baseSpider.start runs the loop in this thread).
    spider = baseSpider()
    spider._init(1, 3)
    spider.start()
| apache-2.0 |
TalShafir/ansible | test/units/modules/network/nxos/test_nxos_ospf.py | 45 | 2043 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.nxos import nxos_ospf
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosOspfModule(TestNxosModule):
    """Unit tests for the ``nxos_ospf`` module."""

    module = nxos_ospf

    def setUp(self):
        super(TestNxosOspfModule, self).setUp()
        # Patch the config helpers so no device connection is attempted.
        self.mock_get_config = patch('ansible.modules.network.nxos.nxos_ospf.get_config')
        self.get_config = self.mock_get_config.start()
        self.mock_load_config = patch('ansible.modules.network.nxos.nxos_ospf.load_config')
        self.load_config = self.mock_load_config.start()

    def tearDown(self):
        super(TestNxosOspfModule, self).tearDown()
        self.mock_get_config.stop()
        self.mock_load_config.stop()

    def load_fixtures(self, commands=None, device=''):
        # Pushing config is a no-op in these tests.
        self.load_config.return_value = None

    def test_nxos_ospf_present(self):
        """Creating an OSPF process emits the router command."""
        set_module_args(dict(ospf=1, state='present'))
        result = self.execute_module(changed=True)
        self.assertEqual(result['commands'], ['router ospf 1'])

    def test_nxos_ospf_absent(self):
        """Removing a non-existent OSPF process changes nothing."""
        set_module_args(dict(ospf=1, state='absent'))
        result = self.execute_module(changed=False)
        self.assertEqual(result['commands'], [])
| gpl-3.0 |
procangroup/edx-platform | common/lib/xmodule/xmodule/tests/test_annotator_mixin.py | 223 | 1932 | """
This test will run for annotator_mixin.py
"""
import unittest
from lxml import etree
from xmodule.annotator_mixin import get_instructions, get_extension, html_to_text
class HelperFunctionTest(unittest.TestCase):
    """
    Tests to ensure that the following helper functions work for the annotation tool
    """
    sample_xml = '''
        <annotatable>
            <instructions><p>Helper Test Instructions.</p></instructions>
        </annotatable>
    '''
    sample_sourceurl = "http://video-js.zencoder.com/oceans-clip.mp4"
    sample_youtubeurl = "http://www.youtube.com/watch?v=yxLIu-scR9Y"
    sample_html = '<p><b>Testing here</b> and not bolded here</p>'

    def test_get_instructions(self):
        """
        get_instructions() turns an <instructions> child into a <div> HTML
        string, and returns None when no such child exists.
        """
        tree = etree.fromstring(self.sample_xml)
        expected = u"<div><p>Helper Test Instructions.</p></div>"
        actual = get_instructions(tree)
        self.assertIsNotNone(actual)
        self.assertEqual(expected.strip(), actual.strip())

        tree = etree.fromstring('<annotatable>foo</annotatable>')
        self.assertIsNone(get_instructions(tree))

    def test_get_extension(self):
        """
        get_extension() reports 'video/youtube' for YouTube URLs and the
        file extension (e.g. 'video/mp4') otherwise.
        """
        self.assertEqual('video/mp4', get_extension(self.sample_sourceurl))
        self.assertEqual('video/youtube', get_extension(self.sample_youtubeurl))

    def test_html_to_text(self):
        """html_to_text() strips markup, leaving only the text content."""
        self.assertEqual("Testing here and not bolded here",
                         html_to_text(self.sample_html))
| agpl-3.0 |
lepinsk/pydub | setup.py | 1 | 1425 | __doc__ = """
Manipulate audio with an simple and easy high level interface.
See the README file for details, usage info, and a list of gotchas.
"""
from setuptools import setup
# Package metadata for pydub; the long description on PyPI reuses the
# module docstring defined above.
setup(
    name='pydub',
    version='0.9.0',
    author='James Robert',
    author_email='jiaaro@gmail.com',
    description='Manipulate audio with an simple and easy high level interface',
    license='MIT',
    keywords='audio sound high-level',
    url='http://pydub.com',
    packages=['pydub'],
    long_description=__doc__,
    # Trove classifiers (see https://pypi.org/classifiers/): declares the
    # supported Python versions and the audio-processing topic tags.
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        "Topic :: Multimedia :: Sound/Audio",
        "Topic :: Multimedia :: Sound/Audio :: Analysis",
        "Topic :: Multimedia :: Sound/Audio :: Conversion",
        "Topic :: Multimedia :: Sound/Audio :: Editors",
        "Topic :: Multimedia :: Sound/Audio :: Mixers",
        "Topic :: Software Development :: Libraries",
        'Topic :: Utilities',
    ]
)
| mit |
Jeongseob/xen-coboost-sched | tools/python/xen/xend/XendBootloader.py | 35 | 7321 | #
# XendBootloader.py - Framework to run a boot loader for picking the kernel
#
# Copyright 2005-2006 Red Hat, Inc.
# Jeremy Katz <katzj@redhat.com>
#
# This software may be freely redistributed under the terms of the GNU
# general public license.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
import os, select, errno, stat, signal, tty
import random
import shlex
from xen.xend import sxp
from xen.util import mkdir, oshelp
from XendLogging import log
from XendError import VmError
import pty, termios, fcntl
from xen.lowlevel import ptsname
def bootloader(blexec, disk, dom, quiet = False, blargs = '', kernel = '',
               ramdisk = '', kernel_args = ''):
    """Run the boot loader executable on the given disk and return a
    config image.
    @param blexec Binary to use as the boot loader
    @param disk Disk to run the boot loader on.
    @param dom DomainInfo representing the domain being booted.
    @param quiet Run in non-interactive mode, just booting the default.
    @param blargs Arguments to pass to the bootloader.
    @param kernel Kernel image name passed through to the bootloader.
    @param ramdisk Ramdisk name passed through to the bootloader.
    @param kernel_args Kernel command line passed through to the bootloader.
    @return The bootloader's output, parsed as an S-expression (sxp) value.
    @raise VmError if the bootloader/disk is unusable, the domain dies,
           or the bootloader produces no output."""

    # Sanity checks: the bootloader must be executable, the disk readable.
    if not os.access(blexec, os.X_OK):
        msg = "Bootloader isn't executable"
        log.error(msg)
        raise VmError(msg)
    if not os.access(disk, os.R_OK):
        msg = "Disk isn't accessible"
        log.error(msg)
        raise VmError(msg)

    # NetBSD exposes the character (raw) variant of a block device as
    # /dev/rXXX; rewrite the path accordingly.
    if os.uname()[0] == "NetBSD" and disk.startswith('/dev/'):
        disk = "/r".join(disk.rsplit("/",1))

    # Create a randomly-named FIFO on which the bootloader will write the
    # chosen boot configuration back to us.
    mkdir.parents("/var/run/xend/boot/", stat.S_IRWXU)

    while True:
        fifo = "/var/run/xend/boot/xenbl.%s" %(random.randint(0, 32000),)
        try:
            os.mkfifo(fifo, 0600)
        except OSError, e:
            if (e.errno != errno.EEXIST):
                raise
        break

    # We need to present the bootloader's tty as a pty slave that xenconsole
    # can access.  Since the bootloader itself needs a pty slave,
    # we end up with a connection like this:
    #
    # xenconsole -- (slave pty1 master) <-> (master pty2 slave) -- bootloader
    #
    # where we copy characters between the two master fds, as well as
    # listening on the bootloader's fifo for the results.

    (m1, s1) = pty.openpty()

    # On Solaris, the pty master side will get cranky if we try
    # to write to it while there is no slave. To work around this,
    # keep the slave descriptor open until we're done. Set it
    # to raw terminal parameters, otherwise it will echo back
    # characters, which will confuse the I/O loop below.
    # Furthermore, a raw master pty device has no terminal
    # semantics on Solaris, so don't try to set any attributes
    # for it.
    if os.uname()[0] != 'SunOS' and os.uname()[0] != 'NetBSD':
        tty.setraw(m1)
        os.close(s1)
    else:
        tty.setraw(s1)

    # Non-blocking I/O on the xenconsole-facing master.
    fcntl.fcntl(m1, fcntl.F_SETFL, os.O_NDELAY)

    # Publish the slave tty name so xenconsole can attach to it.
    slavename = ptsname.ptsname(m1)
    dom.storeDom("console/tty", slavename)

    # Release the domain lock here, because we definitely don't want
    # a stuck bootloader to deny service to other xend clients.
    from xen.xend import XendDomain
    domains = XendDomain.instance()
    domains.domains_lock.release()

    # Fork the bootloader child attached to a second pty (m2 is its master).
    (child, m2) = pty.fork()
    if (not child):
        # Child: build the bootloader command line and exec it.
        args = [ blexec ]
        if kernel:
            args.append("--kernel=%s" % kernel)
        if ramdisk:
            args.append("--ramdisk=%s" % ramdisk)
        if kernel_args:
            args.append("--args=%s" % kernel_args)
        if quiet:
            args.append("-q")
        args.append("--output=%s" % fifo)
        if blargs:
            args.extend(shlex.split(blargs))
        args.append(disk)

        try:
            log.debug("Launching bootloader as %s." % str(args))
            env = os.environ.copy()
            env['TERM'] = 'vt100'
            oshelp.close_fds()
            os.execvpe(args[0], args, env)
        except OSError, e:
            print e
            pass
        # exec failed: exit the child without running parent cleanup code.
        os._exit(1)

    # record that this domain is bootloading
    dom.bootloader_pid = child

    # On Solaris, the master pty side does not have terminal semantics,
    # so don't try to set any attributes, as it will fail.
    if os.uname()[0] != 'SunOS':
        tty.setraw(m2);
    fcntl.fcntl(m2, fcntl.F_SETFL, os.O_NDELAY);

    # Open the result FIFO, retrying if interrupted by a signal.
    while True:
        try:
            r = os.open(fifo, os.O_RDONLY)
        except OSError, e:
            if e.errno == errno.EINTR:
                continue
        break

    fcntl.fcntl(r, fcntl.F_SETFL, os.O_NDELAY);

    ret = ""
    inbuf=""; outbuf="";
    # filedescriptors:
    # r - input from the bootloader (bootstring output)
    # m1 - input/output from/to xenconsole
    # m2 - input/output from/to pty that controls the bootloader
    # The filedescriptors are NDELAY, so it's ok to try to read
    # bigger chunks than may be available, to keep e.g. curses
    # screen redraws in the bootloader efficient. m1 is the side that
    # gets xenconsole input, which will be keystrokes, so a small number
    # is sufficient. m2 is pygrub output, which will be curses screen
    # updates, so a larger number (1024) is appropriate there.
    #
    # For writeable descriptors, only include them in the set for select
    # if there is actual data to write, otherwise this would loop too fast,
    # eating up CPU time.
    while True:
        wsel = []
        if len(outbuf) != 0:
            wsel = wsel + [m1]
        if len(inbuf) != 0:
            wsel = wsel + [m2]
        sel = select.select([r, m1, m2], wsel, [])
        try:
            # Keystrokes from xenconsole -> bootloader pty.
            if m1 in sel[0]:
                s = os.read(m1, 16)
                inbuf += s
            if m2 in sel[1]:
                n = os.write(m2, inbuf)
                inbuf = inbuf[n:]
        except OSError, e:
            # EIO: the other end of the pty went away; ignore and carry on.
            if e.errno == errno.EIO:
                pass
        try:
            # Bootloader screen output -> xenconsole pty.
            if m2 in sel[0]:
                s = os.read(m2, 1024)
                outbuf += s
            if m1 in sel[1]:
                n = os.write(m1, outbuf)
                outbuf = outbuf[n:]
        except OSError, e:
            if e.errno == errno.EIO:
                pass
        # Result data on the FIFO; EOF (empty read) ends the copy loop.
        if r in sel[0]:
            s = os.read(r, 128)
            ret = ret + s
            if len(s) == 0:
                break
    del inbuf
    del outbuf
    # Reap the bootloader child and tear down our descriptors and FIFO.
    os.waitpid(child, 0)
    os.close(r)
    os.close(m2)
    os.close(m1)
    if os.uname()[0] == 'SunOS' or os.uname()[0] == 'NetBSD':
        os.close(s1)
    os.unlink(fifo)

    # Re-acquire the lock to cover the changes we're about to make
    # when we return to domain creation.
    domains.domains_lock.acquire()

    # bootloader_tidy clears bootloader_pid when the domain is destroyed;
    # if it is None here, the domain died while we were copying I/O.
    if dom.bootloader_pid is None:
        msg = "Domain was died while the bootloader was running."
        log.error(msg)
        raise VmError, msg

    dom.bootloader_pid = None

    if len(ret) == 0:
        msg = "Boot loader didn't return any data!"
        log.error(msg)
        raise VmError, msg

    # Parse the collected output as an S-expression boot configuration.
    pin = sxp.Parser()
    pin.input(ret)
    pin.input_eof()
    blcfg = pin.val
    return blcfg
def bootloader_tidy(dom):
    """Forcibly terminate a bootloader still running for *dom*, if any.

    Clears dom.bootloader_pid before killing so concurrent observers see
    the domain as no longer bootloading."""
    pid = getattr(dom, "bootloader_pid", None)
    if pid is not None:
        dom.bootloader_pid = None
        os.kill(pid, signal.SIGKILL)
| gpl-2.0 |
dennisss/sympy | sympy/mpmath/libmp/gammazeta.py | 17 | 78631 | """
-----------------------------------------------------------------------
This module implements gamma- and zeta-related functions:
* Bernoulli numbers
* Factorials
* The gamma function
* Polygamma functions
* Harmonic numbers
* The Riemann zeta function
* Constants related to these functions
-----------------------------------------------------------------------
"""
import math
from .backend import xrange
from .backend import MPZ, MPZ_ZERO, MPZ_ONE, MPZ_THREE, gmpy
from .libintmath import list_primes, ifac, ifac2, moebius
from .libmpf import (\
round_floor, round_ceiling, round_down, round_up,
round_nearest, round_fast,
lshift, sqrt_fixed, isqrt_fast,
fzero, fone, fnone, fhalf, ftwo, finf, fninf, fnan,
from_int, to_int, to_fixed, from_man_exp, from_rational,
mpf_pos, mpf_neg, mpf_abs, mpf_add, mpf_sub,
mpf_mul, mpf_mul_int, mpf_div, mpf_sqrt, mpf_pow_int,
mpf_rdiv_int,
mpf_perturb, mpf_le, mpf_lt, mpf_gt, mpf_shift,
negative_rnd, reciprocal_rnd,
bitcount, to_float, mpf_floor, mpf_sign, ComplexResult
)
from .libelefun import (\
constant_memo,
def_mpf_constant,
mpf_pi, pi_fixed, ln2_fixed, log_int_fixed, mpf_ln2,
mpf_exp, mpf_log, mpf_pow, mpf_cosh,
mpf_cos_sin, mpf_cosh_sinh, mpf_cos_sin_pi, mpf_cos_pi, mpf_sin_pi,
ln_sqrt2pi_fixed, mpf_ln_sqrt2pi, sqrtpi_fixed, mpf_sqrtpi,
cos_sin_fixed, exp_fixed
)
from .libmpc import (\
mpc_zero, mpc_one, mpc_half, mpc_two,
mpc_abs, mpc_shift, mpc_pos, mpc_neg,
mpc_add, mpc_sub, mpc_mul, mpc_div,
mpc_add_mpf, mpc_mul_mpf, mpc_div_mpf, mpc_mpf_div,
mpc_mul_int, mpc_pow_int,
mpc_log, mpc_exp, mpc_pow,
mpc_cos_pi, mpc_sin_pi,
mpc_reciprocal, mpc_square,
mpc_sub_mpf
)
# Catalan's constant is computed using Lupas's rapidly convergent series
# (listed on http://mathworld.wolfram.com/CatalansConstant.html)
# oo
# ___ n-1 8n 2 3 2
# 1 \ (-1) 2 (40n - 24n + 3) [(2n)!] (n!)
# K = --- ) -----------------------------------------
# 64 /___ 3 2
# n (2n-1) [(4n)!]
# n = 1
@constant_memo
def catalan_fixed(prec):
    """Catalan's constant as a fixed-point integer with *prec* fractional
    bits, using Lupas's rapidly convergent series (see comment above).
    20 guard bits are used internally; `a` carries the running
    factorial/binomial ratio between successive terms."""
    prec = prec + 20
    a = one = MPZ_ONE << prec  # `one` kept for clarity; only `a` is updated
    s, t, n = 0, 1, 1
    while t:
        a *= 32 * n**3 * (2*n-1)
        a //= (3-16*n+16*n**2)**2
        t = a * (-1)**(n-1) * (40*n**2-24*n+3) // (n**3 * (2*n-1))
        s += t
        n += 1
    # Drop the 20 guard bits and apply the 1/64 prefactor (>> 6).
    return s >> (20 + 6)
# Khinchin's constant is relatively difficult to compute. Here
# we use the rational zeta series
# oo 2*n-1
# ___ ___
# \ ` zeta(2*n)-1 \ ` (-1)^(k+1)
# log(K)*log(2) = ) ------------ ) ----------
# /___. n /___. k
# n = 1 k = 1
# which adds half a digit per term. The essential trick for achieving
# reasonable efficiency is to recycle both the values of the zeta
# function (essentially Bernoulli numbers) and the partial terms of
# the inner sum.
# An alternative might be to use K = 2*exp[1/log(2) X] where
# / 1 1 [ pi*x*(1-x^2) ]
# X = | ------ log [ ------------ ].
# / 0 x(1+x) [ sin(pi*x) ]
# and integrate numerically. In practice, this seems to be slightly
# slower than the zeta series at high precision.
@constant_memo
def khinchin_fixed(prec):
    """Khinchin's constant as a fixed-point integer with *prec* fractional
    bits, via the rational zeta series described in the comment above.
    zeta(2n)-1 is obtained from Bernoulli numbers using
    zeta(2n) = |B_2n| (2*pi)^(2n) / (2*(2n)!); `fac` tracks 2*(2n)! and
    `t` the alternating-harmonic partial sum of the inner series."""
    wp = int(prec + prec**0.5 + 15)
    s = MPZ_ZERO
    fac = from_int(4)
    t = ONE = MPZ_ONE << wp
    pi = mpf_pi(wp)
    pipow = twopi2 = mpf_shift(mpf_mul(pi, pi, wp), 2)
    n = 1
    while 1:
        zeta2n = mpf_abs(mpf_bernoulli(2*n, wp))
        zeta2n = mpf_mul(zeta2n, pipow, wp)
        zeta2n = mpf_div(zeta2n, fac, wp)
        zeta2n = to_fixed(zeta2n, wp)
        term = (((zeta2n - ONE) * t) // n) >> wp
        # Terms shrink monotonically; stop when below the noise floor.
        if term < 100:
            break
        #if not n % 10:
        #    print n, math.log(int(abs(term)))
        s += term
        t += ONE//(2*n+1) - ONE//(2*n)
        n += 1
        fac = mpf_mul_int(fac, (2*n)*(2*n-1), wp)
        pipow = mpf_mul(pipow, twopi2, wp)
    # log(K)*log(2) = s  =>  K = exp(s/log(2)).
    s = (s << wp) // ln2_fixed(wp)
    K = mpf_exp(from_man_exp(s, -wp), wp)
    K = to_fixed(K, prec)
    return K
# Glaisher's constant is defined as A = exp(1/2 - zeta'(-1)).
# One way to compute it would be to perform direct numerical
# differentiation, but computing arbitrary Riemann zeta function
# values at high precision is expensive. We instead use the formula
# A = exp((6 (-zeta'(2))/pi^2 + log 2 pi + gamma)/12)
# and compute zeta'(2) from the series representation
# oo
# ___
# \ log k
# -zeta'(2) = ) -----
# /___ 2
# k
# k = 2
# This series converges exceptionally slowly, but can be accelerated
# using Euler-Maclaurin formula. The important insight is that the
# E-M integral can be done in closed form and that the high order
# are given by
# n / \
# d | log x | a + b log x
# --- | ----- | = -----------
# n | 2 | 2 + n
# dx \ x / x
# where a and b are integers given by a simple recurrence. Note
# that just one logarithm is needed. However, lots of integer
# logarithms are required for the initial summation.
# This algorithm could possibly be turned into a faster algorithm
# for general evaluation of zeta(s) or zeta'(s); this should be
# looked into.
@constant_memo
def glaisher_fixed(prec):
    """Glaisher's constant as a fixed-point integer with *prec* fractional
    bits.  Computes -zeta'(2) = sum log(k)/k^2 with Euler-Maclaurin
    acceleration, then exponentiates per the formula in the comment above."""
    wp = prec + 30
    # Number of direct terms to sum before applying the Euler-Maclaurin
    # formula to the tail. TODO: choose more intelligently
    N = int(0.33*prec + 5)
    ONE = MPZ_ONE << wp
    # Euler-Maclaurin, step 1: sum log(k)/k**2 for k from 2 to N-1
    s = MPZ_ZERO
    for k in range(2, N):
        #print k, N
        s += log_int_fixed(k, wp) // k**2
    logN = log_int_fixed(N, wp)
    #logN = to_fixed(mpf_log(from_int(N), wp+20), wp)
    # E-M step 2: integral of log(x)/x**2 from N to inf
    s += (ONE + logN) // N
    # E-M step 3: endpoint correction term f(N)/2
    s += logN // (N**2 * 2)
    # E-M step 4: the series of derivatives
    # The n-th derivative of log(x)/x^2 is (a + b*log(x))/x^(2+n);
    # (a, b) follow the simple recurrence advanced below.
    pN = N**3
    a = 1
    b = -2
    j = 3
    fac = from_int(2)
    k = 1
    while 1:
        # D(2*k-1) * B(2*k) / fac(2*k) [D(n) = nth derivative]
        D = ((a << wp) + b*logN) // pN
        D = from_man_exp(D, -wp)
        B = mpf_bernoulli(2*k, wp)
        term = mpf_mul(B, D, wp)
        term = mpf_div(term, fac, wp)
        term = to_fixed(term, wp)
        if abs(term) < 100:
            break
        #if not k % 10:
        #    print k, math.log(int(abs(term)), 10)
        s -= term
        # Advance derivative twice
        a, b, pN, j = b-a*j, -j*b, pN*N, j+1
        a, b, pN, j = b-a*j, -j*b, pN*N, j+1
        k += 1
        fac = mpf_mul_int(fac, (2*k)*(2*k-1), wp)
    # A = exp((6*s/pi**2 + log(2*pi) + euler)/12)
    pi = pi_fixed(wp)
    s *= 6
    s = (s << wp) // (pi**2 >> wp)
    s += euler_fixed(wp)
    s += to_fixed(mpf_log(from_man_exp(2*pi, -wp), wp), wp)
    s //= 12
    A = mpf_exp(from_man_exp(s, -wp), wp)
    return to_fixed(A, prec)
# Apery's constant can be computed using the very rapidly convergent
# series
# oo
# ___ 2 10
# \ n 205 n + 250 n + 77 (n!)
# zeta(3) = ) (-1) ------------------- ----------
# /___ 64 5
# n = 0 ((2n+1)!)
@constant_memo
def apery_fixed(prec):
    """Apery's constant zeta(3) as a fixed-point integer with *prec*
    fractional bits, using the rapidly convergent series in the comment
    above.  `d` carries the running (n!)^10/((2n+1)!^5 (2n)!^5-like) ratio."""
    prec += 20
    d = MPZ_ONE << prec
    term = MPZ(77) << prec
    n = 1
    s = MPZ_ZERO
    while term:
        s += term
        d *= (n**10)
        d //= (((2*n+1)**5) * (2*n)**5)
        term = (-1)**n * (205*(n**2) + 250*n + 77) * d
        n += 1
    # Drop the 20 guard bits and apply the 1/64 prefactor (>> 6).
    return s >> (20 + 6)
"""
Euler's constant (gamma) is computed using the Brent-McMillan formula,
gamma ~= I(n)/J(n) - log(n), where
I(n) = sum_{k=0,1,2,...} (n**k / k!)**2 * H(k)
J(n) = sum_{k=0,1,2,...} (n**k / k!)**2
H(k) = 1 + 1/2 + 1/3 + ... + 1/k
The error is bounded by O(exp(-4n)). Choosing n to be a power
of two, 2**p, the logarithm becomes particularly easy to calculate.[1]
We use the formulation of Algorithm 3.9 in [2] to make the summation
more efficient.
Reference:
[1] Xavier Gourdon & Pascal Sebah, The Euler constant: gamma
http://numbers.computation.free.fr/Constants/Gamma/gamma.pdf
[2] Jonathan Borwein & David Bailey, Mathematics by Experiment,
A K Peters, 2003
"""
@constant_memo
def euler_fixed(prec):
    """Euler's constant (gamma) as a fixed-point integer with *prec*
    fractional bits, via the Brent-McMillan formula gamma ~ I(n)/J(n) - log n
    (see the reference block above).  n = 2**p makes log(n) = p*log(2)."""
    extra = 30
    prec += extra
    # choose p such that exp(-4*(2**p)) < 2**-n
    p = int(math.log((prec/4) * math.log(2), 2)) + 1
    n = 2**p
    # U/V accumulate I(n)-J(n)*log(n) and J(n); A/B are the running terms.
    A = U = -p*ln2_fixed(prec)
    B = V = MPZ_ONE << prec
    k = 1
    while 1:
        B = B*n**2//k**2
        A = (A*n**2//k + B)//k
        U += A
        V += B
        if max(abs(A), abs(B)) < 100:
            break
        k += 1
    return (U<<(prec-extra))//V
# Use zeta accelerated formulas for the Mertens and twin
# prime constants; see
# http://mathworld.wolfram.com/MertensConstant.html
# http://mathworld.wolfram.com/TwinPrimesConstant.html
@constant_memo
def mertens_fixed(prec):
    """Mertens constant as a fixed-point integer with *prec* fractional
    bits, using the Moebius/zeta-accelerated series (link in comment above).
    Terminates when zeta(m) is indistinguishable from 1 at working precision."""
    wp = prec + 20
    m = 2
    s = mpf_euler(wp)
    while 1:
        t = mpf_zeta_int(m, wp)
        if t == fone:
            break
        t = mpf_log(t, wp)
        t = mpf_mul_int(t, moebius(m), wp)
        t = mpf_div(t, from_int(m), wp)
        s = mpf_add(s, t)
        m += 1
    return to_fixed(s, prec)
@constant_memo
def twinprime_fixed(prec):
    """Twin prime constant as a fixed-point integer with *prec* fractional
    bits, via a zeta-accelerated product (link in comment above)."""
    def I(n):
        # Moebius-inversion exponent used in the accelerated product.
        return sum(moebius(d)<<(n//d) for d in xrange(1,n+1) if not n%d)//n
    wp = 2*prec + 30
    res = fone
    # 1/p and (1/p)^2 for the small primes handled explicitly at the end.
    primes = [from_rational(1,p,wp) for p in [2,3,5,7]]
    ppowers = [mpf_mul(p,p,wp) for p in primes]
    n = 2
    while 1:
        a = mpf_zeta_int(n, wp)
        for i in range(4):
            # Strip the Euler factors of 2, 3, 5, 7 from zeta(n).
            a = mpf_mul(a, mpf_sub(fone, ppowers[i]), wp)
            ppowers[i] = mpf_mul(ppowers[i], primes[i], wp)
        a = mpf_pow_int(a, -I(n), wp)
        # Stop when the factor rounds to exactly 1 at the target precision.
        if mpf_pos(a, prec+10, 'n') == fone:
            break
        #from libmpf import to_str
        #print n, to_str(mpf_sub(fone, a), 6)
        res = mpf_mul(res, a, wp)
        n += 1
    # Reinstate the p = 3, 5, 7 factors: prod p(p-2)/(p-1)^2.
    res = mpf_mul(res, from_int(3*15*35), wp)
    res = mpf_div(res, from_int(4*16*36), wp)
    return to_fixed(res, prec)
# Public mpf-level constants: def_mpf_constant wraps each fixed-point
# generator above into a callable taking (prec, rnd) and returning an mpf.
mpf_euler = def_mpf_constant(euler_fixed)
mpf_apery = def_mpf_constant(apery_fixed)
mpf_khinchin = def_mpf_constant(khinchin_fixed)
mpf_glaisher = def_mpf_constant(glaisher_fixed)
mpf_catalan = def_mpf_constant(catalan_fixed)
mpf_mertens = def_mpf_constant(mertens_fixed)
mpf_twinprime = def_mpf_constant(twinprime_fixed)
#-----------------------------------------------------------------------#
# #
# Bernoulli numbers #
# #
#-----------------------------------------------------------------------#
# Largest index n for which B_n is computed via the cached sequential
# recurrence; larger n go through mpf_bernoulli_huge instead.
MAX_BERNOULLI_CACHE = 3000
"""
Small Bernoulli numbers and factorials are used in numerous summations,
so it is critical for speed that sequential computation is fast and that
values are cached up to a fairly high threshold.
On the other hand, we also want to support fast computation of isolated
large numbers. Currently, no such acceleration is provided for integer
factorials (though it is for large floating-point factorials, which are
computed via gamma if the precision is low enough).
For sequential computation of Bernoulli numbers, we use Ramanujan's formula
/ n + 3 \
B = (A(n) - S(n)) / | |
n \ n /
where A(n) = (n+3)/3 when n = 0 or 2 (mod 6), A(n) = -(n+3)/6
when n = 4 (mod 6), and
[n/6]
___
\ / n + 3 \
S(n) = ) | | * B
/___ \ n - 6*k / n-6*k
k = 1
For isolated large Bernoulli numbers, we use the Riemann zeta function
to calculate a numerical value for B_n. The von Staudt-Clausen theorem
can then be used to optionally find the exact value of the
numerator and denominator.
"""
# Per-working-precision cache: wp -> ({n: B_n as mpf}, recurrence state).
bernoulli_cache = {}
# Small mpf constants reused by the Ramanujan recurrence in mpf_bernoulli.
f3 = from_int(3)
f6 = from_int(6)
def bernoulli_size(n):
    """Accurately estimate the size of B_n (even n > 2 only)"""
    log2n = math.log(n, 2)
    estimate = 2.326 + 0.5 * log2n + n * (log2n - 4.094)
    return int(estimate)
# Above this precision, mpf_bernoulli computes the exact fraction first
# (bernfrac) and rounds, rather than working in floating point throughout.
BERNOULLI_PREC_CUTOFF = bernoulli_size(MAX_BERNOULLI_CACHE)
def mpf_bernoulli(n, prec, rnd=None):
    """Computation of Bernoulli numbers (numerically).

    Small/medium n are generated sequentially with Ramanujan's recurrence
    (see the module comment above) and cached per working precision;
    isolated large n, or requests far ahead of the cached state, are
    delegated to mpf_bernoulli_huge.  Raises ValueError for n < 0."""
    if n < 2:
        if n < 0:
            raise ValueError("Bernoulli numbers only defined for n >= 0")
        if n == 0:
            return fone
        if n == 1:
            return mpf_neg(fhalf)
    # For odd n > 1, the Bernoulli numbers are zero
    if n & 1:
        return fzero
    # If precision is extremely high, we can save time by computing
    # the Bernoulli number at a lower precision that is sufficient to
    # obtain the exact fraction, round to the exact fraction, and
    # convert the fraction back to an mpf value at the original precision
    if prec > BERNOULLI_PREC_CUTOFF and prec > bernoulli_size(n)*1.1 + 1000:
        p, q = bernfrac(n)
        return from_rational(p, q, prec, rnd or round_floor)
    if n > MAX_BERNOULLI_CACHE:
        return mpf_bernoulli_huge(n, prec, rnd)
    wp = prec + 30
    # Reuse nearby precisions
    wp += 32 - (prec & 31)
    cached = bernoulli_cache.get(wp)
    if cached:
        numbers, state = cached
        if n in numbers:
            if not rnd:
                return numbers[n]
            return mpf_pos(numbers[n], prec, rnd)
        m, bin, bin1 = state
        # Far beyond the cached sequence: cheaper to compute in isolation.
        if n - m > 10:
            return mpf_bernoulli_huge(n, prec, rnd)
    else:
        if n > 10:
            return mpf_bernoulli_huge(n, prec, rnd)
        numbers = {0:fone}
        # state = [next even index m, C(m+3, m), C(m+3, m-6)] (running binomials)
        m, bin, bin1 = state = [2, MPZ(10), MPZ_ONE]
        bernoulli_cache[wp] = (numbers, state)
    # Extend the cached sequence up to n using Ramanujan's formula
    # B_m = (A(m) - S(m)) / C(m+3, m)  (see module comment).
    while m <= n:
        #print m
        case = m % 6
        # Accurately estimate size of B_m so we can use
        # fixed point math without using too much precision
        szbm = bernoulli_size(m)
        s = 0
        sexp = max(0, szbm) - wp
        if m < 6:
            a = MPZ_ZERO
        else:
            a = bin1
        for j in xrange(1, m//6+1):
            usign, uman, uexp, ubc = u = numbers[m-6*j]
            if usign:
                uman = -uman
            s += lshift(a*uman, uexp-sexp)
            # Update inner binomial coefficient
            j6 = 6*j
            a *= ((m-5-j6)*(m-4-j6)*(m-3-j6)*(m-2-j6)*(m-1-j6)*(m-j6))
            a //= ((4+j6)*(5+j6)*(6+j6)*(7+j6)*(8+j6)*(9+j6))
        # A(m): (m+3)/3 when m = 0 or 2 (mod 6), -(m+3)/6 when m = 4 (mod 6)
        if case == 0: b = mpf_rdiv_int(m+3, f3, wp)
        if case == 2: b = mpf_rdiv_int(m+3, f3, wp)
        if case == 4: b = mpf_rdiv_int(-m-3, f6, wp)
        s = from_man_exp(s, sexp, wp)
        b = mpf_div(mpf_sub(b, s, wp), from_int(bin), wp)
        numbers[m] = b
        m += 2
        # Update outer binomial coefficient
        bin = bin * ((m+2)*(m+3)) // (m*(m-1))
        if m > 6:
            bin1 = bin1 * ((2+m)*(3+m)) // ((m-7)*(m-6))
        state[:] = [m, bin, bin1]
    return numbers[n]
def mpf_bernoulli_huge(n, prec, rnd=None):
    """Isolated large |B_n| (even n) via the zeta functional equation:
    |B_n| = 2 * n! * zeta(n) / (2*pi)^n."""
    wp = prec + 10
    # Extra pi precision compensates for the n-th power below.
    piprec = wp + int(math.log(n,2))
    v = mpf_gamma_int(n+1, wp)
    v = mpf_mul(v, mpf_zeta_int(n, wp), wp)
    v = mpf_mul(v, mpf_pow_int(mpf_pi(piprec), -n, wp))
    v = mpf_shift(v, 1-n)
    # B_n is negative exactly when n is divisible by 4.
    if not n & 3:
        v = mpf_neg(v)
    return mpf_pos(v, prec, rnd or round_fast)
def bernfrac(n):
    r"""
    Returns a tuple of integers `(p, q)` such that `p/q = B_n` exactly,
    where `B_n` denotes the `n`-th Bernoulli number. The fraction is
    always reduced to lowest terms. Note that for `n > 1` and `n` odd,
    `B_n = 0`, and `(0, 1)` is returned.

    Raises ``ValueError`` if `n` is negative.

    **Examples**

    The first few Bernoulli numbers are exactly::

        >>> from sympy.mpmath import *
        >>> for n in range(15):
        ...     p, q = bernfrac(n)
        ...     print("%s %s/%s" % (n, p, q))
        ...
        0 1/1
        1 -1/2
        2 1/6
        3 0/1
        4 -1/30
        5 0/1
        6 1/42
        7 0/1
        8 -1/30
        9 0/1
        10 5/66
        11 0/1
        12 -691/2730
        13 0/1
        14 7/6

    This function works for arbitrarily large `n`::

        >>> p, q = bernfrac(10**4)
        >>> print(q)
        2338224387510
        >>> print(len(str(p)))
        27692
        >>> mp.dps = 15
        >>> print(mpf(p) / q)
        -9.04942396360948e+27677
        >>> print(bernoulli(10**4))
        -9.04942396360948e+27677

    .. note ::

        :func:`~mpmath.bernoulli` computes a floating-point approximation
        directly, without computing the exact fraction first.
        This is much faster for large `n`.

    **Algorithm**

    :func:`~mpmath.bernfrac` works by computing the value of `B_n` numerically
    and then using the von Staudt-Clausen theorem [1] to reconstruct
    the exact fraction. For large `n`, this is significantly faster than
    computing `B_1, B_2, \ldots, B_2` recursively with exact arithmetic.
    The implementation has been tested for `n = 10^m` up to `m = 6`.

    In practice, :func:`~mpmath.bernfrac` appears to be about three times
    slower than the specialized program calcbn.exe [2]

    **References**

    1. MathWorld, von Staudt-Clausen Theorem:
    http://mathworld.wolfram.com/vonStaudt-ClausenTheorem.html

    2. The Bernoulli Number Page:
    http://bernoulli.org/

    """
    n = int(n)
    # BUGFIX: previously a negative n fell into the small-n table lookup,
    # where Python's negative indexing silently returned a wrong value
    # (e.g. bernfrac(-1) gave (1, 6)).  Reject it explicitly, matching
    # the error raised by mpf_bernoulli.
    if n < 0:
        raise ValueError("Bernoulli numbers only defined for n >= 0")
    if n < 3:
        return [(1, 1), (-1, 2), (1, 6)][n]
    if n & 1:
        # B_n = 0 for odd n > 1
        return (0, 1)
    # Von Staudt-Clausen: the denominator is the product of all primes k
    # with (k-1) | n.
    q = 1
    for k in list_primes(n+1):
        if not (n % (k-1)):
            q *= k
    # Compute B_n numerically with enough precision to round p = B_n*q
    # to the exact integer numerator.
    prec = bernoulli_size(n) + int(math.log(q,2)) + 20
    b = mpf_bernoulli(n, prec)
    p = mpf_mul(b, from_int(q))
    pint = to_int(p, round_nearest)
    return (pint, q)
#-----------------------------------------------------------------------#
# #
# The gamma function (OLD IMPLEMENTATION) #
# #
#-----------------------------------------------------------------------#
"""
We compute the real factorial / gamma function using Spouge's approximation
x! = (x+a)**(x+1/2) * exp(-x-a) * [c_0 + S(x) + eps]
where S(x) is the sum of c_k/(x+k) from k = 1 to a-1 and the coefficients
are given by
c_0 = sqrt(2*pi)
(-1)**(k-1)
c_k = ----------- (a-k)**(k-1/2) exp(-k+a), k = 1,2,...,a-1
(k - 1)!
As proved by Spouge, if we choose a = log(2)/log(2*pi)*n = 0.38*n, the
relative error eps is less than 2^(-n) for any x in the right complex
half-plane (assuming a > 2). In practice, it seems that a can be chosen
quite a bit lower still (30-50%); this possibility should be investigated.
For negative x, we use the reflection formula.
References:
-----------
John L. Spouge, "Computation of the gamma, digamma, and trigamma
functions", SIAM Journal on Numerical Analysis 31 (1994), no. 3, 931-944.
"""
# Cache of Spouge coefficient tables, keyed by precision:
# prec -> (prec, a, coefficient list).
spouge_cache = {}
def calc_spouge_coefficients(a, prec):
    """Compute the fixed-point Spouge coefficients c[0..a-1] at *prec*
    fractional bits (formulas in the comment block above).  `b` carries
    exp(a-k)/(k-1)! between iterations."""
    wp = prec + int(a*1.4)
    c = [0] * a
    # b = exp(a-1)
    b = mpf_exp(from_int(a-1), wp)
    # e = exp(1)
    e = mpf_exp(fone, wp)
    # sqrt(2*pi)
    sq2pi = mpf_sqrt(mpf_shift(mpf_pi(wp), 1), wp)
    c[0] = to_fixed(sq2pi, prec)
    for k in xrange(1, a):
        # c[k] = ((-1)**(k-1) * (a-k)**k) * b / sqrt(a-k)
        term = mpf_mul_int(b, ((-1)**(k-1) * (a-k)**k), wp)
        term = mpf_div(term, mpf_sqrt(from_int(a-k), wp), wp)
        c[k] = to_fixed(term, prec)
        # b = b / (e * k)
        b = mpf_div(b, mpf_mul(e, from_int(k), wp), wp)
    return c
# Cached lookup of coefficients
def get_spouge_coefficients(prec):
# This exact precision has been used before
if prec in spouge_cache:
return spouge_cache[prec]
for p in spouge_cache:
if 0.8 <= prec/float(p) < 1:
return spouge_cache[p]
# Here we estimate the value of a based on Spouge's inequality for
# the relative error
a = max(3, int(0.38*prec)) # 0.38 = log(2)/log(2*pi), ~= 1.26*n
coefs = calc_spouge_coefficients(a, prec)
spouge_cache[prec] = (prec, a, coefs)
return spouge_cache[prec]
def spouge_sum_real(x, prec, a, c):
    """Evaluate c[0] + sum_{k=1..a-1} c[k]/(x+k) in fixed point and
    return the result as an mpf rounded toward floor."""
    xf = to_fixed(x, prec)
    total = c[0] + sum((c[k] << prec) // (xf + (k << prec))
                       for k in xrange(1, a))
    return from_man_exp(total, -prec, prec, round_floor)
# Unused: for fast computation of gamma(p/q)
def spouge_sum_rational(p, q, prec, a, c):
    """Spouge sum for a rational argument p/q, entirely in integer
    arithmetic: c[0] + sum c[k]*q/(p + q*k)."""
    total = c[0] + sum(c[k] * q // (p + q*k) for k in xrange(1, a))
    return from_man_exp(total, -prec, prec, round_floor)
# For a complex number a + b*I, we have
#
# c_k (a+k)*c_k b * c_k
# ------------- = --------- - ------- * I
# (a + b*I) + k M M
#
# 2 2 2 2 2
# where M = (a+k) + b = (a + b ) + (2*a*k + k )
def spouge_sum_complex(re, im, prec, a, c):
    """Fixed-point Spouge sum for a complex argument re + im*I, returning
    the result as an (mpf_re, mpf_im) pair.  Uses the rationalized form
    c_k/((a+bi)+k) = ((a+k)c_k - b*c_k*i)/M with M = (a+k)^2 + b^2
    (see the comment block above)."""
    re = to_fixed(re, prec)
    im = to_fixed(im, prec)
    sre, sim = c[0], 0
    # mag = (a^2 + b^2) in fixed point; M below adds 2*a*k + k^2.
    mag = ((re**2)>>prec) + ((im**2)>>prec)
    for k in xrange(1, a):
        M = mag + re*(2*k) + ((k**2) << prec)
        sre += (c[k] * (re + (k << prec))) // M
        sim -= (c[k] * im) // M
    re = from_man_exp(sre, -prec, prec, round_floor)
    im = from_man_exp(sim, -prec, prec, round_floor)
    return re, im
def mpf_gamma_int_old(n, prec, rounding=round_fast):
    """Gamma(n) for a positive integer n.  Uses the exact factorial
    (n-1)! when n is small, or when the requested precision is a large
    fraction of the exact result's size; otherwise falls back to the
    numerical gamma function."""
    use_factorial = n < 1000
    if not use_factorial:
        # XXX: choose the cutoff less arbitrarily
        bits = int(n*math.log(n,2))
        use_factorial = prec > bits/20.0
    if use_factorial:
        return from_int(ifac(n-1), prec, rounding)
    return mpf_gamma(from_int(n), prec, rounding)
def mpf_factorial_old(x, prec, rounding=round_fast):
    """Real factorial x! = gamma(x+1), via the old gamma implementation."""
    return mpf_gamma_old(x, prec, rounding, 0)
def mpc_factorial_old(x, prec, rounding=round_fast):
    """Complex factorial x! = gamma(x+1), via the old gamma implementation."""
    return mpc_gamma_old(x, prec, rounding, 0)
def mpf_gamma_old(x, prec, rounding=round_fast, p1=1):
    """
    Computes the gamma function of a real floating-point argument,
    using Spouge's approximation (see the comment block above).
    With p1=0, computes a factorial (gamma(x+1)) instead.
    Raises ValueError at the poles (nonpositive integers).
    """
    sign, man, exp, bc = x
    if not man:
        if x == finf:
            return finf
        if x == fninf or x == fnan:
            return fnan
    # More precision is needed for enormous x. TODO:
    # use Stirling's formula + Euler-Maclaurin summation
    size = exp + bc
    if size > 5:
        size = int(size * math.log(size,2))
    wp = prec + max(0, size) + 15
    if exp >= 0:
        # x is a nonnegative integer here
        if sign or (p1 and not man):
            raise ValueError("gamma function pole")
        # A direct factorial is fastest
        if exp + bc <= 10:
            return from_int(ifac((man<<exp)-p1), prec, rounding)
    reflect = sign or exp+bc < -1
    if p1:
        # Should be done exactly!
        x = mpf_sub(x, fone)
    # x < 0.25
    if reflect:
        # gamma = pi / (sin(pi*x) * gamma(1-x))
        wp += 15
        pix = mpf_mul(x, mpf_pi(wp), wp)
        t = mpf_sin_pi(x, wp)
        g = mpf_gamma_old(mpf_sub(fone, x), wp)
        return mpf_div(pix, mpf_mul(t, g, wp), prec, rounding)
    sprec, a, c = get_spouge_coefficients(wp)
    s = spouge_sum_real(x, sprec, a, c)
    # gamma = exp(log(x+a)*(x+0.5) - xpa) * s
    xpa = mpf_add(x, from_int(a), wp)
    logxpa = mpf_log(xpa, wp)
    xph = mpf_add(x, fhalf, wp)
    t = mpf_sub(mpf_mul(logxpa, xph, wp), xpa, wp)
    t = mpf_mul(mpf_exp(t, wp), s, prec, rounding)
    return t
def mpc_gamma_old(x, prec, rounding=round_fast, p1=1):
    """
    Computes the gamma function of a complex floating-point argument,
    using Spouge's approximation. With p1=0, computes a factorial
    (gamma(x+1)) instead.  Real arguments defer to mpf_gamma_old.
    """
    re, im = x
    if im == fzero:
        return mpf_gamma_old(re, prec, rounding, p1), fzero
    # More precision is needed for enormous x.
    sign, man, exp, bc = re
    isign, iman, iexp, ibc = im
    if re == fzero:
        size = iexp+ibc
    else:
        size = max(exp+bc, iexp+ibc)
    if size > 5:
        size = int(size * math.log(size,2))
    reflect = sign or (exp+bc < -1)
    wp = prec + max(0, size) + 25
    # Near x = 0 pole (TODO: other poles)
    if p1:
        if size < -prec-5:
            # gamma(1+x) ~ 1/x - euler for x -> 0
            return mpc_add_mpf(mpc_div(mpc_one, x, 2*prec+10), \
                mpf_neg(mpf_euler(2*prec+10)), prec, rounding)
        elif size < -5:
            wp += (-2*size)
    if p1:
        # Should be done exactly!
        # NOTE(review): re_orig is only bound when p1 is true; the
        # near-real-axis branch below reads it unconditionally, so that
        # branch with p1=0 would raise NameError (pre-existing behavior).
        re_orig = re
        re = mpf_sub(re, fone, bc+abs(exp)+2)
        x = re, im
    if reflect:
        # Reflection formula: gamma(x) = pi / (sin(pi*x) * gamma(1-x))
        wp += 15
        pi = mpf_pi(wp), fzero
        pix = mpc_mul(x, pi, wp)
        t = mpc_sin_pi(x, wp)
        u = mpc_sub(mpc_one, x, wp)
        g = mpc_gamma_old(u, wp)
        w = mpc_mul(t, g, wp)
        # BUGFIX: round the final quotient to the caller's precision and
        # rounding mode (was mpc_div(pix, w, wp), which returned an
        # unrounded working-precision result), matching mpf_gamma_old.
        return mpc_div(pix, w, prec, rounding)
    # Extremely close to the real line?
    # XXX: reflection formula
    if iexp+ibc < -wp:
        # First-order Taylor expansion around the real axis:
        # gamma(a+b*i) ~ gamma(a) + b*i*gamma'(a), with gamma' = gamma*psi.
        a = mpf_gamma_old(re_orig, wp)
        b = mpf_psi0(re_orig, wp)
        # BUGFIX: gamma'(x) = gamma(x)*psi(x); this previously used
        # mpf_div, which computed gamma(x)/psi(x) and gave a wrong
        # imaginary part.
        gamma_diff = mpf_mul(a, b, wp)
        return mpf_pos(a, prec, rounding), mpf_mul(gamma_diff, im, prec, rounding)
    sprec, a, c = get_spouge_coefficients(wp)
    s = spouge_sum_complex(re, im, sprec, a, c)
    # gamma = exp(log(x+a)*(x+0.5) - xpa) * s
    repa = mpf_add(re, from_int(a), wp)
    logxpa = mpc_log((repa, im), wp)
    reph = mpf_add(re, fhalf, wp)
    t = mpc_sub(mpc_mul(logxpa, (reph, im), wp), (repa, im), wp)
    t = mpc_mul(mpc_exp(t, wp), s, prec, rounding)
    return t
#-----------------------------------------------------------------------#
# #
# Polygamma functions #
# #
#-----------------------------------------------------------------------#
"""
For all polygamma (psi) functions, we use the Euler-Maclaurin summation
formula. It looks slightly different in the m = 0 and m > 0 cases.
For m = 0, we have
oo
___ B
(0) 1 \ 2 k -2 k
psi (z) ~ log z + --- - ) ------ z
2 z /___ (2 k)!
k = 1
Experiment shows that the minimum term of the asymptotic series
reaches 2^(-p) when Re(z) > 0.11*p. So we simply use the recurrence
for psi (equivalent, in fact, to summing to the first few terms
directly before applying E-M) to obtain z large enough.
Since, very crudely, log z ~= 1 for Re(z) > 1, we can use
fixed-point arithmetic (if z is extremely large, log(z) itself
is a sufficient approximation, so we can stop there already).
For Re(z) << 0, we could use recurrence, but this is of course
inefficient for large negative z, so there we use the
reflection formula instead.
For m > 0, we have
N - 1
___
~~~(m) [ \ 1 ] 1 1
psi (z) ~ [ ) -------- ] + ---------- + -------- +
[ /___ m+1 ] m+1 m
k = 1 (z+k) ] 2 (z+N) m (z+N)
oo
___ B
\ 2 k (m+1) (m+2) ... (m+2k-1)
+ ) ------ ------------------------
/___ (2 k)! m + 2 k
k = 1 (z+N)
where ~~~ denotes the function rescaled by 1/((-1)^(m+1) m!).
Here again N is chosen to make z+N large enough for the minimum
term in the last series to become smaller than eps.
TODO: the current estimation of N for m > 0 is *very suboptimal*.
TODO: implement the reflection formula for m > 0, Re(z) << 0.
It is generally a combination of multiple cotangents. Need to
figure out a reasonably simple way to generate these formulas
on the fly.
TODO: maybe use exact algorithms to compute psi for integral
and certain rational arguments, as this can be much more
efficient. (On the other hand, the availability of these
special values provides a convenient way to test the general
algorithm.)
"""
# Harmonic numbers are just shifted digamma functions
# We should calculate these exactly when x is an integer
# and when doing so is faster.
def mpf_harmonic(x, prec, rnd):
    """Harmonic number H(x) = euler + psi0(x+1) for a real argument.
    0, +inf and nan are fixed points and returned unchanged."""
    special_values = (fzero, fnan, finf)
    if x in special_values:
        return x
    shifted = mpf_add(fone, x, prec+5)
    psi_val = mpf_psi0(shifted, prec)
    return mpf_add(psi_val, mpf_euler(prec+5, rnd), prec, rnd)
def mpc_harmonic(z, prec, rnd):
    """Harmonic number H(z) = euler + psi0(z+1) for a complex argument;
    defers to mpf_harmonic when z lies on the real line."""
    re, im = z
    if im == fzero:
        return (mpf_harmonic(re, prec, rnd), fzero)
    shifted = mpc_add_mpf(z, fone, prec+5)
    return mpc_add_mpf(mpc_psi0(shifted, prec), mpf_euler(prec+5, rnd), prec, rnd)
def mpf_psi0(x, prec, rnd=round_fast):
    """
    Computation of the digamma function (psi function of order 0)
    of a real argument.

    Raises ValueError at the poles (zero and negative integers).
    """
    sign, man, exp, bc = x
    wp = prec + 10
    # Special values (inf/nan have a zero mantissa field)
    if not man:
        if x == finf: return x
        if x == fninf or x == fnan: return fnan
    if x == fzero or (exp >= 0 and sign):
        raise ValueError("polygamma pole")
    # Reflection formula: psi(x) = psi(1-x) - pi*cot(pi*x), for x << 0
    if sign and exp+bc > 3:
        c, s = mpf_cos_sin_pi(x, wp)
        q = mpf_mul(mpf_div(c, s, wp), mpf_pi(wp), wp)
        p = mpf_psi0(mpf_sub(fone, x, wp), wp)
        return mpf_sub(p, q, prec, rnd)
    # The logarithmic term is accurate enough
    if (not sign) and bc + exp > wp:
        return mpf_log(mpf_sub(x, fone, wp), prec, rnd)
    # Initial recurrence to obtain a large enough x
    m = to_int(x)
    n = int(0.11*wp) + 2
    s = MPZ_ZERO
    x = to_fixed(x, wp)
    one = MPZ_ONE << wp
    if m < n:
        for k in xrange(m, n):
            s -= (one << wp) // x
            x += one
    x -= one
    # Logarithmic term
    s += to_fixed(mpf_log(from_man_exp(x, -wp, wp), wp), wp)
    # Endpoint term in Euler-Maclaurin expansion
    s += (one << wp) // (2*x)
    # Euler-Maclaurin remainder sum
    x2 = (x*x) >> wp
    t = one
    prev = 0
    k = 1
    while 1:
        t = (t*x2) >> wp
        bsign, bman, bexp, bbc = mpf_bernoulli(2*k, wp)
        offset = (bexp + 2*wp)
        if offset >= 0: term = (bman << offset) // (t*(2*k))
        else: term = (bman >> (-offset)) // (t*(2*k))
        if k & 1: s -= term
        else: s += term
        # The series is asymptotic: stop at the smallest term
        if k > 2 and term >= prev:
            break
        prev = term
        k += 1
    # Fix: round the result to the *requested* precision (prec), not the
    # working precision, for consistency with every other return path above.
    return from_man_exp(s, -wp, prec, rnd)
def mpc_psi0(z, prec, rnd=round_fast):
    """
    Computation of the digamma function (psi function of order 0)
    of a complex argument.
    """
    re, im = z
    # Fall back to the real case
    if im == fzero:
        return (mpf_psi0(re, prec, rnd), fzero)
    wp = prec + 20
    sign, man, exp, bc = re
    # Reflection formula: psi(z) = psi(1-z) - pi*cot(pi*z), for Re(z) << 0
    if sign and exp+bc > 3:
        c = mpc_cos_pi(z, wp)
        s = mpc_sin_pi(z, wp)
        q = mpc_mul_mpf(mpc_div(c, s, wp), mpf_pi(wp), wp)
        p = mpc_psi0(mpc_sub(mpc_one, z, wp), wp)
        return mpc_sub(p, q, prec, rnd)
    # Just the logarithmic term
    if (not sign) and bc + exp > wp:
        return mpc_log(mpc_sub(z, mpc_one, wp), prec, rnd)
    # Initial recurrence to obtain a large enough z
    w = to_int(re)
    n = int(0.11*wp) + 2
    s = mpc_zero
    if w < n:
        for k in xrange(w, n):
            s = mpc_sub(s, mpc_reciprocal(z, wp), wp)
            z = mpc_add_mpf(z, fone, wp)
    z = mpc_sub(z, mpc_one, wp)
    # Logarithmic and endpoint term
    s = mpc_add(s, mpc_log(z, wp), wp)
    s = mpc_add(s, mpc_div(mpc_half, z, wp), wp)
    # Euler-Maclaurin remainder sum. (Cleanup: a dead `prev` accumulator,
    # assigned but never read, was removed from this loop.)
    z2 = mpc_square(z, wp)
    t = mpc_one
    k = 1
    eps = mpf_shift(fone, -wp+2)
    while 1:
        t = mpc_mul(t, z2, wp)
        bern = mpf_bernoulli(2*k, wp)
        term = mpc_mpf_div(bern, mpc_mul_int(t, 2*k, wp), wp)
        s = mpc_sub(s, term, wp)
        szterm = mpc_abs(term, 10)
        if k > 2 and mpf_le(szterm, eps):
            break
        k += 1
    return s
# Currently unoptimized
def mpf_psi(m, x, prec, rnd=round_fast):
    """
    Computation of the polygamma function of arbitrary integer order
    m >= 0, for a real argument x.
    """
    if m == 0:
        # Bug fix: honor the caller's rounding mode instead of
        # unconditionally passing round_fast.
        return mpf_psi0(x, prec, rnd)
    # Compute via the complex routine and take the real part.
    return mpc_psi(m, (x, fzero), prec, rnd)[0]
def mpc_psi(m, z, prec, rnd=round_fast):
    """
    Computation of the polygamma function of arbitrary integer order
    m >= 0, for a complex argument z.

    Uses the recurrence to push z to the right, then the rescaled
    Euler-Maclaurin expansion described in the module notes above
    (the sum of 1/(z+k)^(m+1) plus integral, endpoint and
    Bernoulli-number correction terms).
    """
    if m == 0:
        return mpc_psi0(z, prec, rnd)
    re, im = z
    wp = prec + 20
    sign, man, exp, bc = re
    # Special values: a zero mantissa marks 0/inf/nan components
    if not im[1]:
        if im in (finf, fninf, fnan):
            return (fnan, fnan)
    if not man:
        if re == finf and im == fzero:
            return (fzero, fzero)
        if re == fnan:
            return (fnan, fnan)
    # Recurrence: sum 1/(z+k)^(m+1) directly while shifting z up to z+N
    w = to_int(re)
    n = int(0.4*wp + 4*m)
    s = mpc_zero
    if w < n:
        for k in xrange(w, n):
            t = mpc_pow_int(z, -m-1, wp)
            s = mpc_add(s, t, wp)
            z = mpc_add_mpf(z, fone, wp)
    zm = mpc_pow_int(z, -m, wp)
    z2 = mpc_pow_int(z, -2, wp)
    # 1/m*(z+N)^m
    integral_term = mpc_div_mpf(zm, from_int(m), wp)
    s = mpc_add(s, integral_term, wp)
    # 1/2*(z+N)^(-(m+1))
    s = mpc_add(s, mpc_mul_mpf(mpc_div(zm, z, wp), fhalf, wp), wp)
    # a/b tracks the rational coefficient (m+1)...(m+2k-1) / (2k)! as k grows
    a = m + 1
    b = 2
    k = 1
    # Important: we want to sum up to the *relative* error,
    # not the absolute error, because psi^(m)(z) might be tiny
    magn = mpc_abs(s, 10)
    magn = magn[2]+magn[3]
    eps = mpf_shift(fone, magn-wp+2)
    while 1:
        zm = mpc_mul(zm, z2, wp)
        bern = mpf_bernoulli(2*k, wp)
        scal = mpf_mul_int(bern, a, wp)
        scal = mpf_div(scal, from_int(b), wp)
        term = mpc_mul_mpf(zm, scal, wp)
        s = mpc_add(s, term, wp)
        szterm = mpc_abs(term, 10)
        if k > 2 and mpf_le(szterm, eps):
            break
        a *= (m+2*k)*(m+2*k+1)
        b *= (2*k+1)*(2*k+2)
        k += 1
    # Scale and sign factor: undo the 1/((-1)^(m+1) m!) rescaling
    v = mpc_mul_mpf(s, mpf_gamma(from_int(m+1), wp), prec, rnd)
    if not (m & 1):
        v = mpf_neg(v[0]), mpf_neg(v[1])
    return v
#-----------------------------------------------------------------------#
# #
# Riemann zeta function #
# #
#-----------------------------------------------------------------------#
"""
We use zeta(s) = eta(s) / (1 - 2**(1-s)) and Borwein's approximation
n-1
___ k
-1 \ (-1) (d_k - d_n)
eta(s) ~= ---- ) ------------------
d_n /___ s
k = 0 (k + 1)
where
k
___ i
\ (n + i - 1)! 4
d_k = n ) ---------------.
/___ (n - i)! (2i)!
i = 0
If s = a + b*I, the absolute error for eta(s) is bounded by
3 (1 + 2|b|)
------------ * exp(|b| pi/2)
n
(3+sqrt(8))
Disregarding the linear term, we have approximately,
log(err) ~= log(exp(1.58*|b|)) - log(5.8**n)
log(err) ~= 1.58*|b| - log(5.8)*n
log(err) ~= 1.58*|b| - 1.76*n
log2(err) ~= 2.28*|b| - 2.54*n
So for p bits, we should choose n > (p + 2.28*|b|) / 2.54.
References:
-----------
Peter Borwein, "An Efficient Algorithm for the Riemann Zeta Function"
http://www.cecm.sfu.ca/personal/pborwein/PAPERS/P117.ps
http://en.wikipedia.org/wiki/Dirichlet_eta_function
"""
borwein_cache = {}
def borwein_coefficients(n):
    """
    Return the list [d_0, ..., d_n] of coefficients used by Borwein's
    alternating-series acceleration for the Dirichlet eta function
    (see the formula in the notes above). Results are memoized in
    borwein_cache.
    """
    cached = borwein_cache.get(n)
    if cached is not None:
        return cached
    ds = [MPZ_ZERO] * (n+1)
    # d_k is a partial sum; `term` is the i-th summand, built incrementally.
    term = MPZ_ONE
    partial = ds[0] = MPZ_ONE
    for i in range(1, n+1):
        term = term * 4 * (n+i-1) * (n-i+1)
        term //= (2*i) * (2*i - 1)
        partial += term
        ds[i] = partial
    borwein_cache[n] = ds
    return ds
# Integer zeta values are cached (by mpf_zeta_int) up to this working
# precision; the cache maps s -> (wp, value).
ZETA_INT_CACHE_MAX_PREC = 1000
zeta_int_cache = {}
def mpf_zeta_int(s, prec, rnd=round_fast):
    """
    Optimized computation of zeta(s) for an integer s.

    Dispatches between: cached values; the Bernoulli-number formula for
    s < 2; truncated direct expansions when s is large enough that most
    terms vanish at the working precision; an Euler product over primes;
    and, as the general fallback, Borwein's algorithm.
    """
    wp = prec + 20
    s = int(s)
    # Serve from the cache if it was computed at sufficient precision
    if s in zeta_int_cache and zeta_int_cache[s][0] >= wp:
        return mpf_pos(zeta_int_cache[s][1], prec, rnd)
    if s < 2:
        if s == 1:
            raise ValueError("zeta(1) pole")
        if not s:
            return mpf_neg(fhalf)
        # zeta(-n) = -B_{n+1}/(n+1)
        return mpf_div(mpf_bernoulli(-s+1, wp), from_int(s-1), prec, rnd)
    # 2^-s term vanishes?
    if s >= wp:
        return mpf_perturb(fone, 0, prec, rnd)
    # 5^-s term vanishes?
    elif s >= wp*0.431:
        t = one = 1 << wp
        t += 1 << (wp - s)
        t += one // (MPZ_THREE ** s)
        t += 1 << max(0, wp - s*2)
        return from_man_exp(t, -wp, prec, rnd)
    else:
        # Fast enough to sum directly?
        # Even better, we use the Euler product (idea stolen from pari)
        m = (float(wp)/(s-1) + 1)
        if m < 30:
            needed_terms = int(2.0**m + 1)
            if needed_terms < int(wp/2.54 + 5) / 10:
                t = fone
                for k in list_primes(needed_terms):
                    # Each factor (1 - k^-s) needs ever fewer bits
                    powprec = int(wp - s*math.log(k,2))
                    if powprec < 2:
                        break
                    a = mpf_sub(fone, mpf_pow_int(from_int(k), -s, powprec), wp)
                    t = mpf_mul(t, a, wp)
                return mpf_div(fone, t, wp)
        # Use Borwein's algorithm
        n = int(wp/2.54 + 5)
        d = borwein_coefficients(n)
        t = MPZ_ZERO
        s = MPZ(s)
        for k in xrange(n):
            t += (((-1)**k * (d[k] - d[n])) << wp) // (k+1)**s
        t = (t << wp) // (-d[n])
        # Convert eta(s) to zeta(s): divide by (1 - 2^(1-s))
        t = (t << wp) // ((1 << wp) - (1 << (wp+1-s)))
        # Store in the cache unless a better value is already there
        if (s in zeta_int_cache and zeta_int_cache[s][0] < wp) or (s not in zeta_int_cache):
            zeta_int_cache[s] = (wp, from_man_exp(t, -wp-wp))
        return from_man_exp(t, -wp-wp, prec, rnd)
def mpf_zeta(s, prec, rnd=round_fast, alt=0):
    """
    Riemann zeta function of a real argument s (alt=0), or the
    Dirichlet eta function / alternating zeta (alt=1).

    Uses cached integer values, the functional-equation reflection for
    s < 0, a series expansion near the pole at s = 1, and Borwein's
    algorithm in the general case.
    """
    sign, man, exp, bc = s
    # Special values (zero mantissa: 0, +-inf, nan)
    if not man:
        if s == fzero:
            if alt:
                return fhalf
            else:
                return mpf_neg(fhalf)
        if s == finf:
            return fone
        return fnan
    wp = prec + 20
    # First term vanishes?
    if (not sign) and (exp + bc > (math.log(wp,2) + 2)):
        return mpf_perturb(fone, alt, prec, rnd)
    # Optimize for integer arguments
    elif exp >= 0:
        if alt:
            if s == fone:
                return mpf_ln2(prec, rnd)
            # eta(s) = zeta(s) * (1 - 2^(1-s))
            z = mpf_zeta_int(to_int(s), wp, negative_rnd[rnd])
            q = mpf_sub(fone, mpf_pow(ftwo, mpf_sub(fone, s, wp), wp), wp)
            return mpf_mul(z, q, prec, rnd)
        else:
            return mpf_zeta_int(to_int(s), prec, rnd)
    # Negative: use the reflection formula
    # Borwein only proves the accuracy bound for x >= 1/2. However, based on
    # tests, the accuracy without reflection is quite good even some distance
    # to the left of 1/2. XXX: verify this.
    if sign:
        # XXX: could use the separate refl. formula for Dirichlet eta
        if alt:
            q = mpf_sub(fone, mpf_pow(ftwo, mpf_sub(fone, s, wp), wp), wp)
            return mpf_mul(mpf_zeta(s, wp), q, prec, rnd)
        # zeta(s) = 2^s pi^(s-1) sin(pi*s/2) gamma(1-s) zeta(1-s)
        # XXX: -1 should be done exactly
        y = mpf_sub(fone, s, 10*wp)
        a = mpf_gamma(y, wp)
        b = mpf_zeta(y, wp)
        c = mpf_sin_pi(mpf_shift(s, -1), wp)
        wp2 = wp + max(0,exp+bc)
        pi = mpf_pi(wp+wp2)
        d = mpf_div(mpf_pow(mpf_shift(pi, 1), s, wp2), pi, wp2)
        return mpf_mul(a,mpf_mul(b,mpf_mul(c,d,wp),wp),prec,rnd)
    # Near pole: zeta(s) ~ 1/(s-1) + euler
    r = mpf_sub(fone, s, wp)
    asign, aman, aexp, abc = mpf_abs(r)
    pole_dist = -2*(aexp+abc)
    if pole_dist > wp:
        if alt:
            return mpf_ln2(prec, rnd)
        else:
            q = mpf_neg(mpf_div(fone, r, wp))
            return mpf_add(q, mpf_euler(wp), prec, rnd)
    else:
        # Compensate for cancellation near the pole
        wp += max(0, pole_dist)
    t = MPZ_ZERO
    # Use Borwein's algorithm
    n = int(wp/2.54 + 5)
    d = borwein_coefficients(n)
    t = MPZ_ZERO
    sf = to_fixed(s, wp)
    ln2 = ln2_fixed(wp)
    for k in xrange(n):
        # (k+1)^(-s) = exp(-s*log(k+1)), in fixed point
        u = (-sf*log_int_fixed(k+1, wp, ln2)) >> wp
        eman = exp_fixed(u, wp, ln2)
        w = (d[k] - d[n]) * eman
        if k & 1:
            t -= w
        else:
            t += w
    t = t // (-d[n])
    t = from_man_exp(t, -wp, wp)
    if alt:
        return mpf_pos(t, prec, rnd)
    else:
        # Convert eta(s) to zeta(s)
        q = mpf_sub(fone, mpf_pow(ftwo, mpf_sub(fone, s, wp), wp), wp)
        return mpf_div(t, q, prec, rnd)
def mpc_zeta(s, prec, rnd=round_fast, alt=0, force=False):
    """
    Riemann zeta function of a complex argument s (alt=0), or the
    Dirichlet eta function (alt=1), via Borwein's algorithm with
    reflection for Re(s) < 0 and a series expansion near s = 1.

    Unless force=True, raises NotImplementedError for |s| > prec since
    the term count grows with |Im(s)| and the evaluation becomes slow.
    """
    re, im = s
    # Real argument: delegate
    if im == fzero:
        return mpf_zeta(re, prec, rnd, alt), fzero
    # slow for large s
    if (not force) and mpf_gt(mpc_abs(s, 10), from_int(prec)):
        raise NotImplementedError
    wp = prec + 20
    # Near pole: zeta(s) ~ 1/(s-1) + euler
    r = mpc_sub(mpc_one, s, wp)
    asign, aman, aexp, abc = mpc_abs(r, 10)
    pole_dist = -2*(aexp+abc)
    if pole_dist > wp:
        if alt:
            # eta near s=1: ln2 + (euler*ln2 - ln2^2/2)*(s-1) + ...
            q = mpf_ln2(wp)
            y = mpf_mul(q, mpf_euler(wp), wp)
            g = mpf_shift(mpf_mul(q, q, wp), -1)
            g = mpf_sub(y, g)
            z = mpc_mul_mpf(r, mpf_neg(g), wp)
            z = mpc_add_mpf(z, q, wp)
            return mpc_pos(z, prec, rnd)
        else:
            q = mpc_neg(mpc_div(mpc_one, r, wp))
            q = mpc_add_mpf(q, mpf_euler(wp), wp)
            return mpc_pos(q, prec, rnd)
    else:
        # Compensate for cancellation near the pole
        wp += max(0, pole_dist)
    # Reflection formula. To be rigorous, we should reflect to the left of
    # re = 1/2 (see comments for mpf_zeta), but this leads to unnecessary
    # slowdown for interesting values of s
    if mpf_lt(re, fzero):
        # XXX: could use the separate refl. formula for Dirichlet eta
        if alt:
            q = mpc_sub(mpc_one, mpc_pow(mpc_two, mpc_sub(mpc_one, s, wp),
                wp), wp)
            return mpc_mul(mpc_zeta(s, wp), q, prec, rnd)
        # XXX: -1 should be done exactly
        y = mpc_sub(mpc_one, s, 10*wp)
        a = mpc_gamma(y, wp)
        b = mpc_zeta(y, wp)
        c = mpc_sin_pi(mpc_shift(s, -1), wp)
        rsign, rman, rexp, rbc = re
        isign, iman, iexp, ibc = im
        mag = max(rexp+rbc, iexp+ibc)
        wp2 = wp + max(0, mag)
        pi = mpf_pi(wp+wp2)
        pi2 = (mpf_shift(pi, 1), fzero)
        d = mpc_div_mpf(mpc_pow(pi2, s, wp2), pi, wp2)
        return mpc_mul(a,mpc_mul(b,mpc_mul(c,d,wp),wp),prec,rnd)
    # Borwein's algorithm; extra terms compensate for the exp(|Im s|*pi/2)
    # factor in the error bound (see module notes)
    n = int(wp/2.54 + 5)
    n += int(0.9*abs(to_int(im)))
    d = borwein_coefficients(n)
    ref = to_fixed(re, wp)
    imf = to_fixed(im, wp)
    tre = MPZ_ZERO
    tim = MPZ_ZERO
    one = MPZ_ONE << wp
    one_2wp = MPZ_ONE << (2*wp)
    critical_line = re == fhalf
    ln2 = ln2_fixed(wp)
    pi2 = pi_fixed(wp-1)
    wp2 = wp+wp
    for k in xrange(n):
        log = log_int_fixed(k+1, wp, ln2)
        # A square root is much cheaper than an exp
        if critical_line:
            w = one_2wp // isqrt_fast((k+1) << wp2)
        else:
            w = exp_fixed((-ref*log) >> wp, wp)
        if k & 1:
            w *= (d[n] - d[k])
        else:
            w *= (d[k] - d[n])
        wre, wim = cos_sin_fixed((-imf*log)>>wp, wp, pi2)
        tre += (w * wre) >> wp
        tim += (w * wim) >> wp
    tre //= (-d[n])
    tim //= (-d[n])
    tre = from_man_exp(tre, -wp, wp)
    tim = from_man_exp(tim, -wp, wp)
    if alt:
        return mpc_pos((tre, tim), prec, rnd)
    else:
        # Convert eta(s) to zeta(s): divide by 1 - 2^(1-s), with r = 1-s
        q = mpc_sub(mpc_one, mpc_pow(mpc_two, r, wp), wp)
        return mpc_div((tre, tim), q, prec, rnd)
def mpf_altzeta(s, prec, rnd=round_fast):
    """Dirichlet eta function (alternating zeta) of a real argument."""
    return mpf_zeta(s, prec, rnd, 1)
def mpc_altzeta(s, prec, rnd=round_fast):
    """Dirichlet eta function (alternating zeta) of a complex argument."""
    return mpc_zeta(s, prec, rnd, 1)
# Not optimized currently
# Real-argument zeta sums have no fast path; callers fall back to the
# generic implementation when this is None.
mpf_zetasum = None
def pow_fixed(x, n, wp):
    """
    Raise the fixed-point number x (scaled by 2**wp) to the positive
    integer power n, by binary (square-and-multiply) exponentiation.
    """
    if n == 1:
        return x
    result = MPZ_ONE << wp
    square = x
    while n:
        if n & 1:
            result = (result * square) >> wp
        square = (square * square) >> wp
        n >>= 1
    return result
# TODO: optimize / cleanup interface / unify with list_primes
# Caches shared by primesieve(): the largest-prime-factor sieve, the list
# of primes found, and the multiplicity table (see primesieve below).
sieve_cache = []
primes_cache = []
mult_cache = []
def primesieve(n):
    """
    Return (sieve, primes, mult) for integers up to n, where
    sieve[k] is the largest prime factor of k, primes is the list of
    primes <= n, and mult[k] is the multiplicity of sieve[k] in k.
    Results are cached in the module-level *_cache lists.
    """
    global sieve_cache, primes_cache, mult_cache
    # Serve a previously computed (larger) sieve when possible
    if n < len(sieve_cache):
        sieve = sieve_cache#[:n+1]
        primes = primes_cache[:primes_cache.index(max(sieve))+1]
        mult = mult_cache#[:n+1]
        return sieve, primes, mult
    sieve = [0] * (n+1)
    mult = [0] * (n+1)
    primes = list_primes(n)
    for p in primes:
        # Each multiple of p gets overwritten in increasing prime order,
        # so sieve[k] ends up as the largest prime factor of k
        for k in xrange(p,n+1,p):
            sieve[k] = p
    for i, p in enumerate(sieve):
        if i >= 2:
            # Count how many times the largest prime factor p divides i
            # (note: the parameter n is deliberately reused as a scratch
            # variable here; the sieve above is already built)
            m = 1
            n = i // p
            while not n % p:
                n //= p
                m += 1
            mult[i] = m
    sieve_cache = sieve
    primes_cache = primes
    mult_cache = mult
    return sieve, primes, mult
def zetasum_sieved(critical_line, sre, sim, a, n, wp):
    """
    Compute sum_{k=a}^{a+n} k^(-s) in fixed point, with s = sre + sim*i
    given as fixed-point numbers, returning (real, imag).

    Exploits multiplicativity of k^(-s): powers of small primes are
    precomputed once and composite terms are assembled as products,
    so exp/cos/sin are only evaluated for large prime k. On the
    critical line (Re(s) = 1/2), |k^(-s)| = 1/sqrt(k) and a square root
    replaces the exponential.
    """
    if a < 1:
        raise ValueError("a cannot be less than 1")
    sieve, primes, mult = primesieve(a+n)
    basic_powers = {}
    one = MPZ_ONE << wp
    one_2wp = MPZ_ONE << (2*wp)
    wp2 = wp+wp
    ln2 = ln2_fixed(wp)
    pi2 = pi_fixed(wp-1)
    # Precompute p^(-s*m) for primes p that can occur as factors
    for p in primes:
        if p*2 > a+n:
            break
        log = log_int_fixed(p, wp, ln2)
        cos, sin = cos_sin_fixed((-sim*log)>>wp, wp, pi2)
        if critical_line:
            u = one_2wp // isqrt_fast(p<<wp2)
        else:
            u = exp_fixed((-sre*log)>>wp, wp)
        pre = (u*cos) >> wp
        pim = (u*sin) >> wp
        basic_powers[p] = [(pre, pim)]
        tre, tim = pre, pim
        # All powers p^m that fit below a+n
        for m in range(1,int(math.log(a+n,p)+0.01)+1):
            tre, tim = ((pre*tre-pim*tim)>>wp), ((pim*tre+pre*tim)>>wp)
            basic_powers[p].append((tre,tim))
    xre = MPZ_ZERO
    xim = MPZ_ZERO
    if a == 1:
        xre += one
    aa = max(a,2)
    for k in xrange(aa, a+n+1):
        p = sieve[k]
        if p in basic_powers:
            # Factor k into prime powers and multiply the cached values
            m = mult[k]
            tre, tim = basic_powers[p][m-1]
            while 1:
                k //= p**m
                if k == 1:
                    break
                p = sieve[k]
                m = mult[k]
                pre, pim = basic_powers[p][m-1]
                tre, tim = ((pre*tre-pim*tim)>>wp), ((pim*tre+pre*tim)>>wp)
        else:
            # Large prime factor: compute k^(-s) directly
            log = log_int_fixed(k, wp, ln2)
            cos, sin = cos_sin_fixed((-sim*log)>>wp, wp, pi2)
            if critical_line:
                u = one_2wp // isqrt_fast(k<<wp2)
            else:
                u = exp_fixed((-sre*log)>>wp, wp)
            tre = (u*cos) >> wp
            tim = (u*sin) >> wp
        xre += tre
        xim += tim
    return xre, xim
# Set to something large to disable
# Minimum term count before mpc_zetasum switches to the sieved algorithm.
ZETASUM_SIEVE_CUTOFF = 10
def mpc_zetasum(s, a, n, derivatives, reflect, prec):
    """
    Fast version of mp._zetasum, assuming s = complex, a = integer.

    Computes sums of k^(-s) * (-log k)^d for k = a..a+n and each
    derivative order d in `derivatives`, returning (xs, ys) where ys
    holds the corresponding reflected sums k^(s-1) * (-log k)^d when
    reflect is true (ys is empty otherwise).
    """
    wp = prec + 10
    have_derivatives = derivatives != [0]
    have_one_derivative = len(derivatives) == 1
    # parse s
    sre, sim = s
    critical_line = (sre == fhalf)
    sre = to_fixed(sre, wp)
    sim = to_fixed(sim, wp)
    # Plain sums over many terms go through the sieved fast path
    if a > 0 and n > ZETASUM_SIEVE_CUTOFF and not have_derivatives and not reflect:
        re, im = zetasum_sieved(critical_line, sre, sim, a, n, wp)
        xs = [(from_man_exp(re, -wp, prec, 'n'), from_man_exp(im, -wp, prec, 'n'))]
        return xs, []
    maxd = max(derivatives)
    if not have_one_derivative:
        derivatives = range(maxd+1)
    # x_d = 0, y_d = 0
    xre = [MPZ_ZERO for d in derivatives]
    xim = [MPZ_ZERO for d in derivatives]
    if reflect:
        yre = [MPZ_ZERO for d in derivatives]
        yim = [MPZ_ZERO for d in derivatives]
    else:
        yre = yim = []
    one = MPZ_ONE << wp
    one_2wp = MPZ_ONE << (2*wp)
    ln2 = ln2_fixed(wp)
    pi2 = pi_fixed(wp-1)
    wp2 = wp+wp
    for w in xrange(a, a+n+1):
        log = log_int_fixed(w, wp, ln2)
        cos, sin = cos_sin_fixed((-sim*log)>>wp, wp, pi2)
        # |w^(-s)|: square root on the critical line, exp otherwise
        if critical_line:
            u = one_2wp // isqrt_fast(w<<wp2)
        else:
            u = exp_fixed((-sre*log)>>wp, wp)
        xterm_re = (u * cos) >> wp
        xterm_im = (u * sin) >> wp
        if reflect:
            reciprocal = (one_2wp // (u*w))
            yterm_re = (reciprocal * cos) >> wp
            yterm_im = (reciprocal * sin) >> wp
        if have_derivatives:
            if have_one_derivative:
                log = pow_fixed(log, maxd, wp)
                xre[0] += (xterm_re * log) >> wp
                xim[0] += (xterm_im * log) >> wp
                if reflect:
                    yre[0] += (yterm_re * log) >> wp
                    yim[0] += (yterm_im * log) >> wp
            else:
                # Accumulate log^d incrementally across derivative orders
                t = MPZ_ONE << wp
                for d in derivatives:
                    xre[d] += (xterm_re * t) >> wp
                    xim[d] += (xterm_im * t) >> wp
                    if reflect:
                        yre[d] += (yterm_re * t) >> wp
                        yim[d] += (yterm_im * t) >> wp
                    t = (t * log) >> wp
        else:
            xre[0] += xterm_re
            xim[0] += xterm_im
            if reflect:
                yre[0] += yterm_re
                yim[0] += yterm_im
    # Apply the (-1)^d sign from the d-th derivative of w^(-s)
    if have_derivatives:
        if have_one_derivative:
            if maxd % 2:
                xre[0] = -xre[0]
                xim[0] = -xim[0]
                if reflect:
                    yre[0] = -yre[0]
                    yim[0] = -yim[0]
        else:
            xre = [(-1)**d * xre[d] for d in derivatives]
            xim = [(-1)**d * xim[d] for d in derivatives]
            if reflect:
                yre = [(-1)**d * yre[d] for d in derivatives]
                yim = [(-1)**d * yim[d] for d in derivatives]
    xs = [(from_man_exp(xa, -wp, prec, 'n'), from_man_exp(xb, -wp, prec, 'n'))
        for (xa, xb) in zip(xre, xim)]
    ys = [(from_man_exp(ya, -wp, prec, 'n'), from_man_exp(yb, -wp, prec, 'n'))
        for (ya, yb) in zip(yre, yim)]
    return xs, ys
#-----------------------------------------------------------------------#
# #
# The gamma function (NEW IMPLEMENTATION) #
# #
#-----------------------------------------------------------------------#
# Higher means faster, but more precomputation time
MAX_GAMMA_TAYLOR_PREC = 5000
# Need to derive higher bounds for Taylor series to go higher
assert MAX_GAMMA_TAYLOR_PREC < 15000
# Use Stirling's series if abs(x) > beta*prec
# Important: must be large enough for convergence!
GAMMA_STIRLING_BETA = 0.2
SMALL_FACTORIAL_CACHE_SIZE = 150
# Caches: Taylor coefficients of 1/gamma(1+x) keyed by precision;
# Stirling series coefficients keyed by term index; exact n! as mpf
# for n = 0..SMALL_FACTORIAL_CACHE_SIZE.
gamma_taylor_cache = {}
gamma_stirling_cache = {}
small_factorial_cache = [from_int(ifac(n)) for \
    n in range(SMALL_FACTORIAL_CACHE_SIZE+1)]
def zeta_array(N, prec):
    """
    Compute fixed-point values of zeta(0), ..., zeta(N+1) simultaneously,
    using the representation
        zeta(n) = A * pi**n / n! + B
    where A is a rational number (A = Bernoulli number
    for n even) and B is an infinite sum over powers of exp(2*pi).
    (B = 0 for n even).
    TODO: this is currently only used for gamma, but could
    be very useful elsewhere.
    """
    extra = 30
    wp = prec+extra
    zeta_values = [MPZ_ZERO] * (N+2)
    pi = pi_fixed(wp)
    # STEP 1:
    one = MPZ_ONE << wp
    zeta_values[0] = -one//2
    f_2pi = mpf_shift(mpf_pi(wp),1)
    exp_2pi_k = exp_2pi = mpf_exp(f_2pi, wp)
    # Compute exponential series
    # Store values of 1/(exp(2*pi*k)-1),
    # exp(2*pi*k)/(exp(2*pi*k)-1)**2, 1/(exp(2*pi*k)-1)**2
    # pi*k*exp(2*pi*k)/(exp(2*pi*k)-1)**2
    exps3 = []
    k = 1
    while 1:
        # Each successive term needs ~9k fewer bits
        tp = wp - 9*k
        if tp < 1:
            break
        # 1/(exp(2*pi*k)-1)
        q1 = mpf_div(fone, mpf_sub(exp_2pi_k, fone, tp), tp)
        # pi*k*exp(2*pi*k)/(exp(2*pi*k)-1)**2
        q2 = mpf_mul(exp_2pi_k, mpf_mul(q1,q1,tp), tp)
        q1 = to_fixed(q1, wp)
        q2 = to_fixed(q2, wp)
        q2 = (k * q2 * pi) >> wp
        exps3.append((q1, q2))
        # Multiply for next round
        exp_2pi_k = mpf_mul(exp_2pi_k, exp_2pi, wp)
        k += 1
    # Exponential sum
    for n in xrange(3, N+1, 2):
        s = MPZ_ZERO
        k = 1
        for e1, e2 in exps3:
            if n%4 == 3:
                t = e1 // k**n
            else:
                U = (n-1)//4
                t = (e1 + e2//U) // k**n
            if not t:
                break
            s += t
            k += 1
        zeta_values[n] = -2*s
    # Even zeta values: zeta(2n) = |B_2n| (2pi)^(2n) / (2 (2n)!)
    B = [mpf_abs(mpf_bernoulli(k,wp)) for k in xrange(N+2)]
    pi_pow = fpi = mpf_pow_int(mpf_shift(mpf_pi(wp), 1), 2, wp)
    pi_pow = mpf_div(pi_pow, from_int(4), wp)
    for n in xrange(2,N+2,2):
        z = mpf_mul(B[n], pi_pow, wp)
        zeta_values[n] = to_fixed(z, wp)
        pi_pow = mpf_mul(pi_pow, fpi, wp)
        pi_pow = mpf_div(pi_pow, from_int((n+1)*(n+2)), wp)
    # Zeta sum
    reciprocal_pi = (one << wp) // pi
    for n in xrange(3, N+1, 4):
        U = (n-3)//4
        s = zeta_values[4*U+4]*(4*U+7)//4
        for k in xrange(1, U+1):
            s -= (zeta_values[4*k] * zeta_values[4*U+4-4*k]) >> wp
        zeta_values[n] += (2*s*reciprocal_pi) >> wp
    for n in xrange(5, N+1, 4):
        U = (n-1)//4
        s = zeta_values[4*U+2]*(2*U+1)
        for k in xrange(1, 2*U+1):
            s += ((-1)**k*2*k* zeta_values[2*k] * zeta_values[4*U+2-2*k])>>wp
        zeta_values[n] += ((s*reciprocal_pi)>>wp)//(2*U)
    # Strip the guard bits before returning
    return [x>>extra for x in zeta_values]
def gamma_taylor_coefficients(inprec):
    """
    Gives the Taylor coefficients of 1/gamma(1+x) as
    a list of fixed-point numbers. Enough coefficients are returned
    to ensure that the series converges to the given precision
    when x is in [0.5, 1.5].

    Returns (coeffs, prec) where coeffs is stored highest-order first
    (ready for Horner evaluation) and prec is the (possibly rounded-up)
    precision the coefficients were computed at.
    """
    # Reuse nearby cache values (small case): round the precision up to a
    # multiple of 10 or 30 bits so that nearby requests share one entry
    if inprec < 400:
        prec = inprec + (10-(inprec%10))
    elif inprec < 1000:
        prec = inprec + (30-(inprec%30))
    else:
        prec = inprec
    if prec in gamma_taylor_cache:
        return gamma_taylor_cache[prec], prec
    # Experimentally determined bounds
    if prec < 1000:
        N = int(prec**0.76 + 2)
    else:
        # Valid to at least 15000 bits
        N = int(prec**0.787 + 2)
    # Reuse higher precision values
    for cprec in gamma_taylor_cache:
        if cprec > prec:
            coeffs = [x>>(cprec-prec) for x in gamma_taylor_cache[cprec][-N:]]
            if inprec < 1000:
                gamma_taylor_cache[prec] = coeffs
            return coeffs, prec
    # Cache at a higher precision (large case)
    if prec > 1000:
        prec = int(prec * 1.2)
    wp = prec + 20
    A = [0] * N
    A[0] = MPZ_ZERO
    A[1] = MPZ_ONE << wp
    A[2] = euler_fixed(wp)
    # SLOW, reference implementation
    #zeta_values = [0,0]+[to_fixed(mpf_zeta_int(k,wp),wp) for k in xrange(2,N)]
    zeta_values = zeta_array(N, wp)
    # Standard recurrence for the Taylor coefficients of 1/gamma(1+x),
    # driven by euler and the integer zeta values
    for k in xrange(3, N):
        a = (-A[2]*A[k-1])>>wp
        for j in xrange(2,k):
            a += ((-1)**j * zeta_values[j] * A[k-j]) >> wp
        a //= (1-k)
        A[k] = a
    A = [a>>20 for a in A]
    # Reverse for Horner's rule and drop the (zero) constant term
    A = A[::-1]
    A = A[:-1]
    gamma_taylor_cache[prec] = A
    # Re-enter through the cache so the (coeffs, prec) pair is consistent
    return gamma_taylor_coefficients(inprec)
def gamma_fixed_taylor(xmpf, x, wp, prec, rnd, type):
    """
    Evaluate the gamma function at x (given both as the mpf xmpf and as
    the fixed-point value x at precision wp), using the Taylor series of
    1/gamma(1+x) after reducing the argument to [0.5, 1.5] with the
    recurrence gamma(x+1) = x*gamma(x).

    `type` selects the variant as in mpf_gamma: 0 -> gamma, 2 -> 1/gamma,
    3 -> log(|gamma|).
    """
    # Determine nearest multiple of N/2
    nearest_int = ((x >> (wp-1)) + MPZ_ONE) >> 1
    one = MPZ_ONE << wp
    coeffs, cwp = gamma_taylor_coefficients(wp)
    if nearest_int > 0:
        # Shift x down by nearest_int, accumulating the product r
        r = one
        for i in xrange(nearest_int-1):
            x -= one
            r = (r*x) >> wp
        x -= one
        # Horner evaluation of 1/gamma(1+x)
        p = MPZ_ZERO
        for c in coeffs:
            p = c + ((x*p)>>wp)
        p >>= (cwp-wp)
        if type == 0:
            return from_man_exp((r<<wp)//p, -wp, prec, rnd)
        if type == 2:
            return mpf_shift(from_rational(p, (r<<wp), prec, rnd), wp)
        if type == 3:
            return mpf_log(mpf_abs(from_man_exp((r<<wp)//p, -wp)), prec, rnd)
    else:
        # Shift x up by -nearest_int, accumulating the product r
        r = one
        for i in xrange(-nearest_int):
            r = (r*x) >> wp
            x += one
        p = MPZ_ZERO
        for c in coeffs:
            p = c + ((x*p)>>wp)
        p >>= (cwp-wp)
        if wp - bitcount(abs(x)) > 10:
            # pass very close to 0, so do floating-point multiply
            g = mpf_add(xmpf, from_int(-nearest_int)) # exact
            r = from_man_exp(p*r,-wp-wp)
            r = mpf_mul(r, g, wp)
            if type == 0:
                return mpf_div(fone, r, prec, rnd)
            if type == 2:
                return mpf_pos(r, prec, rnd)
            if type == 3:
                return mpf_log(mpf_abs(mpf_div(fone, r, wp)), prec, rnd)
        else:
            r = from_man_exp(x*p*r,-3*wp)
            if type == 0: return mpf_div(fone, r, prec, rnd)
            if type == 2: return mpf_pos(r, prec, rnd)
            if type == 3: return mpf_neg(mpf_log(mpf_abs(r), prec, rnd))
def stirling_coefficient(n):
    """
    Return the n-th Stirling series coefficient B_n / (n*(n-1)) as a
    tuple (p, q, bitcount(|p|), bitcount(q)). Results are memoized in
    gamma_stirling_cache.
    """
    cached = gamma_stirling_cache.get(n)
    if cached is not None:
        return cached
    p, q = bernfrac(n)
    q *= MPZ(n*(n-1))
    entry = (p, q, bitcount(abs(p)), bitcount(q))
    gamma_stirling_cache[n] = entry
    return entry
def real_stirling_series(x, prec):
    """
    Sums the rational part of Stirling's expansion,
    log(sqrt(2*pi)) - z + 1/(12*z) - 1/(360*z^3) + ...

    x is a fixed-point number at precision prec; the result is returned
    in the same fixed-point format.
    """
    t = (MPZ_ONE<<(prec+prec)) // x # t = 1/x
    u = (t*t)>>prec # u = 1/x**2
    s = ln_sqrt2pi_fixed(prec) - x
    # Add initial terms of Stirling's series, with hardcoded coefficients
    s += t//12; t = (t*u)>>prec
    s -= t//360; t = (t*u)>>prec
    s += t//1260; t = (t*u)>>prec
    s -= t//1680; t = (t*u)>>prec
    if not t: return s
    s += t//1188; t = (t*u)>>prec
    s -= 691*t//360360; t = (t*u)>>prec
    s += t//156; t = (t*u)>>prec
    if not t: return s
    s -= 3617*t//122400; t = (t*u)>>prec
    s += 43867*t//244188; t = (t*u)>>prec
    s -= 174611*t//125400; t = (t*u)>>prec
    if not t: return s
    k = 22
    # From here on, the coefficients are growing, so we
    # have to keep t at a roughly constant size
    usize = bitcount(abs(u))
    tsize = bitcount(abs(t))
    texp = 0
    while 1:
        p, q, pb, qb = stirling_coefficient(k)
        term_mag = tsize + pb + texp
        shift = -texp
        m = pb - term_mag
        if m > 0 and shift < m:
            p >>= m
            shift -= m
        m = tsize - term_mag
        if m > 0 and shift < m:
            # NOTE(review): `w` is computed here but `term` below uses
            # `t`, leaving `w` unused; looks suspicious -- confirm
            # against upstream before changing.
            w = t >> m
            shift -= m
        else:
            w = t
        term = (t*p//q) >> shift
        if not term:
            break
        s += term
        t = (t*u) >> usize
        texp -= (prec - usize)
        k += 2
    return s
def complex_stirling_series(x, y, prec):
    """
    Complex analogue of real_stirling_series: sums the rational part of
    Stirling's expansion for z = x + y*i, with x and y fixed-point
    numbers at precision prec. Returns (real, imag) in fixed point.
    """
    # t = 1/z
    _m = (x*x + y*y) >> prec
    tre = (x << prec) // _m
    tim = (-y << prec) // _m
    # u = 1/z**2
    ure = (tre*tre - tim*tim) >> prec
    uim = tim*tre >> (prec-1)
    # s = log(sqrt(2*pi)) - z
    sre = ln_sqrt2pi_fixed(prec) - x
    sim = -y
    # Add initial terms of Stirling's series; each step multiplies t by u
    sre += tre//12; sim += tim//12;
    tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
    sre -= tre//360; sim -= tim//360;
    tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
    sre += tre//1260; sim += tim//1260;
    tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
    sre -= tre//1680; sim -= tim//1680;
    tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
    if abs(tre) + abs(tim) < 5: return sre, sim
    sre += tre//1188; sim += tim//1188;
    tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
    sre -= 691*tre//360360; sim -= 691*tim//360360;
    tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
    sre += tre//156; sim += tim//156;
    tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
    if abs(tre) + abs(tim) < 5: return sre, sim
    sre -= 3617*tre//122400; sim -= 3617*tim//122400;
    tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
    sre += 43867*tre//244188; sim += 43867*tim//244188;
    tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
    sre -= 174611*tre//125400; sim -= 174611*tim//125400;
    tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
    if abs(tre) + abs(tim) < 5: return sre, sim
    k = 22
    # From here on, the coefficients are growing, so we
    # have to keep t at a roughly constant size
    usize = bitcount(max(abs(ure), abs(uim)))
    tsize = bitcount(max(abs(tre), abs(tim)))
    texp = 0
    while 1:
        p, q, pb, qb = stirling_coefficient(k)
        term_mag = tsize + pb + texp
        shift = -texp
        m = pb - term_mag
        if m > 0 and shift < m:
            p >>= m
            shift -= m
        m = tsize - term_mag
        if m > 0 and shift < m:
            # NOTE(review): `wre`/`wim` are computed here but the term
            # below uses `tre`/`tim`, leaving them unused; same pattern
            # as real_stirling_series -- confirm against upstream.
            wre = tre >> m
            wim = tim >> m
            shift -= m
        else:
            wre = tre
            wim = tim
        termre = (tre*p//q) >> shift
        termim = (tim*p//q) >> shift
        if abs(termre) + abs(termim) < 5:
            break
        sre += termre
        sim += termim
        tre, tim = ((tre*ure - tim*uim)>>usize), \
            ((tre*uim + tim*ure)>>usize)
        texp -= (prec - usize)
        k += 2
    return sre, sim
def mpf_gamma(x, prec, rnd='d', type=0):
    """
    This function implements multipurpose evaluation of the gamma
    function, G(x), as well as the following versions of the same:
    type = 0 -- G(x) [standard gamma function]
    type = 1 -- G(x+1) = x*G(x) = x! [factorial]
    type = 2 -- 1/G(x) [reciprocal gamma function]
    type = 3 -- log(|G(x)|) [log-gamma function, real part]
    """
    # Special values
    sign, man, exp, bc = x
    if not man:
        if x == fzero:
            if type == 1: return fone
            if type == 2: return fzero
            raise ValueError("gamma function pole")
        if x == finf:
            if type == 2: return fzero
            return finf
        return fnan
    # First of all, for log gamma, numbers can be well beyond the fixed-point
    # range, so we must take care of huge numbers before e.g. trying
    # to convert x to the nearest integer
    if type == 3:
        wp = prec+20
        if exp+bc > wp and not sign:
            # Leading behavior log G(x) ~ x*log(x) - x suffices here
            return mpf_sub(mpf_mul(x, mpf_log(x, wp), wp), x, prec, rnd)
    # We strongly want to special-case small integers
    is_integer = exp >= 0
    if is_integer:
        # Poles
        if sign:
            if type == 2:
                return fzero
            raise ValueError("gamma function pole")
        # n = x
        n = man << exp
        if n < SMALL_FACTORIAL_CACHE_SIZE:
            if type == 0:
                return mpf_pos(small_factorial_cache[n-1], prec, rnd)
            if type == 1:
                return mpf_pos(small_factorial_cache[n], prec, rnd)
            if type == 2:
                return mpf_div(fone, small_factorial_cache[n-1], prec, rnd)
            if type == 3:
                return mpf_log(small_factorial_cache[n-1], prec, rnd)
    else:
        # floor(abs(x))
        n = int(man >> (-exp))
    # Estimate size and precision
    # Estimate log(gamma(|x|),2) as x*log(x,2)
    mag = exp + bc
    gamma_size = n*mag
    if type == 3:
        wp = prec + 20
    else:
        wp = prec + bitcount(gamma_size) + 20
    # Very close to 0, pole: use the series expansions around x = 0
    if mag < -wp:
        if type == 0:
            return mpf_sub(mpf_div(fone,x, wp),mpf_shift(fone,-wp),prec,rnd)
        if type == 1: return mpf_sub(fone, x, prec, rnd)
        if type == 2: return mpf_add(x, mpf_shift(fone,mag-wp), prec, rnd)
        if type == 3: return mpf_neg(mpf_log(mpf_abs(x), prec, rnd))
    # From now on, we assume having a gamma function
    if type == 1:
        return mpf_gamma(mpf_add(x, fone), prec, rnd, 0)
    # Special case integers (those not small enough to be caught above,
    # but still small enough for an exact factorial to be faster
    # than an approximate algorithm), and half-integers
    if exp >= -1:
        if is_integer:
            if gamma_size < 10*wp:
                if type == 0:
                    return from_int(ifac(n-1), prec, rnd)
                if type == 2:
                    return from_rational(MPZ_ONE, ifac(n-1), prec, rnd)
                if type == 3:
                    return mpf_log(from_int(ifac(n-1)), prec, rnd)
        # half-integer: exact values via sqrt(pi) and double factorials
        if n < 100 or gamma_size < 10*wp:
            if sign:
                w = sqrtpi_fixed(wp)
                if n % 2: f = ifac2(2*n+1)
                else: f = -ifac2(2*n+1)
                if type == 0:
                    return mpf_shift(from_rational(w, f, prec, rnd), -wp+n+1)
                if type == 2:
                    return mpf_shift(from_rational(f, w, prec, rnd), wp-n-1)
                if type == 3:
                    return mpf_log(mpf_shift(from_rational(w, abs(f),
                        prec, rnd), -wp+n+1), prec, rnd)
            elif n == 0:
                if type == 0: return mpf_sqrtpi(prec, rnd)
                if type == 2: return mpf_div(fone, mpf_sqrtpi(wp), prec, rnd)
                if type == 3: return mpf_log(mpf_sqrtpi(wp), prec, rnd)
            else:
                w = sqrtpi_fixed(wp)
                w = from_man_exp(w * ifac2(2*n-1), -wp-n)
                if type == 0: return mpf_pos(w, prec, rnd)
                if type == 2: return mpf_div(fone, w, prec, rnd)
                if type == 3: return mpf_log(mpf_abs(w), prec, rnd)
    # Convert to fixed point
    offset = exp + wp
    if offset >= 0: absxman = man << offset
    else: absxman = man >> (-offset)
    # For log gamma, provide accurate evaluation for x = 1+eps and 2+eps
    # (zeros of log gamma, where naive evaluation cancels catastrophically)
    if type == 3 and not sign:
        one = MPZ_ONE << wp
        one_dist = abs(absxman-one)
        two_dist = abs(absxman-2*one)
        cancellation = (wp - bitcount(min(one_dist, two_dist)))
        if cancellation > 10:
            xsub1 = mpf_sub(fone, x)
            xsub2 = mpf_sub(ftwo, x)
            xsub1mag = xsub1[2]+xsub1[3]
            xsub2mag = xsub2[2]+xsub2[3]
            if xsub1mag < -wp:
                return mpf_mul(mpf_euler(wp), mpf_sub(fone, x), prec, rnd)
            if xsub2mag < -wp:
                return mpf_mul(mpf_sub(fone, mpf_euler(wp)),
                    mpf_sub(x, ftwo), prec, rnd)
            # Proceed but increase precision
            wp += max(-xsub1mag, -xsub2mag)
            offset = exp + wp
            if offset >= 0: absxman = man << offset
            else: absxman = man >> (-offset)
    # Use Taylor series if appropriate
    n_for_stirling = int(GAMMA_STIRLING_BETA*wp)
    if n < max(100, n_for_stirling) and wp < MAX_GAMMA_TAYLOR_PREC:
        if sign:
            absxman = -absxman
        return gamma_fixed_taylor(x, absxman, wp, prec, rnd, type)
    # Use Stirling's series
    # First ensure that |x| is large enough for rapid convergence
    xorig = x
    # Argument reduction: r accumulates the product x*(x+1)*...*(x+d-1)
    r = 0
    if n < n_for_stirling:
        r = one = MPZ_ONE << wp
        d = n_for_stirling - n
        for k in xrange(d):
            r = (r * absxman) >> wp
            absxman += one
        x = xabs = from_man_exp(absxman, -wp)
        if sign:
            x = mpf_neg(x)
    else:
        xabs = mpf_abs(x)
    # Asymptotic series: log G(x) = stirling(x) + (x - 1/2)*log(x)
    y = real_stirling_series(absxman, wp)
    u = to_fixed(mpf_log(xabs, wp), wp)
    u = ((absxman - (MPZ_ONE<<(wp-1))) * u) >> wp
    y += u
    w = from_man_exp(y, -wp)
    # Compute final value
    if sign:
        # Reflection formula: G(x)*G(-x) = -pi/(x*sin(pi*x))
        A = mpf_mul(mpf_sin_pi(xorig, wp), xorig, wp)
        B = mpf_neg(mpf_pi(wp))
        if type == 0 or type == 2:
            A = mpf_mul(A, mpf_exp(w, wp))
            if r:
                B = mpf_mul(B, from_man_exp(r, -wp), wp)
            if type == 0:
                return mpf_div(B, A, prec, rnd)
            if type == 2:
                return mpf_div(A, B, prec, rnd)
        if type == 3:
            if r:
                B = mpf_mul(B, from_man_exp(r, -wp), wp)
            A = mpf_add(mpf_log(mpf_abs(A), wp), w, wp)
            return mpf_sub(mpf_log(mpf_abs(B), wp), A, prec, rnd)
    else:
        if type == 0:
            if r:
                return mpf_div(mpf_exp(w, wp),
                    from_man_exp(r, -wp), prec, rnd)
            return mpf_exp(w, prec, rnd)
        if type == 2:
            if r:
                return mpf_div(from_man_exp(r, -wp),
                    mpf_exp(w, wp), prec, rnd)
            return mpf_exp(mpf_neg(w), prec, rnd)
        if type == 3:
            if r:
                return mpf_sub(w, mpf_log(from_man_exp(r,-wp), wp), prec, rnd)
            return mpf_pos(w, prec, rnd)
def mpc_gamma(z, prec, rnd='d', type=0):
    """Complex gamma-family evaluation for an mpc value ``z = (a, b)``.

    ``type`` selects the function computed, mirroring ``mpf_gamma``:
      0 -> gamma(z), 1 -> factorial(z) = gamma(z+1),
      2 -> 1/gamma(z), 3 -> log-gamma(z) (with correct branch).

    ``a`` and ``b`` are mpf tuples ``(sign, man, exp, bc)``.  The algorithm:
    special-case real/inf/nan input, handle z near 0, use argument
    reduction plus the complex Stirling series, and apply the reflection
    formula for arguments in the left half-plane.
    """
    a, b = z
    asign, aman, aexp, abc = a
    bsign, bman, bexp, bbc = b
    if b == fzero:
        # Purely real argument: delegate to the real gamma function.
        # Imaginary part on negative half-axis for log-gamma function
        if type == 3 and asign:
            re = mpf_gamma(a, prec, rnd, 3)
            # n = number of multiples of pi in the imaginary part
            # (assumes a is an exact negative integer here -- the shift
            # (-aman) >> (-aexp) only makes sense for aexp <= 0; TODO confirm)
            n = (-aman) >> (-aexp)
            im = mpf_mul_int(mpf_pi(prec+10), n, prec, rnd)
            return re, im
        return mpf_gamma(a, prec, rnd, type), fzero
    # Some kind of complex inf/nan
    if (not aman and aexp) or (not bman and bexp):
        return (fnan, fnan)
    # Initial working precision
    wp = prec + 20
    # Magnitudes (exponent + bitcount) of real and imaginary parts.
    amag = aexp+abc
    bmag = bexp+bbc
    if aman:
        mag = max(amag, bmag)
    else:
        mag = bmag
    # Close to 0
    if mag < -8:
        if mag < -wp:
            # 1/gamma(z) = z + euler*z^2 + O(z^3)
            v = mpc_add(z, mpc_mul_mpf(mpc_mul(z,z,wp),mpf_euler(wp),wp), wp)
            if type == 0: return mpc_reciprocal(v, prec, rnd)
            if type == 1: return mpc_div(z, v, prec, rnd)
            if type == 2: return mpc_pos(v, prec, rnd)
            if type == 3: return mpc_log(mpc_reciprocal(v, prec), prec, rnd)
        elif type != 1:
            # Compensate cancellation near the pole at 0.
            wp += (-mag)
    # Handle huge log-gamma values; must do this before converting to
    # a fixed-point value. TODO: determine a precise cutoff of validity
    # depending on amag and bmag
    if type == 3 and mag > wp and ((not asign) or (bmag >= amag)):
        # loggamma(z) ~ z*log(z) - z for very large |z|.
        return mpc_sub(mpc_mul(z, mpc_log(z, wp), wp), z, prec, rnd)
    # From now on, we assume having a gamma function
    if type == 1:
        return mpc_gamma((mpf_add(a, fone), b), prec, rnd, 0)
    an = abs(to_int(a))
    bn = abs(to_int(b))
    absn = max(an, bn)
    # Rough size estimate used to pad the working precision.
    # NOTE(review): gamma_size can be negative when mag < 0; the effect of
    # bitcount() on a negative value should be confirmed.
    gamma_size = absn*mag
    if type == 3:
        pass
    else:
        wp += bitcount(gamma_size)
    # Reflect to the right half-plane. Note that Stirling's expansion
    # is valid in the left half-plane too, as long as we're not too close
    # to the real axis, but in order to use this argument reduction
    # in the negative direction must be implemented.
    #need_reflection = asign and ((bmag < 0) or (amag-bmag > 4))
    need_reflection = asign
    zorig = z
    if need_reflection:
        z = mpc_neg(z)
        asign, aman, aexp, abc = a = z[0]
        bsign, bman, bexp, bbc = b = z[1]
    # Imaginary part very small compared to real one?
    yfinal = 0
    balance_prec = 0
    if bmag < -10:
        # Check z ~= 1 and z ~= 2 for loggamma
        if type == 3:
            zsub1 = mpc_sub_mpf(z, fone)
            if zsub1[0] == fzero:
                cancel1 = -bmag
            else:
                cancel1 = -max(zsub1[0][2]+zsub1[0][3], bmag)
            if cancel1 > wp:
                # Taylor series around z = 1: loggamma ~ pi^2/12*(z-1)^2 - euler*(z-1)
                pi = mpf_pi(wp)
                x = mpc_mul_mpf(zsub1, pi, wp)
                x = mpc_mul(x, x, wp)
                x = mpc_div_mpf(x, from_int(12), wp)
                y = mpc_mul_mpf(zsub1, mpf_neg(mpf_euler(wp)), wp)
                yfinal = mpc_add(x, y, wp)
                if not need_reflection:
                    return mpc_pos(yfinal, prec, rnd)
            elif cancel1 > 0:
                wp += cancel1
            zsub2 = mpc_sub_mpf(z, ftwo)
            if zsub2[0] == fzero:
                cancel2 = -bmag
            else:
                cancel2 = -max(zsub2[0][2]+zsub2[0][3], bmag)
            if cancel2 > wp:
                # Taylor series around z = 2.
                pi = mpf_pi(wp)
                t = mpf_sub(mpf_mul(pi, pi), from_int(6))
                x = mpc_mul_mpf(mpc_mul(zsub2, zsub2, wp), t, wp)
                x = mpc_div_mpf(x, from_int(12), wp)
                y = mpc_mul_mpf(zsub2, mpf_sub(fone, mpf_euler(wp)), wp)
                yfinal = mpc_add(x, y, wp)
                if not need_reflection:
                    return mpc_pos(yfinal, prec, rnd)
            elif cancel2 > 0:
                wp += cancel2
        if bmag < -wp:
            # Compute directly from the real gamma function.
            # First-order expansion in the imaginary direction:
            # f(a+bi) ~ f(a) + b*f'(a)*i, with f' by finite differences.
            pp = 2*(wp+10)
            aabs = mpf_abs(a)
            eps = mpf_shift(fone, amag-wp)
            x1 = mpf_gamma(aabs, pp, type=type)
            x2 = mpf_gamma(mpf_add(aabs, eps), pp, type=type)
            xprime = mpf_div(mpf_sub(x2, x1, pp), eps, pp)
            y = mpf_mul(b, xprime, prec, rnd)
            yfinal = (x1, y)
            # Note: we still need to use the reflection formula for
            # near-poles, and the correct branch of the log-gamma function
            if not need_reflection:
                return mpc_pos(yfinal, prec, rnd)
        else:
            balance_prec += (-bmag)
    wp += balance_prec
    n_for_stirling = int(GAMMA_STIRLING_BETA*wp)
    # NOTE(review): need_reduction is computed but never read; the same
    # condition is re-tested below.
    need_reduction = absn < n_for_stirling
    afix = to_fixed(a, wp)
    bfix = to_fixed(b, wp)
    r = 0
    if not yfinal:
        zprered = z
        # Argument reduction
        if absn < n_for_stirling:
            absn = complex(an, bn)
            # d = number of recurrence steps gamma(z) = gamma(z+d)/(z...(z+d-1))
            # needed to push |z| beyond the Stirling validity threshold.
            d = int((1 + n_for_stirling**2 - bn**2)**0.5 - an)
            # Accumulate the product r = z*(z+1)*...*(z+d-1) in fixed point.
            rre = one = MPZ_ONE << wp
            rim = MPZ_ZERO
            for k in xrange(d):
                rre, rim = ((afix*rre-bfix*rim)>>wp), ((afix*rim + bfix*rre)>>wp)
                afix += one
            r = from_man_exp(rre, -wp), from_man_exp(rim, -wp)
            a = from_man_exp(afix, -wp)
            z = a, b
        yre, yim = complex_stirling_series(afix, bfix, wp)
        # (z-1/2)*log(z) + S
        lre, lim = mpc_log(z, wp)
        lre = to_fixed(lre, wp)
        lim = to_fixed(lim, wp)
        yre = ((lre*afix - lim*bfix)>>wp) - (lre>>1) + yre
        yim = ((lre*bfix + lim*afix)>>wp) - (lim>>1) + yim
        y = from_man_exp(yre, -wp), from_man_exp(yim, -wp)
        if r and type == 3:
            # If re(z) > 0 and abs(z) <= 4, the branches of loggamma(z)
            # and log(gamma(z)) coincide. Otherwise, use the zeroth order
            # Stirling expansion to compute the correct imaginary part.
            y = mpc_sub(y, mpc_log(r, wp), wp)
            zfa = to_float(zprered[0])
            zfb = to_float(zprered[1])
            zfabs = math.hypot(zfa,zfb)
            #if not (zfa > 0.0 and zfabs <= 4):
            yfb = to_float(y[1])
            u = math.atan2(zfb, zfa)
            if zfabs <= 0.5:
                # 0.577216... = Euler's constant (low-precision estimate
                # is enough: only the nearest multiple of 2*pi is needed).
                gi = 0.577216*zfb - u
            else:
                gi = -zfb - 0.5*u + zfa*u + zfb*math.log(zfabs)
            n = int(math.floor((gi-yfb)/(2*math.pi)+0.5))
            y = (y[0], mpf_add(y[1], mpf_mul_int(mpf_pi(wp), 2*n, wp), wp))
    if need_reflection:
        if type == 0 or type == 2:
            # gamma(z)*gamma(-z) = -pi/(z*sin(pi*z))
            A = mpc_mul(mpc_sin_pi(zorig, wp), zorig, wp)
            B = (mpf_neg(mpf_pi(wp)), fzero)
            if yfinal:
                if type == 2:
                    A = mpc_div(A, yfinal, wp)
                else:
                    A = mpc_mul(A, yfinal, wp)
            else:
                A = mpc_mul(A, mpc_exp(y, wp), wp)
            if r:
                B = mpc_mul(B, r, wp)
            if type == 0: return mpc_div(B, A, prec, rnd)
            if type == 2: return mpc_div(A, B, prec, rnd)
        # Reflection formula for the log-gamma function with correct branch
        # http://functions.wolfram.com/GammaBetaErf/LogGamma/16/01/01/0006/
        # LogGamma[z] == -LogGamma[-z] - Log[-z] +
        # Sign[Im[z]] Floor[Re[z]] Pi I + Log[Pi] -
        # Log[Sin[Pi (z - Floor[Re[z]])]] -
        # Pi I (1 - Abs[Sign[Im[z]]]) Abs[Floor[Re[z]]]
        if type == 3:
            if yfinal:
                s1 = mpc_neg(yfinal)
            else:
                s1 = mpc_neg(y)
            # s -= log(-z)
            s1 = mpc_sub(s1, mpc_log(mpc_neg(zorig), wp), wp)
            # floor(re(z))
            rezfloor = mpf_floor(zorig[0])
            imzsign = mpf_sign(zorig[1])
            pi = mpf_pi(wp)
            t = mpf_mul(pi, rezfloor)
            t = mpf_mul_int(t, imzsign, wp)
            s1 = (s1[0], mpf_add(s1[1], t, wp))
            s1 = mpc_add_mpf(s1, mpf_log(pi, wp), wp)
            t = mpc_sin_pi(mpc_sub_mpf(zorig, rezfloor), wp)
            t = mpc_log(t, wp)
            s1 = mpc_sub(s1, t, wp)
            # Note: may actually be unused, because we fall back
            # to the mpf_ function for real arguments
            if not imzsign:
                t = mpf_mul(pi, mpf_floor(rezfloor), wp)
                s1 = (s1[0], mpf_sub(s1[1], t, wp))
            return mpc_pos(s1, prec, rnd)
    else:
        if type == 0:
            if r:
                return mpc_div(mpc_exp(y, wp), r, prec, rnd)
            return mpc_exp(y, prec, rnd)
        if type == 2:
            if r:
                return mpc_div(r, mpc_exp(y, wp), prec, rnd)
            return mpc_exp(mpc_neg(y), prec, rnd)
        if type == 3:
            return mpc_pos(y, prec, rnd)
def mpf_factorial(x, prec, rnd='d'):
    """Factorial x! = gamma(x+1) of a real mpf value."""
    # Delegate to the gamma implementation, selecting the factorial variant.
    return mpf_gamma(x, prec, rnd, type=1)
def mpc_factorial(x, prec, rnd='d'):
    """Factorial x! = gamma(x+1) of a complex mpc value."""
    # Delegate to the complex gamma implementation (factorial variant).
    return mpc_gamma(x, prec, rnd, type=1)
def mpf_rgamma(x, prec, rnd='d'):
    """Reciprocal gamma 1/gamma(x) of a real mpf value."""
    # type=2 selects the reciprocal-gamma variant.
    return mpf_gamma(x, prec, rnd, type=2)
def mpc_rgamma(x, prec, rnd='d'):
    """Reciprocal gamma 1/gamma(x) of a complex mpc value."""
    # type=2 selects the reciprocal-gamma variant.
    return mpc_gamma(x, prec, rnd, type=2)
def mpf_loggamma(x, prec, rnd='d'):
    """Natural logarithm of gamma(x) for a real mpf value.

    Raises ComplexResult for negative x, where log-gamma is complex-valued.
    """
    if x[0]:  # sign flag set -> negative argument
        raise ComplexResult
    return mpf_gamma(x, prec, rnd, type=3)
def mpc_loggamma(z, prec, rnd='d'):
    """Log-gamma of a complex mpc value ``z = (a, b)``.

    Special-cases a negative real argument, where loggamma has a nonzero
    imaginary part (a multiple of pi) even though gamma itself is real.
    """
    a, b = z
    asign, aman, aexp, abc = a
    bsign, bman, bexp, bbc = b
    if b == fzero and asign:
        # Negative real axis: real part from mpf_gamma, imaginary part
        # is n*pi where n is derived from the mantissa/exponent
        # (assumes a is an exact negative integer-like value; mirrors the
        # same special case inside mpc_gamma -- TODO confirm for aexp > 0).
        re = mpf_gamma(a, prec, rnd, 3)
        n = (-aman) >> (-aexp)
        im = mpf_mul_int(mpf_pi(prec+10), n, prec, rnd)
        return re, im
    return mpc_gamma(z, prec, rnd, 3)
def mpf_gamma_int(n, prec, rnd=round_fast):
    """gamma(n) for a (positive) Python int n.

    Uses the small-factorial cache when available (gamma(n) = (n-1)!),
    otherwise falls back to the general real gamma function.
    """
    if n < SMALL_FACTORIAL_CACHE_SIZE:
        # cache[k] holds k!, so gamma(n) = (n-1)! = cache[n-1]
        return mpf_pos(small_factorial_cache[n-1], prec, rnd)
    return mpf_gamma(from_int(n), prec, rnd)
| bsd-3-clause |
jsma/django-cms | cms/models/permissionmodels.py | 3 | 9908 | # -*- coding: utf-8 -*-
from django.apps import apps
from django.db import models
from django.conf import settings
from django.contrib.auth.models import Group, UserManager
from django.contrib.sites.models import Site
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from cms.models import Page
from cms.models.managers import (PagePermissionManager,
GlobalPagePermissionManager)
from cms.utils.helpers import reversion_register
# Cannot use contrib.auth.get_user_model() at compile time.
# Resolve the configured AUTH_USER_MODEL manually from the app registry;
# fail loudly at import time if the user app is not installed.
user_app_name, user_model_name = settings.AUTH_USER_MODEL.rsplit('.', 1)
User = None
try:
    User = apps.get_registered_model(user_app_name, user_model_name)
except KeyError:
    # Model not registered (app missing from INSTALLED_APPS).
    pass
if User is None:
    raise ImproperlyConfigured(
        "You have defined a custom user model %s, but the app %s is not "
        "in settings.INSTALLED_APPS" % (settings.AUTH_USER_MODEL, user_app_name)
    )
# NOTE: those are not just numbers!! we will do binary AND on them,
# so pay attention when adding/changing them, or MASKs..
ACCESS_PAGE = 1
ACCESS_CHILDREN = 2 # just immediate children (1 level)
ACCESS_PAGE_AND_CHILDREN = 3 # page + immediate children (1 level); == MASK_PAGE | MASK_CHILDREN
ACCESS_DESCENDANTS = 4
ACCESS_PAGE_AND_DESCENDANTS = 5  # == MASK_PAGE | MASK_DESCENDANTS
# binary masks for ACCESS permissions
MASK_PAGE = 1
MASK_CHILDREN = 2
MASK_DESCENDANTS = 4
# Choices shown in the PagePermission "grant on" select box.
ACCESS_CHOICES = (
    (ACCESS_PAGE, _('Current page')),
    (ACCESS_CHILDREN, _('Page children (immediate)')),
    (ACCESS_PAGE_AND_CHILDREN, _('Page and children (immediate)')),
    (ACCESS_DESCENDANTS, _('Page descendants')),
    (ACCESS_PAGE_AND_DESCENDANTS, _('Page and descendants')),
)
class AbstractPagePermission(models.Model):
    """Abstract page permissions

    Holds the grantee (user or group) and the individual permission flags
    shared by per-page and global page permissions.
    """
    # who:
    user = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_("user"), blank=True, null=True)
    group = models.ForeignKey(Group, verbose_name=_("group"), blank=True, null=True)
    # what:
    can_change = models.BooleanField(_("can edit"), default=True)
    can_add = models.BooleanField(_("can add"), default=True)
    can_delete = models.BooleanField(_("can delete"), default=True)
    can_change_advanced_settings = models.BooleanField(_("can change advanced settings"), default=False)
    can_publish = models.BooleanField(_("can publish"), default=True)
    can_change_permissions = models.BooleanField(_("can change permissions"), default=False, help_text=_("on page level"))
    can_move_page = models.BooleanField(_("can move"), default=True)
    can_view = models.BooleanField(_("view restricted"), default=False, help_text=_("frontend view restriction"))

    class Meta:
        abstract = True
        app_label = 'cms'

    def clean(self):
        """Validate the permission row: a grantee must be chosen, and every
        granted action requires the base 'can edit' permission as well."""
        super(AbstractPagePermission, self).clean()
        if not self.user and not self.group:
            raise ValidationError(_('Please select user or group.'))
        if self.can_change:
            # With edit permission, all other flags are consistent.
            return
        if self.can_add:
            message = _("Users can't create a page without permissions "
                        "to change the created page. Edit permissions required.")
            raise ValidationError(message)
        if self.can_delete:
            message = _("Users can't delete a page without permissions "
                        "to change the page. Edit permissions required.")
            raise ValidationError(message)
        if self.can_publish:
            message = _("Users can't publish a page without permissions "
                        "to change the page. Edit permissions required.")
            raise ValidationError(message)
        if self.can_change_advanced_settings:
            message = _("Users can't change page advanced settings without permissions "
                        "to change the page. Edit permissions required.")
            raise ValidationError(message)
        if self.can_change_permissions:
            message = _("Users can't change page permissions without permissions "
                        "to change the page. Edit permissions required.")
            raise ValidationError(message)
        if self.can_move_page:
            message = _("Users can't move a page without permissions "
                        "to change the page. Edit permissions required.")
            raise ValidationError(message)

    @property
    def audience(self):
        """Return audience by priority, so: All or User, Group
        """
        targets = filter(lambda item: item, (self.user, self.group,))
        return ", ".join([force_text(t) for t in targets]) or 'No one'

    def save(self, *args, **kwargs):
        if not self.user and not self.group:
            # don't allow `empty` objects
            return
        return super(AbstractPagePermission, self).save(*args, **kwargs)

    def get_configured_actions(self):
        """Return the list of action names fully enabled by this row."""
        actions = [action for action in self.get_permissions_by_action()
                   if self.has_configured_action(action)]
        return actions

    def has_configured_action(self, action):
        """True when every flag required for ``action`` is set on this row."""
        permissions = self.get_permissions_by_action()[action]
        return all(getattr(self, perm) for perm in permissions)

    @classmethod
    def get_all_permissions(cls):
        """Return the names of all boolean permission fields."""
        perms = [
            'can_add',
            'can_change',
            'can_delete',
            'can_publish',
            'can_change_advanced_settings',
            'can_change_permissions',
            'can_move_page',
            'can_view',
        ]
        return perms

    @classmethod
    def get_permissions_by_action(cls):
        # Maps an action to the required flags on the
        # PagePermission model or GlobalPagePermission model
        permissions_by_action = {
            'add_page': ['can_add', 'can_change'],
            'change_page': ['can_change'],
            'change_page_advanced_settings': ['can_change', 'can_change_advanced_settings'],
            'change_page_permissions': ['can_change', 'can_change_permissions'],
            'delete_page': ['can_change', 'can_delete'],
            'delete_page_translation': ['can_change', 'can_delete'],
            'move_page': ['can_change', 'can_move_page'],
            'publish_page': ['can_change', 'can_publish'],
            'view_page': ['can_view'],
        }
        return permissions_by_action
@python_2_unicode_compatible
class GlobalPagePermission(AbstractPagePermission):
    """Permissions for all pages (global).

    Optionally restricted to a set of sites; an empty ``sites`` set means
    the permission applies everywhere.
    """
    can_recover_page = models.BooleanField(
        verbose_name=_("can recover pages"),
        default=True,
        help_text=_("can recover any deleted page"),
    )
    sites = models.ManyToManyField(
        to=Site,
        blank=True,
        help_text=_('If none selected, user haves granted permissions to all sites.'),
        verbose_name=_('sites'),
    )
    objects = GlobalPagePermissionManager()

    class Meta:
        verbose_name = _('Page global permission')
        verbose_name_plural = _('Pages global permissions')
        app_label = 'cms'

    def __str__(self):
        return "%s :: GLOBAL" % self.audience
@python_2_unicode_compatible
class PagePermission(AbstractPagePermission):
    """Page permissions for single page

    ``grant_on`` selects which pages the row applies to (the page itself,
    its immediate children, its descendants, or combinations thereof).
    """
    grant_on = models.IntegerField(_("Grant on"), choices=ACCESS_CHOICES, default=ACCESS_PAGE_AND_DESCENDANTS)
    page = models.ForeignKey(Page, null=True, blank=True, verbose_name=_("page"))
    objects = PagePermissionManager()

    class Meta:
        verbose_name = _('Page permission')
        verbose_name_plural = _('Page permissions')
        app_label = 'cms'

    def __str__(self):
        page = self.page_id and force_text(self.page) or "None"
        return "%s :: %s has: %s" % (page, self.audience, force_text(self.get_grant_on_display()))

    def clean(self):
        super(PagePermission, self).clean()
        if self.can_add and self.grant_on == ACCESS_PAGE:
            # this is a misconfiguration - user can add/move page to current
            # page but after he does this, he will not have permissions to
            # access this page anymore, so avoid this.
            message = _("Add page permission requires also access to children, "
                        "or descendants, otherwise added page can't be changed "
                        "by its creator.")
            raise ValidationError(message)

    def get_page_ids(self):
        """Yield the ids of all pages this permission row covers,
        according to the binary masks encoded in ``grant_on``."""
        if self.grant_on & MASK_PAGE:
            yield self.page_id
        if self.grant_on & MASK_CHILDREN:
            children = self.page.get_children().values_list('id', flat=True)
            for child in children:
                yield child
        elif self.grant_on & MASK_DESCENDANTS:
            # Prefer the cached descendant tree when present to avoid a query.
            if self.page.has_cached_descendants():
                descendants = (page.pk for page in self.page.get_cached_descendants())
            else:
                descendants = self.page.get_descendants().values_list('id', flat=True).iterator()
            for descendant in descendants:
                yield descendant
class PageUserManager(UserManager):
    # Runtime-only manager: keep it out of the migrations state.
    use_in_migrations = False
class PageUser(User):
    """Cms specific user data, required for permission system

    Multi-table-inheritance extension of the configured user model that
    records which user created this one.
    """
    created_by = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="created_users")

    objects = PageUserManager()

    class Meta:
        verbose_name = _('User (page)')
        verbose_name_plural = _('Users (page)')
        app_label = 'cms'
class PageUserGroup(Group):
    """Cms specific group data, required for permission system

    Multi-table-inheritance extension of the auth Group that records
    which user created the group.
    """
    created_by = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="created_usergroups")

    class Meta:
        verbose_name = _('User group (page)')
        verbose_name_plural = _('User groups (page)')
        app_label = 'cms'

# Register page permissions with django-reversion for version tracking.
reversion_register(PagePermission)
| bsd-3-clause |
raincoatrun/basemap | examples/testgdal.py | 4 | 2655 | """
example showing how to plot data from a DEM file and an ESRI shape file using
gdal (http://pypi.python.org/pypi/GDAL).
"""
from osgeo import gdal, ogr
from mpl_toolkits.basemap import Basemap, cm
import numpy as np
import matplotlib.pyplot as plt
from numpy import ma
# read 2.5 minute U.S. DEM file using gdal.
# (http://www.prism.oregonstate.edu/docs/meta/dem_25m.htm)
gd = gdal.Open('us_25m.dem')
array = gd.ReadAsArray()
# get lat/lon coordinates from DEM file.
coords = gd.GetGeoTransform()
nlons = array.shape[1]; nlats = array.shape[0]
delon = coords[1]
delat = coords[5]  # negative for north-up rasters -- TODO confirm for this file
lons = coords[0] + delon*np.arange(nlons)
lats = coords[3] + delat*np.arange(nlats)[::-1] # reverse lats
# setup figure.
fig = plt.figure(figsize=(11,6))
# setup basemap instance (Lambert conformal over the continental US).
m = Basemap(llcrnrlon=-119,llcrnrlat=22,urcrnrlon=-64,urcrnrlat=49,
            projection='lcc',lat_1=33,lat_2=45,lon_0=-95)
# create masked array, reversing data in latitude direction
# (so that data is oriented in increasing latitude, as transform_scalar requires).
topoin = ma.masked_values(array[::-1,:],-999.)
# transform DEM data to a 4 km native projection grid
nx = int((m.xmax-m.xmin)/4000.)+1; ny = int((m.ymax-m.ymin)/4000.)+1
topodat = m.transform_scalar(topoin,lons,lats,nx,ny,masked=True)
# plot DEM image on map.
im = m.imshow(topodat,cmap=cm.GMT_haxby_r)
# draw meridians and parallels.
m.drawparallels(np.arange(20,71,10),labels=[1,0,0,0])
m.drawmeridians(np.arange(-120,-40,10),labels=[0,0,0,1])
# plot state boundaries from shapefile using ogr.
g = ogr.Open ("st99_d00.shp")
L = g.GetLayer(0) # data is in 1st layer.
for feat in L: # iterate over features in layer
    geo = feat.GetGeometryRef()
    # iterate over geometries.
    for count in range(geo.GetGeometryCount()):
        geom = geo.GetGeometryRef(count)
        if not geom.GetGeometryCount(): # just one geometry.
            # get lon,lat points
            lons = [geom.GetX(i) for i in range(geom.GetPointCount())]
            lats = [geom.GetY(i) for i in range(geom.GetPointCount())]
            # convert to map projection coords.
            x, y = m(lons,lats)
            # plot on map.
            m.plot(x,y,'k')
        else: # iterate over nested geometries.
            # NOTE(review): 'g' is rebound here, shadowing the OGR datasource
            # opened above; g.GetName() in the title below then refers to a
            # geometry if any multi-geometry feature was processed -- verify.
            for cnt in range( geom.GetGeometryCount()):
                g = geom.GetGeometryRef( cnt )
                lons = [g.GetX(i) for i in range(g.GetPointCount())]
                lats = [g.GetY(i) for i in range(g.GetPointCount())]
                x, y = m(lons,lats)
                m.plot(x,y,'k')
# draw colorbar.
m.colorbar(im)
plt.title(gd.GetDescription()+' with state boundaries from '+g.GetName(),y=1.05)
plt.show()
| gpl-2.0 |
jmartinm/invenio | modules/websubmit/lib/websubmitadmin_templates.py | 34 | 182346 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
__revision__ = "$Id$"
import cgi
from invenio.config import CFG_SITE_URL, CFG_SITE_LANG
from invenio.websubmitadmin_config import WEBSUBMITADMINURL, FUNCTIONS_WITH_FILE_PARAMS, WEBSUBMITADMINURL_OLD
def create_html_table_from_tuple(tableheader=None, tablebody=None, start="", end=""):
    """Create a table from a tuple or a list.

    @param tableheader: optional header for the columns (MUST be a list of
       header titles); None is treated as no header.
    @param tablebody: table body (rows - tuple of tuples, or of scalars
       such as int/str/dict for single-column tables); None means empty.
    @param start: text to be added in the beginning, most likely beginning of a form
    @param end: text to be added in the end, most likely end of a form.
    @return: string of HTML for the complete table.
    """
    ## BUGFIX: the original tested "type(x) is None", which can never be
    ## true (type() always returns a type object), so passing None crashed
    ## further down. Test the values themselves instead.
    if tableheader is None:
        tableheader = ()
    if tablebody is None:
        tablebody = ()
    ## determine table cells alignment based upon first row alignment
    ## (numbers right-aligned, everything else left-aligned)
    align = []
    try:
        ## NOTE(review): the reference to `long` makes this Python 2-only.
        if type(tablebody[0]) in [int, long]:
            align = ['admintdright']
        elif type(tablebody[0]) in [str, dict]:
            align = ['admintdleft']
        else:
            for item in tablebody[0]:
                if type(item) is int:
                    align.append('admintdright')
                else:
                    align.append('admintdleft')
    except IndexError:
        ## Empty tablebody
        pass
    ## table header row:
    tblstr = ""
    for hdr in tableheader:
        tblstr += """ <th class="adminheader">%s</th>\n""" % (hdr,)
    if tblstr != "":
        tblstr = """ <tr>\n%s</tr>\n""" % (tblstr, )
    tblstr = start + """<table class="admin_wvar_nomargin">\n""" + tblstr
    ## table body
    if len(tablebody) > 0:
        j = 1
        for row in tablebody:
            j += 1
            # Alternate the row highlight class on every other row.
            tblstr += """ <tr class="admin_row_highlight %s">\n""" % \
                      ((j % 2) and 'admin_row_color' or '')
            if type(row) not in [int, long, str, dict]:
                # Row is itself a sequence of cells.
                for i in range(len(row)):
                    tblstr += """<td class="%s">%s</td>\n""" % (align[i], row[i])
            else:
                # Scalar row: single cell.
                tblstr += """ <td class="%s">%s</td>\n""" % (align[0], row)
            tblstr += """ </tr>\n"""
    else:
        # Empty tuple of table data - display message:
        tblstr += """<tr>
<td class="admintdleft" colspan="%s"><span class="info">None</span></td>
</tr>
""" % (len(tableheader),)
    tblstr += """</table>\n"""
    tblstr += end
    return tblstr
def create_html_select_list(select_name, option_list, selected_values="", default_opt="", multiple="", list_size="", css_style="", css_class=""):
    """Make a HTML "select" element from the parameters passed.
    @param select_name: Name given to the HTML "select" element
    @param option_list: a tuple of tuples containing the options (their values, followed by their
        display text). Thus: ( (opt_val, opt_txt), (opt_val, opt_txt) )
        It is also possible to provide a tuple of single-element tuples in the case when it is not desirable
        to have different option text to the value, thus: ( (opt_val,), (opt_val,) ).
    @param selected_values: can be a list/tuple of strings, or a single string/unicode string. Treated as
        the "selected" values for the select list's options.
    @param default_opt: The default option (value and displayed text) for the select list. A string is
        used for both value and text; a list/tuple supplies (value, text), falling back to value for both.
    @param multiple: shall this be a multiple select box? If non-empty, marked as "multiple".
    @param list_size: the size for a multiple select list; defaults to the list length, capped at 30.
    @param css_style: any additional CSS style information for the "style" attribute.
    @param css_class: any class value for CSS.
        NOTE(review): css_class is accepted but never used in the body.
    @return: a string containing the completed HTML Select element
    """
    ## sanity checking:
    ## NOTE(review): `unicode` makes this function Python 2-only.
    if type(css_style) not in (str, unicode):
        css_style = ""
    if type(option_list) not in (list, tuple):
        option_list = ()
    txt = """\n <select name="%s"%s""" % ( cgi.escape(select_name, 1),
                                           (multiple != "" and " multiple") or ("")
                                         )
    if multiple != "":
        ## Size attribute for multiple-select list
        if (type(list_size) is str and list_size.isdigit()) or type(list_size) is int:
            txt += """ size="%s\"""" % (list_size,)
        else:
            ## No explicit size: use the option count, capped at 30.
            txt += """ size="%s\"""" % ( (len(option_list) <= 30 and str(len(option_list))) or ("30"),)
    if css_style != "":
        txt += """ style="%s\"""" % (cgi.escape(css_style, 1),)
    txt += """>\n"""
    if default_opt != "":
        if type(default_opt) in (str, unicode):
            ## default_opt is a string - use its value as both option value and displayed text
            txt += """ <option value="%(deflt_opt)s">%(deflt_opt)s</option>\n""" % {'deflt_opt' : cgi.escape(default_opt, 1)}
        elif type(default_opt) in (list, tuple):
            try:
                txt += """ <option value="%(deflt_opt)s">""" % {'deflt_opt' : cgi.escape(default_opt[0], 1) }
                try:
                    txt += """%(deflt_opt)s""" % {'deflt_opt' : cgi.escape(default_opt[1], 1) }
                except IndexError:
                    ## single-element default: reuse the value as the text
                    txt += """%(deflt_opt)s""" % {'deflt_opt' : cgi.escape(default_opt[0], 1) }
                txt += """</option>\n"""
            except IndexError:
                ## seems to be an empty list - there will be no default opt
                pass
    for option in option_list:
        try:
            txt += """ <option value="%(option_val)s\"""" % { 'option_val' : cgi.escape(option[0], 1) }
            if type(selected_values) in (list, tuple):
                txt += """%(option_selected)s""" % \
                       { 'option_selected' : (option[0] in selected_values and " selected") or ("") }
            elif type(selected_values) in (str, unicode) and selected_values != "":
                txt += """%(option_selected)s""" % \
                       { 'option_selected' : (option[0] == selected_values and " selected") or ("") }
            try:
                txt += """>%(option_txt)s</option>\n""" % { 'option_txt' : cgi.escape(option[1], 1) }
            except IndexError:
                ## single-element option: display its value as the text
                txt += """>%(option_txt)s</option>\n""" % { 'option_txt' : cgi.escape(option[0], 1) }
        except IndexError:
            ## empty option tuple - skip
            pass
    txt += """ </select>\n"""
    return txt
class Template:
"""Invenio Template class for creating Web interface"""
def tmpl_navtrail(self, ln=CFG_SITE_LANG):
"""display the navtrail, e.g.:
Home > Admin Area > WebSubmit Administration > Available WebSubmit Actions
@param title: the last part of the navtrail. Is not a link
@param ln: language
return html formatted navtrail
"""
return '<a class="navtrail" href="%s/help/admin">Admin Area</a> ' % (CFG_SITE_URL,)
def _create_adminbox(self, header="", datalist=[], cls="admin_wvar"):
"""Create an adminbox table around the main data on a page - row based.
@param header: the header for the "adminbox".
@param datalist: contents of the "body" to be encapsulated by the "adminbox".
@param cls: css-class to format the look of the table.
@return: the "adminbox" and its contents.
"""
if len(datalist) == 1:
per = "100"
else:
per = "75"
output = """
<table class="%s" width="95%%">
""" % (cls,)
output += """
<thead>
<tr>
<th class="adminheaderleft" colspan="%s">
%s
</th>
</tr>
</thead>
<tbody>""" % (len(datalist), header)
output += """
<tr>
<td style="vertical-align: top; margin-top: 5px; width: %s;">
%s
</td>
""" % (per+'%', datalist[0])
if len(datalist) > 1:
output += """
<td style="vertical-align: top; margin-top: 5px; width: %s;">
%s
</td>""" % ('25%', datalist[1])
output += """
</tr>
</tbody>
</table>
"""
return output
    def _create_user_message_string(self, user_msg):
        """Create and return a string containing any message(s) to be shown to the user.
        In particular, these messages are generally info/warning messages.

        @param user_msg: The message to be shown to the user: either a single
           string, or a list/tuple of strings (one message per entry).
        @return: EITHER: a string containing a HTML "DIV" section with the
           message(s), one per line (separated by "<br />" tags);
           OR: an empty string when "user_msg" was empty.
        """
        user_msg_str = ""
        user_msg_str_end = ""
        ## Normalise a plain string into a tuple of messages.
        ## NOTE(review): `unicode` makes this Python 2-only.
        if type(user_msg) in (str, unicode):
            if user_msg == "":
                user_msg = ()
            else:
                user_msg = (user_msg,)
        if len(user_msg) > 0:
            user_msg_str += """<div align="center">\n"""
            user_msg_str_end = """</div><br />\n"""
        for msg in user_msg:
            # Escape each message before embedding it in the HTML.
            user_msg_str += """<span class="info">%s</span><br />\n""" % (cgi.escape(msg, 1),)
        user_msg_str += user_msg_str_end
        return user_msg_str
    def _create_websubmitadmin_main_menu_header(self):
        """Create the main menu to be displayed on WebSubmit Admin pages.

        @return: an "adminbox" (see _create_adminbox) wrapping the table of
           numbered links to the WebSubmit admin sub-pages.
        """
        menu_body = """
        <div>
        <table>
        <tr>
        <td>0. <small><a href="%(adminurl)s/showall">Show all</a></small></td>
        <td> 1. <small><a href="%(adminurl)s/doctypelist">Available Document Types</a></small></td>
        <td> 2. <small><a href="%(adminurl)s/doctypeadd">Add New Document Type</a></small></td>
        <td> 3. <small><a href="%(adminurl)s/doctyperemove">Remove Document Type</a></small></td>
        <td> 4. <small><a href="%(adminurl)s/actionlist">Available Actions</a></small></td>
        <td> 5. <small><a href="%(adminurl)s/jschecklist">Available Checks</a></small></td>
        </tr>
        <tr>
        <td>6. <small><a href="%(adminurl)s/elementlist">Available Elements</a></small></td>
        <td> 7. <small><a href="%(adminurl)s/functionlist">Available Functions</a></small></td>
        <td> 8. <small><a href="%(adminurl)s/organisesubmissionpage">Organise Main Page</a></small></td>
        <td colspan=2> 9. <small><a href="%(siteurl)s/help/admin/websubmit-admin-guide">Guide</a></small></td>
        </tr>
        </table>
        </div>
        <br />
        """ % { 'adminurl' : WEBSUBMITADMINURL, 'siteurl': CFG_SITE_URL }
        return self._create_adminbox(header="Main Menu", datalist=[menu_body])
    def _element_display_preview_get_element(self,
                                             elname="",
                                             eltype="",
                                             elsize="",
                                             elrows="",
                                             elcols="",
                                             elval="",
                                             elfidesc="",
                                             ellabel=""):
        """Return the raw display-code for an individual element.

        @param elname: element name (NOTE(review): accepted but unused here)
        @param eltype: one-letter element type code (D/F/H/I/R/S/T)
        @param elsize: size attribute (text/file inputs)
        @param elrows: rows attribute (textarea)
        @param elcols: cols attribute (textarea)
        @param elval: value attribute (text/hidden inputs)
        @param elfidesc: raw description markup (user-defined/select elements)
        @param ellabel: label text prepended to the element markup
        @return: HTML snippet previewing the element
        """
        preview = "%s" % (ellabel,)
        ## Map the one-letter type code to its preview markup. All entries
        ## are built eagerly; unknown codes fall through to the KeyError
        ## handler below.
        try:
            preview += {"D" : """ %s """ % (elfidesc,),
                        "F" : """<input type="file" %sname="dummyfile">""" % \
                              ( (elsize != "" and """size="%s" """ % (cgi.escape(elsize, 1),) ) or (""),),
                        "H" : """<span class="info">Hidden Input. Contains Following Value: %s</span>""" % (cgi.escape(elval, 1),),
                        "I" : """<input type="text" %sname="dummyinput" value="%s">""" % \
                              ( (elsize != "" and """size="%s" """ % (cgi.escape(elsize, 1),) ) or (""), cgi.escape(elval, 1)),
                        "R" : """<span class="info">Cannot Display Response Element - See Element Description</span>""",
                        "S" : """ %s """ % (elfidesc,),
                        "T" : """<textarea name="dummytextarea" %s%s></textarea>""" % \
                              ( (elrows != "" and """rows="%s" """ % (cgi.escape(elrows, 1),) ) or (""),
                                (elcols != "" and """cols="%s" """ % (cgi.escape(elcols, 1),) ) or (""),)
                        }[eltype]
        except KeyError:
            ## Unknown element type - display warning:
            preview += """<span class="info">Element Type not Recognised - Cannot Display</span>"""
        return preview
    def _element_display_preview(self,
                                 elname="",
                                 eltype="",
                                 elsize="",
                                 elrows="",
                                 elcols="",
                                 elval="",
                                 elfidesc=""
                                 ):
        """Return a form containing a preview of an element, based on the values of the parameters provided
        @param elname: element name (NOTE(review): not forwarded to the
           element-rendering helper below)
        @param eltype: element type (e.g. text, user-defined, etc)
        @param elsize: element size (e.g. for text input element)
        @param elrows: number of rows (e.g. for textarea element)
        @param elcols: number of columns (e.g. for textarea element)
        @param elval: value of element (e.g. for text input element)
        @param elfidesc: description for element (e.g. for user-defined element)
        @return: string of HTML making up a preview of the element in a table
        """
        ## Open a dummy form and table in which to display a preview of the element
        body = """<div><br />
        <form name="dummyeldisplay" action="%(adminurl)s/elementlist">
        <table class="admin_wvar" align="center">
        <thead>
        <tr>
        <th class="adminheaderleft" colspan="1">
        Element Preview:
        </th>
        </tr>
        </thead>
        <tbody>
        <tr>
        <td>
        <br />
        """ % {'adminurl' : WEBSUBMITADMINURL}
        ## Based on element type, display a preview of element:
        body += self._element_display_preview_get_element(eltype=eltype, elsize=elsize, elrows=elrows, elcols=elcols,
                                                          elval=elval, elfidesc=elfidesc)
        ## Close dummy form and preview table:
        body += """ <br />
        </td>
        </tr>
        </tbody>
        </table>
        </form>
        </div>"""
        return body
    def tmpl_display_addelementform(self,
                                    elname="",
                                    elmarccode="",
                                    eltype="",
                                    elsize="",
                                    elrows="",
                                    elcols="",
                                    elmaxlength="",
                                    elval="",
                                    elfidesc="",
                                    elmodifytext="",
                                    elcd="",
                                    elmd="",
                                    perform_act="elementadd",
                                    user_msg="",
                                    el_use_tuple=""
                                   ):
        """Display Web form used to add a new element to the database
           @param elname: element name
           @param elmarccode: marc code of element
           @param eltype: element type (e.g. text, user-defined, etc)
           @param elsize: element size (e.g. for text input element)
           @param elrows: number of rows (e.g. for textarea element)
           @param elcols: number of columns (e.g. for textarea element)
           @param elmaxlength: maximum length of a text input field
           @param elval: value of element (e.g. for text input element)
           @param elfidesc: description for element (e.g. for user-defined element)
           @param elmodifytext: element's modification text
           @param elcd: creation date of element
           @param elmd: last modification date of element
           @param perform_act: the form's action (relative to the WebSubmit admin base
            URL); "elementadd" means a brand-new element is being created, any other
            value means an existing element is being edited (a preview is then shown).
           @param user_msg: Any message to be displayed on screen, such as a status report for the last task, etc.
           @param el_use_tuple: tuple of (doctype, action, page-number) tuples describing
            the submission pages on which this element is used.
           @return: HTML page body.
        """
        ## First, get a rough preview of the element:
        output = ""
        ## Mapping of single-letter element-type codes to human-readable labels:
        etypes = {"D" : "User Defined Input", "F" : "File Input", "H" : "Hidden Input", "I" : "Text Input", \
                  "R" : "Response", "S" : "Select Box", "T" : "Text Area Element"}
        ## Python 2 idiom: dict.keys() returns a list, sorted in place so the
        ## <select> options appear in a stable alphabetical order:
        etypeids = etypes.keys()
        etypeids.sort()
        body_content = ""
        output += self._create_user_message_string(user_msg)
        if perform_act != "elementadd":
            ## Editing an existing element - display a preview of it above the form:
            body_content += self._element_display_preview(elname=elname, eltype=eltype, elsize=elsize, \
                                                          elrows=elrows, elcols=elcols, elval=elval, elfidesc=elfidesc)
        else:
            body_content += "<br />"
        body_content += """<form method="post" action="%(adminurl)s/%(perform_action)s">""" \
                        % {'adminurl': WEBSUBMITADMINURL, 'perform_action': perform_act}
        body_content += """
        <table width="100%%" class="admin_wvar">
         <thead>
          <tr>
           <th class="adminheaderleft" colspan="2">
            Enter Element Details:
           </th>
          </tr>
         </thead>
         <tbody>
          <tr>
           <td width="20%%"> </td>
           <td width="80%%"> </td>
          </tr>
          <tr>
           <td width="20%%"><span class="adminlabel">Element Name:</span></td>
           <td width="80%%">"""
        if perform_act == "elementadd":
            ## Creating a new element - the name is editable:
            body_content += """
            <input type="text" size="30" name="elname" value="%(el_name)s" />""" % {'el_name' : cgi.escape(elname, 1)}
        else:
            ## Editing - the name is fixed, carried along in a hidden field:
            body_content += """<span class="info">%(el_name)s</span><input type="hidden" name="elname" value="%(el_name)s" />""" \
                            % {'el_name' : cgi.escape(elname, 1)}
        body_content += """</td>
          </tr>"""
        ## Creation/modification dates are read-only and shown only when present:
        if elcd != "" and elcd is not None:
            body_content += """
          <tr>
           <td width="20%%"><span class="adminlabel">Creation Date:</span></td>
           <td width="80%%"><span class="info">%s</span></td>
          </tr>""" % (cgi.escape(str(elcd), 1),)
        if elmd != "" and elmd is not None:
            body_content += """
          <tr>
           <td width="20%%"><span class="adminlabel">Last Modification Date:</span></td>
           <td width="80%%"><span class="info">%s</span></td>
          </tr>""" % (cgi.escape(str(elmd), 1),)
        body_content += """
          <tr>
           <td width="20%%"><span class="adminlabel">Modification Text:</span></td>
           <td width="80%%"><input type="text" size="90" name="elmodifytext" value="%(el_modifytext)s" /></td>
          </tr>""" % {'el_modifytext' : cgi.escape(elmodifytext, 1)}
        body_content += """
          <tr>
           <td width="20%%"><span class="adminlabel">Element Type:</span></td>
           <td width="80%%">
            <select name="eltype">
             <option value="NONE_SELECTED">Select:</option>\n"""
        ## One <option> per element type; the current type (if any) is pre-selected:
        for itm in etypeids:
            body_content += """             <option value="%s"%s>%s</option>\n""" % \
                            ( itm, (eltype == itm and " selected" ) or (""), cgi.escape(etypes[itm], 1) )
        body_content += """            </select>
           </td>
          </tr>
          <tr>
           <td width="20%%"><span class="adminlabel">Marc Code:</span></td>
           <td width="80%%"><input type="text" size="15" name="elmarccode" value="%(el_marccode)s" /></td>
          </tr>
        """ % {'el_marccode' : cgi.escape(elmarccode, 1)}
        body_content += """
          <tr>
           <td width="20%%"><span class="adminlabel">Size <i><small>(text elements)</small></i>:</span></td>
           <td width="80%%"><input type="text" size="10" name="elsize" value="%(el_size)s" /></td>
          </tr>
        """ % {'el_size' : cgi.escape(elsize, 1)}
        body_content += """
          <tr>
           <td width="20%%"><span class="adminlabel">No. Rows <i><small>(textarea elements)</small></i>:</span></td>
           <td width="80%%"><input type="text" size="6" name="elrows" value="%(el_rows)s" /></td>
          </tr>
        """ % {'el_rows' : cgi.escape(elrows, 1)}
        body_content += """
          <tr>
           <td width="20%%"><span class="adminlabel">No. Columns <i><small>(textarea elements)</small></i>:</span></td>
           <td width="80%%"><input type="text" size="6" name="elcols" value="%(el_cols)s" /></td>
          </tr>
        """ % {'el_cols' : cgi.escape(elcols, 1)}
        body_content += """
          <tr>
           <td width="20%%"><span class="adminlabel">Maximum Length <i><small>(text elements)</small></i>:</span></td>
           <td width="80%%"><input type="text" size="6" name="elmaxlength" value="%(el_maxlength)s" /></td>
          </tr>
        """ % {'el_maxlength' : cgi.escape(elmaxlength, 1)}
        body_content += """
          <tr>
           <td width="20%%"><span class="adminlabel">Value <i><small>(text/hidden elements)</small></i>:</span></td>
           <td width="80%%"><input type="text" size="90" name="elval" value="%(el_val)s" /></td>
          </tr>
        """ % {'el_val' : cgi.escape(elval, 1)}
        ## NOTE(review): wrap="nowarp" below looks like a typo for "nowrap" --
        ## confirm before changing the emitted HTML.
        body_content += """
          <tr>
           <td width="20%%"><span class="adminlabel">Element Description <i><small>(e.g. user-defined elements)</small></i>:</span></td>
           <td width="80%%"><textarea cols="100" rows="30" name="elfidesc" wrap="nowarp">%(el_fidesc)s</textarea></td>
          </tr>
        """ % {'el_fidesc' : cgi.escape(elfidesc, 1)}
        body_content += """
          <tr>
           <td width="20%%"> </td>
           <td width="80%%"><input name="elcommit" class="adminbutton" type="submit" value="Save Details" /></td>
          </tr>
         </tbody>
        </table>
        </form>
        """
        ## If there is information about which submission pages use this element, display it:
        if type(el_use_tuple) is tuple and len(el_use_tuple) > 0:
            body_content += """<br /><br />
        <table width="100%%" class="admin_wvar">
         <thead>
          <tr>
           <th class="adminheaderleft" colspan="2">
            Element Usage:
           </th>
          </tr>
         </thead>
         <tbody>
          <tr>
           <td width="20%%"> </td>
           <td width="80%%">"""
            for usecase in el_use_tuple:
                try:
                    body_content += """<small><a href="%(adminurl)s/doctypeconfiguresubmissionpageelements?doctype=%(doctype)s"""\
                                    """&action=%(action)s&pagenum=%(pageno)s"> %(subname)s: Page %(pageno)s</a></small><br />\n"""\
                                    % { 'adminurl' : WEBSUBMITADMINURL,
                                        'doctype' : usecase[0],
                                        'action' : usecase[1],
                                        'subname' : "%s%s" % (usecase[1], usecase[0]),
                                        'pageno' : usecase[2]
                                      }
                ## NOTE(review): indexing a too-short usecase tuple raises IndexError,
                ## not KeyError, so this best-effort guard may never fire -- confirm.
                except KeyError, e:
                    pass
            body_content += """   </td>
          </tr>
         </tbody>
        </table>
        """
        output += self._create_websubmitadmin_main_menu_header()
        output += self._create_adminbox(header="Element Details:", datalist=[body_content])
        return output
    def tmpl_display_submission_page_organisation(self,
                                                  submission_collection_tree,
                                                  submission_collections,
                                                  doctypes,
                                                  user_msg=""):
        """Display the hierarchical organisation of the submission page: the tree
           of submission-collections with their attached document types, plus
           forms for adding a new submission-collection and for attaching a
           document type to an existing collection.
           @param submission_collection_tree: (dict) root branch of the tree; each
            branch is read for the keys 'collection_id', 'collection_name',
            'has_brother_above', 'has_brother_below', 'doctype_children' and
            'collection_children' (see _build_collection_tree_display below).
           @param submission_collections: (list) submission-collections offered in
            the "Attached to" select lists.
           @param doctypes: (list) document types that may be attached to a
            submission-collection.
           @param user_msg: Any message to be displayed on screen, such as a status report for the last task, etc.
           @return: (string) HTML page body.
        """
        def _build_collection_tree_display(branch, level=0):
            """Recursively render one branch of the submission-collection tree as
               nested HTML tables: the collection row itself (with delete/up/down
               controls when not at the root level), then its doctype children,
               then its child collections one level deeper."""
            outstr = ""
            try:
                level = int(level)
            ## NOTE(review): int() raises ValueError (not TypeError) for
            ## non-numeric strings; only TypeError is handled here -- confirm intended.
            except TypeError:
                level = 0
            ## open a table in which collection and doctype children will be displayed:
            outstr += """<table border ="0" cellspacing="0" cellpadding="0">\n<tr>"""
            ## Display details of this collection:
            if level != 0:
                ## Button to allow deletion of collection from tree:
                outstr += """<td><a href="%(adminurl)s/organisesubmissionpage?sbmcolid=%(collection_id)s""" \
                          """&deletesbmcollection=1"><img border="0" src="%(siteurl)s/img/iconcross.gif" """ \
                          """title="Remove submission collection from tree"></a></td>""" \
                          % { 'siteurl' : cgi.escape(CFG_SITE_URL, 1),
                              'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1),
                              'collection_id' : cgi.escape(str(branch['collection_id']), 1),
                            }
                ## does this collection have a collection brother above it?
                if branch['has_brother_above'] == 1:
                    ## Yes it does - add 'up' arrow:
                    outstr += """<td><a href="%(adminurl)s/organisesubmissionpage?sbmcolid=%(collection_id)s""" \
                              """&movesbmcollectionup=1"><img border="0" src="%(siteurl)s/img/smallup.gif" """\
                              """title="Move submission collection up"></a></td>""" \
                              % { 'siteurl' : cgi.escape(CFG_SITE_URL, 1),
                                  'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1),
                                  'collection_id' : cgi.escape(str(branch['collection_id']), 1),
                                }
                else:
                    ## No it doesn't - no 'up' arrow:
                    outstr += """<td><img border="0" src="%(siteurl)s/img/white_field.gif"></td>"""\
                              % { 'siteurl' : cgi.escape(CFG_SITE_URL, 1), }
                ## does this collection have a collection brother below it?
                if branch['has_brother_below'] == 1:
                    ## Yes it does - add 'down' arrow:
                    outstr += """<td><a href="%(adminurl)s/organisesubmissionpage?sbmcolid=%(collection_id)s""" \
                              """&movesbmcollectiondown=1"><img border="0" src="%(siteurl)s/img/smalldown.gif" """\
                              """title="Move submission collection down"></a></td>""" \
                              % { 'siteurl' : cgi.escape(CFG_SITE_URL, 1),
                                  'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1),
                                  'collection_id' : cgi.escape(str(branch['collection_id']), 1),
                                }
                else:
                    ## No it doesn't - no 'down' arrow:
                    outstr += """<td><img border="0" src="%(siteurl)s/img/white_field.gif"></td>"""\
                              % { 'siteurl' : cgi.escape(CFG_SITE_URL, 1), }
                ## Display the collection name:
                ## NOTE(review): the collection name is interpolated WITHOUT cgi.escape,
                ## unlike every other value here -- confirm it is trusted/pre-escaped.
                outstr += """<td> <span style="color: green; font-weight: bold;">%s</span></td>""" \
                          % branch['collection_name']
            else:
                outstr += "<td> </td><td> </td><td> </td><td> </td>"
            outstr += "</tr>\n"
            ## If there are doctype children attached to this collection, display them:
            num_doctype_children = len(branch['doctype_children'])
            if num_doctype_children > 0:
                outstr += """<tr><td> </td><td> </td><td> </td><td>""" \
                          """<table border ="0" cellspacing="0" cellpadding="0">\n"""
            for child_num in xrange(0, num_doctype_children):
                outstr += """<tr>\n"""
                ## Button to allow doctype to be detached from tree:
                outstr += """<td><a href="%(adminurl)s/organisesubmissionpage?sbmcolid=%(collection_id)s""" \
                          """&doctype=%(doctype)s&catscore=%(catalogueorder)s&deletedoctypefromsbmcollection=1"><img border="0" """\
                          """src="%(siteurl)s/img/iconcross.gif" title="Remove doctype from branch"></a></td>""" \
                          % { 'siteurl' : cgi.escape(CFG_SITE_URL, 1),
                              'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1),
                              'collection_id' : cgi.escape(str(branch['collection_id']), 1),
                              'doctype' : cgi.escape(branch['doctype_children'][child_num]['doctype_id']),
                              'catalogueorder' : cgi.escape(str(branch['doctype_children'][child_num]['catalogue_order']), 1),
                            }
                ## Does this doctype have a brother above it?
                if child_num > 0:
                    ## Yes it does - add an 'up' arrow:
                    outstr += """<td><a href="%(adminurl)s/organisesubmissionpage?sbmcolid=%(collection_id)s""" \
                              """&doctype=%(doctype)s&catscore=%(catalogueorder)s&movedoctypeupinsbmcollection=1"><img border="0" """ \
                              """src="%(siteurl)s/img/smallup.gif" title="Move doctype up"></a></td>""" \
                              % { 'siteurl' : cgi.escape(CFG_SITE_URL, 1),
                                  'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1),
                                  'collection_id' : cgi.escape(str(branch['collection_id']), 1),
                                  'doctype' : cgi.escape(branch['doctype_children'][child_num]['doctype_id']),
                                  'catalogueorder' : cgi.escape(str(branch['doctype_children'][child_num]['catalogue_order']), 1),
                                }
                else:
                    ## No it doesn't - no 'up' arrow:
                    outstr += """<td><img border="0" src="%(siteurl)s/img/white_field.gif"></td>"""\
                              % { 'siteurl' : cgi.escape(CFG_SITE_URL, 1), }
                ## Does this doctype have a brother below it?
                if child_num < num_doctype_children - 1:
                    ## Yes it does - add a 'down' arrow:
                    outstr += """<td><a href="%(adminurl)s/organisesubmissionpage?sbmcolid=%(collection_id)s""" \
                              """&doctype=%(doctype)s&catscore=%(catalogueorder)s&movedoctypedowninsbmcollection=1"><img border="0" """ \
                              """src="%(siteurl)s/img/smalldown.gif" title="Move doctype down"></a></td>""" \
                              % { 'siteurl' : cgi.escape(CFG_SITE_URL, 1),
                                  'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1),
                                  'collection_id' : cgi.escape(str(branch['collection_id']), 1),
                                  'doctype' : cgi.escape(branch['doctype_children'][child_num]['doctype_id']),
                                  'catalogueorder' : cgi.escape(str(branch['doctype_children'][child_num]['catalogue_order']), 1),
                                }
                else:
                    ## No it doesn't - no 'down' arrow:
                    outstr += """<td><img border="0" src="%(siteurl)s/img/white_field.gif"></td>"""\
                              % { 'siteurl' : cgi.escape(CFG_SITE_URL, 1), }
                ## Display the document type details:
                outstr += """<td> <small><a href="%(adminurl)s/doctypeconfigure?doctype=%(doctype)s">"""\
                          """%(doctype_name)s [%(doctype)s]</a></small></td>""" \
                          % { 'adminurl' : WEBSUBMITADMINURL,
                              'doctype' : cgi.escape(branch['doctype_children'][child_num]['doctype_id'], 1),
                              'doctype_name' : cgi.escape(branch['doctype_children'][child_num]['doctype_lname'], 1),
                            }
                outstr += "</tr>\n"
            ## If there were doctype children attached to this collection, they have been displayed,
            ## so close up the row:
            if num_doctype_children > 0:
                outstr += "</table>\n</td></tr>"
            ## Display Lower branches of tree:
            for lower_branch in branch['collection_children']:
                outstr += "<tr><td> </td><td> </td><td> </td><td>"
                outstr += _build_collection_tree_display(branch=lower_branch, level=level+1)
                outstr += "</td></tr>\n"
            outstr += "</table>"
            return outstr
        ## begin display:
        output = ""
        body_content = """<br />
        <table class="admin_wvar" width="100%%">
         <thead>
          <tr>
           <th class="adminheaderleft">
            Submission Page Organisational Hierarchy:
           </th>
          </tr>
         </thead>
         <tbody>
          <tr>
           <td><br />"""
        body_content += _build_collection_tree_display(submission_collection_tree)
        body_content += """</td>
          </tr>"""
        body_content += """
          <tr>
           <td><br /></td>
          </tr>
          <tr>
           <td><br />"""
        ## Form to allow user to add a new submission-collection:
        body_content += """
            <form method="post" action="%(adminurl)s/organisesubmissionpage">
            <span class="adminlabel">You can add a new Submission-Collection:</span><br />
            <small style="color: navy;">Name:</small>
            <input type="text" name="addsbmcollection" style="margin: 5px 10px 5px 10px;" />
            <small style="color: navy;">Attached to:</small> """ \
            % { 'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1), }
        if len(submission_collections) > 0:
            body_content += """
            %(submission_collections)s""" \
            % { 'submission_collections' : \
                create_html_select_list(select_name="addtosbmcollection",
                                        option_list=submission_collections,
                                        css_style="margin: 5px 10px 5px 10px;")
              }
        else:
            ## NOTE(review): this hidden field is named "addtosbcollection", while
            ## the select in the other branch is "addtosbmcollection" (extra 'm') --
            ## looks like a typo; confirm against the form handler before fixing.
            body_content += """<input type="hidden" name="addtosbcollection" value="0" />
            <span style="color: green;">Top Level</span>"""
        body_content += """<input name="sbmcollectionadd" class="adminbutton" type="submit" """ \
                        """value="Add" />
            </form>"""
        body_content += """</td>
          </tr>
          <tr>
           <td><br /><br /></td>
          </tr>"""
        ## if there are doctypes in the system, provide a form to enable the user to
        ## connect a document type to the submission-collection tree:
        if len(submission_collections) > 1 and len(doctypes) > 0:
            body_content += """<tr><td>
            <form method="post" action="%(adminurl)s/organisesubmissionpage">
            <span class="adminlabel">You can attach a Document Type to a Submission-Collection:</span><br />
            <small style="color: navy;">Document Type Name:</small><br />
            %(doctypes)s
            <br /><small style="color: navy;">Attached to:</small> """ \
            % { 'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1),
                'doctypes' : create_html_select_list(select_name="adddoctypes",
                                                     option_list=doctypes,
                                                     css_style="margin: 5px 10px 5px 10px;",
                                                     multiple=1,
                                                     list_size=5)
              }
            ## submission_collections[0] is skipped: a doctype cannot be attached
            ## to the first ("Top Level") entry via this form.
            body_content += """
            %(submission_collections)s""" \
            % { 'submission_collections' : \
                create_html_select_list(select_name="addtosbmcollection",
                                        option_list=submission_collections[1:],
                                        css_style="margin: 5px 10px 5px 10px;")
              }
            body_content += """<input name="submissioncollectionadd" class="adminbutton" type="submit" """ \
                            """value="Add" />
            </form></td>
          </tr>"""
        body_content += """</tbody>
        </table>"""
        output += self._create_user_message_string(user_msg)
        output += self._create_websubmitadmin_main_menu_header()
        output += self._create_adminbox(header="Submission-Collections of Submission Page:", datalist=[body_content])
        return output
    def tmpl_display_addactionform(self,
                                   actid="",
                                   actname="",
                                   working_dir="",
                                   status_text="",
                                   perform_act = "actionadd",
                                   cd="",
                                   md="",
                                   user_msg=""):
        """Display web form used to add a new action to Websubmit.
           @param actid: Value of the "sactname" (action id) parameter of the Websubmit action.
           @param actname: Value of the "lactname" (long action name) parameter of the Websubmit action.
           @param working_dir: Value of the "dir" (action working/archive directory) parameter of the Websubmit action.
           @param status_text: Value of the "statustext" (action status text) parameter of the WebSubmit action.
           @param perform_act: action for form (minus websubmitadmin base url);
            "actionadd" means a new action is being created, any other value
            means an existing action is being edited.
           @param cd: Creation date of action.
           @param md: Modification date of action.
           @param user_msg: Any message to be displayed on screen, such as a status report for the last task, etc.
           @return: HTML page body.
        """
        output = ""
        output += self._create_user_message_string(user_msg)
        body_content = """<form method="post" action="%(adminurl)s/%(perform_action)s">""" \
                       % {'adminurl': WEBSUBMITADMINURL, 'perform_action': perform_act}
        body_content += """
        <table width="90%%">
         <tr>
          <td width="20%%"><span class="adminlabel">Action Code:</span></td>
          <td width="80%%">"""
        if perform_act == "actionadd":
            ## Adding a new action - the action code is editable:
            body_content += """
           <input type="text" size="6" name="actid" value="%(ac_id)s" />""" % {'ac_id' : cgi.escape(actid, 1)}
        else:
            ## Editing - the code is fixed, carried along in a hidden field:
            body_content += """<span class="info">%(ac_id)s</span><input type="hidden" name="actid" value="%(ac_id)s" />""" \
                            % {'ac_id' : cgi.escape(actid, 1)}
        body_content += """</td>
         </tr>"""
        ## Dates are displayed only when NEITHER cd nor md is the empty string.
        ## NOTE(review): if exactly one of the two is empty, BOTH are hidden;
        ## tmpl_display_addelementform tests each date independently -- confirm intended.
        if "" not in (cd, md):
            if cd is not None:
                body_content += """
         <tr>
          <td width="20%%"><span class="adminlabel">Creation Date:</span></td>
          <td width="80%%"><span class="info">%s</span></td>
         </tr>""" % (cgi.escape(str(cd), 1),)
            if md is not None:
                body_content += """
         <tr>
          <td width="20%%"><span class="adminlabel">Last Modification Date:</span></td>
          <td width="80%%"><span class="info">%s</span></td>
         </tr>""" % (cgi.escape(str(md), 1), )
        body_content += """
         <tr>
          <td width="20%%"><span class="adminlabel">Action Description:</span></td>
          <td width="80%%"><input type="text" size="60" name="actname" value="%(ac_name)s" /></td>
         </tr>
         <tr>
          <td width="20%%"><span class="adminlabel">Action dir:</span></td>
          <td width="80%%"><input type="text" size="40" name="working_dir" value="%(w_dir)s" /></td>
         </tr>
         <tr>
          <td width="20%%"><span class="adminlabel">Action Status Text:</span></td>
          <td width="80%%"><input type="text" size="60" name="status_text" value="%(s_txt)s" /></td>
         </tr>""" % {'ac_name' : cgi.escape(actname, 1), 'w_dir' : cgi.escape(working_dir, 1), \
                     's_txt' : cgi.escape(status_text, 1)}
        ## Save button closes the main form; a second small form provides "Cancel":
        body_content += """
         <tr>
          <td colspan="2">
           <table>
            <tr>
             <td>
              <input name="actcommit" class="adminbutton" type="submit" value="Save Details" />
              </form>
             </td>
             <td>
              <br />
              <form method="post" action="%(adminurl)s/actionlist">
               <input name="actcommitcancel" class="adminbutton" type="submit" value="Cancel" />
              </form>
             </td>
            </tr>
           </table>
          </td>
         </tr>
        </table>
        </form>
        """ % { 'adminurl' : WEBSUBMITADMINURL }
        output += self._create_websubmitadmin_main_menu_header()
        output += self._create_adminbox(header="Enter Action Details:", datalist=[body_content])
        return output
def tmpl_display_addjscheckform(self,
chname="",
chdesc="",
perform_act = "jscheckadd",
cd="",
md="",
user_msg=""):
"""Display web form used to add a new Check to Websubmit.
@param chname: Value of the "chname" (check ID/name) parameter of the WebSubmit Check.
@param chdesc: Value of the "chdesc" (check description - i.e. JS code) parameter of the
WebSubmit Check.
@param perform_act: action for form (minus websubmitadmin base url)
@param cd: Creation date of check.
@param md: Modification date of check.
@param user_msg: Any message to be displayed on screen, such as a status report for the last task, etc.
@return: HTML page body.
"""
output = ""
output += self._create_user_message_string(user_msg)
body_content = """<form method="post" action="%(adminurl)s/%(perform_action)s">""" \
% {'adminurl': WEBSUBMITADMINURL, 'perform_action': perform_act}
body_content += """
<table width="90%%">
<tr>
<td width="20%%"><span class="adminlabel">Check Name:</span></td>
<td width="80%%">"""
if perform_act == "jscheckadd":
body_content += """
<input type="text" size="15" name="chname" value="%(ch_name)s" />""" % {'ch_name' : cgi.escape(chname, 1)}
else:
body_content += """<span class="info">%(ch_name)s</span><input type="hidden" name="chname" value="%(ch_name)s" />""" \
% {'ch_name' : cgi.escape(chname, 1)}
body_content += """</td>
</tr>"""
if "" not in (cd, md):
if cd is not None:
body_content += """
<tr>
<td width="20%%"><span class="adminlabel">Creation Date:</span></td>
<td width="80%%"><span class="info">%s</span></td>
</tr>""" % (cgi.escape(str(cd), 1),)
if md is not None:
body_content += """
<tr>
<td width="20%%"><span class="adminlabel">Last Modification Date:</span></td>
<td width="80%%"><span class="info">%s</span></td>
</tr>""" % (cgi.escape(str(md), 1),)
body_content += """
<tr>
<td width="20%%"><span class="adminlabel">Check Description:</span></td>
<td width="80%%">
<textarea cols="90" rows="22" name="chdesc">%(ch_descr)s</textarea>
</td>
</tr>
<tr>
<td width="20%%"> </td>
<td width="80%%"><input name="chcommit" class="adminbutton" type="submit" value="Save Details" /></td>
</tr>
</table>
</form>
""" % {'ch_descr' : cgi.escape(chdesc, 1)}
output += self._create_websubmitadmin_main_menu_header()
output += self._create_adminbox(header="Enter Action Details:", datalist=[body_content])
return output
    def tmpl_display_addfunctionform(self,
                                     funcname="",
                                     funcdescr="",
                                     func_parameters=None,
                                     all_websubmit_func_parameters=None,
                                     perform_act="functionadd",
                                     user_msg="",
                                     func_docstring=None):
        """Display web form used to add a new function to Websubmit.
           @param funcname: Value of the "function" (unique function name) parameter
           @param funcdescr: Value of the "description" (function textual description) parameter
           @param func_parameters: (list/tuple) the function's existing parameters;
            each row's first item is the parameter name. Anything else is reset to ().
           @param all_websubmit_func_parameters: (list/tuple) all parameter names
            known to WebSubmit, offered in the "Add Parameter" select list.
            Anything else is reset to ().
           @param perform_act: action for form (minus websubmitadmin base url)
           @param user_msg: Any message to be displayed on screen, such as a status report for the last task, etc.
           @param func_docstring: the docstring of the displayed function (or error message if function could not be loaded). None if no docstring
           @return: HTML page body.
        """
        if type(func_parameters) not in (list, tuple):
            ## bad parameters list - reset
            func_parameters = ()
        if type(all_websubmit_func_parameters) not in (list, tuple):
            ## bad list of function parameters - reset
            all_websubmit_func_parameters = ()
        output = ""
        output += self._create_user_message_string(user_msg)
        body_content = """<form method="post" action="%(adminurl)s/%(perform_action)s">""" \
                       % {'adminurl' : WEBSUBMITADMINURL, 'perform_action': perform_act}
        ## Function Name and description:
        body_content += """<br />
        <table width="100%%" class="admin_wvar">
         <thead>
          <tr>
           <th class="adminheaderleft" colspan="2">
            %sFunction Details:
           </th>
          </tr>
         </thead>
         <tbody>
          <tr>
           <td width="20%%"> </td>
           <td width="80%%"> </td>
          </tr>
          <tr>
           <td width="20%%"><span class="adminlabel">Function Name:</span></td>
           <td width="80%%">""" % ((funcname != "" and cgi.escape(funcname, 1) + " ") or (""), )
        if perform_act == "functionadd" and funcname == "":
            ## A brand-new function - its name is editable:
            body_content += """
           <input type="text" size="30" name="funcname" />"""
        else:
            ## Existing function - name is fixed, carried along in a hidden field:
            body_content += """<span class="info">%(func_name)s</span><input type="hidden" name="funcname" value="%(func_name)s" />""" \
                            % {'func_name' : cgi.escape(funcname, 1)}
        ## NOTE(review): the "Function Description" row below never closes its
        ## <td> before </tr> -- confirm before changing the emitted HTML.
        body_content += """</td>
          </tr>
          <tr>
           <td width="20%%"><span class="adminlabel">Function Description:</span></td>
           <td width="80%%"><input type="text" size="90" name="funcdescr" value="%(func_descr)s" />
          </tr>
        """ % {'func_descr' : cgi.escape(funcdescr, 1)}
        if func_docstring:
            ## func_docstring is interpolated WITHOUT HTML-escaping; assumed to be
            ## trusted, pre-formatted HTML -- TODO confirm with callers.
            body_content += """
          <tr>
           <td width="20%%" valign="top"><span class="adminlabel">Function Documentation:</span></td>
           <td width="80%%">%(func_docstring)s</td>
          </tr>
        """ % {'func_docstring': func_docstring}
        ## Submit button name differs by mode: "funcaddcommit" when creating a new
        ## function, "funcdescreditcommit" when editing an existing description:
        body_content += """
          <tr>
           <td width="20%%" colspan="2"> </td>
          </tr>
          <tr>
           <td width="20%%" colspan="2"><input name="%s" class="adminbutton" type="submit" value="Save Details" /></td>
          </tr>
         </tbody>
        </table>""" % ( ((perform_act == "functionadd" and funcname == "") and "funcaddcommit") or ("funcdescreditcommit"), )
        if funcname not in ("", None):
            ## The function exists - show its current parameters (with per-parameter
            ## delete links) and a form for attaching another parameter:
            body_content += """<br />
        <table width="100%%" class="admin_wvar">
         <thead>
          <tr>
           <th class="adminheaderleft">
            Parameters for Function %(func_name)s:
           </th>
          </tr>
         </thead>
         <tbody>
          <tr>
           <td><br />""" % {'func_name' : cgi.escape(funcname, 1)}
            params_tableheader = ["Parameter", " "]
            params_tablebody = []
            for parameter in func_parameters:
                params_tablebody.append( ("<small>%s</small>" % (cgi.escape(parameter[0], 1),),
                                          """<small><a href="%(adminurl)s/functionedit?funcparamdelcommit=funcparamdelcommit""" \
                                          """&funcname=%(func_name)s&funceditdelparam=%(delparam_name)s">delete</a></small>""" \
                                          % { 'adminurl' : WEBSUBMITADMINURL,
                                              'func_name' : cgi.escape(funcname, 1),
                                              'delparam_name' : cgi.escape(parameter[0], 1)
                                            }
                                         ) )
            body_content += create_html_table_from_tuple(tableheader=params_tableheader, tablebody=params_tablebody)
            body_content += """</td>
          </tr>
         </tbody>
        </table>
        <br />"""
            ## Add a parameter?
            body_content += """<table width="100%%" class="admin_wvar">
         <thead>
          <tr>
           <th class="adminheaderleft" colspan="2">
            Add Parameter to Function %(func_name)s:
           </th>
          </tr>
         </thead>
         <tbody>""" % {'func_name' : cgi.escape(funcname, 1)}
            body_content += """
          <tr>
           <td width="20%%"><span class="adminlabel">Add Parameter:</span></td>
           <td width="80%%"><small>Select a parameter to add to function:</small> %s """ \
            % (create_html_select_list(select_name="funceditaddparam", option_list=all_websubmit_func_parameters),)
            body_content += """<small>-Or-</small> <small>Enter a new parameter:</small> <input type="text" """ \
                            + """name="funceditaddparamfree" size="15" /><input name="funcparamaddcommit" class="adminbutton" """ \
                            + """type="submit" value="Add" /></td>
          </tr>
         </tbody>
        </table>"""
        output += self._create_websubmitadmin_main_menu_header()
        output += self._create_adminbox(header="Enter Function Details:", datalist=[body_content])
        return output
    def tmpl_display_function_usage(self, funcname, func_usage, user_msg=""):
        """Display a table containing the details of a function's usage in the various actions of the various doctypes.
           Displayed will be information about the document type and action, and the score and step at which
           the function is called within that action.
           @param funcname: (string) function name.
           @param func_usage: (tuple) A tuple of tuples, each containing details of the function usage:
            (doctype, docname, function-step, function-score, action id, action name)
            NOTE(review): the column headers below and the "Show" link (which passes
            item [2] as the "action" URL argument) suggest the real row order may
            differ from the order documented here -- confirm against the caller.
           @param user_msg: Any message to be displayed on screen, such as a status report for the last task, etc.
           @return: (string) HTML page body.
        """
        output = ""
        body_content = ""
        header = ["Doctype", " ", "Action", " ", "Score", "Step", "Show Details"]
        tbody = []
        output += self._create_user_message_string(user_msg)
        body_content += "<br />"
        ## One table row per usage; the last column links to the submission-functions
        ## view for that doctype/action pair:
        for usage in func_usage:
            tbody.append( ("<small>%s</small>" % (cgi.escape(usage[0], 1),),
                           "<small>%s</small>" % (cgi.escape(usage[1], 1),),
                           "<small>%s</small>" % (cgi.escape(usage[2], 1),),
                           "<small>%s</small>" % (cgi.escape(usage[3], 1),),
                           "<small>%s</small>" % (cgi.escape(usage[4], 1),),
                           "<small>%s</small>" % (cgi.escape(usage[5], 1),),
                           """<small><a href="%s/doctypeconfiguresubmissionfunctions?doctype=%s&action=%s"""\
                           """&viewSubmissionFunctions=true">Show</a></small>"""\
                           % (WEBSUBMITADMINURL, cgi.escape(usage[0], 1), cgi.escape(usage[2], 1))
                          )
                        )
        body_content += create_html_table_from_tuple(tableheader=header, tablebody=tbody)
        output += self._create_websubmitadmin_main_menu_header()
        output += self._create_adminbox(header="""Usage of the "%s" Function:""" % (cgi.escape(funcname, 1),), datalist=[body_content])
        return output
    def tmpl_display_allactions(self,
                                actions,
                                user_msg=""):
        """Create the page body used for displaying all Websubmit actions.
           @param actions: A tuple of tuples containing the action id, and the action name (actid, actname).
           @param user_msg: Any message to be displayed on screen, such as a status report for the last task, etc.
           @return: HTML page body.
        """
        output = ""
        output += self._create_user_message_string(user_msg)
        body_content = """<div>
        <table>
        """
        ## One linked row per action, pointing at its edit page:
        for action in actions:
            body_content += """<tr>
            <td align="left"> <a href="%s/actionedit?actid=%s">%s: %s</a></td>
            </tr>
            """ % (WEBSUBMITADMINURL, cgi.escape(action[0], 1), cgi.escape(action[0], 1), cgi.escape(action[1], 1))
        body_content += """</table>"""
        ## Button to create new action:
        body_content += """<br /><form action="%s/actionadd" METHOD="post"><input class="adminbutton" type="submit" value="Add Action" /></form>""" \
                        % (WEBSUBMITADMINURL,)
        body_content += """</div>"""
        output += self._create_websubmitadmin_main_menu_header()
        output += self._create_adminbox(header="Select an Action:", datalist=[body_content])
        return output
    def tmpl_display_alldoctypes(self,
                                 doctypes,
                                 user_msg = ""):
        """Create the page body used for displaying all Websubmit document types.
           @param doctypes: A tuple of tuples containing the doctype id, and the doctype name (docid, docname).
           @param user_msg: Any message to be displayed on screen, such as a status report for the last task, etc.
           @return: HTML page body.
        """
        output = ""
        output += self._create_user_message_string(user_msg)
        body_content = """<div>
        <table>
        """
        ## One linked row per doctype, pointing at its configuration page:
        for doctype in doctypes:
            body_content += """<tr>
            <td align="left"> <a href="%s/doctypeconfigure?doctype=%s">%s [%s]</a></td>
            </tr>
            """ % (WEBSUBMITADMINURL, cgi.escape(doctype[0], 1), cgi.escape(doctype[1], 1), cgi.escape(doctype[0], 1))
        body_content += """</table>"""
        ## Button to create new doctype:
        body_content += """<br /><form action="%s/doctypeadd" METHOD="post"><input class="adminbutton" type="submit" value="Add New Doctype" /></form>""" \
                        % (WEBSUBMITADMINURL,)
        body_content += """</div>"""
        output += self._create_websubmitadmin_main_menu_header()
        output += self._create_adminbox(header="Select a Document Type:", datalist=[body_content])
        return output
    def tmpl_display_alljschecks(self,
                                 jschecks,
                                 user_msg = ""):
        """Create the page body used for displaying all Websubmit JavaScript Checks.
           @param jschecks: A tuple of tuples containing the check name (chname, which is unique for
            each check.)
           @param user_msg: Any message to be displayed on screen, such as a status report for the last task, etc.
           @return: HTML page body.
        """
        output = ""
        output += self._create_user_message_string(user_msg)
        body_content = """<div>
        <table>
        """
        ## One linked row per check, pointing at its edit page:
        for jscheck in jschecks:
            body_content += """<tr>
            <td align="left"> <a href="%s/jscheckedit?chname=%s">%s</a></td>
            </tr>
            """ % (WEBSUBMITADMINURL, cgi.escape(jscheck[0], 1), cgi.escape(jscheck[0], 1))
        body_content += """</table>"""
        ## Button to create new check:
        body_content += """<br /><form action="%s/jscheckadd" METHOD="post"><input class="adminbutton" type="submit" value="Add Check" /></form>""" \
                        % (WEBSUBMITADMINURL,)
        body_content += """</div>"""
        output += self._create_websubmitadmin_main_menu_header()
        output += self._create_adminbox(header="Select a Checking Function:", datalist=[body_content])
        return output
    def tmpl_display_allfunctions(self,
                                  functions,
                                  user_msg = ""):
        """Create the page body used for displaying all Websubmit functions.
           @param functions: A tuple of tuples containing the function name, and the function
            description (function, description).
           @param user_msg: Any message to be displayed on screen, such as a status report for the last task, etc.
           @return: HTML page body.
        """
        output = ""
        header = ["Function Name", "View Usage", "Edit Details"]
        output += self._create_user_message_string(user_msg)
        body_content = """<div><br />\n"""
        tbody = []
        ## One row per function: name, link to its usage page, link to its edit page:
        for function in functions:
            tbody.append((" %s" % (cgi.escape(function[0], 1),),
                          """<small><a href="%s/functionusage?funcname=%s">View Usage</a></small>""" % \
                          (WEBSUBMITADMINURL, cgi.escape(function[0], 1)),
                          """<small><a href="%s/functionedit?funcname=%s">Edit Details</a></small>""" % \
                          (WEBSUBMITADMINURL, cgi.escape(function[0], 1))
                         ))
        ## "Add New Function" button, appended beneath the table via the 'end' argument:
        button_newfunc = """<form action="%s/functionadd" METHOD="post">
                            <input class="adminbutton" type="submit" value="Add New Function" />
                            </form>""" % (WEBSUBMITADMINURL,)
        body_content += create_html_table_from_tuple(tableheader=header, tablebody=tbody, end=button_newfunc)
        body_content += """</div>"""
        output += self._create_websubmitadmin_main_menu_header()
        output += self._create_adminbox(header="WebSubmit Functions:", datalist=[body_content])
        return output
    def tmpl_display_allelements(self,
                                 elements,
                                 user_msg = ""):
        """Create the page body used for displaying all Websubmit elements.
           @param elements: A tuple of tuples containing the element name (name).
           @param user_msg: Any message to be displayed on screen, such as a status report for the last task, etc.
           @return: HTML page body.
        """
        output = ""
        output += self._create_user_message_string(user_msg)
        ## NOTE(review): "align" is not a CSS property, so style="align:center;"
        ## below has no effect -- probably meant align="center" or
        ## style="text-align:center;". Confirm before changing the emitted HTML.
        body_content = """<div>
        <table style="align:center;">
        """
        ## One linked row per element, pointing at its edit page:
        for element in elements:
            body_content += """<tr>
            <td align="left"> <a href="%s/elementedit?elname=%s">%s</a></td>
            </tr>
            """ % (WEBSUBMITADMINURL, cgi.escape(element[0], 1), cgi.escape(element[0], 1))
        body_content += """</table>"""
        ## Button to create new element:
        body_content += """<br /><form action="%s/elementadd" METHOD="post"><input class="adminbutton" type="submit" value="Add New Element" /></form>""" \
                        % (WEBSUBMITADMINURL,)
        body_content += """</div>"""
        output += self._create_websubmitadmin_main_menu_header()
        output += self._create_adminbox(header="Select an Element:", datalist=[body_content])
        return output
def tmpl_display_delete_doctype_form(self, doctype="", alldoctypes="", user_msg=""):
        """Create the page body used when removing a document type.

        If a document type has already been chosen, display a confirmation
        form for its removal; otherwise display a select-list of all known
        document types from which one can be chosen for removal.
        @param doctype: (string) the ID of the document type to be removed.
        @param alldoctypes: (list|tuple) all document types, used to build
         the selection list when no document type has yet been chosen.
        @param user_msg: Any message to be displayed on screen, such as a status report for the last task, etc.
        return: HTML page body.
        """
        output = ""
        output += self._create_user_message_string(user_msg)
        body_content = "<div>"
        if doctype not in ("", None) and type(doctype) in (str, unicode):
            ## Display the confirmation message:
            body_content += """<form method="get" action="%(adminurl)s/doctyperemove">""" \
                            """<input type="hidden" name="doctype" value="%(doc_type)s" />\n""" \
                            % { 'adminurl' : WEBSUBMITADMINURL, 'doc_type' : cgi.escape(doctype, 1) }
            ## Bug fix: a space is required between the "class" and "type"
            ## attributes of the submit button (previously they were fused
            ## into class="adminbutton"type="submit"), and the doctype is now
            ## escaped with quote=1 like everywhere else in this template:
            body_content += """<div><span class="info"><i>Really</i> remove document type "%s" and all of its configuration details?</span> <input name="doctypedeleteconfirm" class="adminbutton" """\
                            """type="submit" value="Confirm" /></div>\n</form>\n""" % (cgi.escape(doctype, 1),)
        else:
            ## just display the list of document types to delete:
            if type(alldoctypes) not in (list, tuple):
                ## bad list of document types - reset
                alldoctypes = ()
            body_content += """<form method="get" action="%(adminurl)s/doctyperemove">""" \
                            % { 'adminurl' : WEBSUBMITADMINURL }
            body_content += """
         <table width="100%%" class="admin_wvar">
          <thead>
           <tr>
            <th class="adminheaderleft">
             Select a Document Type to Remove:
            </th>
           </tr>
          </thead>
          <tbody>
           <tr>
            <td> %s <input name="doctypedelete" class="adminbutton" type="submit" value="Remove" /></td>
           </tr>
          </table>
         </form>""" \
           % (create_html_select_list(select_name="doctype", option_list=alldoctypes),)
        body_content += """</div>"""
        output += self._create_websubmitadmin_main_menu_header()
        output += self._create_adminbox(header="Remove a Document Type:", datalist=[body_content])
        return output
## DOCTYPE CONFIGURE
def tmpl_display_submission_clone_form(self,
                                       doctype,
                                       action,
                                       clonefrom_list,
                                       user_msg=""
                                       ):
        """Create the page body asking whether a submission being added to a
        document type should have its configuration cloned from the same
        submission of another document type.
        @param doctype: (string) the ID of the document type to which the submission is being added.
        @param action: (string) the ID of the submission (action) being added.
        @param clonefrom_list: (list|tuple) the document types offered as clone sources.
        @param user_msg: Any message to be displayed on screen, such as a status report for the last task, etc.
        return: HTML page body.
        """
        ## defensively reset a malformed clone-source list to an empty tuple:
        if type(clonefrom_list) not in (list, tuple):
            clonefrom_list = ()
        output = ""
        output += self._create_user_message_string(user_msg)
        ## the form always submits back to the "doctypeconfigure" handler:
        body_content = """<form method="get" action="%(adminurl)s/%(formaction)s">""" \
                       % { 'adminurl' : WEBSUBMITADMINURL , 'formaction' : cgi.escape("doctypeconfigure", 1) }
        body_content += """
      <input type="hidden" name="doctype" value="%(doctype)s" />
      <input type="hidden" name="action" value="%(action)s" />
      <table width="90%%">
       <tr>
        <td width="20%%"><span class="adminlabel">Clone from Document Type:</span></td>
        <td width="80%%">
         %(clonefrom)s
        </td>
       </tr>
       <tr>
        <td width="20%%"> </td>
        <td width="80%%">
         <input name="doctypesubmissionaddclonechosen" class="adminbutton" type="submit" value="Continue" />
         <input name="doctypesubmissionaddclonechosencancel" class="adminbutton" type="submit" value="Cancel" />
        </td>
       </tr>
      </table>
     </form>""" % { 'doctype' : cgi.escape(doctype, 1),
                    'action' : cgi.escape(action, 1),
                    'clonefrom' : create_html_select_list(select_name="doctype_cloneactionfrom",
                                                          option_list=clonefrom_list,
                                                          default_opt=("None", "Do not clone from another Document Type/Submission"),
                                                          css_style="margin: 5px 10px 5px 10px;"
                                                         )
                  }
        output += self._create_websubmitadmin_main_menu_header()
        output += self._create_adminbox(header="Add Submission '%s' to Document Type '%s':" % (action, doctype),
                                        datalist=[body_content])
        return output
def tmpl_display_delete_doctypesubmission_form(self, doctype="", action="", user_msg=""):
        """Create the confirmation page body for deleting a submission
        (action) from a document type.
        @param doctype: (string) the ID of the document type concerned.
        @param action: (string) the ID of the submission (action) to be deleted.
        @param user_msg: Any message to be displayed on screen, such as a status report for the last task, etc.
        return: HTML page body.
        """
        escaped_doctype = cgi.escape(doctype, 1)
        escaped_action = cgi.escape(action, 1)
        ## Hidden fields identifying the submission to be removed:
        confirm_form = ("""<form method="get" action="%(adminurl)s/%(formaction)s">"""
                        """<input type="hidden" name="doctype" value="%(doctype)s" />\n"""
                        """<input type="hidden" name="action" value="%(action)s" />\n""") \
                        % { 'adminurl' : WEBSUBMITADMINURL,
                            'formaction' : "doctypeconfigure",
                            'doctype' : escaped_doctype,
                            'action' : escaped_action
                          }
        ## The confirmation question plus the confirm button:
        confirm_form += """<div><span class="info"><i>Really</i> remove the Submission "%s" and all related details from Document Type "%s"?</span> <input name="doctypesubmissiondeleteconfirm" class="adminbutton" """ \
                        """type="submit" value="Confirm" /></div>\n</form>\n""" % (escaped_action, escaped_doctype)
        body_content = """<div>""" + confirm_form + """</div>"""
        output = self._create_user_message_string(user_msg)
        output += self._create_websubmitadmin_main_menu_header()
        output += self._create_adminbox(header="""Delete Submission "%s" from Document Type "%s"\"""" % (action, doctype), datalist=[body_content])
        return output
def tmpl_display_submissiondetails_form(self,
                                        doctype,
                                        action,
                                        displayed="",
                                        buttonorder="",
                                        statustext="",
                                        level="",
                                        score="",
                                        stpage="",
                                        endtxt="",
                                        cd="",
                                        md="",
                                        user_msg="",
                                        perform_act="doctypeconfigure",
                                        saveaction="edit"
                                        ):
        """Create the page body used for editing (or entering, when adding)
        the details of one submission (action) of a document type.
        @param doctype: (string) the ID of the document type owning the submission.
        @param action: (string) the ID of the submission (action) itself.
        @param displayed: (string) "Y"/"N" - is the submission shown on the start page?
        @param buttonorder: (string) order of the submission's button.
        @param statustext: (string) status text of the submission.
        @param level: (string) level of the submission.
        @param score: (string) score of the submission.
        @param stpage: (string) stpage of the submission.
        @param endtxt: (string) end text of the submission.
        @param cd: creation date (shown read-only when present).
        @param md: last-modification date (shown read-only when present).
        @param user_msg: Any message to be displayed on screen, such as a status report for the last task, etc.
        @param perform_act: (string) the admin handler the form submits to.
        @param saveaction: (string) "edit" selects the edit-commit button name;
         any other value selects the add-commit button name.
        return: HTML page body.
        """
        output = ""
        output += self._create_user_message_string(user_msg)
        body_content = """<form method="get" action="%(adminurl)s/%(action)s">""" \
                       % { 'adminurl' : WEBSUBMITADMINURL , 'action' : cgi.escape(perform_act, 1) }
        body_content += """
      <table width="90%%">"""
        ## show the creation date row only when a creation date is known:
        if cd not in ("", None):
            body_content += """
       <tr>
        <td width="20%%"><span class="adminlabel">Creation Date:</span></td>
        <td width="80%%"><span class="info">%s</span></td>
       </tr>""" % (cgi.escape(str(cd), 1),)
        ## show the modification date row only when a modification date is known:
        if md not in ("", None):
            body_content += """
       <tr>
        <td width="20%%"><span class="adminlabel">Last Modification Date:</span></td>
        <td width="80%%"><span class="info">%s</span></td>
       </tr>""" % (cgi.escape(str(md), 1),)
        ## pre-select the Yes/No option matching the current "displayed" flag:
        body_content += """
       <tr>
        <td width="20%%"><span class="adminlabel">Submission Displayed on Start Page:</span></td>
        <td width="80%%">
         <select name="displayed">
          <option value="Y"%s>Yes</option>
          <option value="N"%s>No</option>
         </select>
        </td>
       </tr>""" % ( (displayed == "Y" and " selected") or (""),
                    (displayed == "N" and " selected") or ("")
                  )
        body_content += """
       <tr>
        <td width="20%%"><span class="adminlabel">Button Order:</span></td>
        <td width="80%%">
         <input type="text" size="4" name="buttonorder" value="%(buttonorder)s" />
        </td>
       </tr>
       <tr>
        <td width="20%%"><span class="adminlabel">Status Text:</span></td>
        <td width="80%%">
         <input type="text" size="35" name="statustext" value="%(statustext)s" />
        </td>
       </tr>
       <tr>
        <td width="20%%"><span class="adminlabel">Level:</span></td>
        <td width="80%%">
         <input type="text" size="4" name="level" value="%(level)s" />
        </td>
       </tr>
       <tr>
        <td width="20%%"><span class="adminlabel">Score:</span></td>
        <td width="80%%">
         <input type="text" size="4" name="score" value="%(score)s" />
        </td>
       </tr>
       <tr>
        <td width="20%%"><span class="adminlabel">Stpage:</span></td>
        <td width="80%%">
         <input type="text" size="4" name="stpage" value="%(stpage)s" />
        </td>
       </tr>
       <tr>
        <td width="20%%"><span class="adminlabel">End Text:</span></td>
        <td width="80%%">
         <input type="text" size="35" name="endtxt" value="%(endtxt)s" />
        </td>
       </tr>
       <tr>
        <td width="20%%"> </td>
        <td width="80%%">
         <input type="hidden" name="doctype" value="%(doctype)s" />
         <input type="hidden" name="action" value="%(action)s" />
         <input name="%(savebutton)s" class="adminbutton" type="submit" value="Save Details" />
         <input name="doctypesubmissiondetailscancel" class="adminbutton" type="submit" value="Cancel" />
        </td>
       </tr>
      </table>
     </form>
    """ % { 'doctype' : cgi.escape(doctype, 1),
            'action' : cgi.escape(action, 1),
            'displayed' : cgi.escape(displayed, 1),
            'buttonorder' : cgi.escape(buttonorder, 1),
            'statustext' : cgi.escape(statustext, 1),
            'level' : cgi.escape(level, 1),
            'score' : cgi.escape(score, 1),
            'stpage' : cgi.escape(stpage, 1),
            'endtxt' : cgi.escape(endtxt, 1),
            'cd' : cgi.escape(cd, 1),
            'md' : cgi.escape(md, 1),
            'savebutton' : ((saveaction == "edit" and "doctypesubmissioneditdetailscommit") or ("doctypesubmissionadddetailscommit"))
          }
        output += self._create_websubmitadmin_main_menu_header()
        output += self._create_adminbox(header="Enter Details of '%s' Submission of '%s' Document Type:" % (action, doctype),
                                        datalist=[body_content])
        return output
def tmpl_display_doctypedetails_form(self, doctype="", doctypename="", doctypedescr="", cd="", md="", clonefrom="", \
                                     alldoctypes="", user_msg="", perform_act="doctypeadd"):
        """Create the page body for adding a new document type or editing an
        existing one's details.

        When perform_act is "doctypeadd" the ID field is editable and a
        "clone from" select list is offered; otherwise the ID is read-only,
        the creation/modification dates are shown, and a Cancel button is
        added.
        @param doctype: (string) the ID of the document type.
        @param doctypename: (string) the name of the document type.
        @param doctypedescr: the description of the document type (may be None).
        @param cd: creation date (shown only when editing and present).
        @param md: last-modification date (shown only when editing and present).
        @param clonefrom: pre-selected value(s) for the clone-from select list.
        @param alldoctypes: (list|tuple) all document types (clone candidates).
        @param user_msg: Any message to be displayed on screen, such as a status report for the last task, etc.
        @param perform_act: (string) the admin handler the form posts to.
        return: HTML page body.
        """
        output = ""
        body_content = ""
        if perform_act == "doctypeadd":
            formheader = "Add a new Document Type:"
        else:
            formheader = "Edit Document Type Details:"
        output += self._create_user_message_string(user_msg)
        if type(alldoctypes) not in (list, tuple):
            ## bad list of document types - reset
            alldoctypes = ()
        body_content += """<form method="post" action="%(adminurl)s/%(action)s">""" \
                        % { 'adminurl' : WEBSUBMITADMINURL , 'action' : cgi.escape(perform_act, 1) }
        body_content += """
      <table width="90%%">
       <tr>
        <td width="20%%"><span class="adminlabel">Document Type ID:</span></td>
        <td width="80%%">"""
        ## an editable ID field when adding; a read-only ID plus hidden field when editing:
        if perform_act == "doctypeadd":
            body_content += """<input type="text" size="15" name="doctype" value="%(doctype_id)s" />""" \
                            % {'doctype_id' : cgi.escape(doctype, 1)}
        else:
            body_content += """<span class="info">%(doctype_id)s</span><input type="hidden" name="doctype" value="%(doctype_id)s" />""" \
                            % {'doctype_id' : cgi.escape(doctype, 1)}
        body_content += """</td>
       </tr>"""
        ## dates are only meaningful for an existing document type:
        if perform_act != "doctypeadd":
            if cd not in ("", None):
                body_content += """
       <tr>
        <td width="20%%"><span class="adminlabel">Creation Date:</span></td>
        <td width="80%%"><span class="info">%s</span></td>
       </tr>""" % (cgi.escape(str(cd), 1),)
            if md not in ("", None):
                body_content += """
       <tr>
        <td width="20%%"><span class="adminlabel">Last Modification Date:</span></td>
        <td width="80%%"><span class="info">%s</span></td>
       </tr>""" % (cgi.escape(str(md), 1), )
        body_content += """
       <tr>
        <td width="20%%"><span class="adminlabel">Document Type Name:</span></td>
        <td width="80%%"><input type="text" size="60" name="doctypename" value="%(doctype_name)s" /></td>
       </tr>
       <tr>
        <td width="20%%"><span class="adminlabel">Document Type Description:</span></td>
        <td width="80%%"><textarea name="doctypedescr" cols="60" rows="8">%(doctype_description)s</textarea></td>
       </tr>""" % { 'doctype_name' : cgi.escape(doctypename, 1),
                    'doctype_description' : "%s" % ((doctypedescr is not None and cgi.escape(str(doctypedescr), 1)) or ("")),
                  }
        if perform_act == "doctypeadd":
            body_content += """
       <tr>
        <td width="20%%"><span class="adminlabel">Document Type to Clone:</span></td>
        <td width="80%%">%(doctype_select_list)s</td>
       </tr>""" % { 'doctype_select_list' :
                     create_html_select_list(select_name="clonefrom",
                                             option_list=alldoctypes,
                                             selected_values=clonefrom,
                                             default_opt=('None', 'Select:')
                                            )
                  }
        body_content += """
       <tr>
        <td width="20%%"> </td>
        <td width="80%%">
         <input name="doctypedetailscommit" class="adminbutton" type="submit" value="Save Details" />"""
        if perform_act != "doctypeadd":
            ## add a cancel button if this is not a call to add a new document type:
            body_content += """
         <input name="doctypedetailscommitcancel" class="adminbutton" type="submit" value="cancel" />"""
        body_content += """
        </td>
       </tr>
      </table>
     </form>\n"""
        output += self._create_websubmitadmin_main_menu_header()
        output += self._create_adminbox(header=formheader, datalist=[body_content])
        return output
def _tmpl_configire_doctype_overview_create_doctype_details(self, doctype="", doctypename="", doctypedescr="",
                                                            doctype_cdate="", doctype_mdate="", perform_act="doctypeconfigure"
                                                           ):
        """Display the details of a document type (its ID, dates, name and
        description) as a table, with a button leading to the edit form.
        @param doctype: (string) the ID of the document type.
        @param doctypename: (string) the name of the document type.
        @param doctypedescr: the description of the document type.
        @param doctype_cdate: creation date (blank when empty/None).
        @param doctype_mdate: modification date (blank when empty/None).
        @param perform_act: (string) the admin handler the edit button posts to.
        return: HTML snippet (a table).
        """
        ## NOTE(review): the method name misspells "configure" as "configire";
        ## the caller (tmpl_configure_doctype_overview) uses this spelling, so
        ## renaming requires changing both sites.
        ## NOTE(review): doctypedescr is embedded without cgi.escape - it
        ## looks intentional (descriptions may contain HTML), but confirm the
        ## value is sanitised upstream.
        txt = """
     <table class="admin_wvar" rules="rows" width="100%%">
      <thead>
       <tr style="border-bottom: hidden">
        <th class="adminheaderleft" colspan="2">
         %(doctype_id)s Document Type Details:
        </th>
       </tr>
      </thead>
      <tbody>
       <tr style="border-top: hidden; border-bottom: hidden">
        <td width="20%%"> </td>
        <td width="80%%"> </td>
       </tr>
       <tr>
        <td width="20%%" style="border-bottom: hidden"><span class="adminlabel">Document Type ID:</span></td>
        <td width="80%%"><span class="info">%(doctype_id)s</span></td>
       </tr>
       <tr>
        <td width="20%%" style="border-bottom: hidden"><span class="adminlabel">Creation Date:</span></td>
        <td width="80%%"><span class="info">%(doctype_cdate)s</span></td>
       </tr>
       <tr>
        <td width="20%%" style="border-bottom: hidden"><span class="adminlabel">Modification Date:</span></td>
        <td width="80%%"><span class="info">%(doctype_mdate)s</span></td>
       </tr>
       <tr>
        <td width="20%%" style="border-top: hidden; border-bottom: hidden"><span class="adminlabel">Document Type Name:</span></td>
        <td width="80%%"><span>%(doctype_name)s</span></td>
       </tr>
       <tr style="border-bottom: hidden">
        <td width="20%%" style="border-top: hidden"><span class="adminlabel">Document Type Description:</span></td>
        <td width="80%%"><span>%(doctype_descr)s</span></td>
       </tr>
       <tr style="border-top: hidden">
        <td colspan="2">
         <form method="post" action="%(adminurl)s/%(performaction)s">
          <input name="doctype" type="hidden" value="%(doctype_id)s" />
          <input name="doctypedetailsedit" class="adminbutton" type="submit" value="Edit Details" />
         </form>
        </td>
       </tr>
      </tbody>
     </table>\n""" % { 'doctype_id' : cgi.escape(doctype, 1),
                       'doctype_cdate' : "%s" % ((doctype_cdate not in ("", None) and cgi.escape(str(doctype_cdate), 1)) or (""),),
                       'doctype_mdate' : "%s" % ((doctype_mdate not in ("", None) and cgi.escape(str(doctype_mdate), 1)) or (""),),
                       'doctype_name' : cgi.escape(doctypename, 1),
                       'doctype_descr' : doctypedescr,
                       'performaction' : cgi.escape(perform_act, 1),
                       'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1)
                     }
        return txt
def _tmpl_configure_doctype_overview_create_categories_view(self,
                                                            doctype="",
                                                            doctype_categories="",
                                                            jumpcategout="",
                                                            perform_act="doctypeconfigure"
                                                           ):
        """Display the details of the categories for a given document type.

        Each category row carries move-up/move-down arrows, "jump" arrows
        used to relocate a category to an arbitrary position (driven by the
        jumpcategout parameter), and edit/delete buttons.  A form for adding
        a new category is appended below the table.
        @param doctype: (string) the ID of the document type.
        @param doctype_categories: (list|tuple) of (id, description, score) tuples.
        @param jumpcategout: (string) the ID of a category currently being
         "jumped" to a new position, or ""/None when no jump is in progress.
        @param perform_act: (string) the admin handler all links/forms target.
        return: HTML snippet (a table).
        """
        ## sanity checking for categories list:
        if type(doctype_categories) not in (list, tuple):
            doctype_categories = ()
        txt = """
     <table class="admin_wvar" width="100%%">
      <thead>
       <tr>
        <th class="adminheaderleft">
         Categories of Document Type %(doctype_id)s:
        </th>
       </tr>
      </thead>
      <tbody>
       <tr>
        <td><br />""" % { 'doctype_id' : cgi.escape(doctype, 1) }
        ## NOTE(review): modify_categ_txt is assigned here but never used in
        ## this method.
        modify_categ_txt = ""
        try:
            categs_tableheader = ["Categ ID", "Description", " ", " ", " ", " ", " ", " "]
            categs_tablebody = []
            num_categs = len(doctype_categories)
            for i in range(0, num_categs):
                this_categname = doctype_categories[i][0]
                this_categdescr = doctype_categories[i][1]
                this_categscore = doctype_categories[i][2]
                t_row = [""" %s""" % cgi.escape(this_categname, 1),
                         """ %s""" % cgi.escape(this_categdescr, 1)]
                ## up arrow:
                if i != 0:
                    t_row += ["""<a href="%(adminurl)s/%(performaction)s?doctype=%(doctype)s&categid=%(categid)s&"""\
                              """movecategup=1">"""\
                              """<img border="0" src="%(siteurl)s/img/smallup.gif" title="Move Category Up" /></a>""" \
                              % { 'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1),
                                  'performaction' : cgi.escape(perform_act, 1),
                                  'siteurl' : cgi.escape(CFG_SITE_URL, 1),
                                  'doctype' : cgi.escape(doctype, 1),
                                  'categid' : cgi.escape(str(this_categname), 1),
                                }
                             ]
                else:
                    ## this is the first category - don't provide an arrow to move it up
                    t_row += [" "]
                ## down arrow:
                if i != num_categs - 1:
                    t_row += ["""<a href="%(adminurl)s/%(performaction)s?doctype=%(doctype)s&categid=%(categid)s&"""\
                              """movecategdown=1">"""\
                              """<img border="0" src="%(siteurl)s/img/smalldown.gif" title="Move Category Down" /></a>""" \
                              % { 'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1),
                                  'performaction' : cgi.escape(perform_act, 1),
                                  'siteurl' : cgi.escape(CFG_SITE_URL, 1),
                                  'doctype' : cgi.escape(doctype, 1),
                                  'categid' : cgi.escape(str(this_categname), 1),
                                }
                             ]
                else:
                    ## this is the last category - don't provide an arrow to move it down
                    t_row += [" "]
                ## 'jump-out' arrow:
                if jumpcategout in ("", None):
                    ## provide "move from" arrows for all categories:
                    if num_categs > 1:
                        t_row += ["""<a href="%(adminurl)s/%(performaction)s?doctype=%(doctype)s&jumpcategout=%(categid)s">"""\
                                  """<img border="0" src="%(siteurl)s/img/move_from.gif" title="Move category [%(categid)s] """\
                                  """from score %(categscore)s" /></a>"""\
                                  % { 'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1),
                                      'performaction' : cgi.escape(perform_act, 1),
                                      'siteurl' : cgi.escape(CFG_SITE_URL, 1),
                                      'doctype' : cgi.escape(doctype, 1),
                                      'categid' : cgi.escape(str(this_categname), 1),
                                      'categscore' : cgi.escape(str(this_categscore), 1),
                                    }
                                 ]
                    else:
                        t_row += [" "]
                else:
                    ## there is a value for "jumpcategout", so a "moveto" button must be provided
                    if num_categs > 1:
                        ## is this the categ that will be moved?
                        if jumpcategout == this_categname:
                            ## yes it is - no "move-to" arrow here
                            t_row += [" "]
                        else:
                            ## no it isn't - "move-to" arrow here
                            t_row += ["""<a href="%(adminurl)s/%(performaction)s?doctype=%(doctype)s"""\
                                      """&jumpcategout=%(jumpcategout)s&jumpcategin=%(categid)s">"""\
                                      """<img border="0" src="%(siteurl)s/img/move_to.gif" title="Move category"""\
                                      """ [%(jumpcategout)s] to this location" /></a>"""\
                                      % { 'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1),
                                          'performaction' : cgi.escape(perform_act, 1),
                                          'siteurl' : cgi.escape(CFG_SITE_URL, 1),
                                          'doctype' : cgi.escape(doctype, 1),
                                          'categid' : cgi.escape(str(this_categname), 1),
                                          'jumpcategout' : cgi.escape(str(jumpcategout), 1),
                                        }
                                     ]
                    else:
                        ## there is only 1 category - cannot perform a "move"
                        t_row += [" "]
                ## 'edit' button:
                t_row += ["""<form class="hyperlinkform" method="post" action="%(adminurl)s/%(performaction)s">""" \
                          """<input class="hyperlinkformHiddenInput" name="doctype" value="%(doctype)s" type""" \
                          """="hidden" />""" \
                          """<input class="hyperlinkformHiddenInput" name="categid" value="%(category)s" type""" \
                          """="hidden" />""" \
                          """<input type="submit" name="doctypecategoryedit" value="edit" """\
                          """class="hyperlinkformSubmitButton" />""" \
                          """</form>""" % { 'doctype' : cgi.escape(doctype, 1),
                                            'category' : cgi.escape(str(this_categname), 1),
                                            'performaction' : cgi.escape(perform_act, 1),
                                            'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1),
                                          }
                         ]
                ## 'delete' button:
                t_row += ["""<form class="hyperlinkform" method="post" action="%(adminurl)s/%(performaction)s">""" \
                          """<input class="hyperlinkformHiddenInput" name="doctype" value="%(doctype)s" type""" \
                          """="hidden" />""" \
                          """<input class="hyperlinkformHiddenInput" name="categid" value="%(category)s" type""" \
                          """="hidden" />""" \
                          """<input type="submit" name="doctypecategorydelete" value="delete" """\
                          """class="hyperlinkformSubmitButton" />""" \
                          """</form>""" % { 'doctype' : cgi.escape(doctype, 1),
                                            'category' : cgi.escape(str(this_categname), 1),
                                            'performaction' : cgi.escape(perform_act, 1),
                                            'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1),
                                          }
                         ]
                ## 'jumping-out from' arrow:
                if jumpcategout not in ("", None):
                    if jumpcategout == this_categname and num_categs > 1:
                        t_row += ["""<img border="0" src="%(siteurl)s/img/move_from.gif" title="Moving category """\
                                  """[%(categid)s] from this location (score %(categscore)s)" />"""\
                                  % { 'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1),
                                      'performaction' : cgi.escape(perform_act, 1),
                                      'siteurl' : cgi.escape(CFG_SITE_URL, 1),
                                      'categid' : cgi.escape(str(this_categname), 1),
                                      'categscore' : cgi.escape(str(this_categscore), 1),
                                    }
                                 ]
                    else:
                        t_row += [" "]
                else:
                    t_row += [" "]
                ## finally, append the newly created row to the tbody list:
                categs_tablebody.append(t_row)
            txt += create_html_table_from_tuple(tableheader=categs_tableheader, tablebody=categs_tablebody)
        except IndexError:
            ## categs tuple was not in expected format ((sname, lname), (sname, lname)[, ...])
            txt += """<span class="info">Unable to correctly display categories</span>"""
        txt += """</td>
       </tr>
       <tr>
        <td><br />
        </td>
       </tr>"""
        ## form to add a new category:
        txt += """
       <tr>
        <td>
         <span class="adminlabel">Add a new Category:</span><br />
         <form method="post" action="%(adminurl)s/%(formaction)s">
          <input name="doctype" type="hidden" value="%(doctype)s" />
          <small style="color: navy;">ID: </small>
          <input style="margin: 5px 10px 5px 10px;" name="categid" type="text" size="10" />
          <small style="color: navy;">Description: </small>
          <input style="margin: 5px 10px 5px 10px;" name="categdescr" type="text" size="25" />
          <input name="doctypecategoryadd" class="adminbutton" type="submit" value="Add Category" />
         </form>
        </td>
       </tr>
      </tbody>
     </table>""" % { 'formaction' : cgi.escape(perform_act, 1),
                     'doctype' : cgi.escape(doctype, 1),
                     'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1)
                   }
        return txt
def _tmpl_configure_doctype_overview_create_submissions_view(self,
                                                             doctype="",
                                                             doctype_submissions="",
                                                             add_actions_list=None,
                                                             perform_act="doctypeconfigure"
                                                            ):
        """Display the details of the submissions for a given document type.

        Each submission row shows its stored attributes plus buttons for
        viewing its interface/functions and for editing/deleting it.  A
        select list of not-yet-used actions that can still be added as new
        submissions is appended below the table.
        @param doctype: (string) the ID of the document type.
        @param doctype_submissions: (list|tuple) of submission tuples;
         indexes 2-12 are read (action id, displayed, nbpg, dates, button
         order, status text, level, score, stpage, end text).
        @param add_actions_list: (list|tuple) actions that may still be added.
        @param perform_act: (string) the admin handler the edit/delete/add forms target.
        return: HTML snippet (a table).
        """
        ## sanity checking for submissions list:
        if type(doctype_submissions) not in (list, tuple):
            doctype_submissions = ()
        if type(add_actions_list) not in (list, tuple):
            add_actions_list = ()
        txt = """
     <table class="admin_wvar" width="100%%">
      <thead>
       <tr>
        <th class="adminheaderleft">
         Submissions of Document Type %(doctype_id)s:
        </th>
       </tr>
      </thead>
      <tbody>
       <tr>
        <td><br />""" % { 'doctype_id' : cgi.escape(doctype, 1) }
        try:
            submissions_tableheader = ["Action", "Creation<br />Date", "Modification<br />Date", "Displayed?", "No.<br />Pages", \
                                       "Button<br />Order", "Status<br />Text", "Level", "Score", "Stpage", "End<br />Text", \
                                       "View Submission<br />Interface", "View Submission<br />Functions", \
                                       "Edit Submission<br />Details", "Delete<br />Submission"]
            submissions_tablebody = []
            ## one table row per submission; subm[2] is the action ID reused
            ## in every per-row form below:
            for subm in doctype_submissions:
                submissions_tablebody.append( ("%s" % (cgi.escape(str(subm[2]), 1),),
                                               "%s" % (cgi.escape(str(subm[5]), 1),),
                                               "%s" % (cgi.escape(str(subm[6]), 1),),
                                               "%s" % (cgi.escape(str(subm[3]), 1),),
                                               "%s" % (cgi.escape(str(subm[4]), 1),),
                                               "%s" % (cgi.escape(str(subm[7]), 1),),
                                               "%s" % (cgi.escape(str(subm[8]), 1),),
                                               "%s" % (cgi.escape(str(subm[9]), 1),),
                                               "%s" % (cgi.escape(str(subm[10]), 1),),
                                               "%s" % (cgi.escape(str(subm[11]), 1),),
                                               "%s" % (cgi.escape(str(subm[12]), 1),),
                                               """<form class="hyperlinkform" method="get" action="%(adminurl)s/doctypeconfiguresubmissionpages">""" \
                                               """<input class="hyperlinkformHiddenInput" name="doctype" value="%(doctype)s" type""" \
                                               """="hidden" />""" \
                                               """<input class="hyperlinkformHiddenInput" name="action" value="%(action)s" type""" \
                                               """="hidden" />""" \
                                               """<input type="submit" name="viewSubmissionInterface" value="view interface" """\
                                               """class="hyperlinkformSubmitButton" />""" \
                                               """</form>""" % { 'doctype' : cgi.escape(doctype, 1),
                                                                 'action' : cgi.escape(str(subm[2]), 1),
                                                                 'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1)
                                                               },
                                               """<form class="hyperlinkform" method="get" action="%(adminurl)s/doctypeconfiguresubmissionfunctions">""" \
                                               """<input class="hyperlinkformHiddenInput" name="doctype" value="%(doctype)s" type""" \
                                               """="hidden" />""" \
                                               """<input class="hyperlinkformHiddenInput" name="action" value="%(action)s" type""" \
                                               """="hidden" />""" \
                                               """<input type="submit" name="viewSubmissionFunctions" value="view functions" """\
                                               """class="hyperlinkformSubmitButton" />""" \
                                               """</form>""" % { 'doctype' : cgi.escape(doctype, 1),
                                                                 'action' : cgi.escape(str(subm[2]), 1),
                                                                 'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1)
                                                               },
                                               """<form class="hyperlinkform" method="get" action="%(adminurl)s/%(formaction)s">""" \
                                               """<input class="hyperlinkformHiddenInput" name="doctype" value="%(doctype)s" type""" \
                                               """="hidden" />""" \
                                               """<input class="hyperlinkformHiddenInput" name="action" value="%(action)s" type""" \
                                               """="hidden" />""" \
                                               """<input type="submit" name="doctypesubmissionedit" value="edit submission" """\
                                               """class="hyperlinkformSubmitButton" />""" \
                                               """</form>""" % { 'doctype' : cgi.escape(doctype, 1),
                                                                 'action' : cgi.escape(str(subm[2]), 1),
                                                                 'formaction' : cgi.escape(perform_act, 1),
                                                                 'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1)
                                                               },
                                               """<form class="hyperlinkform" method="get" action="%(adminurl)s/%(formaction)s">""" \
                                               """<input class="hyperlinkformHiddenInput" name="doctype" value="%(doctype)s" type""" \
                                               """="hidden" />""" \
                                               """<input class="hyperlinkformHiddenInput" name="action" value="%(action)s" type""" \
                                               """="hidden" />""" \
                                               """<input type="submit" name="doctypesubmissiondelete" value="delete submission" """\
                                               """class="hyperlinkformSubmitButton" />""" \
                                               """</form>""" % { 'doctype' : cgi.escape(doctype, 1),
                                                                 'action' : cgi.escape(str(subm[2]), 1),
                                                                 'formaction' : cgi.escape(perform_act, 1),
                                                                 'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1)
                                                               }
                                             ) )
            txt += create_html_table_from_tuple(tableheader=submissions_tableheader, tablebody=submissions_tablebody)
        except IndexError:
            ## submissions tuple was not in expected format
            txt += """<span class="info">Unable to correctly display details of submissions</span>"""
        txt += """</td>
       </tr>"""
        ## now, display a list of actions that can be added
        txt += """
       <tr>
        <td>
         <span class="adminlabel">Add a new Submission:</span><br />"""
        if len(add_actions_list) > 0:
            txt += """
         <form method="get" action="%(adminurl)s/%(performaction)s">
          <input type="hidden" name="doctype" value="%(doctype)s" />
          %(submissions_list)s
          <input name="doctypesubmissionadd" class="adminbutton" type="submit" value="Add Submission" />
         </form>""" \
            % { 'doctype' : cgi.escape(doctype, 1),
                'performaction' : cgi.escape(perform_act, 1),
                'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1),
                'submissions_list' : create_html_select_list(select_name="action", option_list=add_actions_list,
                                                             css_style="margin: 5px 10px 5px 10px;")
              }
        else:
            txt += """
         <br />
         <span class="info">No Available Actions to Add</span>"""
        txt += """
        </td>
       </tr>
      </tbody>
     </table>"""
        return txt
def _tmpl_configure_doctype_overview_create_referees_view(self,
                                                          doctype="",
                                                          doctype_referees="",
                                                          perform_act="doctypeconfigure"
                                                         ):
        """Display the details of the referees of the various categories of a
        given document type, plus a button leading to referee management.
        @param doctype: (string) the ID of the document type.
        @param doctype_referees: (dict) role name -> (category id, category
         description, referee list); a category id of "*" denotes the
         general/catch-all role.
        @param perform_act: (string) the admin handler (kept for interface
         symmetry with the sibling *_view helpers).
        return: HTML snippet (a table).
        """
        ## sanity checking for doctype_referees:
        if type(doctype_referees) is not dict:
            doctype_referees = {}
        txt = """
     <table class="admin_wvar" width="100%%">
      <thead>
       <tr>
        <th class="adminheaderleft">
         Manage Referees for Document Type %(doctype_id)s:
        </th>
       </tr>
      </thead>
      <tbody>
       <tr>
        <td><br />""" % { 'doctype_id' : cgi.escape(doctype, 1) }
        try:
            referees_tableheader = ["Referee"]
            referees_tablebody = []
            ## sort role names for a stable display order (Python 2 idiom):
            referee_roles = doctype_referees.keys()
            referee_roles.sort()
            for role in referee_roles:
                if doctype_referees[role][0] == "*":
                    referees_tablebody.append( ("""<span style="color: navy;">%s</span>""" % (cgi.escape(doctype_referees[role][1], 1)),
                                                " ") )
                else:
                    referees_tablebody.append( ("""<span style="color: navy;">%s (%s)</span>""" % (cgi.escape(doctype_referees[role][0], 1), \
                                                                                                   cgi.escape(doctype_referees[role][1], 1)),
                                                " ") )
                ## NOTE(review): each referee row below is a 1-element tuple
                ## while the role rows above are 2-element tuples - confirm
                ## create_html_table_from_tuple tolerates ragged rows.
                for referee in doctype_referees[role][2]:
                    referees_tablebody.append( ("""<small>%s</small>""" % (cgi.escape(referee[1], 1),),))
            txt += create_html_table_from_tuple(tableheader=referees_tableheader, tablebody=referees_tablebody)
        except IndexError:
            ## referees dictionary was not in expected format
            txt += """<span class="info">Unable to correctly display details of referees</span>"""
        ## NOTE(review): unlike the other views this form posts to
        ## "referees.py" under WEBSUBMITADMINURL_OLD, and the 'performaction'
        ## dict key is not referenced by the format string.
        txt += """
        </td>
       </tr>
       <tr>
        <td>
         <form method="post" action="%(adminurl)s/referees.py">
          <input type="hidden" name="doctype" value="%(doctype_id)s" />
          <input name="managerefereesdoctype" class="adminbutton" type="submit" value="Manage Referees" />
         </form>
        </td>
       </tr>
      </tbody>
     </table>""" % { 'doctype_id' : cgi.escape(doctype, 1),
                     'performaction' : cgi.escape(perform_act, 1),
                     'adminurl' : cgi.escape(WEBSUBMITADMINURL_OLD, 1)
                   }
        return txt
def tmpl_configure_doctype_overview(self, doctype="", doctypename="", doctypedescr="", doctype_cdate="", doctype_mdate="", \
                                    doctype_categories="", jumpcategout="", doctype_submissions="", \
                                    doctype_referees="", user_msg="", add_actions_list=None, perform_act="doctypeconfigure"):
        """Create the "configure document type" overview page body by
        stitching together the detail, submissions, categories and referees
        sub-views, separated by horizontal rules.
        @param doctype: (string) the ID of the document type.
        @param doctypename: (string) the name of the document type.
        @param doctypedescr: the description of the document type.
        @param doctype_cdate: creation date of the document type.
        @param doctype_mdate: modification date of the document type.
        @param doctype_categories: (list|tuple) the document type's categories.
        @param jumpcategout: (string) category currently being moved, if any.
        @param doctype_submissions: (list|tuple) the document type's submissions.
        @param doctype_referees: (dict) the document type's referees per role.
        @param user_msg: Any message to be displayed on screen, such as a status report for the last task, etc.
        @param add_actions_list: (list|tuple) actions that can still be added as submissions.
        @param perform_act: (string) the admin handler the sub-views target.
        return: HTML page body.
        """
        ## sanity checking:
        if type(doctype_categories) not in (list, tuple):
            doctype_categories = ()
        if type(doctype_submissions) not in (list, tuple):
            doctype_submissions = ()
        if type(add_actions_list) not in (list, tuple):
            add_actions_list = ()
        output = ""
        body_content = ""
        output += self._create_user_message_string(user_msg)
        ## table containing document type details:
        body_content += """<br />%s""" % (self._tmpl_configire_doctype_overview_create_doctype_details(doctype=doctype,
                                                                                                       doctypename=doctypename,
                                                                                                       doctypedescr=doctypedescr,
                                                                                                       doctype_cdate=doctype_cdate,
                                                                                                       doctype_mdate=doctype_mdate,
                                                                                                       perform_act=perform_act
                                                                                                      )
                                         )
        ## Bug fix: the <hr> separator strings are NOT %-formatted, so the
        ## previous "80%%" reached the browser literally; a single "%" is the
        ## correct CSS width here.
        body_content += """<hr style="width: 80%;" />"""
        ## this document type's submissions:
        body_content += """<br />%s""" % (self._tmpl_configure_doctype_overview_create_submissions_view(doctype=doctype,
                                                                                                        doctype_submissions=doctype_submissions,
                                                                                                        add_actions_list=add_actions_list,
                                                                                                        perform_act=perform_act
                                                                                                       )
                                         )
        body_content += """<hr style="width: 80%;" />"""
        ## table containing document type's categories:
        body_content += """<br />%s""" % (self._tmpl_configure_doctype_overview_create_categories_view(doctype=doctype,
                                                                                                       doctype_categories=doctype_categories,
                                                                                                       jumpcategout=jumpcategout,
                                                                                                       perform_act=perform_act
                                                                                                      )
                                         )
        body_content += """<hr style="width: 80%;" />"""
        ## button for allocation of referees:
        body_content += """<br />%s""" % (self._tmpl_configure_doctype_overview_create_referees_view(doctype=doctype,
                                                                                                     doctype_referees=doctype_referees,
                                                                                                     perform_act=perform_act
                                                                                                    )
                                         )
        output += self._create_websubmitadmin_main_menu_header()
        output += self._create_adminbox(header="Configure Document Type:", datalist=[body_content])
        return output
def tmpl_display_edit_category_form(self, doctype, categid, categdescr, user_msg="", perform_act="doctypeconfigure"):
        """Create the page body for editing the description of one category
        of a document type.
        @param doctype: (string) the ID of the document type owning the category.
        @param categid: (string) the ID of the category being edited.
        @param categdescr: (string) the current description of the category.
        @param user_msg: Any message to be displayed on screen, such as a status report for the last task, etc.
        @param perform_act: (string) the admin handler the form submits to.
        return: HTML page body.
        """
        ## escape every value once up front - each is embedded in HTML:
        form_values = {
                       'categid' : cgi.escape(categid, 1),
                       'doctype' : cgi.escape(doctype, 1),
                       'categdescr' : cgi.escape(categdescr, 1),
                       'performaction' : cgi.escape(perform_act, 1),
                       'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1)
                      }
        body_content = "<div>" + """
     <form method="get" action="%(adminurl)s/%(performaction)s">
      <table width="90%%">
       <tr>
        <td width="20%%"><span class="adminlabel">Category Name:</span></td>
        <td width="80%%"><span class="info">%(categid)s</span></td>
       </tr>
       <tr>
        <td width="20%%"><span class="adminlabel">Category Description:</span></td>
        <td width="80%%"><input type="text" size="60" name="categdescr" value="%(categdescr)s" /></td>
       </tr>
       <tr>
        <td width="20%%"> </td>
        <td width="80%%">
         <input type="hidden" name="doctype" value="%(doctype)s" />
         <input type="hidden" name="categid" value="%(categid)s" />
         <input name="doctypecategoryeditcommit" class="adminbutton" type="submit" value="Save Details" />
         <input name="doctypecategoryeditcancel" class="adminbutton" type="submit" value="Cancel" />
        </td>
       </tr>
      </table>
     </form>
    """ % form_values
        output = self._create_user_message_string(user_msg)
        output += self._create_websubmitadmin_main_menu_header()
        output += self._create_adminbox(header="Edit Details of '%(categid)s' Category of '%(doctype)s' Document Type:"
                                        % { 'doctype' : form_values['doctype'], 'categid' : form_values['categid'] },
                                        datalist=[body_content])
        return output
def tmpl_configuredoctype_add_submissionfunction(self,
                                                 doctype,
                                                 action,
                                                 cursubmissionfunctions,
                                                 allWSfunctions,
                                                 addfunctionname="",
                                                 addfunctionstep="",
                                                 addfunctionscore="",
                                                 perform_act="doctypeconfiguresubmissionfunctions",
                                                 user_msg=""):
        """Create the page body for adding a function to a submission of a
        document type: a form for choosing the function, its step and its
        score, followed by a read-only table of the functions currently
        configured for the submission.
        @param doctype: (string) the ID of the document type.
        @param action: (string) the ID of the submission (action).
        @param cursubmissionfunctions: (list|tuple) of (name, step, score)
         tuples - the submission's current function configuration.
        @param allWSfunctions: (list|tuple) all WebSubmit functions that may be added.
        @param addfunctionname: (string) pre-selected function name, if any.
        @param addfunctionstep: (string) pre-filled step value.
        @param addfunctionscore: (string) pre-filled score value.
        @param perform_act: (string) the admin handler the forms submit to.
        @param user_msg: Any message to be displayed on screen, such as a status report for the last task, etc.
        return: HTML page body.
        """
        ## sanity checking:
        ## Bug fix: this previously assigned to a never-used name
        ## "submissionfunctions", so a malformed argument still reached the
        ## loop over cursubmissionfunctions below and raised there.
        if type(cursubmissionfunctions) not in (list, tuple):
            cursubmissionfunctions = ()
        if type(allWSfunctions) not in (list, tuple):
            allWSfunctions = ()
        output = ""
        output += self._create_user_message_string(user_msg)
        ## display a form to add a function to the submission:
        body_content = """
    <br />
    <table class="admin_wvar" width="55%%">
     <thead>
      <tr>
       <th class="adminheaderleft" colspan="2">
        Add function:
       </th>
      </tr>
     </thead>
     <tbody>
      <tr>
       <td width="20%%"> </td>
       <td width="80%%">
         <form method="get" action="%(adminurl)s/%(performaction)s">
          <input name="doctype" type="hidden" value="%(doctype)s" />
          <input name="action" type="hidden" value="%(action)s" />
       </td>
      </tr>
      <tr>
       <td width="20%%"><span class="adminlabel">Function Name:</span></td>
       <td width="80%%"><span class="info">%(allWSfunctions)s</span></td>
      </tr>
      <tr>
       <td width="20%%"><span class="adminlabel">Step:</span></td>
       <td width="80%%"><span class="info"><input name="addfunctionstep" type="text" value="%(step)s" size="5" /></span></td>
      </tr>
      <tr>
       <td width="20%%"><span class="adminlabel">Score:</span></td>
       <td width="80%%"><span class="info"><input name="addfunctionscore" type="text" value="%(score)s" size="5" /></span></td>
      </tr>
      <tr>
       <td colspan="2">
        <table>
         <tr>
          <td>
           <input name="configuresubmissionaddfunctioncommit" class="adminbutton" type="submit" value="Save Details" />
           </form>
          </td>
          <td>
           <br />
           <form method="post" action="%(adminurl)s/%(performaction)s">
            <input name="doctype" type="hidden" value="%(doctype)s" />
            <input name="action" type="hidden" value="%(action)s" />
            <input name="configuresubmissionaddfunctioncancel" class="adminbutton" type="submit" value="Cancel" />
           </form>
          </td>
         </tr>
        </table>
       </td>
      </tr>
     </table>""" % { 'doctype' : cgi.escape(doctype, 1),
                     'action' : cgi.escape(action, 1),
                     'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1),
                     'performaction' : cgi.escape(perform_act, 1),
                     'step' : cgi.escape(addfunctionstep, 1),
                     'score' : cgi.escape(addfunctionscore, 1),
                     'allWSfunctions' : create_html_select_list(select_name="addfunctionname",
                                                                option_list=allWSfunctions,
                                                                selected_values=addfunctionname,
                                                                default_opt=("", "Select function to add:"))
                   }
        ## build a table of the functions currently associated with the submission:
        body_content += """<hr />\n"""
        header = ["Function Name", "Step", "Score"]
        tbody = []
        for functn in cursubmissionfunctions:
            thisfunctionname = functn[0]
            thisfunctionstep = str(functn[1])
            thisfunctionscore = str(functn[2])
            ## function name:
            t_row = [""" %s""" % (cgi.escape(thisfunctionname, 1),)]
            ## function step:
            t_row += ["""%s""" % (cgi.escape(thisfunctionstep, 1),) ]
            ## function score:
            t_row += ["""%s""" % (cgi.escape(thisfunctionscore, 1),) ]
            ## finally, append the newly created row to the tbody list:
            tbody.append(t_row)
        body_content += """
    <table class="admin_wvar" width="55%%">
     <thead>
      <tr>
       <th class="adminheaderleft">
        Current submission functions configuration:
       </th>
      </tr>
     </thead>
     <tbody>
      <tr>
       <td width="100%%"> </td>
      </tr>
      <tr>
       <td width="100%%">"""
        body_content += create_html_table_from_tuple(tableheader=header, tablebody=tbody)
        body_content += """
       </td>
      </tr>
     </table>"""
        output += self._create_websubmitadmin_main_menu_header()
        output += self._create_adminbox(header="""Add a function to the [%s] submission of the [%s] document type""" \
                                        % (cgi.escape(action, 1), cgi.escape(doctype, 1)), datalist=[body_content])
        return output
    def tmpl_configuredoctype_display_submissionfunctions(self,
                                                          doctype,
                                                          action,
                                                          submissionfunctions,
                                                          movefromfunctionname="",
                                                          movefromfunctionstep="",
                                                          movefromfunctionscore="",
                                                          perform_act="doctypeconfiguresubmissionfunctions",
                                                          user_msg=""):
        """Create the page body listing the functions attached to one submission
           of a document type, with controls to reorder (up/down arrows), move
           (two-phase move-from / move-to arrows), view the parameters of, and
           delete each function.
           @param doctype: (string) the unique ID of a document type
           @param action: (string) the unique ID of an action (submission)
           @param submissionfunctions: (tuple of tuples) one (name, step, score)
            triplet per function currently attached to the submission
           @param movefromfunctionname: (string) when a "move" is in progress,
            the name of the function being moved (empty string when no move is
            in progress)
           @param movefromfunctionstep: (string) step of the function being moved
           @param movefromfunctionscore: (string) score of the function being moved
           @param perform_act: (string) the admin URL component targeted by the
            links and forms generated in this page
           @param user_msg: Any message to be displayed on screen, such as a status report for the last task, etc.
           return: HTML page body.
        """
        ## sanity checking: anything that is not a list/tuple is treated as "no functions"
        if type(submissionfunctions) not in (list, tuple):
            submissionfunctions = ()
        output = ""
        output += self._create_user_message_string(user_msg)
        body_content = """<div><br />\n"""
        header = ["Function Name", " ", " ", " ", "Step", "Score", "View Parameters", "Delete", " "]
        tbody = []
        num_functions = len(submissionfunctions)
        ## Build one table row per function.  The "move" UI is two-phase: a
        ## first click selects the source function (movefromfunction*), a
        ## second click on a "move-to" arrow completes the move.
        for i in range(0, num_functions):
            thisfunctionname = submissionfunctions[i][0]
            thisfunctionstep = str(submissionfunctions[i][1])
            thisfunctionscore = str(submissionfunctions[i][2])
            ## function name:
            t_row = [""" %s""" % (cgi.escape(thisfunctionname, 1),)]
            ## up arrow (omitted for the first function, which cannot move up):
            if i != 0:
                t_row += ["""<a href="%(adminurl)s/%(performaction)s?doctype=%(doctype)s&action=%(action)s&"""\
                          """moveupfunctionname=%(func)s&moveupfunctionstep=%(step)s&moveupfunctionscore=%(score)s">"""\
                          """<img border="0" src="%(siteurl)s/img/smallup.gif" title="Move Function Up" /></a>""" \
                          % { 'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1),
                              'performaction' : cgi.escape(perform_act, 1),
                              'siteurl' : cgi.escape(CFG_SITE_URL, 1),
                              'doctype' : cgi.escape(doctype, 1),
                              'action' : cgi.escape(action, 1),
                              'func' : cgi.escape(thisfunctionname, 1),
                              'step' : cgi.escape(thisfunctionstep, 1),
                              'score' : cgi.escape(thisfunctionscore, 1)
                            }
                         ]
            else:
                ## this is the first function - don't provide an arrow to move it up
                t_row += [" "]
            ## down arrow (omitted for the last function, which cannot move down):
            if num_functions > 1 and i < num_functions - 1:
                t_row += ["""<a href="%(adminurl)s/%(performaction)s?doctype=%(doctype)s&action=%(action)s&"""\
                          """movedownfunctionname=%(func)s&movedownfunctionstep=%(step)s&movedownfunctionscore=%(score)s">"""\
                          """<img border="0" src="%(siteurl)s/img/smalldown.gif" title="Move Function Down" /></a>""" \
                          % { 'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1),
                              'performaction' : cgi.escape(perform_act, 1),
                              'siteurl' : cgi.escape(CFG_SITE_URL, 1),
                              'doctype' : cgi.escape(doctype, 1),
                              'action' : cgi.escape(action, 1),
                              'func' : cgi.escape(thisfunctionname, 1),
                              'step' : cgi.escape(thisfunctionstep, 1),
                              'score' : cgi.escape(thisfunctionscore, 1)
                            }
                         ]
            else:
                t_row += [" "]
            if movefromfunctionname in ("", None):
                ## provide "move from" arrows for all functions
                if num_functions > 1:
                    t_row += ["""<a href="%(adminurl)s/%(performaction)s?doctype=%(doctype)s&action=%(action)s&"""\
                              """movefromfunctionname=%(func)s&movefromfunctionstep=%(step)s&movefromfunctionscore=%(score)s">"""\
                              """<img border="0" src="%(siteurl)s/img/move_from.gif" title="Move %(func)s (step %(step)s, score %(score)s)"""\
                              """ from this location" /></a>"""\
                              % { 'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1),
                                  'performaction' : cgi.escape(perform_act, 1),
                                  'siteurl' : cgi.escape(CFG_SITE_URL, 1),
                                  'doctype' : cgi.escape(doctype, 1),
                                  'action' : cgi.escape(action, 1),
                                  'func' : cgi.escape(thisfunctionname, 1),
                                  'step' : cgi.escape(thisfunctionstep, 1),
                                  'score' : cgi.escape(thisfunctionscore, 1)
                                }
                             ]
                else:
                    t_row += [" "]
            else:
                ## there is a value for "movefromfunctionname", so a "moveto" button must be provided
                if num_functions > 1:
                    ## is this the function that will be moved?
                    ## NOTE(review): step/score are compared as *strings* against
                    ## the str()-converted row values - assumes the caller passes
                    ## them in the same textual form; TODO confirm.
                    if movefromfunctionname == thisfunctionname and \
                       movefromfunctionstep == thisfunctionstep and \
                       movefromfunctionscore == thisfunctionscore:
                        ## yes it is - no "move-to" arrow here
                        t_row += [" "]
                    else:
                        ## no it isn't - "move-to" arrow here
                        t_row += ["""<a href="%(adminurl)s/%(performaction)s?doctype=%(doctype)s&action=%(action)s&"""\
                                  """movefromfunctionname=%(fromfunc)s&movefromfunctionstep=%(fromstep)s&movefromfunctionscore=%(fromscore)s&"""\
                                  """movetofunctionname=%(tofunc)s&movetofunctionstep=%(tostep)s&movetofunctionscore=%(toscore)s">"""\
                                  """<img border="0" src="%(siteurl)s/img/move_to.gif" title="Move %(fromfunc)s (step %(fromstep)s, score %(fromscore)s)"""\
                                  """ to this location (step %(tostep)s, score %(toscore)s)" /></a>"""\
                                  % { 'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1),
                                      'performaction' : cgi.escape(perform_act, 1),
                                      'siteurl' : cgi.escape(CFG_SITE_URL, 1),
                                      'doctype' : cgi.escape(doctype, 1),
                                      'action' : cgi.escape(action, 1),
                                      'fromfunc' : cgi.escape(movefromfunctionname, 1),
                                      'fromstep' : cgi.escape(movefromfunctionstep, 1),
                                      'fromscore' : cgi.escape(movefromfunctionscore, 1),
                                      'tofunc' : cgi.escape(thisfunctionname, 1),
                                      'tostep' : cgi.escape(thisfunctionstep, 1),
                                      'toscore' : cgi.escape(thisfunctionscore, 1)
                                    }
                                 ]
                else:
                    ## there is only 1 function - cannot perform a "move"!
                    t_row += [" "]
            ## function step:
            t_row += ["""%s""" % (cgi.escape(thisfunctionstep, 1),) ]
            ## function score:
            t_row += ["""%s""" % (cgi.escape(thisfunctionscore, 1),) ]
            ## "view parameters" link (a one-button GET form styled as a hyperlink):
            t_row += ["""<form class="hyperlinkform" method="get" action="%(adminurl)s/doctypeconfiguresubmissionfunctionsparameters">"""\
                      """<input class="hyperlinkformHiddenInput" name="doctype" value="%(doctype)s" type="hidden" />"""\
                      """<input class="hyperlinkformHiddenInput" name="action" value="%(action)s" type="hidden" />"""\
                      """<input class="hyperlinkformHiddenInput" name="functionname" value="%(thisfunctionname)s" type="hidden" />"""\
                      """<input type="submit" name="viewfunctionparameters" value="view parameters" class="hyperlinkformSubmitButton" />"""\
                      """</form>\n"""\
                      % { 'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1),
                          'doctype' : cgi.escape(doctype, 1),
                          'action' : cgi.escape(action, 1),
                          'thisfunctionname' : cgi.escape(thisfunctionname, 1)
                        } ]
            ## "delete function" link:
            t_row += ["""<form class="hyperlinkform" method="get" action="%(adminurl)s/%(performaction)s">"""\
                      """<input class="hyperlinkformHiddenInput" name="doctype" value="%(doctype)s" type="hidden" />"""\
                      """<input class="hyperlinkformHiddenInput" name="action" value="%(action)s" type="hidden" />"""\
                      """<input class="hyperlinkformHiddenInput" name="deletefunctionname" value="%(thisfunctionname)s" type="hidden" />"""\
                      """<input class="hyperlinkformHiddenInput" name="deletefunctionstep" value="%(step)s" type="hidden" />"""\
                      """<input class="hyperlinkformHiddenInput" name="deletefunctionscore" value="%(score)s" type="hidden" />"""\
                      """<input type="submit" name="deletefunction" value="delete" class="hyperlinkformSubmitButton" />"""\
                      """</form>\n"""\
                      % { 'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1),
                          'performaction' : cgi.escape(perform_act, 1),
                          'doctype' : cgi.escape(doctype, 1),
                          'action' : cgi.escape(action, 1),
                          'thisfunctionname' : cgi.escape(thisfunctionname, 1),
                          'step' : cgi.escape(thisfunctionstep, 1),
                          'score' : cgi.escape(thisfunctionscore, 1)
                        } ]
            ## final column containing "jumping-out from" image when moving a function:
            if movefromfunctionname not in ("", None):
                if movefromfunctionname == thisfunctionname and \
                   movefromfunctionstep == thisfunctionstep and \
                   movefromfunctionscore == thisfunctionscore and \
                   num_functions > 1:
                    t_row += ["""<img border="0" src="%(siteurl)s/img/move_from.gif" title="Moving %(fromfunc)s (step %(fromstep)s, """\
                              """score %(fromscore)s) from this location" />"""\
                              % { 'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1),
                                  'performaction' : cgi.escape(perform_act, 1),
                                  'siteurl' : cgi.escape(CFG_SITE_URL, 1),
                                  'fromfunc' : cgi.escape(movefromfunctionname, 1),
                                  'fromstep' : cgi.escape(movefromfunctionstep, 1),
                                  'fromscore' : cgi.escape(movefromfunctionscore, 1)
                                }
                             ]
                else:
                    t_row += [" "]
            else:
                t_row += [" "]
            ## finally, append the newly created row to the tbody list:
            tbody.append(t_row)
        body_content += create_html_table_from_tuple(tableheader=header, tablebody=tbody)
        body_content += """</div>"""
        ## buttons for "add a function" and "finished":
        ## NOTE(review): the input name "funishedviewsubmissionfunctions" below
        ## looks like a typo for "finished...", but the server-side handler
        ## presumably matches this exact spelling - confirm before renaming.
        body_content += """
   <table>
    <tr>
     <td>
      <br />
      <form method="post" action="%(adminurl)s/doctypeconfiguresubmissionfunctions">
       <input name="doctype" type="hidden" value="%(doctype)s" />
       <input name="action" type="hidden" value="%(action)s" />
       <input name="configuresubmissionaddfunction" class="adminbutton" type="submit" value="Add a Function" />
      </form>
     </td>
     <td>
      <br />
      <form method="post" action="%(adminurl)s/doctypeconfigure">
       <input name="doctype" type="hidden" value="%(doctype)s" />
       <input name="funishedviewsubmissionfunctions" class="adminbutton" type="submit" value="Finished" />
      </form>
     </td>
    </tr>
   </table>""" % { 'doctype' : cgi.escape(doctype, 1),
                   'action' : cgi.escape(action, 1),
                   'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1)
                 }
        output += self._create_websubmitadmin_main_menu_header()
        output += self._create_adminbox(header="""Functions of the "%s" Submission of the "%s" Document Type:""" \
                                         % (cgi.escape(action, 1), cgi.escape(doctype, 1)), datalist=[body_content])
        return output
def _tmpl_configuredoctype_submissionfield_display_changeable_fields(self,
fieldtext="",
fieldlevel="",
fieldshortdesc="",
fieldcheck="",
allchecks=""):
"""Used when displaying the details of a submission field that is to be edited or inserted onto a
submission page.
This function creates the form elements for the values that can be edited by the user, such as the field's
label, short description, etc. (Examples of details of the submission field that could not be edited by the
user and are therefore not included in this function, are the creation-date/modification-date of the field,
etc.
@param fieldtext: (string) the label used for a field
@param fieldlevel: (char) 'M' or 'O' - whether a field is Mandatory or Optional
@param fieldshortdesc: (string) the short description of a field
@param fieldcheck: (string) the JavaScript checking function applied to a field
@param allchecks: (tuple of strings) the names of all WebSubmit JavaScript checks
@return: (string) a section of a form
"""
## sanity checking
if type(allchecks) not in (tuple, list):
allchecks = []
## make form-section
txt = """
<tr>
<td width="20%%"><span class="adminlabel">Field Label:</span></td>
<td width="80%%"><br /><textarea name="fieldtext" rows="5" cols="50">%(fieldtext)s</textarea><br /><br /></td>
</tr>
<tr>
<td width="20%%"><span class="adminlabel">Field Level:</span></td>
<td width="80%%"><span>%(fieldlevel)s</span></td>
</tr>
<tr>
<td width="20%%"><span class="adminlabel">Field Short Description:</span></td>
<td width="80%%"><br /><input type="text" size="35" name="fieldshortdesc" value="%(fieldshortdesc)s" /><br /><br /></td>
</tr>
<tr>
<td width="20%%"><span class="adminlabel">JavaScript Check:</span></td>
<td width="80%%"><span>%(fieldcheck)s</span></td>
</tr>""" % { 'fieldtext' : cgi.escape(fieldtext, 1),
'fieldlevel' : create_html_select_list(select_name="fieldlevel",
option_list=(("M", "Mandatory"), ("O", "Optional")),
selected_values=fieldlevel
),
'fieldshortdesc' : cgi.escape(fieldshortdesc, 1),
'fieldcheck' : create_html_select_list(select_name="fieldcheck",
option_list=allchecks,
selected_values=fieldcheck,
default_opt=("", "--NO CHECK--")
)
}
return txt
    def tmpl_configuredoctype_add_submissionfield(self,
                                                  doctype="",
                                                  action="",
                                                  pagenum="",
                                                  fieldname="",
                                                  fieldtext="",
                                                  fieldlevel="",
                                                  fieldshortdesc="",
                                                  fieldcheck="",
                                                  allchecks="",
                                                  allelements="",
                                                  user_msg="",
                                                  perform_act="doctypeconfiguresubmissionpageelements"):
        """Create the page used to add a new field to one page of a submission.
           @param doctype: (string) the unique ID of a document type
           @param action: (string) the unique ID of an action (submission)
           @param pagenum: (string) the number of the submission page to which the field is added
           @param fieldname: (string) the pre-selected element name, if any
           @param fieldtext: (string) the label used for the field
           @param fieldlevel: (char) 'M' or 'O' - whether the field is Mandatory or Optional
           @param fieldshortdesc: (string) the short description of the field
           @param fieldcheck: (string) the JavaScript checking function applied to the field
           @param allchecks: (tuple of strings) the names of all WebSubmit JavaScript checks
           @param allelements: (tuple) all elements available for selection
           @param user_msg: any message(s) to be displayed to the user
           @param perform_act: (string) the admin URL component targeted by the forms in this page
           @return: (string) HTML page body
        """
        ## sanity checking
        if type(allelements) not in (tuple, list):
            allelements = []
        ## begin template:
        output = ""
        body_content = ""
        output += self._create_user_message_string(user_msg)
        ## NOTE: the <form> opened inside this first literal is closed in the
        ## buttons literal appended further below.
        body_content += """
   <table class="admin_wvar" width="95%%">
    <thead>
     <tr>
      <th class="adminheaderleft" colspan="2">
       Add a field to page %(pagenum)s of submission %(submission)s
      </th>
     </tr>
    </thead>
    <tbody>
        <tr>
         <td width="20%%">&nbsp;</td>
         <td width="80%%">&nbsp;<form method="get" action="%(adminurl)s/%(performaction)s"></td>
        </tr>
        <tr>
         <td width="20%%"><span class="adminlabel">Page Number:</span></td>
         <td width="80%%"><span class="info">%(pagenum)s</span></td>
        </tr>
        <tr>
         <td width="20%%"><span class="adminlabel">Field Name:</span></td>
         <td width="80%%">%(fieldname)s</td>
        </tr>""" % { 'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1),
                     'fieldname' : create_html_select_list(select_name="fieldname",
                                                           option_list=allelements,
                                                           selected_values=fieldname,
                                                           default_opt=("", "Select a Field:")
                                                          ),
                     'pagenum' : cgi.escape(pagenum, 1),
                     'submission' : cgi.escape("%s%s" % (action, doctype), 1),
                     'performaction' : cgi.escape(perform_act, 1)
                   }
        ## the editable properties (label, level, short description, check)
        ## are rendered by a shared helper:
        body_content += self._tmpl_configuredoctype_submissionfield_display_changeable_fields(fieldtext=fieldtext,
                                                                                              fieldlevel=fieldlevel,
                                                                                              fieldshortdesc=fieldshortdesc,
                                                                                              fieldcheck=fieldcheck,
                                                                                              allchecks=allchecks)
        body_content += """
        <tr>
         <td colspan="2">
          <table>
           <tr>
            <td>
             <input name="doctype" type="hidden" value="%(doctype)s" />
             <input name="action" type="hidden" value="%(action)s" />
             <input name="pagenum" type="hidden" value="%(pagenum)s" />
             <input name="addfieldcommit" class="adminbutton" type="submit" value="Add Field" />
            </form>
           </td>
           <td>
            <br />
            <form method="post" action="%(adminurl)s/%(performaction)s">
             <input name="doctype" type="hidden" value="%(doctype)s" />
             <input name="action" type="hidden" value="%(action)s" />
             <input name="pagenum" type="hidden" value="%(pagenum)s" />
             <input name="canceladdsubmissionfield" class="adminbutton" type="submit" value="Cancel" />
            </form>
           </td>
          </tr>
         </table>
        </td>
       </tr>
    </tbody>
   </table>\n""" % { 'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1),
                     'performaction' : cgi.escape(perform_act, 1),
                     'doctype' : cgi.escape(doctype, 1),
                     'action' : cgi.escape(action, 1),
                     'pagenum' : cgi.escape(pagenum, 1)
                   }
        output += self._create_websubmitadmin_main_menu_header()
        output += self._create_adminbox(header="Field Details:", datalist=[body_content])
        return output
    def tmpl_configuredoctype_edit_submissionfield(self,
                                                   doctype="",
                                                   action="",
                                                   pagenum="",
                                                   fieldnum="",
                                                   fieldname="",
                                                   fieldtext="",
                                                   fieldlevel="",
                                                   fieldshortdesc="",
                                                   fieldcheck="",
                                                   cd="",
                                                   md="",
                                                   allchecks="",
                                                   user_msg="",
                                                   perform_act="doctypeconfiguresubmissionpageelements"):
        """Create the page used to edit the details of a field as it appears
           on one page of a submission.
           @param doctype: (string) the unique ID of a document type
           @param action: (string) the unique ID of an action (submission)
           @param pagenum: (string) the number of the submission page on which the field sits
           @param fieldnum: (string) the position of the field on the page
           @param fieldname: (string) the name of the element behind the field
           @param fieldtext: (string) the label used for the field
           @param fieldlevel: (char) 'M' or 'O' - whether the field is Mandatory or Optional
           @param fieldshortdesc: (string) the short description of the field
           @param fieldcheck: (string) the JavaScript checking function applied to the field
           @param cd: creation date of the field (omitted from the page when empty/None)
           @param md: last-modification date of the field (omitted when empty/None)
           @param allchecks: (tuple of strings) the names of all WebSubmit JavaScript checks
           @param user_msg: any message(s) to be displayed to the user
           @param perform_act: (string) the admin URL component targeted by the forms in this page
           @return: (string) HTML page body
        """
        ## begin template:
        output = ""
        body_content = ""
        output += self._create_user_message_string(user_msg)
        ## NOTE: the <form> opened inside this first literal is closed in the
        ## buttons literal appended further below.
        body_content += """
   <table class="admin_wvar" width="95%%">
    <thead>
     <tr>
      <th class="adminheaderleft" colspan="2">
       Details of the %(fieldname)s field as it appears at position %(fieldnum)s on Page %(pagenum)s of the %(submission)s Submission:
      </th>
     </tr>
    </thead>
    <tbody>
        <tr>
         <td width="20%%">&nbsp;</td>
         <td width="80%%">&nbsp;<form method="get" action="%(adminurl)s/%(performaction)s"></td>
        </tr>
        <tr>
         <td width="20%%"><span class="adminlabel">Page Number:</span></td>
         <td width="80%%"><span class="info">%(pagenum)s</span></td>
        </tr>
        <tr>
         <td width="20%%"><span class="adminlabel">Field Number:</span></td>
         <td width="80%%"><span class="info">%(fieldnum)s</span></td>
        </tr>
        <tr>
         <td width="20%%"><span class="adminlabel">Field Name:</span></td>
         <td width="80%%"><span class="info">%(fieldname)s</span></td>
        </tr>""" % { 'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1),
                     'pagenum' : cgi.escape(pagenum, 1),
                     'fieldnum' : cgi.escape(fieldnum, 1),
                     'fieldname' : cgi.escape(fieldname, 1),
                     'submission' : cgi.escape("%s%s" % (action, doctype), 1),
                     'performaction' : cgi.escape(perform_act, 1)
                   }
        ## field creation date (row only shown when a date is available):
        if cd not in ("", None):
            body_content += """
        <tr>
         <td width="20%%"><span class="adminlabel">Creation Date:</span></td>
         <td width="80%%"><span class="info">%s</span></td>
        </tr>""" % (cgi.escape(str(cd), 1),)
        ## field last-modified date (row only shown when a date is available):
        if md not in ("", None):
            body_content += """
        <tr>
         <td width="20%%"><span class="adminlabel">Last Modification Date:</span></td>
         <td width="80%%"><span class="info">%s</span></td>
        </tr>""" % (cgi.escape(str(md), 1), )
        ## the editable properties are rendered by a shared helper:
        body_content += self._tmpl_configuredoctype_submissionfield_display_changeable_fields(fieldtext=fieldtext,
                                                                                              fieldlevel=fieldlevel,
                                                                                              fieldshortdesc=fieldshortdesc,
                                                                                              fieldcheck=fieldcheck,
                                                                                              allchecks=allchecks)
        body_content += """
        <tr>
         <td colspan="2">
          <table>
           <tr>
            <td>
             <input name="doctype" type="hidden" value="%(doctype)s" />
             <input name="action" type="hidden" value="%(action)s" />
             <input name="pagenum" type="hidden" value="%(pagenum)s" />
             <input name="editfieldposn" type="hidden" value="%(fieldnum)s" />
             <input name="editfieldposncommit" class="adminbutton" type="submit" value="Save Changes" />
            </form>
           </td>
           <td>
            <br />
            <form method="post" action="%(adminurl)s/%(performaction)s">
             <input name="doctype" type="hidden" value="%(doctype)s" />
             <input name="action" type="hidden" value="%(action)s" />
             <input name="pagenum" type="hidden" value="%(pagenum)s" />
             <input name="canceleditsubmissionfield" class="adminbutton" type="submit" value="Cancel" />
            </form>
           </td>
          </tr>
         </table>
        </td>
       </tr>
    </tbody>
   </table>\n""" % { 'doctype' : cgi.escape(doctype, 1),
                     'action' : cgi.escape(action, 1),
                     'pagenum' : cgi.escape(pagenum, 1),
                     'fieldnum' : cgi.escape(fieldnum, 1),
                     'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1),
                     'performaction' : cgi.escape(perform_act, 1)
                   }
        output += self._create_websubmitadmin_main_menu_header()
        output += self._create_adminbox(header="Field Details:", datalist=[body_content])
        return output
    def tmpl_configuredoctype_display_submissionpage_preview(self, doctype, action, pagenum, fields, user_msg=""):
        """Create a page displaying a simple preview of a submission page
           @param doctype: (string) the unique ID of a document type
           @param action: (string) the unique ID of an action
           @param pagenum: (string) the number of the page that is to be previewed
           @param fields: a tuple of tuples, one per field on the submission page.
            As indexed below, [0] is the field/element name, [1] the label,
            [3] the field type, [4] the size, [5] the rows, [6] the cols,
            [7] the field description and [8] the value.  (Index [2] is not
            used here - presumably the check name; TODO confirm against the
            caller.)
           @param user_msg: a tuple or string, containing any message(s) to be displayed to the user
           @return: a string, which makes up the page body
        """
        ## Sanity Checking of elements: fields must be a sequence of sequences;
        ## an empty sequence is legal (IndexError on fields[0] means "no fields").
        if type(fields) not in (list, tuple):
            fields = ()
        try:
            if type(fields[0]) not in (tuple, list):
                fields = ()
        except IndexError:
            pass
        ## begin template:
        output = ""
        body_content = ""
        output += self._create_user_message_string(user_msg)
        ## hyperlink back to page details:
        body_content += """
   <div style="text-align: center;">
    <a href="%(adminurl)s/doctypeconfiguresubmissionpageelements?doctype=%(doctype)s&action=%(action)s&pagenum=%(pagenum)s">
    Return to details of page [%(pagenum)s] of submission [%(submission)s]</a>
   </div>
   <hr />""" % { 'adminurl' : WEBSUBMITADMINURL,
                 'doctype' : cgi.escape(doctype, 1),
                 'action' : cgi.escape(action, 1),
                 'pagenum' : cgi.escape(pagenum, 1),
                 'submission' : cgi.escape("%s%s" % (action, doctype), 1)
               }
        ## the preview form is a dummy: it only exists so the previewed form
        ## elements render inside a <form> context.
        body_content += """<div><br />
   <form name="dummyeldisplay" action="%(adminurl)s">
   <table class="admin_wvar" align="center">
    <thead>
     <tr>
      <th class="adminheaderleft" colspan="1">
       Page Preview:
      </th>
     </tr>
    </thead>
    <tbody>
     <tr bgcolor="#f1f1f1">
      <td>
       <br />
""" % {'adminurl' : WEBSUBMITADMINURL}
        ## render each field via the shared element-preview helper:
        for field in fields:
            body_content += self._element_display_preview_get_element(elname=field[0], eltype=field[3], elsize=field[4],
                                                                      elrows=field[5], elcols=field[6], elval=field[8],
                                                                      elfidesc=field[7], ellabel=field[1])
            body_content += "\n"
        body_content += """       <br />
      </td>
     </tr>
    </tbody>
   </table>
   </form>
   </div>"""
        output += self._create_websubmitadmin_main_menu_header()
        output += self._create_adminbox(header="Preview of Page %s of Submission %s:" \
                                        % (pagenum, "%s%s" % (action, doctype)), datalist=[body_content])
        return output
    def tmpl_configuredoctype_list_submissionelements(self,
                                                      doctype,
                                                      action,
                                                      pagenum,
                                                      page_elements,
                                                      movefieldfromposn="",
                                                      user_msg=""):
        """Create the page body listing the fields (elements) present on one
           page of a submission, with controls to reorder, move, edit and
           delete each field.
           @param doctype: (string) the unique ID of a document type
           @param action: (string) the unique ID of an action (submission)
           @param pagenum: (string) the number of the submission page listed
           @param page_elements: (tuple of tuples) one row per field.  As
            indexed below, [1] is the field position on the page, [2] the
            element name, [3] the label, [4] the level, [5] the short
            description, [6] the JavaScript check, [7] the creation date and
            [8] the modification date.
           @param movefieldfromposn: (string) when a "move" is in progress,
            the position of the field being moved; must look like an integer,
            otherwise it is ignored
           @param user_msg: any message(s) to be displayed to the user
           @return: (string) HTML page body
        """
        ## Sanity Checking of elements: page_elements must be a sequence of
        ## sequences; an empty sequence is legal (IndexError means no fields).
        if type(page_elements) not in (list, tuple):
            page_elements = ()
        try:
            if type(page_elements[0]) not in (tuple, list):
                page_elements = ()
        except IndexError:
            pass
        ## movefieldfromposn must be parseable as an integer, else discard it:
        try:
            int(movefieldfromposn)
        except ValueError:
            movefieldfromposn = ""
        ## begin template:
        output = ""
        body_content = ""
        output += self._create_user_message_string(user_msg)
        number_elements = len(page_elements)
        ## "View Page Preview" button is only offered when the page has fields:
        if number_elements > 0:
            body_content += """
   <table width="100%%" class="admin_wvar">
    <tbody>
     <tr>
      <td style="text-align: center;">
       <br />
       <form method="get" action="%(adminurl)s/doctypeconfiguresubmissionpagespreview">
        <input type="hidden" name="doctype" value="%(doctype_id)s" />
        <input type="hidden" name="action" value="%(action)s" />
        <input type="hidden" name="pagenum" value="%(pagenum)s" />
        <input name="viewsubmissionpagepreview" class="adminbutton" type="submit" value="View Page Preview" />
       </form>
      </td>
     </tr>
    </table>""" % { 'adminurl' : WEBSUBMITADMINURL,
                    'doctype_id' : cgi.escape(doctype, 1),
                    'action' : cgi.escape(action, 1),
                    'pagenum' : cgi.escape(pagenum, 1)
                  }
        t_header = [" ", " ", " ", " ", "Name", "Element Label",
                    "Level", "Short Descr.", "Check", "Creation Date", "Modification Date", " ",
                    " ", " ", " "]
        t_body = []
        ## Build one table row per field.  The "move" UI is two-phase: a first
        ## click selects the source field (movefieldfromposn), a second click
        ## on a "move-to" arrow completes the move.
        for i in range(0, number_elements):
            ## Field number:
            t_row = ["""%s""" % (cgi.escape(page_elements[i][1], 1),) ]
            ## Move a field from posn - to posn arrows:
            ## NOTE(review): movefieldfromposn is compared to the row position
            ## as a *string* - assumes both sides share the same textual form;
            ## TODO confirm.
            if movefieldfromposn in ("", None):
                ## provide "move from" arrow for all element
                if number_elements > 1:
                    t_row += ["""<a href="%(adminurl)s/doctypeconfiguresubmissionpageelements?doctype=%(doctype)s&action=%(action)s&"""\
                              """pagenum=%(pagenum)s&movefieldfromposn=%(fieldnum)s">"""\
                              """<img border="0" src="%(siteurl)s/img/move_from.gif" title="Move field at position %(fieldnum)s"""\
                              """ from this location" /></a>"""\
                              % { 'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1),
                                  'siteurl' : cgi.escape(CFG_SITE_URL, 1),
                                  'doctype' : cgi.escape(doctype, 1),
                                  'action' : cgi.escape(action, 1),
                                  'pagenum' : cgi.escape(pagenum, 1),
                                  'fieldnum' : cgi.escape(page_elements[i][1], 1)
                                }
                             ]
                else:
                    t_row += [" "]
            else:
                ## there is a value for "movefieldfromposn", so a "moveto" button must be provided
                if number_elements > 1:
                    ## is this the field that will be moved?
                    if movefieldfromposn == page_elements[i][1]:
                        ## yes it is - no "move-to" arrow here
                        t_row += [" "]
                    else:
                        ## no it isn't - "move-to" arrow here
                        t_row += ["""<a href="%(adminurl)s/doctypeconfiguresubmissionpageelements?doctype=%(doctype)s&action=%(action)s&"""\
                                  """pagenum=%(pagenum)s&movefieldfromposn=%(movefieldfromposn)s&movefieldtoposn=%(fieldnum)s">"""\
                                  """<img border="0" src="%(siteurl)s/img/move_to.gif" title="Move field at position %(movefieldfromposn)s"""\
                                  """ to this location at position %(fieldnum)s" /></a>"""\
                                  % { 'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1),
                                      'siteurl' : cgi.escape(CFG_SITE_URL, 1),
                                      'doctype' : cgi.escape(doctype, 1),
                                      'action' : cgi.escape(action, 1),
                                      'pagenum' : cgi.escape(pagenum, 1),
                                      'fieldnum' : cgi.escape(page_elements[i][1], 1),
                                      'movefieldfromposn' : cgi.escape(movefieldfromposn, 1)
                                    }
                                 ]
                else:
                    ## there is only 1 field - cannot perform a "move"!
                    t_row += [" "]
            ## up arrow - implemented as a move to position-1 (omitted for the first field):
            if i != 0:
                t_row += ["""<a href="%(adminurl)s/doctypeconfiguresubmissionpageelements?doctype=%(doctype)s&action=%(action)s&"""\
                          """pagenum=%(pagenum)s&movefieldfromposn=%(fieldnum)s&movefieldtoposn=%(previousfield)s">"""\
                          """<img border="0" src="%(siteurl)s/img/smallup.gif" title="Move Element Up" /></a>"""\
                          % { 'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1),
                              'siteurl' : cgi.escape(CFG_SITE_URL, 1),
                              'doctype' : cgi.escape(doctype, 1),
                              'action' : cgi.escape(action, 1),
                              'pagenum' : cgi.escape(pagenum, 1),
                              'fieldnum' : cgi.escape(page_elements[i][1], 1),
                              'previousfield' : cgi.escape(str(int(page_elements[i][1])-1), 1)
                            }
                         ]
            else:
                ## first element - don't provide up arrow:
                t_row += [" "]
            ## down arrow - implemented as a move to position+1 (omitted for the last field):
            if number_elements > 1 and i < number_elements - 1:
                t_row += ["""<a href="%(adminurl)s/doctypeconfiguresubmissionpageelements?doctype=%(doctype)s&action=%(action)s&"""\
                          """pagenum=%(pagenum)s&movefieldfromposn=%(fieldnum)s&movefieldtoposn=%(nextfield)s">"""\
                          """<img border="0" src="%(siteurl)s/img/smalldown.gif" title="Move Element Down" /></a>"""\
                          % { 'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1),
                              'siteurl' : cgi.escape(CFG_SITE_URL, 1),
                              'doctype' : cgi.escape(doctype, 1),
                              'action' : cgi.escape(action, 1),
                              'pagenum' : cgi.escape(pagenum, 1),
                              'fieldnum' : cgi.escape(page_elements[i][1], 1),
                              'nextfield' : cgi.escape(str(int(page_elements[i][1])+1), 1)
                            }
                         ]
            else:
                t_row += [" "]
            ## Element Name:
            t_row += ["""<span class="info">%s</span>""" % (cgi.escape(str(page_elements[i][2]), 1),) ]
            ## Element Label:
            t_row += ["""%s""" % (cgi.escape(str(page_elements[i][3]), 1),) ]
            ## Level:
            t_row += ["""%s""" % (cgi.escape(str(page_elements[i][4]), 1),) ]
            ## Short Descr:
            t_row += ["""%s""" % (cgi.escape(str(page_elements[i][5]), 1),) ]
            ## Check:
            t_row += ["""%s""" % (cgi.escape(str(page_elements[i][6]), 1),) ]
            ## Creation Date:
            if page_elements[i][7] not in ("", None):
                t_row += ["%s" % (cgi.escape(str(page_elements[i][7]), 1),)]
            else:
                t_row += [" "]
            ## Modification Date:
            if page_elements[i][8] not in ("", None):
                t_row += ["%s" % (cgi.escape(str(page_elements[i][8]), 1),)]
            else:
                t_row += [" "]
            ## View/Edit field:
            t_row += ["""<a href="%(adminurl)s/doctypeconfiguresubmissionpageelements?doctype=%(doctype)s&action=%(action)s&"""\
                      """pagenum=%(pagenum)s&editfieldposn=%(fieldnum)s"><small>edit</small></a>"""\
                      % { 'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1),
                          'siteurl' : cgi.escape(CFG_SITE_URL, 1),
                          'doctype' : cgi.escape(doctype, 1),
                          'action' : cgi.escape(action, 1),
                          'pagenum' : cgi.escape(pagenum, 1),
                          'fieldnum' : cgi.escape(page_elements[i][1], 1)
                        }
                     ]
            ## Delete Element from page:
            t_row += ["""<a href="%(adminurl)s/doctypeconfiguresubmissionpageelements?doctype=%(doctype)s&action=%(action)s&"""\
                      """pagenum=%(pagenum)s&deletefieldposn=%(fieldnum)s"><small>delete</small></a>"""\
                      % { 'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1),
                          'siteurl' : cgi.escape(CFG_SITE_URL, 1),
                          'doctype' : cgi.escape(doctype, 1),
                          'action' : cgi.escape(action, 1),
                          'pagenum' : cgi.escape(pagenum, 1),
                          'fieldnum' : cgi.escape(page_elements[i][1], 1)
                        }
                     ]
            ## View/Edit Element Definition:
            t_row += ["""<a href="%(adminurl)s/elementedit?elname=%(elementname)s&doctype=%(doctype)s&action=%(action)s&"""\
                      """pagenum=%(pagenum)s"><small>element</small></a>"""\
                      % { 'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1),
                          'siteurl' : cgi.escape(CFG_SITE_URL, 1),
                          'doctype' : cgi.escape(doctype, 1),
                          'action' : cgi.escape(action, 1),
                          'pagenum' : cgi.escape(pagenum, 1),
                          'elementname' : cgi.escape(page_elements[i][2], 1)
                        }
                     ]
            ## Jump element out-from:
            ## NOTE(review): this unconditional spacer plus the conditional cell
            ## appended just below gives each row one more cell than t_header
            ## has columns - looks unintentional; TODO confirm against
            ## create_html_table_from_tuple's behaviour.
            t_row += [" "]
            ## final column containing "jumping-out from" image when moving a field:
            if movefieldfromposn not in ("", None):
                if movefieldfromposn == page_elements[i][1] and number_elements > 1:
                    t_row += ["""<img border="0" src="%(siteurl)s/img/move_from.gif" title="Move field at position %(fieldnum)s"""\
                              """ from this location" />"""\
                              % { 'siteurl' : cgi.escape(CFG_SITE_URL, 1),
                                  'fieldnum' : cgi.escape(page_elements[i][1], 1)
                                }
                             ]
                else:
                    t_row += [" "]
            else:
                t_row += [" "]
            ## finally, append the newly created row to the tbody list:
            t_body.append(t_row)
        ## now create the table and include it into the page body:
        body_content += """
   <table width="100%%">
    <tr>
     <td colspan="2"><br />"""
        body_content += create_html_table_from_tuple(tableheader=t_header, tablebody=t_body)
        body_content += """
      <br />
     </td>
    </tr>"""
        body_content += """
    <tr>
     <td width="20%%"> </td>
     <td width="80%%">
      <table>
       <tr>
        <td>
         <form method="get" action="%(adminurl)s/doctypeconfiguresubmissionpageelements">
          <input type="hidden" name="doctype" value="%(doctype_id)s" />
          <input type="hidden" name="action" value="%(action)s" />
          <input type="hidden" name="pagenum" value="%(pagenum)s" />
          <input name="addfield" class="adminbutton" type="submit" value="Add a Field" />
         </form>
        </td>
        <td>
         <form method="get" action="%(adminurl)s/doctypeconfiguresubmissionpages">
          <input type="hidden" name="doctype" value="%(doctype_id)s" />
          <input type="hidden" name="action" value="%(action)s" />
          <input name="finishedviewfields" class="adminbutton" type="submit" value="Finished" />
         </form>
        </td>
       </tr>
      </table>
     </td>
    </tr>""" % { 'adminurl' : WEBSUBMITADMINURL,
                 'doctype_id' : cgi.escape(doctype, 1),
                 'action' : cgi.escape(action, 1),
                 'pagenum' : cgi.escape(pagenum, 1)
               }
        body_content += """
   </table>"""
        output += self._create_websubmitadmin_main_menu_header()
        output += self._create_adminbox(header="Submission Page Details:", datalist=[body_content])
        return output
    def tmpl_configuredoctype_edit_functionparameter_file(self, doctype, action, function, paramfilename,
                                                          paramfilecontent, paramname="", user_msg=""):
        """Create the page used to edit the content of a function-parameter
           file, in a large textarea.
           @param doctype: (string) the unique ID of a document type
           @param action: (string) the unique ID of an action (submission)
           @param function: (string) the name of the function whose parameter file is edited
           @param paramfilename: (string) the name of the parameter file
           @param paramfilecontent: (string) the current content of the parameter file
           @param paramname: (string) the name of the parameter the file belongs to
           @param user_msg: any message(s) to be displayed to the user
           @return: (string) HTML page body
        """
        ## begin template:
        output = ""
        body_content = ""
        output += self._create_user_message_string(user_msg)
        ## NOTE: the <form> opened in the first row is closed after the
        ## "Save Changes" button; the "Cancel" button has its own form.
        body_content += """
   <table class="admin_wvar" width="95%%">
    <tbody>
        <tr>
         <td width="20%%">&nbsp;</td>
         <td width="80%%">&nbsp;<form method="post" action="%(adminurl)s/doctypeconfiguresubmissionfunctionsparameters"></td>
        </tr>
        <tr>
         <td width="20%%"><span class="adminlabel">Parameter Value:</span></td>
         <td width="80%%"><textarea cols="115" rows="22" name="paramfilecontent">%(paramfilecontent)s</textarea></td>
        </tr>
        <tr>
         <td colspan="2">
          <table>
           <tr>
            <td>
             <input name="doctype" type="hidden" value="%(doctype)s" />
             <input name="action" type="hidden" value="%(action)s" />
             <input name="functionname" type="hidden" value="%(function)s" />
             <input name="paramname" type="hidden" value="%(paramname)s" />
             <input name="paramfilename" type="hidden" value="%(paramfilename)s" />
             <input name="editfunctionparameterfilecommit" class="adminbutton" type="submit" value="Save Changes" />
            </form>
           </td>
           <td>
            <br />
            <form method="post" action="%(adminurl)s/doctypeconfiguresubmissionfunctionsparameters">
             <input name="doctype" type="hidden" value="%(doctype)s" />
             <input name="action" type="hidden" value="%(action)s" />
             <input name="functionname" type="hidden" value="%(function)s" />
             <input name="editfunctionparameterfilecancel" class="adminbutton" type="submit" value="Cancel" />
            </form>
           </td>
          </tr>
         </table>
        </td>
       </tr>
    </tbody>
   </table>\n""" % { 'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1),
                     'doctype' : cgi.escape(doctype, 1),
                     'action' : cgi.escape(action, 1),
                     'function' : cgi.escape(function, 1),
                     'paramname' : cgi.escape(paramname, 1),
                     'paramfilename' : cgi.escape(paramfilename, 1),
                     'paramfilecontent' : cgi.escape(paramfilecontent, 1)
                   }
        output += self._create_websubmitadmin_main_menu_header()
        output += self._create_adminbox(header="Edit the [%s] parameter file:" % (paramfilename,), datalist=[body_content])
        return output
def tmpl_configuredoctype_edit_functionparameter_value(self,
doctype,
action,
function,
paramname,
paramval,
user_msg=""):
## begin template:
output = ""
body_content = ""
output += self._create_user_message_string(user_msg)
body_content += """
<table class="admin_wvar" width="95%%">
<tbody>
<tr>
<td width="20%%"> </td>
<td width="80%%"> <form method="get" action="%(adminurl)s/doctypeconfiguresubmissionfunctionsparameters"></td>
</tr>
<tr>
<td width="20%%"><span class="adminlabel">Parameter Value:</span></td>
<td width="80%%"><input type="text" size="35" name="paramval" value="%(paramval)s" /></td>
</tr>
<tr>
<td colspan="2">
<table>
<tr>
<td>
<input name="doctype" type="hidden" value="%(doctype)s" />
<input name="action" type="hidden" value="%(action)s" />
<input name="functionname" type="hidden" value="%(function)s" />
<input name="paramname" type="hidden" value="%(paramname)s" />
<input name="editfunctionparametervaluecommit" class="adminbutton" type="submit" value="Save Changes" />
</form>
</td>
<td>
<br />
<form method="post" action="%(adminurl)s/doctypeconfiguresubmissionfunctionsparameters">
<input name="doctype" type="hidden" value="%(doctype)s" />
<input name="action" type="hidden" value="%(action)s" />
<input name="functionname" type="hidden" value="%(function)s" />
<input name="editfunctionparametervaluecancel" class="adminbutton" type="submit" value="Cancel" />
</form>
</td>
</tr>
</table>
</td>
</tr>
</tbody>
</table>\n""" % { 'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1),
'doctype' : cgi.escape(doctype, 1),
'action' : cgi.escape(action, 1),
'function' : cgi.escape(function, 1),
'paramname' : cgi.escape(paramname, 1),
'paramval' : cgi.escape(paramval, 1)
}
output += self._create_websubmitadmin_main_menu_header()
output += self._create_adminbox(header="Edit the value of the %s Parameter:" % (paramname,), datalist=[body_content])
return output
def tmpl_configuredoctype_list_functionparameters(self,
doctype,
action,
function,
params,
user_msg=""):
"""Display the parameters and their values for a given function as applied to a given document type
"""
linktoparamfile = 0
## sanity checking:
if type(params) not in (list, tuple):
params = ()
## make table of function parameters:
if function in FUNCTIONS_WITH_FILE_PARAMS:
linktoparamfile = 1
t_header = ["Parameter Name", "Parameter Value", "Edit Parameter", "%s" \
% ((linktoparamfile == 1 and "Edit File") or (" "),)]
t_body = []
num_params = len(params)
for i in range(0, num_params):
thisparamname = params[i][0]
thisparamval = params[i][1]
## parameter name:
t_row = [""" %s""" % (cgi.escape(thisparamname, 1),)]
## parameter value:
t_row += [""" <span class="info">%s</span>""" % (cgi.escape(thisparamval, 1),)]
## button to edit parameter value:
t_row += ["""<form class="hyperlinkform" method="get" action="%(adminurl)s/doctypeconfiguresubmissionfunctionsparameters">"""\
"""<input class="hyperlinkformHiddenInput" name="doctype" value="%(doctype)s" type="hidden" />"""\
"""<input class="hyperlinkformHiddenInput" name="action" value="%(action)s" type="hidden" />"""\
"""<input class="hyperlinkformHiddenInput" name="functionname" value="%(function)s" type="hidden" />"""\
"""<input class="hyperlinkformHiddenInput" name="paramname" value="%(thisparamname)s" type="hidden" />"""\
"""<input type="submit" name="editfunctionparametervalue" value="edit value" class="hyperlinkformSubmitButton" />"""\
"""</form>\n"""\
% { 'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1),
'doctype' : cgi.escape(doctype, 1),
'action' : cgi.escape(action, 1),
'function' : cgi.escape(function, 1),
'thisparamname' : cgi.escape(thisparamname, 1)
} ]
## button to edit the value of a parameter's file:
editstr = """<form class="hyperlinkform" method="get" action="%(adminurl)s/doctypeconfiguresubmissionfunctionsparameters">"""\
"""<input class="hyperlinkformHiddenInput" name="doctype" value="%(doctype)s" type="hidden" />"""\
"""<input class="hyperlinkformHiddenInput" name="action" value="%(action)s" type="hidden" />"""\
"""<input class="hyperlinkformHiddenInput" name="functionname" value="%(function)s" type="hidden" />"""\
"""<input class="hyperlinkformHiddenInput" name="paramname" value="%(thisparamname)s" type="hidden" />"""\
"""<input type="submit" name="editfunctionparameterfile" value="edit file" class="hyperlinkformSubmitButton" />"""\
"""</form>\n"""\
% { 'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1),
'doctype' : cgi.escape(doctype, 1),
'action' : cgi.escape(action, 1),
'function' : cgi.escape(function, 1),
'thisparamname' : cgi.escape(thisparamname, 1)
}
t_row += ["%s" % ((linktoparamfile == 1 and editstr) or (" "),)]
## finally, append the newly created row to the tbody list:
t_body.append(t_row)
## create display of page
output = ""
output += self._create_user_message_string(user_msg)
body_content = """
<table class="admin_wvar" width="100%%">
<tbody>
<tr>
<td>
<br />
%(paramstable)s
<br />
</td>
</tr>
<tr>
<td>
<form method="get" action="%(adminurl)s/doctypeconfiguresubmissionfunctions">
<input type="hidden" name="doctype" value="%(doctype)s" />
<input type="hidden" name="action" value="%(action)s" />
<input name="finishedviewfields" class="adminbutton" type="submit" value="Finished" />
</form>
</td>
</tr>
</table>""" % { 'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1),
'doctype' : cgi.escape(doctype, 1),
'action' : cgi.escape(action, 1),
'paramstable' : create_html_table_from_tuple(tableheader=t_header, tablebody=t_body)
}
output += self._create_websubmitadmin_main_menu_header()
output += self._create_adminbox(header="""Parameters of the %(function)s Function, belonging to the %(doctype)s Document Type:"""\
% { 'function' : cgi.escape(function, 1), 'doctype' : cgi.escape(doctype, 1) },
datalist=[body_content])
return output
    def tmpl_configuredoctype_list_submissionpages(self,
                                                   doctype,
                                                   action,
                                                   number_pages,
                                                   cd="",
                                                   md="",
                                                   deletepagenum="",
                                                   user_msg=""):
        """Build the page listing a submission's interface pages, with
        move-up/move-down/view/delete controls for each page.  When
        *deletepagenum* is non-empty the page instead asks the user to
        confirm the deletion of that page.

        :param doctype: (string) the ID of the document type.
        :param action: (string) the ID of the submission (action).
        :param number_pages: (int or string) number of pages the submission has.
        :param cd: creation date of the submission ("" / None when unknown).
        :param md: last-modification date ("" / None when unknown).
        :param deletepagenum: (string) when non-empty, the number of the page
            whose deletion must be confirmed.
        :param user_msg: (string or list) feedback message(s) shown to the user.
        :return: (string) the HTML page body.
        """
        ## sanity checking:
        try:
            number_pages = int(number_pages)
        except ValueError:
            number_pages = 0
        deletepagenum = str(deletepagenum)
        output = ""
        body_content = ""
        output += self._create_user_message_string(user_msg)
        body_content += """
   <table width="90%%">
    <tr>
     <td width="20%%"><span class="adminlabel">Document Type ID:</span></td>
     <td width="80%%"><span class="info">%(doctype_id)s</span></td>
    </tr>
    <tr>
     <td width="20%%"><span class="adminlabel">Submission ID:</span></td>
     <td width="80%%"><span class="info">%(action)s</span></td>
    </tr>
    <tr>
     <td width="20%%"><span class="adminlabel">Number of Pages:</span></td>
     <td width="80%%"><span class="info">%(num_pages)s</span></td>
    </tr>""" % { 'doctype_id' : cgi.escape(doctype, 1),
                 'action' : cgi.escape(action, 1),
                 'num_pages' : cgi.escape(str(number_pages), 1)
               }
        ## dates are only shown when actually known:
        if cd not in ("", None):
            body_content += """
    <tr>
     <td width="20%%"><span class="adminlabel">Creation Date:</span></td>
     <td width="80%%"><span class="info">%s</span></td>
    </tr>""" % (cgi.escape(str(cd), 1),)
        if md not in ("", None):
            body_content += """
    <tr>
     <td width="20%%"><span class="adminlabel">Last Modification Date:</span></td>
     <td width="80%%"><span class="info">%s</span></td>
    </tr>""" % (cgi.escape(str(md), 1), )
        ## EITHER: Make a table of links to each page -OR-
        ## prompt for confirmation of deletion of a page:
        if deletepagenum == "":
            ## This is a normal visit to display details of a submission's pages
            ## make a table of links to each page:
            t_header = ["Page", " ", " ", "View Page", "Delete"]
            t_body = []
            ## page numbers are 1-based:
            for i in range(1, number_pages + 1):
                t_row = ["""Page %d""" % (i,)]
                ## up arrow:
                if i != 1:
                    t_row += ["""<a href="%(adminurl)s/doctypeconfiguresubmissionpages?doctype=%(doctype)s&action=%(action)s&"""\
                              """pagenum=%(pagenum)s&movepage=true&movepagedirection=up">"""\
                              """<img border="0" src="%(siteurl)s/img/smallup.gif" title="Move Page Up" /></a>""" \
                              % { 'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1),
                                  'siteurl' : cgi.escape(CFG_SITE_URL, 1),
                                  'doctype' : cgi.escape(doctype, 1),
                                  'action' : cgi.escape(action, 1),
                                  'pagenum' : cgi.escape(str(i), 1)
                                }
                             ]
                else:
                    ## this is the first function - don't provide an arrow to move it up
                    t_row += [" "]
                ## down arrow:
                if number_pages > 1 and i < number_pages:
                    t_row += ["""<a href="%(adminurl)s/doctypeconfiguresubmissionpages?doctype=%(doctype)s&action=%(action)s&"""\
                              """pagenum=%(pagenum)s&movepage=true&movepagedirection=down">"""\
                              """<img border="0" src="%(siteurl)s/img/smalldown.gif" title="Move Page Down" /></a>""" \
                              % { 'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1),
                                  'siteurl' : cgi.escape(CFG_SITE_URL, 1),
                                  'doctype' : cgi.escape(doctype, 1),
                                  'action' : cgi.escape(action, 1),
                                  'pagenum' : cgi.escape(str(i), 1)
                                }
                             ]
                else:
                    t_row += [" "]
                ## "view page" link:
                t_row += ["""<small><a href="%(adminurl)s/doctypeconfiguresubmissionpageelements?doctype=%(doctype)s&action=%(action)s&"""\
                          """pagenum=%(pagenum)s">view page</a></small>""" \
                          % { 'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1),
                              'doctype' : cgi.escape(doctype, 1),
                              'action' : cgi.escape(action, 1),
                              'pagenum' : cgi.escape(str(i), 1)
                            }
                         ]
                ## "delete page" link:
                t_row += ["""<small><a href="%(adminurl)s/doctypeconfiguresubmissionpages?doctype=%(doctype)s&action=%(action)s&"""\
                          """pagenum=%(pagenum)s&deletepage=true">delete page</a></small>""" \
                          % { 'adminurl' : cgi.escape(WEBSUBMITADMINURL, 1),
                              'doctype' : cgi.escape(doctype, 1),
                              'action' : cgi.escape(action, 1),
                              'pagenum' : cgi.escape(str(i), 1)
                            }
                         ]
                ## finally, append the newly created row to the tbody list:
                t_body.append(t_row)
            ## now create the table and include it into the page body:
            body_content += """
    <tr>
     <td colspan="2"><br />"""
            body_content += create_html_table_from_tuple(tableheader=t_header, tablebody=t_body)
            body_content += """
      <br />
     </td>
    </tr>"""
            body_content += """
    <tr>
     <td width="20%%"> </td>
     <td width="80%%">
      <table>
       <tr>
        <td>
         <form method="get" action="%(adminurl)s/doctypeconfiguresubmissionpages">
          <input type="hidden" name="doctype" value="%(doctype_id)s" />
          <input type="hidden" name="action" value="%(action)s" />
          <input name="addpage" class="adminbutton" type="submit" value="Add a Page" />
         </form>
        </td>
        <td>
         <form method="get" action="%(adminurl)s/doctypeconfigure">
          <input type="hidden" name="doctype" value="%(doctype_id)s" />
          <input name="finishedviewpages" class="adminbutton" type="submit" value="Finished" />
         </form>
        </td>
       </tr>
      </table>
     </td>
    </tr>""" % { 'adminurl' : WEBSUBMITADMINURL,
                 'doctype_id' : cgi.escape(doctype, 1),
                 'action' : cgi.escape(action, 1)
               }
        else:
            ## user has requested the deletion of a page from the current submission, and this visit should
            ## simply prompt them for confirmation:
            body_content += """
    <tr>
     <td width="20%%"> </td>
     <td width="80%%"><br /><span class="info">REALLY delete page %(pagenum)s and all of its associated interface elements from """\
            """this submission? You CANNOT undo this!</span></td>
    </tr>
    <tr>
     <td width="20%%"> </td>
     <td width="80%%">
      <table>
       <tr>
        <td>
         <form method="get" action="%(adminurl)s/doctypeconfiguresubmissionpages">
          <input type="hidden" name="doctype" value="%(doctype_id)s" />
          <input type="hidden" name="action" value="%(action)s" />
          <input type="hidden" name="deletepage" value="true" />
          <input type="hidden" name="pagenum" value="%(pagenum)s" />
          <input name="deletepageconfirm" class="adminbutton" type="submit" value="Confirm" />
         </form>
        </td>
        <td>
         <form method="get" action="%(adminurl)s/doctypeconfiguresubmissionpages">
          <input type="hidden" name="doctype" value="%(doctype_id)s" />
          <input type="hidden" name="action" value="%(action)s" />
          <input name="cancelpagedelete" class="adminbutton" type="submit" value="No! Stop!" />
         </form>
        </td>
       </tr>
      </table>
     </td>
    </tr>""" % { 'adminurl' : WEBSUBMITADMINURL,
                 'doctype_id' : cgi.escape(doctype, 1),
                 'action' : cgi.escape(action, 1),
                 'pagenum' : cgi.escape(deletepagenum, 1)
               }
        body_content += """
   </table>
"""
        output += self._create_websubmitadmin_main_menu_header()
        output += self._create_adminbox(header="Submission Page Details:", datalist=[body_content])
        return output
| gpl-2.0 |
cucumber/cucumber | gherkin/python/gherkin/pickles/compiler.py | 2 | 6970 | import re
from ..count_symbols import count_symbols
from ..stream.id_generator import IdGenerator
class Compiler(object):
    """Compile a parsed Gherkin document (an AST of nested dicts) into
    "pickles": fully-expanded, executable scenarios in which background
    steps are folded in and scenario-outline placeholders are replaced by
    the values of each examples-table row.
    """

    def __init__(self, id_generator=None):
        # Fall back to a fresh incrementing generator when none is injected.
        self.id_generator = id_generator
        if self.id_generator is None:
            self.id_generator = IdGenerator()

    def compile(self, gherkin_document):
        """Return the list of pickle dicts for *gherkin_document*.

        Documents without a 'feature' key, or features without children,
        yield an empty list.
        """
        pickles = []
        if 'feature' not in gherkin_document:
            return pickles
        feature = gherkin_document['feature']
        if not feature['children']:
            return pickles
        uri = gherkin_document['uri']
        feature_tags = feature['tags']
        language = feature['language']
        # Background steps accumulate in document order and apply to every
        # scenario that follows them.
        background_steps = []
        for scenario_definition in feature['children']:
            if 'background' in scenario_definition:
                if scenario_definition['background']['steps']:
                    background_steps += scenario_definition['background']['steps']
            elif 'rule' in scenario_definition:
                self._compile_rule(uri, feature_tags, background_steps, scenario_definition['rule'], language, pickles)
            else:
                scenario = scenario_definition['scenario']
                args = (uri, feature_tags, background_steps, scenario, language, pickles)
                # Scenarios with examples tables are outlines and expand to
                # one pickle per examples row.
                if not scenario['examples']:
                    self._compile_scenario(*args)
                else:
                    self._compile_scenario_outline(*args)
        return pickles

    def _compile_rule(self, uri, feature_tags, feature_background_steps, rule, language, pickles):
        """Compile the scenarios of a Rule, inheriting feature tags and
        feature-level background steps (plus any rule-level background).
        """
        tags = list(feature_tags) + list(rule['tags'])
        background_steps = []
        background_steps += feature_background_steps
        for scenario_definition in rule['children']:
            if 'background' in scenario_definition:
                if scenario_definition['background']['steps']:
                    background_steps += scenario_definition['background']['steps']
            else:
                scenario = scenario_definition['scenario']
                args = (uri, tags, background_steps, scenario, language, pickles)
                if not scenario['examples']:
                    self._compile_scenario(*args)
                else:
                    self._compile_scenario_outline(*args)
        return pickles

    def _compile_scenario(self, uri, inherited_tags, background_steps, scenario, language, pickles):
        """Append one pickle for a plain (non-outline) scenario."""
        tags = list(inherited_tags) + list(scenario['tags'])
        steps = list()
        # Background steps are only attached when the scenario itself has
        # steps (an empty scenario produces an empty pickle).
        if scenario['steps']:
            steps.extend(self._pickle_steps(background_steps + scenario['steps']))
        pickle = {
            'astNodeIds': [scenario['id']],
            'id': self.id_generator.get_next_id(),
            'tags': self._pickle_tags(tags),
            'name': scenario['name'],
            'language': language,
            'steps': steps,
            'uri': uri
        }
        pickles.append(pickle)

    def _compile_scenario_outline(self, uri, inherited_tags, background_steps, scenario, language, pickles):
        """Append one pickle per examples-table row of a scenario outline,
        interpolating <placeholder> occurrences in names, step text and
        step arguments.  Examples without a table header are skipped.
        """
        for examples in (e for e in scenario['examples'] if 'tableHeader' in e):
            variable_cells = examples['tableHeader']['cells']
            for values in examples['tableBody']:
                value_cells = values['cells']
                steps = list()
                if scenario['steps']:
                    steps.extend(self._pickle_steps(background_steps))
                tags = list(inherited_tags) + list(scenario['tags']) + list(examples['tags'])
                if scenario['steps']:
                    for outline_step in scenario['steps']:
                        step_text = self._interpolate(
                            outline_step['text'],
                            variable_cells,
                            value_cells)
                        argument = self._create_pickle_arguments(
                            outline_step,
                            variable_cells,
                            value_cells)
                        _pickle_step = {
                            'astNodeIds': [outline_step['id'], values['id']],
                            'id': self.id_generator.get_next_id(),
                            'text': step_text
                        }
                        if argument is not None:
                            _pickle_step['argument'] = argument
                        steps.append(_pickle_step)
                pickle = {
                    'astNodeIds': [scenario['id'], values['id']],
                    'id': self.id_generator.get_next_id(),
                    'name': self._interpolate(
                        scenario['name'],
                        variable_cells,
                        value_cells),
                    'language': language,
                    'steps': steps,
                    'tags': self._pickle_tags(tags),
                    'uri': uri
                }
                pickles.append(pickle)

    def _create_pickle_arguments(self, step, variables, values):
        """Return the interpolated dataTable/docString argument of *step*,
        or None when the step has neither.
        """
        if 'dataTable' in step:
            table = {'rows': []}
            for row in step['dataTable']['rows']:
                cells = [
                    {
                        'value': self._interpolate(cell['value'], variables, values)
                    } for cell in row['cells']
                ]
                table['rows'].append({'cells': cells})
            return {'dataTable': table}
        elif 'docString' in step:
            argument = step['docString']
            docstring = {
                'content': self._interpolate(argument['content'], variables, values)
            }
            if 'mediaType' in argument:
                docstring['mediaType'] = self._interpolate(argument['mediaType'], variables, values)
            return {'docString': docstring}
        else:
            return None

    def _interpolate(self, name, variable_cells, value_cells):
        """Replace each <variable> placeholder in *name* with the matching
        value cell (cells are matched positionally).

        NOTE(review): the variable name is embedded in a regex pattern and
        the value is used as a re.sub replacement without escaping --
        presumably placeholder names/values never contain regex
        metacharacters; verify against the Gherkin spec.
        """
        if name is None:
            return name
        for n, variable_cell in enumerate(variable_cells):
            value_cell = value_cells[n]
            name = re.sub(
                u'<{0[value]}>'.format(variable_cell),
                value_cell['value'],
                name
            )
        return name

    def _pickle_steps(self, steps):
        """Convert a list of AST steps to pickle steps."""
        return [self._pickle_step(step)for step in steps]

    def _pickle_step(self, step):
        """Convert one AST step (no outline interpolation) to a pickle step."""
        pickle_step = {
            'astNodeIds': [step['id']],
            'id': self.id_generator.get_next_id(),
            'text': step['text'],
        }
        # Empty variable/value lists: arguments are copied verbatim.
        argument = self._create_pickle_arguments(
            step,
            [],
            [])
        if argument is not None:
            pickle_step['argument'] = argument
        return pickle_step

    def _pickle_tags(self, tags):
        """Convert a list of AST tags to pickle tags."""
        return [self._pickle_tag(tag) for tag in tags]

    def _pickle_tag(self, tag):
        """Convert one AST tag to its pickle form."""
        return {
            'astNodeId': tag['id'],
            'name': tag['name']
        }
| mit |
timsnyder/bokeh | bokeh/sampledata/unemployment.py | 2 | 2473 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' This modules exposes per-county unemployment data for Unites States in
2009. It exposes a dictionary ``data`` which is indexed by the two-tuple
containing ``(state_id, county_id)`` and has the unemployment rate (2009) as
the associated value.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import csv
# External imports
# Bokeh imports
from ..util.sampledata import external_path, open_csv
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'data',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def _read_data():
    ''' Read the bundled unemployment09.csv sample data.

    Returns a dict mapping ``(state_id, county_id)`` int tuples to the
    2009 unemployment rate (float) for that county.  Only columns 1
    (state FIPS), 2 (county FIPS) and 8 (rate) of each row are used.

    '''
    data = {}
    with open_csv(external_path('unemployment09.csv')) as f:
        reader = csv.reader(f, delimiter=str(','), quotechar=str('"'))
        for row in reader:
            # Unused columns are discarded via the conventional '_' name
            # (the original mixed 'dummy' and a misspelt 'dumm').
            _, state_id, county_id, _, _, _, _, _, rate = row
            data[(int(state_id), int(county_id))] = float(rate)
    return data
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
# Module-level sample-data dict, loaded once at import time:
# (state_id, county_id) -> 2009 unemployment rate.
data = _read_data()
| bsd-3-clause |
sils1297/coala | tests/bearlib/languages/documentation/DocstyleDefinitionTest.py | 2 | 6077 | import os.path
from tempfile import TemporaryDirectory
import unittest
from unittest.mock import patch
from coalib.bearlib.languages.documentation.DocstyleDefinition import (
DocstyleDefinition)
class DocstyleDefinitionTest(unittest.TestCase):
    """Unit tests for DocstyleDefinition construction, normalization,
    loading from .coalang files and definition discovery."""

    Metadata = DocstyleDefinition.Metadata
    # A minimal well-formed metadata triple reused across the tests.
    dummy_metadata = Metadata(":param ", ":", ":return:")

    def test_fail_instantation(self):
        """Malformed marker tuples and wrong argument types must raise."""
        with self.assertRaises(ValueError):
            DocstyleDefinition("PYTHON", "doxyGEN",
                               (("##", "#"),), self.dummy_metadata)

        with self.assertRaises(ValueError):
            DocstyleDefinition("WEIRD-PY",
                               "schloxygen",
                               (("##+", "x", "y", "z"),),
                               self.dummy_metadata)

        with self.assertRaises(ValueError):
            DocstyleDefinition("PYTHON",
                               "doxygen",
                               (("##", "", "#"), ('"""', '"""')),
                               self.dummy_metadata)

        with self.assertRaises(TypeError):
            DocstyleDefinition(123, ["doxygen"], (('"""', '"""')),
                               self.dummy_metadata)

        with self.assertRaises(TypeError):
            DocstyleDefinition("language", ["doxygen"], (('"""', '"""')),
                               "metdata")

    def test_properties(self):
        """Language/docstyle are lower-cased and markers normalized to a
        tuple of 3-tuples, whatever shape they were passed in."""
        uut = DocstyleDefinition("C", "doxygen",
                                 (("/**", "*", "*/"),), self.dummy_metadata)

        self.assertEqual(uut.language, "c")
        self.assertEqual(uut.docstyle, "doxygen")
        self.assertEqual(uut.markers, (("/**", "*", "*/"),))
        self.assertEqual(uut.metadata, self.dummy_metadata)

        uut = DocstyleDefinition("PYTHON", "doxyGEN",
                                 [("##", "", "#")], self.dummy_metadata)

        self.assertEqual(uut.language, "python")
        self.assertEqual(uut.docstyle, "doxygen")
        self.assertEqual(uut.markers, (("##", "", "#"),))
        self.assertEqual(uut.metadata, self.dummy_metadata)

        uut = DocstyleDefinition("I2C",
                                 "my-custom-tool",
                                 (["~~", "/~", "/~"], (">!", ">>", ">>")),
                                 self.dummy_metadata)

        self.assertEqual(uut.language, "i2c")
        self.assertEqual(uut.docstyle, "my-custom-tool")
        self.assertEqual(uut.markers, (("~~", "/~", "/~"), (">!", ">>", ">>")))
        self.assertEqual(uut.metadata, self.dummy_metadata)

        # A single flat marker triple is accepted and wrapped.
        uut = DocstyleDefinition("Cpp", "doxygen",
                                 ("~~", "/~", "/~"), self.dummy_metadata)

        self.assertEqual(uut.language, "cpp")
        self.assertEqual(uut.docstyle, "doxygen")
        self.assertEqual(uut.markers, (("~~", "/~", "/~"),))
        self.assertEqual(uut.metadata, self.dummy_metadata)

    def test_load(self):
        """Loading validates docstyle, language and argument types, and
        parses the packaged python3/default definition correctly."""
        # Test unregistered docstyle.
        with self.assertRaises(FileNotFoundError):
            next(DocstyleDefinition.load("PYTHON", "INVALID"))

        # Test unregistered language in existing docstyle.
        with self.assertRaises(KeyError):
            next(DocstyleDefinition.load("bake-a-cake", "default"))

        # Test wrong argument type.
        with self.assertRaises(TypeError):
            next(DocstyleDefinition.load(123, ["list"]))

        # Test python 3 default configuration and if everything is parsed
        # right.
        result = DocstyleDefinition.load("PYTHON3", "default")

        self.assertEqual(result.language, "python3")
        self.assertEqual(result.docstyle, "default")
        self.assertEqual(result.markers, (('"""', '', '"""'),))
        self.assertEqual(result.metadata, self.dummy_metadata)

    def test_get_available_definitions(self):
        """The packaged docstyle/language pairs must all be discoverable."""
        # Test if the basic supported docstyle-language pairs exist.
        expected = {('default', 'python'),
                    ('default', 'python3'),
                    ('default', 'java'),
                    ('doxygen', 'c'),
                    ('doxygen', 'cpp'),
                    ('doxygen', 'cs'),
                    ('doxygen', 'fortran'),
                    ('doxygen', 'java'),
                    ('doxygen', 'python'),
                    ('doxygen', 'python3'),
                    ('doxygen', 'tcl'),
                    ('doxygen', 'vhdl'),
                    ('doxygen', 'php'),
                    ('doxygen', 'objective-c')}

        real = set(DocstyleDefinition.get_available_definitions())

        self.assertTrue(expected.issubset(real))

    @patch('coalib.bearlib.languages.documentation.DocstyleDefinition.iglob')
    @patch('coalib.bearlib.languages.documentation.DocstyleDefinition'
           '.ConfParser')
    def test_get_available_definitions_on_wrong_files(self,
                                                      confparser_mock,
                                                      iglob_mock):
        """Definitions coming from upper-case filenames are lower-cased;
        the mocked parser/glob isolate this from the real filesystem."""
        # Test the case when a coalang was provided with uppercase letters.
        confparser_instance_mock = confparser_mock.return_value
        confparser_instance_mock.parse.return_value = ["X"]
        iglob_mock.return_value = ['some/CUSTOMSTYLE.coalang',
                                   'SOME/xlang.coalang']
        self.assertEqual(list(DocstyleDefinition.get_available_definitions()),
                         [('xlang', 'x')])

    def test_load_external_coalang(self):
        """Definitions can be loaded from a user-supplied coalang_dir."""
        empty_metadata = self.Metadata('', '', '')

        with TemporaryDirectory() as directory:
            coalang_file = os.path.join(directory, "custom.coalang")
            with open(coalang_file, "w") as file:
                file.write("[COOL]\ndoc-markers = @@,@@,@@\n")

            result = DocstyleDefinition.load(
                "cool", "custom", coalang_dir=directory)
            self.assertEqual(result.language, "cool")
            self.assertEqual(result.docstyle, "custom")
            self.assertEqual(result.markers, (('@@', '@@', '@@'),))
            self.assertEqual(result.metadata, empty_metadata)
| agpl-3.0 |
Charlotte-Morgan/inasafe | safe/gui/tools/wizard/test/test_wizard_dialog_locale.py | 6 | 10273 | # coding=utf-8
"""
InaSAFE Disaster risk assessment tool developed by AusAid and World Bank
- **GUI Test Cases for Wizard in Local mode.**
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'ismail@kartoza.com'
__date__ = '24/02/2014'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
# pylint: disable=no-member
import unittest
import os
import sys
# Import qgis in order to set SIP API.
# pylint: disable=unused-import
import qgis
# pylint: enable=unused-import
from qgis.PyQt.QtCore import QDateTime
from safe.definitions.constants import INASAFE_TEST
skipped_reason = (
'These tests are skipped because it will make a segmentation fault. Just '
'run it separately.')
@unittest.skip(skipped_reason)
class TestWizardDialogLocale(unittest.TestCase):
    """Test for Wizard Dialog in Locale mode.

    Fixes applied in review:
    * ``leSource_date.seDateTime`` was a typo for ``setDateTime`` and
      would raise AttributeError when the test runs.
    * ``dialog.step_classify`` was used in four places while the same
      method populates data through ``dialog.step_kw_classify``; the
      attribute name is now used consistently.
    """

    def setUp(self):
        # Force a clean re-import of safe.metadata under the Indonesian
        # locale so translated metadata strings are picked up.
        if 'safe.metadata' in list(sys.modules.keys()):
            del sys.modules['safe.metadata']
        self.assertFalse('safe.metadata' in list(sys.modules.keys()))
        os.environ['LANG'] = 'id'

    def tearDown(self):
        # Restore the environment so other test modules are unaffected.
        if 'LANG' in os.environ:
            del os.environ['LANG']

    def check_list(self, expected_list, list_widget):
        """Helper function to check that list_widget is equal to expected_list.

        :param expected_list: List of expected values to be found.
        :type expected_list: list

        :param list_widget: List widget that wants to be checked.
        :type expected_list: QListWidget
        """
        real_list = []
        for i in range(list_widget.count()):
            real_list.append(list_widget.item(i).text())
        message = ('Expected %s but I got %s' % (expected_list, real_list))
        self.assertEqual(expected_list, real_list, message)

    def check_current_step(self, expected_step, dialog):
        """Helper function to check the current step is expected_step

        :param expected_step: The expected current step.
        :type expected_step: int

        :param dialog: The dialog that contains a wizard.
        :type dialog: WizardDialog
        """
        current_step = dialog.get_current_step()
        message = ('Expected %s but I got %s' % (expected_step, current_step))
        self.assertEqual(expected_step, current_step, message)

    # noinspection PyUnresolvedReferences
    def check_current_text(self, expected_text, list_widget):
        """Check the current text in list widget is expected_text

        :param expected_text: The expected current step.
        :type expected_text: str

        :param list_widget: List widget that wants to be checked.
        :type list_widget: QListWidget
        """
        # noinspection PyUnresolvedReferences
        message = 'No selected option in the list'
        self.assertNotEqual(-1, list_widget.currentRow(), message)
        current_text = list_widget.currentItem().text()
        message = ('Expected %s but I got %s' % (expected_text, current_text))
        self.assertEqual(expected_text, current_text, message)

    # noinspection PyUnresolvedReferences
    def select_from_list_widget(self, option, list_widget):
        """Helper function to select option from list_widget

        :param option: Option to be chosen
        :type option: str

        :param list_widget: List widget that wants to be checked.
        :type list_widget: QListWidget
        """
        items = []
        for i in range(list_widget.count()):
            items.append(list_widget.item(i).text())
            if list_widget.item(i).text() == option:
                list_widget.setCurrentRow(i)
                return
        message = 'There is no %s in the list widget' % option
        message += '\n The options are %s' % items
        raise Exception(message)

    def test_translation(self):
        """Test for metadata translation."""
        from safe.test.utilities import (
            clone_shp_layer, remove_vector_temp_file)
        from safe.test.utilities import BOUNDDATA

        from safe.test.utilities import get_qgis_app
        # Get Qgis app handle
        # noinspection PyPep8Naming
        _, _, IFACE, PARENT = get_qgis_app(qsetting=INASAFE_TEST)
        from safe.gui.tools.wizard.wizard_dialog import WizardDialog
        layer = clone_shp_layer(
            name='kabupaten_jakarta',
            include_keywords=True,
            source_directory=BOUNDDATA)
        # noinspection PyTypeChecker
        dialog = WizardDialog(PARENT, IFACE)
        dialog.set_keywords_creation_mode(layer)
        # Category names are expected in Indonesian under LANG=id.
        expected_categories = ['ancaman']
        # noinspection PyTypeChecker
        self.check_list(expected_categories,
                        dialog.step_kw_purpose.lstCategories)

        self.check_current_text(
            'ancaman', dialog.step_kw_purpose.lstCategories)

        dialog.pbnNext.click()

        remove_vector_temp_file(layer.source())

    def test_existing_complex_keywords(self):
        """Test for existing complex keywords in wizard in locale mode."""
        from safe.test.utilities import (
            clone_shp_layer, remove_vector_temp_file)
        layer = clone_shp_layer(
            name='tsunami_polygon', include_keywords=True, source_directory='')
        from safe.test.utilities import get_qgis_app
        # Get Qgis app handle
        # noinspection PyPep8Naming
        _, _, IFACE, PARENT = get_qgis_app(qsetting=INASAFE_TEST)
        from safe.gui.tools.wizard.wizard_dialog import WizardDialog
        # noinspection PyTypeChecker
        dialog = WizardDialog(PARENT, IFACE)
        dialog.set_keywords_creation_mode(layer)

        # select hazard
        self.select_from_list_widget('ancaman',
                                     dialog.step_kw_purpose.lstCategories)
        dialog.pbnNext.click()
        # select volcano
        self.select_from_list_widget('gunung berapi', dialog.
                                     step_kw_subcategory.lstSubcategories)
        dialog.pbnNext.click()
        # select volcano categorical unit
        self.select_from_list_widget('Kategori gunung berapi',
                                     dialog.step_kw_unit.lstUnits)
        dialog.pbnNext.click()

        # select GRIDCODE
        self.select_from_list_widget(
            'GRIDCODE', dialog.step_kw_field.lstFields)
        dialog.pbnNext.click()

        unit = dialog.step_kw_unit.selected_unit()
        default_classes = unit['classes']
        unassigned_values = []  # no need to check actually, not save in file
        assigned_values = {
            'low': ['5.0'],
            'medium': ['3.0', '4.0'],
            'high': ['2.0']
        }
        dialog.step_kw_classify.populate_classified_values(
            unassigned_values, assigned_values, default_classes)
        dialog.pbnNext.click()

        source = 'Source'
        source_scale = 'Source Scale'
        source_url = 'Source Url'
        source_date = QDateTime.fromString(
            '06-12-2015 12:30',
            'dd-MM-yyyy HH:mm')

        dialog.step_kw_source.leSource.setText(source)
        dialog.step_kw_source.leSource_scale.setText(source_scale)
        dialog.step_kw_source.leSource_url.setText(source_url)
        # FIX: was `seDateTime`, which is not a QDateTimeEdit method and
        # would raise AttributeError.
        dialog.step_kw_source.leSource_date.setDateTime(source_date)
        dialog.pbnNext.click()  # next
        dialog.pbnNext.click()  # finish

        # noinspection PyTypeChecker
        dialog = WizardDialog(PARENT, IFACE)
        dialog.set_keywords_creation_mode(layer)

        # step 1 of 7 - select category
        self.check_current_text(
            'ancaman', dialog.step_kw_purpose.lstCategories)

        # Click Next
        dialog.pbnNext.click()

        # step 2 of 7 - select subcategory
        # noinspection PyTypeChecker
        self.check_current_text('gunung berapi',
                                dialog.step_kw_subcategory.lstSubcategories)

        # Click Next
        dialog.pbnNext.click()

        # step 3 of 7 - select volcano units
        self.check_current_text('Kategori gunung berapi',
                                dialog.step_kw_unit.lstUnits)

        # Click Next
        dialog.pbnNext.click()

        # step 4 of 7 - select field
        self.check_current_text('GRIDCODE', dialog.step_kw_field.lstFields)

        # Click Next
        dialog.pbnNext.click()

        # FIX: the four references below used `dialog.step_classify`,
        # inconsistent with `dialog.step_kw_classify` used above to
        # populate the same step.
        for index in range(dialog.step_kw_classify.lstUniqueValues.count()):
            message = ('%s Should be in unassigned values' %
                       dialog.step_kw_classify.lstUniqueValues.item(
                           index).text())
            self.assertIn(
                dialog.step_kw_classify.lstUniqueValues.item(index).text(),
                unassigned_values,
                message)
        real_assigned_values = dialog.step_kw_classify.selected_mapping()
        self.assertDictEqual(real_assigned_values, assigned_values)

        # Click Next
        dialog.pbnNext.click()

        # step 6 of 7 - enter source
        message = ('Invalid Next button state in step 6! Disabled while '
                   'source is optional')
        self.assertTrue(dialog.pbnNext.isEnabled(), message)
        message = 'Source should be %s' % source
        self.assertEqual(
            dialog.step_kw_source.leSource.text(), source, message)
        message = 'Source Url should be %s' % source_url
        self.assertEqual(dialog.step_kw_source.leSource_url.text(),
                         source_url, message)
        message = 'Source Date should be %s' % source_date.toString(
            'dd-MM-yyyy HH:mm')
        self.assertEqual(dialog.step_kw_source.leSource_date.dateTime(),
                         source_date, message)
        message = 'Source Scale should be %s' % source_scale
        self.assertEqual(dialog.step_kw_source.leSource_scale.text(),
                         source_scale, message)
        dialog.pbnNext.click()

        dialog.pbnCancel.click()

        remove_vector_temp_file(layer.source())
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
| gpl-3.0 |
salguarnieri/intellij-community | python/testData/inspections/PyShadowingBuiltinsInspection/test.py | 69 | 1483 | def test_import_builtin_names():
import float
from foo import float
from bar import baz as <weak_warning descr="Shadows built-in name 'float'">float</weak_warning>
def test_builtin_function_parameters():
def test1(x, _, <weak_warning descr="Shadows built-in name 'len'">len</weak_warning>, <weak_warning descr="Shadows built-in name 'file'">file</weak_warning>=None):
pass
def test_builtin_function_name():
def <weak_warning descr="Shadows built-in name 'list'">list</weak_warning>():
pass
def test_builtin_assignment_targets():
foo = 2
<weak_warning descr="Shadows built-in name 'list'">list</weak_warning> = []
for <weak_warning descr="Shadows built-in name 'int'">int</weak_warning> in range(10):
print(int)
<weak_warning descr="Shadows built-in name 'range'">range</weak_warning> = []
<weak_warning descr="Shadows built-in name 'list'">list</weak_warning>, _ = (1, 2)
return [int for <weak_warning descr="Shadows built-in name 'int'">int</weak_warning> in range(10)]
def test_builtin_class_name():
class <weak_warning descr="Shadows built-in name 'list'">list</weak_warning>(object):
pass
def test_builtin_method_name():
class C:
def list(self):
pass
# PY-8646
def test_builtin_qualified_name():
test1.range = float()
class C:
def foo(self):
self.list = []
# PY-10164
def test_builtin_class_attribute():
class C:
id = 1
| apache-2.0 |
vicky2135/lucious | oscar/lib/python2.7/encodings/mac_croatian.py | 593 | 13889 | """ Python Character Mapping Codec mac_croatian generated from 'MAPPINGS/VENDORS/APPLE/CROATIAN.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless codec converting between unicode and mac-croatian bytes
    via the module-level charmap tables (generated file -- do not edit)."""
    def encode(self,input,errors='strict'):
        """Encode unicode *input* to mac-croatian bytes using encoding_table."""
        return codecs.charmap_encode(input,errors,encoding_table)
    def decode(self,input,errors='strict'):
        """Decode mac-croatian *input* bytes to unicode using decoding_table."""
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        # Charmap encoding is stateless, so `final` needs no special handling;
        # charmap_encode returns (output, length) -- only the output is used.
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        # Charmap decoding is stateless, so `final` needs no special handling;
        # charmap_decode returns (output, length) -- only the output is used.
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Inherits encode() from Codec; no stream-specific state is required.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Inherits decode() from Codec; no stream-specific state is required.
    pass
### encodings module API
def getregentry():
    """Return the codecs.CodecInfo entry that registers this codec under
    the name 'mac-croatian' with the codecs machinery."""
    return codecs.CodecInfo(
        name='mac-croatian',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
u'\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE
u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u2020' # 0xA0 -> DAGGER
u'\xb0' # 0xA1 -> DEGREE SIGN
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa7' # 0xA4 -> SECTION SIGN
u'\u2022' # 0xA5 -> BULLET
u'\xb6' # 0xA6 -> PILCROW SIGN
u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\u0160' # 0xA9 -> LATIN CAPITAL LETTER S WITH CARON
u'\u2122' # 0xAA -> TRADE MARK SIGN
u'\xb4' # 0xAB -> ACUTE ACCENT
u'\xa8' # 0xAC -> DIAERESIS
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\u017d' # 0xAE -> LATIN CAPITAL LETTER Z WITH CARON
u'\xd8' # 0xAF -> LATIN CAPITAL LETTER O WITH STROKE
u'\u221e' # 0xB0 -> INFINITY
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\u2206' # 0xB4 -> INCREMENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
u'\u2211' # 0xB7 -> N-ARY SUMMATION
u'\u220f' # 0xB8 -> N-ARY PRODUCT
u'\u0161' # 0xB9 -> LATIN SMALL LETTER S WITH CARON
u'\u222b' # 0xBA -> INTEGRAL
u'\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR
u'\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR
u'\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA
u'\u017e' # 0xBE -> LATIN SMALL LETTER Z WITH CARON
u'\xf8' # 0xBF -> LATIN SMALL LETTER O WITH STROKE
u'\xbf' # 0xC0 -> INVERTED QUESTION MARK
u'\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK
u'\xac' # 0xC2 -> NOT SIGN
u'\u221a' # 0xC3 -> SQUARE ROOT
u'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
u'\u2248' # 0xC5 -> ALMOST EQUAL TO
u'\u0106' # 0xC6 -> LATIN CAPITAL LETTER C WITH ACUTE
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE
u'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
u'\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE
u'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
u'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE
u'\u2014' # 0xD1 -> EM DASH
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u25ca' # 0xD7 -> LOZENGE
u'\uf8ff' # 0xD8 -> Apple logo
u'\xa9' # 0xD9 -> COPYRIGHT SIGN
u'\u2044' # 0xDA -> FRACTION SLASH
u'\u20ac' # 0xDB -> EURO SIGN
u'\u2039' # 0xDC -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\u203a' # 0xDD -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\xc6' # 0xDE -> LATIN CAPITAL LETTER AE
u'\xbb' # 0xDF -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2013' # 0xE0 -> EN DASH
u'\xb7' # 0xE1 -> MIDDLE DOT
u'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
u'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2030' # 0xE4 -> PER MILLE SIGN
u'\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\u0107' # 0xE6 -> LATIN SMALL LETTER C WITH ACUTE
u'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
u'\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE
u'\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\u0131' # 0xF5 -> LATIN SMALL LETTER DOTLESS I
u'\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u02dc' # 0xF7 -> SMALL TILDE
u'\xaf' # 0xF8 -> MACRON
u'\u03c0' # 0xF9 -> GREEK SMALL LETTER PI
u'\xcb' # 0xFA -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\u02da' # 0xFB -> RING ABOVE
u'\xb8' # 0xFC -> CEDILLA
u'\xca' # 0xFD -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xe6' # 0xFE -> LATIN SMALL LETTER AE
u'\u02c7' # 0xFF -> CARON
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| bsd-3-clause |
maciekcc/tensorflow | tensorflow/contrib/remote_fused_graph/pylib/__init__.py | 96 | 1290 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Remote fused graph ops python library.
## This package provides classes for remote fused graph ops.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import, line-too-long
from tensorflow.contrib.remote_fused_graph.pylib.python.ops.remote_fused_graph_ops import *
# pylint: enable=unused-import,wildcard-import,line-too-long
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ['remote_fused_graph_execute']
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 |
d0ugal/django-rest-framework | rest_framework/urlpatterns.py | 27 | 2421 | from __future__ import unicode_literals
from django.conf.urls import url, include
from django.core.urlresolvers import RegexURLResolver
from rest_framework.settings import api_settings
def apply_suffix_patterns(urlpatterns, suffix_pattern, suffix_required):
    """
    Walk `urlpatterns` and return a new list in which every regular URL
    pattern gains a variant ending in `suffix_pattern`.  Included URL
    resolvers are descended into recursively.  When `suffix_required` is
    false, each original (un-suffixed) pattern is retained as well.
    """
    suffixed = []
    for pattern in urlpatterns:
        if isinstance(pattern, RegexURLResolver):
            # An include(): rebuild it around the recursively-suffixed
            # child patterns, preserving namespace/app_name/kwargs.
            child_patterns = apply_suffix_patterns(pattern.url_patterns,
                                                   suffix_pattern,
                                                   suffix_required)
            suffixed.append(url(pattern.regex.pattern,
                                include(child_patterns,
                                        pattern.namespace,
                                        pattern.app_name),
                                pattern.default_kwargs))
        else:
            # A plain pattern: strip any trailing '$' anchor so the suffix
            # group can match at the end of the URL.
            new_regex = pattern.regex.pattern.rstrip('$') + suffix_pattern
            view = pattern._callback or pattern._callback_str
            if not suffix_required:
                suffixed.append(pattern)
            suffixed.append(url(new_regex, view, pattern.default_args,
                                pattern.name))
    return suffixed
def format_suffix_patterns(urlpatterns, suffix_required=False, allowed=None):
    """
    Supplement existing urlpatterns with corresponding patterns that also
    include a '.format' suffix.  Retains urlpattern ordering.

    urlpatterns:
        A list of URL patterns.

    suffix_required:
        If `True`, only suffixed URLs will be generated, and non-suffixed
        URLs will not be used.  Defaults to `False`.

    allowed:
        An optional tuple/list of allowed suffixes.  eg ['json', 'api']
        Defaults to `None`, which allows any suffix.
    """
    suffix_kwarg = api_settings.FORMAT_SUFFIX_KWARG
    if not allowed:
        # No whitelist: accept any lowercase alphanumeric suffix.
        suffix_pattern = r'\.(?P<%s>[a-z0-9]+)$' % suffix_kwarg
    else:
        # Restrict the capture group to the whitelisted suffixes.
        if len(allowed) == 1:
            allowed_pattern = allowed[0]
        else:
            allowed_pattern = '(%s)' % '|'.join(allowed)
        suffix_pattern = r'\.(?P<%s>%s)$' % (suffix_kwarg, allowed_pattern)
    return apply_suffix_patterns(urlpatterns, suffix_pattern, suffix_required)
| bsd-2-clause |
big-pegasus/spark | examples/src/main/python/ml/gradient_boosted_tree_regressor_example.py | 122 | 2693 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Gradient Boosted Tree Regressor Example.
"""
from __future__ import print_function
# $example on$
from pyspark.ml import Pipeline
from pyspark.ml.regression import GBTRegressor
from pyspark.ml.feature import VectorIndexer
from pyspark.ml.evaluation import RegressionEvaluator
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
    # Entry point: build (or reuse) a SparkSession, the gateway to the
    # DataFrame and spark.ml APIs.
    spark = SparkSession\
        .builder\
        .appName("GradientBoostedTreeRegressorExample")\
        .getOrCreate()

    # $example on$
    # Load and parse the data file, converting it to a DataFrame.
    data = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")

    # Automatically identify categorical features, and index them.
    # Set maxCategories so features with > 4 distinct values are treated as continuous.
    featureIndexer =\
        VectorIndexer(inputCol="features", outputCol="indexedFeatures", maxCategories=4).fit(data)

    # Split the data into training and test sets (30% held out for testing).
    # NOTE: no seed is passed, so the split differs between runs.
    (trainingData, testData) = data.randomSplit([0.7, 0.3])

    # Train a GBT model.
    gbt = GBTRegressor(featuresCol="indexedFeatures", maxIter=10)

    # Chain indexer and GBT in a Pipeline
    pipeline = Pipeline(stages=[featureIndexer, gbt])

    # Train model.  This also runs the indexer.
    model = pipeline.fit(trainingData)

    # Make predictions.
    predictions = model.transform(testData)

    # Select example rows to display.
    predictions.select("prediction", "label", "features").show(5)

    # Select (prediction, true label) and compute test error
    evaluator = RegressionEvaluator(
        labelCol="label", predictionCol="prediction", metricName="rmse")
    rmse = evaluator.evaluate(predictions)
    print("Root Mean Squared Error (RMSE) on test data = %g" % rmse)

    # stages[1] is the fitted GBT model (stage 0 is the feature indexer).
    gbtModel = model.stages[1]
    print(gbtModel)  # summary only
    # $example off$

    spark.stop()
| apache-2.0 |
szymex/xbmc-finnish-tv | plugin.video.yleareena/win32/Crypto/SelfTest/Protocol/test_chaffing.py | 120 | 2972 | #
# Test script for Crypto.Protocol.Chaffing
#
# Part of the Python Cryptography Toolkit
#
# Written by Andrew Kuchling and others
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
__revision__ = "$Id$"
import unittest
from Crypto.Protocol import Chaffing
text = """\
When in the Course of human events, it becomes necessary for one people to
dissolve the political bands which have connected them with another, and to
assume among the powers of the earth, the separate and equal station to which
the Laws of Nature and of Nature's God entitle them, a decent respect to the
opinions of mankind requires that they should declare the causes which impel
them to the separation.
We hold these truths to be self-evident, that all men are created equal, that
they are endowed by their Creator with certain unalienable Rights, that among
these are Life, Liberty, and the pursuit of Happiness. That to secure these
rights, Governments are instituted among Men, deriving their just powers from
the consent of the governed. That whenever any Form of Government becomes
destructive of these ends, it is the Right of the People to alter or to
abolish it, and to institute new Government, laying its foundation on such
principles and organizing its powers in such form, as to them shall seem most
likely to effect their Safety and Happiness.
"""
class ChaffingTest (unittest.TestCase):
    def runTest(self):
        """Simple tests of chaffing and winnowing"""
        # Constructors: defaults and explicit arguments must succeed,
        # while negative parameters must be rejected.
        Chaffing.Chaff()
        Chaffing.Chaff(0.5, 1)
        for bad_kwargs in ({'factor': -1}, {'blocksper': -1}):
            self.assertRaises(ValueError, Chaffing.Chaff, **bad_kwargs)

        blocks = [(1, 'data1', 'data1'), (2, 'data2', 'data2')]

        # factor=1.0 adds one chaff block per real block: 2 real -> 4 total.
        chaffer = Chaffing.Chaff(1.0, 1)
        chaffer.chaff(blocks)
        self.assertEqual(len(chaffer.chaff(blocks)), 4)

        # factor=0.0 adds nothing: only the 2 real blocks come back.
        self.assertEqual(len(Chaffing.Chaff(0.0, 1).chaff(blocks)), 2)
def get_tests(config={}):
    # Returns the test cases for this self-test module.
    # NOTE(review): the mutable default is harmless here because `config`
    # is never read or mutated; it exists only to match the get_tests()
    # signature shared by the other Crypto.SelfTest modules.
    return [ChaffingTest()]
if __name__ == "__main__":
unittest.main()
| gpl-3.0 |
faddat/linux | tools/perf/scripts/python/failed-syscalls-by-pid.py | 1996 | 2233 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
	# Called once by perf when the script starts (Python 2 print statement).
	print "Press control+C to stop and show the summary"
def trace_end():
	# Called once by perf when tracing stops; emit the accumulated totals.
	print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	common_callchain, id, ret):
	# Tracepoint handler invoked by perf for every raw sys_exit event.
	# Skip events that don't match the requested comm/pid filter (if any).
	if (for_comm and common_comm != for_comm) or \
	   (for_pid and common_pid != for_pid ):
		return

	# A negative return value marks a failed syscall; count it under
	# comm -> pid -> syscall id -> errno.  autodict() builds intermediate
	# levels lazily, so the first increment on a fresh leaf raises
	# TypeError and falls through to initialise the counter to 1.
	if ret < 0:
		try:
			syscalls[common_comm][common_pid][id][ret] += 1
		except TypeError:
			syscalls[common_comm][common_pid][id][ret] = 1
def syscalls__sys_exit(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	id, ret):
	# Non-raw tracepoint entry point; delegates to the raw handler.
	# NOTE(review): raw_syscalls__sys_exit also takes a common_callchain
	# parameter that locals() here does not supply -- this call would raise
	# TypeError if this entry point ever fires.  Confirm which handler perf
	# actually dispatches to.
	raw_syscalls__sys_exit(**locals())
def print_error_totals():
	# Render the nested comm -> pid -> syscall -> errno counters collected
	# in `syscalls` (Python 2 print statements; trailing commas suppress
	# the implicit newline).
	if for_comm is not None:
		print "\nsyscall errors for %s:\n\n" % (for_comm),
	else:
		print "\nsyscall errors:\n\n",

	print "%-30s %10s\n" % ("comm [pid]", "count"),
	print "%-30s %10s\n" % ("------------------------------", \
	    "----------"),

	comm_keys = syscalls.keys()
	for comm in comm_keys:
		pid_keys = syscalls[comm].keys()
		for pid in pid_keys:
			print "\n%s [%d]\n" % (comm, pid),
			id_keys = syscalls[comm][pid].keys()
			for id in id_keys:
				print "  syscall: %-16s\n" % syscall_name(id),
				ret_keys = syscalls[comm][pid][id].keys()
				# Sort errno buckets by count, descending
				# (Python 2 tuple-unpacking lambda).
				for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k),  reverse = True):
					print "    err = %-20s  %10d\n" % (strerror(ret), val),
| gpl-2.0 |
IronLanguages/ironpython3 | Src/StdLib/Lib/ctypes/test/test_objects.py | 80 | 1682 | r'''
This tests the '_objects' attribute of ctypes instances. '_objects'
holds references to objects that must be kept alive as long as the
ctypes instance, to make sure that the memory buffer is valid.
WARNING: The '_objects' attribute is exposed ONLY for debugging ctypes itself,
it MUST NEVER BE MODIFIED!
'_objects' is initialized to a dictionary on first use, before that it
is None.
Here is an array of string pointers:
>>> from ctypes import *
>>> array = (c_char_p * 5)()
>>> print(array._objects)
None
>>>
The memory block stores pointers to strings, and the strings itself
assigned from Python must be kept.
>>> array[4] = b'foo bar'
>>> array._objects
{'4': b'foo bar'}
>>> array[4]
b'foo bar'
>>>
It gets more complicated when the ctypes instance itself is contained
in a 'base' object.
>>> class X(Structure):
... _fields_ = [("x", c_int), ("y", c_int), ("array", c_char_p * 5)]
...
>>> x = X()
>>> print(x._objects)
None
>>>
The 'array' attribute of the 'x' object shares part of the memory buffer
of 'x' ('_b_base_' is either None, or the root object owning the memory block):
>>> print(x.array._b_base_) # doctest: +ELLIPSIS
<ctypes.test.test_objects.X object at 0x...>
>>>
>>> x.array[0] = b'spam spam spam'
>>> x._objects
{'0:2': b'spam spam spam'}
>>> x.array._b_base_._objects
{'0:2': b'spam spam spam'}
>>>
'''
import unittest, doctest, sys
import ctypes.test.test_objects
class TestCase(unittest.TestCase):
    """Runs the doctests embedded in this module's docstring."""

    def test(self):
        # doctest.testmod returns a TestResults(failed, attempted) tuple;
        # a non-zero failure count means at least one doctest broke.
        results = doctest.testmod(ctypes.test.test_objects)
        self.assertFalse(results.failed, 'doctests failed, see output above')
if __name__ == '__main__':
doctest.testmod(ctypes.test.test_objects)
| apache-2.0 |
yamt/tempest | tempest/api/compute/servers/test_servers.py | 5 | 5503 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib.common.utils import data_utils
from tempest.api.compute import base
from tempest import test
class ServersTestJSON(base.BaseV2ComputeTest):
    """Functional tests for basic Nova server operations: creation with
    various options (admin password, duplicate name, keypair, IPv6-only)
    and updates (rename, access addresses)."""

    @classmethod
    def setup_clients(cls):
        """Expose the servers client under the conventional `cls.client`."""
        super(ServersTestJSON, cls).setup_clients()
        cls.client = cls.servers_client

    def tearDown(self):
        # Delete any servers created by the test before normal teardown.
        self.clear_servers()
        super(ServersTestJSON, self).tearDown()

    @test.idempotent_id('b92d5ec7-b1dd-44a2-87e4-45e888c46ef0')
    def test_create_server_with_admin_password(self):
        # If an admin password is provided on server creation, the server's
        # root password should be set to that password.
        server = self.create_test_server(adminPass='testpassword')

        # Verify the password is set correctly in the response
        self.assertEqual('testpassword', server['adminPass'])

    @test.idempotent_id('8fea6be7-065e-47cf-89b8-496e6f96c699')
    def test_create_with_existing_server_name(self):
        # Creating a server with a name that already exists is allowed
        # TODO(sdague): clear out try, we do cleanup one layer up
        server_name = data_utils.rand_name('server')
        server = self.create_test_server(name=server_name,
                                         wait_until='ACTIVE')
        id1 = server['id']
        server = self.create_test_server(name=server_name,
                                         wait_until='ACTIVE')
        id2 = server['id']
        # Distinct servers must exist even though they share a name.
        self.assertNotEqual(id1, id2, "Did not create a new server")
        server = self.client.get_server(id1)
        name1 = server['name']
        server = self.client.get_server(id2)
        name2 = server['name']
        self.assertEqual(name1, name2)

    @test.idempotent_id('f9e15296-d7f9-4e62-b53f-a04e89160833')
    def test_create_specify_keypair(self):
        # Specify a keypair while creating a server
        key_name = data_utils.rand_name('key')
        self.keypairs_client.create_keypair(key_name)
        self.addCleanup(self.keypairs_client.delete_keypair, key_name)
        self.keypairs_client.list_keypairs()
        server = self.create_test_server(key_name=key_name)
        self.client.wait_for_server_status(server['id'], 'ACTIVE')
        server = self.client.get_server(server['id'])
        self.assertEqual(key_name, server['key_name'])

    def _update_server_name(self, server_id, status):
        """Rename the server, wait for `status`, assert the new name stuck,
        and return the refreshed server dict."""
        new_name = data_utils.rand_name('server')

        # Update the server with a new name
        self.client.update_server(server_id,
                                  name=new_name)
        self.client.wait_for_server_status(server_id, status)

        # Verify the name of the server has changed
        server = self.client.get_server(server_id)
        self.assertEqual(new_name, server['name'])
        return server

    @test.idempotent_id('5e6ccff8-349d-4852-a8b3-055df7988dd2')
    def test_update_server_name(self):
        # The server name should be changed to the provided value
        server = self.create_test_server(wait_until='ACTIVE')

        self._update_server_name(server['id'], 'ACTIVE')

    @test.idempotent_id('6ac19cb1-27a3-40ec-b350-810bdc04c08e')
    def test_update_server_name_in_stop_state(self):
        # The server name should be changed to the provided value even
        # while the server is stopped.
        server = self.create_test_server(wait_until='ACTIVE')
        self.client.stop(server['id'])
        self.client.wait_for_server_status(server['id'], 'SHUTOFF')
        updated_server = self._update_server_name(server['id'], 'SHUTOFF')
        # A stopped server should report no 'progress' field.
        self.assertNotIn('progress', updated_server)

    @test.idempotent_id('89b90870-bc13-4b73-96af-f9d4f2b70077')
    def test_update_access_server_address(self):
        # The server's access addresses should reflect the provided values
        server = self.create_test_server(wait_until='ACTIVE')

        # Update the IPv4 and IPv6 access addresses
        self.client.update_server(server['id'],
                                  accessIPv4='1.1.1.1',
                                  accessIPv6='::babe:202:202')
        self.client.wait_for_server_status(server['id'], 'ACTIVE')

        # Verify the access addresses have been updated
        server = self.client.get_server(server['id'])
        self.assertEqual('1.1.1.1', server['accessIPv4'])
        self.assertEqual('::babe:202:202', server['accessIPv6'])

    @test.idempotent_id('38fb1d02-c3c5-41de-91d3-9bc2025a75eb')
    def test_create_server_with_ipv6_addr_only(self):
        # Create a server without an IPv4 address(only IPv6 address).
        server = self.create_test_server(accessIPv6='2001:2001::3')
        self.client.wait_for_server_status(server['id'], 'ACTIVE')
        server = self.client.get_server(server['id'])
        self.assertEqual('2001:2001::3', server['accessIPv6'])
| apache-2.0 |
ROMFactory/android_external_chromium_org | tools/site_compare/scrapers/chrome/chrome01970.py | 189 | 1181 | # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Does scraping for versions of Chrome up to 0.1.97.0."""
from drivers import windowing
import chromebase
# Default version
version = "0.1.97.0"
def GetChromeRenderPane(wnd):
  """Returns the Chrome browser-window child pane of top-level window `wnd`."""
  return windowing.FindChildWindow(wnd, "Chrome_BrowserWindow")
def Scrape(urls, outdir, size, pos, timeout=20, **kwargs):
  """Invoke a browser, send it to a series of URLs, and save its output.

  Args:
    urls: list of URLs to scrape
    outdir: directory to place output
    size: size of browser window to use
    pos: position of browser window
    timeout: amount of time to wait for page to load
    kwargs: miscellaneous keyword args

  Returns:
    None if succeeded, else an error code
  """
  # Point chromebase at this version's render-pane lookup before delegating.
  chromebase.GetChromeRenderPane = GetChromeRenderPane

  # NOTE(review): `kwargs` is forwarded as a single positional dict rather
  # than expanded with **kwargs -- confirm chromebase.Scrape expects a dict
  # parameter here and not keyword arguments.
  return chromebase.Scrape(urls, outdir, size, pos, timeout, kwargs)
def Time(urls, size, timeout, **kwargs):
  """Forwards the Time command to chromebase."""
  chromebase.GetChromeRenderPane = GetChromeRenderPane

  # NOTE(review): as in Scrape() above, `kwargs` is passed positionally as a
  # dict rather than expanded -- verify against chromebase.Time's signature.
  return chromebase.Time(urls, size, timeout, kwargs)
| bsd-3-clause |
mitsuhiko/django | django/contrib/gis/utils/geoip.py | 316 | 14811 | """
This module houses the GeoIP object, a ctypes wrapper for the MaxMind GeoIP(R)
C API (http://www.maxmind.com/app/c). This is an alternative to the GPL
licensed Python GeoIP interface provided by MaxMind.
GeoIP(R) is a registered trademark of MaxMind, LLC of Boston, Massachusetts.
For IP-based geolocation, this module requires the GeoLite Country and City
datasets, in binary format (CSV will not work!). The datasets may be
downloaded from MaxMind at http://www.maxmind.com/download/geoip/database/.
Grab GeoIP.dat.gz and GeoLiteCity.dat.gz, and unzip them in the directory
corresponding to settings.GEOIP_PATH. See the GeoIP docstring and examples
below for more details.
TODO: Verify compatibility with Windows.
Example:
>>> from django.contrib.gis.utils import GeoIP
>>> g = GeoIP()
>>> g.country('google.com')
{'country_code': 'US', 'country_name': 'United States'}
>>> g.city('72.14.207.99')
{'area_code': 650,
'city': 'Mountain View',
'country_code': 'US',
'country_code3': 'USA',
'country_name': 'United States',
'dma_code': 807,
'latitude': 37.419200897216797,
'longitude': -122.05740356445312,
'postal_code': '94043',
'region': 'CA'}
>>> g.lat_lon('salon.com')
(37.789798736572266, -122.39420318603516)
>>> g.lon_lat('uh.edu')
(-95.415199279785156, 29.77549934387207)
>>> g.geos('24.124.1.80').wkt
'POINT (-95.2087020874023438 39.0392990112304688)'
"""
import os, re
from ctypes import c_char_p, c_float, c_int, Structure, CDLL, POINTER
from ctypes.util import find_library
from django.conf import settings
if not settings.configured: settings.configure()
# Creating the settings dictionary with any settings, if needed.
GEOIP_SETTINGS = dict((key, getattr(settings, key))
for key in ('GEOIP_PATH', 'GEOIP_LIBRARY_PATH', 'GEOIP_COUNTRY', 'GEOIP_CITY')
if hasattr(settings, key))
lib_path = GEOIP_SETTINGS.get('GEOIP_LIBRARY_PATH', None)
# GeoIP Exception class.
class GeoIPException(Exception):
    """Raised for configuration, library-loading, or lookup errors in this module."""
    pass
# The shared library for the GeoIP C API. May be downloaded
# from http://www.maxmind.com/download/geoip/api/c/
if lib_path:
    # An explicit GEOIP_LIBRARY_PATH was configured; use it as-is.
    lib_name = None
else:
    # TODO: Is this really the library name for Windows?
    lib_name = 'GeoIP'

# Getting the path to the GeoIP library.
if lib_name: lib_path = find_library(lib_name)
if lib_path is None: raise GeoIPException('Could not find the GeoIP library (tried "%s"). '
                                          'Try setting GEOIP_LIBRARY_PATH in your settings.' % lib_name)
lgeoip = CDLL(lib_path)

# Regular expressions for recognizing IP addresses and the GeoIP
# free database editions.
ipregex = re.compile(r'^(?P<w>\d\d?\d?)\.(?P<x>\d\d?\d?)\.(?P<y>\d\d?\d?)\.(?P<z>\d\d?\d?)$')
free_regex = re.compile(r'^GEO-\d{3}FREE')
lite_regex = re.compile(r'^GEO-\d{3}LITE')
#### GeoIP C Structure definitions ####
class GeoIPRecord(Structure):
    # Field order and ctypes types must mirror the C ``GeoIPRecord`` struct
    # exactly; do not reorder or insert fields without matching the C API.
    _fields_ = [('country_code', c_char_p),
                ('country_code3', c_char_p),
                ('country_name', c_char_p),
                ('region', c_char_p),
                ('city', c_char_p),
                ('postal_code', c_char_p),
                ('latitude', c_float),
                ('longitude', c_float),
                # TODO: In 1.4.6 this changed from `int dma_code;` to
                # `union {int metro_code; int dma_code;};`. Change
                # to a `ctypes.Union` in to accomodate in future when
                # pre-1.4.6 versions are no longer distributed.
                ('dma_code', c_int),
                ('area_code', c_int),
                # TODO: The following structure fields were added in 1.4.3 --
                # uncomment these fields when sure previous versions are no
                # longer distributed by package maintainers.
                #('charset', c_int),
                #('continent_code', c_char_p),
                ]
class GeoIPTag(Structure): pass
#### ctypes function prototypes ####
# Pointer types returned by the record-lookup and database-open routines.
RECTYPE = POINTER(GeoIPRecord)
DBTYPE = POINTER(GeoIPTag)

# For retrieving records by name or address.
def record_output(func):
    # Declare that ``func`` returns a pointer to a GeoIPRecord structure.
    func.restype = RECTYPE
    return func
rec_by_addr = record_output(lgeoip.GeoIP_record_by_addr)
rec_by_name = record_output(lgeoip.GeoIP_record_by_name)

# For opening & closing GeoIP database files.
geoip_open = lgeoip.GeoIP_open
geoip_open.restype = DBTYPE
geoip_close = lgeoip.GeoIP_delete
geoip_close.argtypes = [DBTYPE]
geoip_close.restype = None
# String output routines.
def string_output(func):
    # Declare that ``func`` returns a C string (mapped to a Python string).
    func.restype = c_char_p
    return func

geoip_dbinfo = string_output(lgeoip.GeoIP_database_info)
cntry_code_by_addr = string_output(lgeoip.GeoIP_country_code_by_addr)
cntry_code_by_name = string_output(lgeoip.GeoIP_country_code_by_name)
cntry_name_by_addr = string_output(lgeoip.GeoIP_country_name_by_addr)
cntry_name_by_name = string_output(lgeoip.GeoIP_country_name_by_name)
#### GeoIP class ####
class GeoIP(object):
    """
    Ctypes-based wrapper around the MaxMind GeoIP C library, exposing country
    and city lookups keyed by IP address or fully-qualified domain name.
    """
    # The flags for GeoIP memory caching.
    # GEOIP_STANDARD - read database from filesystem, uses least memory.
    #
    # GEOIP_MEMORY_CACHE - load database into memory, faster performance
    #        but uses more memory
    #
    # GEOIP_CHECK_CACHE - check for updated database.  If database has been updated,
    #        reload filehandle and/or memory cache.
    #
    # GEOIP_INDEX_CACHE - just cache
    #        the most frequently accessed index portion of the database, resulting
    #        in faster lookups than GEOIP_STANDARD, but less memory usage than
    #        GEOIP_MEMORY_CACHE - useful for larger databases such as
    #        GeoIP Organization and GeoIP City.  Note, for GeoIP Country, Region
    #        and Netspeed databases, GEOIP_INDEX_CACHE is equivalent to GEOIP_MEMORY_CACHE
    #
    GEOIP_STANDARD = 0
    GEOIP_MEMORY_CACHE = 1
    GEOIP_CHECK_CACHE = 2
    GEOIP_INDEX_CACHE = 4
    # The values are unused; the dict doubles as the set of valid cache flags.
    cache_options = dict((opt, None) for opt in (0, 1, 2, 4))

    # Paths of the data files actually opened (empty until opened).
    _city_file = ''
    _country_file = ''

    # Initially, pointers to GeoIP file references are NULL.
    _city = None
    _country = None

    def __init__(self, path=None, cache=0, country=None, city=None):
        """
        Initializes the GeoIP object, no parameters are required to use default
        settings.  Keyword arguments may be passed in to customize the locations
        of the GeoIP data sets.

        * path: Base directory to where GeoIP data is located or the full path
            to where the city or country data files (*.dat) are located.
            Assumes that both the city and country data sets are located in
            this directory; overrides the GEOIP_PATH settings attribute.

        * cache: The cache settings when opening up the GeoIP datasets,
            and may be an integer in (0, 1, 2, 4) corresponding to
            the GEOIP_STANDARD, GEOIP_MEMORY_CACHE, GEOIP_CHECK_CACHE,
            and GEOIP_INDEX_CACHE `GeoIPOptions` C API settings,
            respectively.  Defaults to 0, meaning that the data is read
            from the disk.

        * country: The name of the GeoIP country data file.  Defaults to
            'GeoIP.dat'; overrides the GEOIP_COUNTRY settings attribute.

        * city: The name of the GeoIP city data file.  Defaults to
            'GeoLiteCity.dat'; overrides the GEOIP_CITY settings attribute.
        """
        # Checking the given cache option.
        if cache in self.cache_options:
            self._cache = self.cache_options[cache]
        else:
            raise GeoIPException('Invalid caching option: %s' % cache)

        # Getting the GeoIP data path.
        if not path:
            path = GEOIP_SETTINGS.get('GEOIP_PATH', None)
            if not path: raise GeoIPException('GeoIP path must be provided via parameter or the GEOIP_PATH setting.')
        if not isinstance(path, basestring):
            raise TypeError('Invalid path type: %s' % type(path).__name__)

        if os.path.isdir(path):
            # Constructing the GeoIP database filenames using the settings
            # dictionary.  If the database files for the GeoLite country
            # and/or city datasets exist, then try and open them.
            country_db = os.path.join(path, country or GEOIP_SETTINGS.get('GEOIP_COUNTRY', 'GeoIP.dat'))
            if os.path.isfile(country_db):
                self._country = geoip_open(country_db, cache)
                self._country_file = country_db

            city_db = os.path.join(path, city or GEOIP_SETTINGS.get('GEOIP_CITY', 'GeoLiteCity.dat'))
            if os.path.isfile(city_db):
                self._city = geoip_open(city_db, cache)
                self._city_file = city_db
        elif os.path.isfile(path):
            # Otherwise, some detective work will be needed to figure
            # out whether the given database path is for the GeoIP country
            # or city databases.
            ptr = geoip_open(path, cache)
            info = geoip_dbinfo(ptr)
            if lite_regex.match(info):
                # GeoLite City database detected.
                self._city = ptr
                self._city_file = path
            elif free_regex.match(info):
                # GeoIP Country database detected.
                self._country = ptr
                self._country_file = path
            else:
                raise GeoIPException('Unable to recognize database edition: %s' % info)
        else:
            raise GeoIPException('GeoIP path must be a valid file or directory.')

    def __del__(self):
        # Cleaning any GeoIP file handles lying around.
        if self._country: geoip_close(self._country)
        if self._city: geoip_close(self._city)

    def _check_query(self, query, country=False, city=False, city_or_country=False):
        "Helper routine for checking the query and database availability."
        # Making sure a string was passed in for the query.
        if not isinstance(query, basestring):
            raise TypeError('GeoIP query must be a string, not type %s' % type(query).__name__)

        # Extra checks for the existence of country and city databases.
        if city_or_country and not (self._country or self._city):
            raise GeoIPException('Invalid GeoIP country and city data files.')
        elif country and not self._country:
            raise GeoIPException('Invalid GeoIP country data file: %s' % self._country_file)
        elif city and not self._city:
            raise GeoIPException('Invalid GeoIP city data file: %s' % self._city_file)

    def city(self, query):
        """
        Returns a dictionary of city information for the given IP address or
        Fully Qualified Domain Name (FQDN).  Some information in the dictionary
        may be undefined (None).
        """
        self._check_query(query, city=True)
        if ipregex.match(query):
            # If an IP address was passed in
            ptr = rec_by_addr(self._city, c_char_p(query))
        else:
            # If a FQDN was passed in.
            ptr = rec_by_name(self._city, c_char_p(query))

        # Checking the pointer to the C structure, if valid pull out elements
        # into a dicionary and return.
        if bool(ptr):
            record = ptr.contents
            return dict((tup[0], getattr(record, tup[0])) for tup in record._fields_)
        else:
            return None

    def country_code(self, query):
        "Returns the country code for the given IP Address or FQDN."
        self._check_query(query, city_or_country=True)
        if self._country:
            if ipregex.match(query): return cntry_code_by_addr(self._country, query)
            else: return cntry_code_by_name(self._country, query)
        else:
            # No country database open; derive the code from a city lookup.
            return self.city(query)['country_code']

    def country_name(self, query):
        "Returns the country name for the given IP Address or FQDN."
        self._check_query(query, city_or_country=True)
        if self._country:
            if ipregex.match(query): return cntry_name_by_addr(self._country, query)
            else: return cntry_name_by_name(self._country, query)
        else:
            # No country database open; derive the name from a city lookup.
            return self.city(query)['country_name']

    def country(self, query):
        """
        Returns a dictonary with with the country code and name when given an
        IP address or a Fully Qualified Domain Name (FQDN).  For example, both
        '24.124.1.80' and 'djangoproject.com' are valid parameters.
        """
        # Returning the country code and name
        return {'country_code' : self.country_code(query),
                'country_name' : self.country_name(query),
                }

    #### Coordinate retrieval routines ####
    def coords(self, query, ordering=('longitude', 'latitude')):
        # Returns a tuple of coordinates (in the given ordering) from the
        # city record, or None when the lookup finds nothing.
        cdict = self.city(query)
        if cdict is None: return None
        else: return tuple(cdict[o] for o in ordering)

    def lon_lat(self, query):
        "Returns a tuple of the (longitude, latitude) for the given query."
        return self.coords(query)

    def lat_lon(self, query):
        "Returns a tuple of the (latitude, longitude) for the given query."
        return self.coords(query, ('latitude', 'longitude'))

    def geos(self, query):
        "Returns a GEOS Point object for the given query."
        ll = self.lon_lat(query)
        if ll:
            # Import deferred so GEOS is only required when this method is used.
            from django.contrib.gis.geos import Point
            return Point(ll, srid=4326)
        else:
            return None

    #### GeoIP Database Information Routines ####
    def country_info(self):
        "Returns information about the GeoIP country database."
        if self._country is None:
            ci = 'No GeoIP Country data in "%s"' % self._country_file
        else:
            ci = geoip_dbinfo(self._country)
        return ci
    # Re-bound as a read-only property (pre-decorator style).
    country_info = property(country_info)

    def city_info(self):
        "Retuns information about the GeoIP city database."
        if self._city is None:
            ci = 'No GeoIP City data in "%s"' % self._city_file
        else:
            ci = geoip_dbinfo(self._city)
        return ci
    city_info = property(city_info)

    def info(self):
        "Returns information about all GeoIP databases in use."
        return 'Country:\n\t%s\nCity:\n\t%s' % (self.country_info, self.city_info)
    info = property(info)

    #### Methods for compatibility w/the GeoIP-Python API. ####
    @classmethod
    def open(cls, full_path, cache):
        return GeoIP(full_path, cache)

    def _rec_by_arg(self, arg):
        # Prefer the richer city record when a city database is available.
        if self._city:
            return self.city(arg)
        else:
            return self.country(arg)
    # Aliases matching the names exposed by MaxMind's GeoIP-Python bindings.
    region_by_addr = city
    region_by_name = city
    record_by_addr = _rec_by_arg
    record_by_name = _rec_by_arg
    country_code_by_addr = country_code
    country_code_by_name = country_code
    country_name_by_addr = country_name
    country_name_by_name = country_name
| bsd-3-clause |
edmundgentle/schoolscript | SchoolScript/bin/Debug/pythonlib/Lib/reprlib.py | 19 | 5267 | """Redo the builtin repr() (representation) but with limits on most sizes."""
__all__ = ["Repr", "repr", "recursive_repr"]
import builtins
from itertools import islice
try:
from _thread import get_ident
except ImportError:
from _dummy_thread import get_ident
def recursive_repr(fillvalue='...'):
'Decorator to make a repr function return fillvalue for a recursive call'
def decorating_function(user_function):
repr_running = set()
def wrapper(self):
key = id(self), get_ident()
if key in repr_running:
return fillvalue
repr_running.add(key)
try:
result = user_function(self)
finally:
repr_running.discard(key)
return result
# Can't use functools.wraps() here because of bootstrap issues
wrapper.__module__ = getattr(user_function, '__module__')
wrapper.__doc__ = getattr(user_function, '__doc__')
wrapper.__name__ = getattr(user_function, '__name__')
wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
return wrapper
return decorating_function
class Repr:
    """Produce representations of objects with limits on container lengths,
    string lengths and nesting depth, controlled by the ``max*`` attributes."""

    def __init__(self):
        # Maximum nesting depth, then per-type element/character limits.
        self.maxlevel = 6
        self.maxtuple = 6
        self.maxlist = 6
        self.maxarray = 5
        self.maxdict = 4
        self.maxset = 6
        self.maxfrozenset = 6
        self.maxdeque = 6
        self.maxstring = 30
        self.maxlong = 40
        self.maxother = 30

    def repr(self, x):
        """Return the size-limited representation of ``x``."""
        return self.repr1(x, self.maxlevel)

    def repr1(self, x, level):
        # Dispatch to a ``repr_<typename>`` method when one exists (spaces in
        # the type name become underscores); otherwise fall back to
        # repr_instance.
        typename = type(x).__name__
        if ' ' in typename:
            parts = typename.split()
            typename = '_'.join(parts)
        if hasattr(self, 'repr_' + typename):
            return getattr(self, 'repr_' + typename)(x, level)
        else:
            return self.repr_instance(x, level)

    def _repr_iterable(self, x, level, left, right, maxiter, trail=''):
        # Shared renderer: shows at most ``maxiter`` elements, appending '...'
        # when truncated; at depth 0 non-empty containers collapse to '...'.
        n = len(x)
        if level <= 0 and n:
            s = '...'
        else:
            newlevel = level - 1
            repr1 = self.repr1
            pieces = [repr1(elem, newlevel) for elem in islice(x, maxiter)]
            if n > maxiter: pieces.append('...')
            s = ', '.join(pieces)
            if n == 1 and trail: right = trail + right
        return '%s%s%s' % (left, s, right)

    def repr_tuple(self, x, level):
        return self._repr_iterable(x, level, '(', ')', self.maxtuple, ',')

    def repr_list(self, x, level):
        return self._repr_iterable(x, level, '[', ']', self.maxlist)

    def repr_array(self, x, level):
        header = "array('%s', [" % x.typecode
        return self._repr_iterable(x, level, header, '])', self.maxarray)

    def repr_set(self, x, level):
        # Sort when possible so the (unordered) set gets deterministic output.
        x = _possibly_sorted(x)
        return self._repr_iterable(x, level, 'set([', '])', self.maxset)

    def repr_frozenset(self, x, level):
        x = _possibly_sorted(x)
        return self._repr_iterable(x, level, 'frozenset([', '])',
                                   self.maxfrozenset)

    def repr_deque(self, x, level):
        return self._repr_iterable(x, level, 'deque([', '])', self.maxdeque)

    def repr_dict(self, x, level):
        n = len(x)
        if n == 0: return '{}'
        if level <= 0: return '{...}'
        newlevel = level - 1
        repr1 = self.repr1
        pieces = []
        for key in islice(_possibly_sorted(x), self.maxdict):
            keyrepr = repr1(key, newlevel)
            valrepr = repr1(x[key], newlevel)
            pieces.append('%s: %s' % (keyrepr, valrepr))
        if n > self.maxdict: pieces.append('...')
        s = ', '.join(pieces)
        return '{%s}' % (s,)

    def repr_str(self, x, level):
        s = builtins.repr(x[:self.maxstring])
        if len(s) > self.maxstring:
            # Keep the head and tail of the string around a '...' ellipsis.
            i = max(0, (self.maxstring-3)//2)
            j = max(0, self.maxstring-3-i)
            s = builtins.repr(x[:i] + x[len(x)-j:])
            s = s[:i] + '...' + s[len(s)-j:]
        return s

    def repr_int(self, x, level):
        s = builtins.repr(x) # XXX Hope this isn't too slow...
        if len(s) > self.maxlong:
            i = max(0, (self.maxlong-3)//2)
            j = max(0, self.maxlong-3-i)
            s = s[:i] + '...' + s[len(s)-j:]
        return s

    def repr_instance(self, x, level):
        try:
            s = builtins.repr(x)
            # Bugs in x.__repr__() can cause arbitrary
            # exceptions -- then make up something
        except Exception:
            return '<%s instance at %x>' % (x.__class__.__name__, id(x))
        if len(s) > self.maxother:
            i = max(0, (self.maxother-3)//2)
            j = max(0, self.maxother-3-i)
            s = s[:i] + '...' + s[len(s)-j:]
        return s
def _possibly_sorted(x):
# Since not all sequences of items can be sorted and comparison
# functions may raise arbitrary exceptions, return an unsorted
# sequence in that case.
try:
return sorted(x)
except Exception:
return list(x)
# Module-level shared instance and a convenience function that mirrors the
# builtin repr() but applies this module's size limits.
aRepr = Repr()
repr = aRepr.repr
| gpl-2.0 |
keenondrums/sovrin-node | sovrin_client/agent/endpoint.py | 1 | 2195 | from typing import Callable
from plenum import config
from plenum.common.message_processor import MessageProcessor
from stp_core.common.log import getlogger
from stp_core.network.auth_mode import AuthMode
from stp_raet.util import getHaFromLocalEstate
from plenum.common.util import randomString
from stp_core.crypto.util import randomSeed
from stp_raet.rstack import SimpleRStack
from stp_core.types import HA
from stp_zmq.simple_zstack import SimpleZStack
logger = getlogger()
class EndpointCore(MessageProcessor):
    """Shared message-dispatch behaviour mixed into agent endpoint stacks."""

    def tracedMsgHandler(self, msg):
        # Trace every inbound message before handing it to the configured handler.
        logger.debug("Got {}".format(msg))
        handler = self.msgHandler
        handler(msg)
class REndpoint(SimpleRStack, EndpointCore):
    """RAET-based agent endpoint listening on all interfaces on ``port``."""

    def __init__(self, port: int, msgHandler: Callable,
                 name: str=None, basedirpath: str=None):
        # Prefer the port recorded in the local RAET estate, when one exists
        # and disagrees with the requested port.
        if name and basedirpath:
            ha = getHaFromLocalEstate(name, basedirpath)
            if ha and ha[1] != port:
                port = ha[1]

        stackParams = {
            "name": name or randomString(8),
            "ha": HA("0.0.0.0", port),
            "main": True,
            "auth_mode": AuthMode.ALLOW_ANY.value,
            # NOTE(review): "mutable" is passed as a (truthy) string rather
            # than a boolean -- confirm this is what SimpleRStack expects.
            "mutable": "mutable",
            "messageTimeout": config.RAETMessageTimeout
        }
        if basedirpath:
            stackParams["basedirpath"] = basedirpath

        SimpleRStack.__init__(self, stackParams, self.tracedMsgHandler)

        # Handler invoked by EndpointCore.tracedMsgHandler for each message.
        self.msgHandler = msgHandler
class ZEndpoint(SimpleZStack, EndpointCore):
    """ZeroMQ-based agent endpoint listening on all interfaces on ``port``."""

    def __init__(self, port: int, msgHandler: Callable,
                 name: str=None, basedirpath: str=None, seed=None,
                 onlyListener=False, msgRejectHandler=None):
        stackParams = {
            "name": name or randomString(8),
            "ha": HA("0.0.0.0", port),
            "auth_mode": AuthMode.ALLOW_ANY.value
        }
        if basedirpath:
            stackParams["basedirpath"] = basedirpath

        # Generate an ephemeral key seed when the caller does not supply one.
        seed = seed or randomSeed()
        SimpleZStack.__init__(
            self,
            stackParams,
            self.tracedMsgHandler,
            seed=seed,
            onlyListener=onlyListener,
            msgRejectHandler=msgRejectHandler)

        # Handler invoked by EndpointCore.tracedMsgHandler for each message.
        self.msgHandler = msgHandler
| apache-2.0 |
scotthartbti/android_external_chromium_org | content/test/gpu/page_sets/PRESUBMIT.py | 27 | 2657 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
import sys
# Avoid leaking changes to global sys.path.
_old_sys_path = sys.path
try:
  telemetry_dir = os.path.abspath(os.path.join(
      os.pardir, os.pardir, os.pardir, os.pardir, 'tools', 'telemetry'))
  sys.path.append(telemetry_dir)
  from telemetry.page import cloud_storage
finally:
  # Restore the original search path even if the telemetry import failed.
  sys.path = _old_sys_path
def _SyncFilesToCloud(input_api, output_api):
  """Searches for .sha1 files and uploads the referenced files to Cloud Storage.

  It validates all the hashes and skips upload if not necessary.

  Args:
    input_api: presubmit InputApi, used to enumerate affected files.
    output_api: presubmit OutputApi, used to build result messages.

  Returns:
    A list of presubmit result objects (notifications and errors).
  """
  # Look in both buckets, in case the user uploaded the file manually. But this
  # script focuses on WPR archives, so it only uploads to the internal bucket.
  hashes_in_cloud_storage = cloud_storage.List(cloud_storage.INTERNAL_BUCKET)
  hashes_in_cloud_storage += cloud_storage.List(cloud_storage.PUBLIC_BUCKET)

  results = []
  for affected_file in input_api.AffectedFiles(include_deletes=False):
    hash_path = affected_file.AbsoluteLocalPath()
    file_path, extension = os.path.splitext(hash_path)
    if extension != '.sha1':
      continue

    with open(hash_path, 'rb') as f:
      file_hash = f.read(1024).rstrip()

    if file_hash in hashes_in_cloud_storage:
      results.append(output_api.PresubmitNotifyResult(
          'File already in Cloud Storage, skipping upload: %s' % hash_path))
      continue

    # SHA-1 digests are exactly 40 hexadecimal characters; the previous
    # pattern ([A-Za-z0-9]{40}) also accepted non-hex letters.
    if not re.match('^([0-9a-fA-F]{40})$', file_hash):
      results.append(output_api.PresubmitError(
          'Hash file does not contain a valid SHA-1 hash: %s' % hash_path))
      continue
    if not os.path.exists(file_path):
      results.append(output_api.PresubmitError(
          'Hash file exists, but file not found: %s' % hash_path))
      continue
    if cloud_storage.GetHash(file_path) != file_hash:
      results.append(output_api.PresubmitError(
          'Hash file does not match file\'s actual hash: %s' % hash_path))
      continue

    try:
      cloud_storage.Insert(cloud_storage.INTERNAL_BUCKET, file_hash, file_path)
      results.append(output_api.PresubmitNotifyResult(
          'Uploaded file to Cloud Storage: %s' % hash_path))
    # "except E, e" is Python 2-only syntax; "as" works on Python 2.6+ and 3.x.
    except cloud_storage.CloudStorageError as e:
      results.append(output_api.PresubmitError(
          'Unable to upload to Cloud Storage: %s\n\n%s' % (hash_path, e)))

  return results
def CheckChangeOnUpload(input_api, output_api):
  """Presubmit hook: sync files referenced by changed .sha1 files on upload."""
  return _SyncFilesToCloud(input_api, output_api)


def CheckChangeOnCommit(input_api, output_api):
  """Presubmit hook: same sync performed at commit time."""
  return _SyncFilesToCloud(input_api, output_api)
| bsd-3-clause |
almeidapaulopt/erpnext | erpnext/accounts/report/non_billed_report.py | 40 | 1801 | # Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from erpnext import get_default_currency
from frappe.model.meta import get_field_precision
def get_ordered_to_be_billed_data(args):
	"""Return rows for submitted, non-closed documents whose items are not yet
	fully billed (billed amount, converted to company currency, is below the
	item's base amount).

	args keys used: 'doctype', 'party', 'date', 'order', 'order_by'.
	"""
	doctype, party = args.get('doctype'), args.get('party')
	child_tab = doctype + " Item"
	# Rounding precision for the billed-amount comparison, taken from the
	# child table's ``billed_amt`` field definition (default 2).
	precision = get_field_precision(frappe.get_meta(child_tab).get_field("billed_amt"),
		currency=get_default_currency()) or 2

	project_field = get_project_field(doctype, party)

	# NOTE(review): table/field names are interpolated with str.format below;
	# this assumes ``args`` comes from trusted report code, not user input.
	return frappe.db.sql("""
		Select
			`{parent_tab}`.name, `{parent_tab}`.{date_field}, `{parent_tab}`.{party}, `{parent_tab}`.{party}_name,
			{project_field}, `{child_tab}`.item_code, `{child_tab}`.base_amount,
			(`{child_tab}`.billed_amt * ifnull(`{parent_tab}`.conversion_rate, 1)),
			(`{child_tab}`.base_amount - (`{child_tab}`.billed_amt * ifnull(`{parent_tab}`.conversion_rate, 1))),
			`{child_tab}`.item_name, `{child_tab}`.description, `{parent_tab}`.company
		from
			`{parent_tab}`, `{child_tab}`
		where
			`{parent_tab}`.name = `{child_tab}`.parent and `{parent_tab}`.docstatus = 1 and `{parent_tab}`.status != 'Closed'
			and `{child_tab}`.amount > 0 and round(`{child_tab}`.billed_amt *
			ifnull(`{parent_tab}`.conversion_rate, 1), {precision}) < `{child_tab}`.base_amount
		order by
			`{parent_tab}`.{order} {order_by}
		""".format(parent_tab = 'tab' + doctype, child_tab = 'tab' + child_tab, precision= precision, party = party,
			date_field = args.get('date'), project_field = project_field, order= args.get('order'), order_by = args.get('order_by')))
def get_project_field(doctype, party):
if party == "supplier": doctype = doctype + ' Item'
return "`tab%s`.project"%(doctype) | gpl-3.0 |
ltucker/radarpost | radarpost/commands/useradmin.py | 1 | 4133 | from couchdb import Server, ResourceNotFound
from radarpost.cli import COMMANDLINE_PLUGIN, BasicCommand, get_basic_option_parser
from radarpost import plugins
from radarpost.user import User, ROLE_ADMIN
from getpass import getpass
class CreateUserCommand(BasicCommand):
    """CLI command that creates a user document in the couchdb users database."""

    command_name = 'create_user'
    description = 'create a user'

    @classmethod
    def setup_options(cls, parser):
        # Register the --admin / --locked flags for this subcommand.
        parser.set_usage(r"%prog" + "%s <username> [options]" % cls.command_name)
        parser.add_option('--admin', action="store_true", dest="is_admin",
                          default=False, help="create an administrative user")
        parser.add_option('--locked', action="store_true", dest="is_locked",
                          default=False,
                          help="create with locked password, do not prompt for password.")

    def __call__(self, username, is_locked=False, is_admin=False):
        """
        Create a user with the given username.

        is_locked - if True, create with a locked password
        is_admin - if True, grant administrative rights to the user

        Returns 1 on failure (connection error or existing user), None on success.
        """
        couchdb = Server(self.config['couchdb.address'])
        try:
            udb = couchdb[self.config['couchdb.users_database']]
        # NOTE(review): bare except hides the actual connection error --
        # consider narrowing and logging the exception.
        except:
            print "Failed to connect to couchdb at %s/%s" % (self.config['couchdb.address'],
                                                             self.config['couchdb.users_database'])
            return 1

        new_user = User(username=username)
        if new_user.id in udb:
            print 'User "%s" already exists' % username
            return 1

        if not is_locked:
            # Prompt until the two password entries match.
            done = False
            while(not done):
                password = getpass(prompt="Password for %s: " % username)
                password2 = getpass(prompt="Repeat password: ")
                if password == password2:
                    done = True
                else:
                    print "Passwords did not match, try again.\n"
            new_user.set_password(password)

        if is_admin:
            new_user.roles = [ROLE_ADMIN]

        new_user.store(udb)
        print 'Created user "%s"' % username
plugins.register(CreateUserCommand, COMMANDLINE_PLUGIN)
class ResetPasswordCommand(BasicCommand):
    """CLI command that resets (or locks) an existing user's password."""

    command_name = 'reset_password'
    description = "reset a user's password"

    @classmethod
    def setup_options(cls, parser):
        # Register the --locked flag for this subcommand.
        parser.set_usage(r"%prog" + "%s <username> [options]" % cls.command_name)
        parser.add_option('--locked', action="store_true", dest="is_locked",
                          default=False,
                          help="lock the user's password, do not prompt for password.")

    def __call__(self, username, is_locked=False):
        """
        Reset the password of the user with the given username.

        is_locked - if True, lock the user's password

        Returns 1 on failure (connection error or unknown user), None on success.
        """
        couchdb = Server(self.config['couchdb.address'])
        try:
            udb = couchdb[self.config['couchdb.users_database']]
        # NOTE(review): bare except hides the actual connection error --
        # consider narrowing and logging the exception.
        except:
            print "Failed to connect to couchdb at %s/%s" % (self.config['couchdb.address'],
                                                             self.config['couchdb.users_database'])
            return 1

        try:
            user = User.get_by_username(udb, username)
        except ResourceNotFound:
            print 'User "%s" does not exist' % username
            return 1

        if not is_locked:
            # Prompt until the two password entries match.
            done = False
            while(not done):
                password = getpass(prompt="New password for %s: " % username)
                password2 = getpass(prompt="Repeat password: ")
                if password == password2:
                    done = True
                else:
                    print "Passwords did not match, try again.\n"
            user.set_password(password)
        else:
            user.lock_password()

        user.store(udb)
        print 'Password changed for user "%s"' % username
plugins.register(ResetPasswordCommand, COMMANDLINE_PLUGIN) | gpl-2.0 |
cecep-edu/edx-platform | common/lib/xmodule/xmodule/tests/__init__.py | 61 | 27865 | """
unittests for xmodule
Run like this:
paver test_lib -l common/lib/xmodule
"""
import inspect
import json
import os
import pprint
import sys
import traceback
import unittest
from contextlib import contextmanager, nested
from functools import wraps
from lazy import lazy
from mock import Mock, patch
from operator import attrgetter
from path import Path as path
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds, Scope, Reference, ReferenceList, ReferenceValueDict
from xmodule.assetstore import AssetMetadata
from xmodule.error_module import ErrorDescriptor
from xmodule.mako_module import MakoDescriptorSystem
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.draft_and_published import DIRECT_ONLY_CATEGORIES, ModuleStoreDraftAndPublished
from xmodule.modulestore.inheritance import InheritanceMixin, own_metadata
from xmodule.modulestore.mongo.draft import DraftModuleStore
from xmodule.modulestore.xml import CourseLocationManager
from xmodule.x_module import ModuleSystem, XModuleDescriptor, XModuleMixin
# Directory containing this module.
MODULE_DIR = path(__file__).dirname()
# Location of common test DATA directory
# '../../../../edx-platform/common/test/data/'
DATA_DIR = MODULE_DIR.parent.parent.parent.parent / "test" / "data"
class TestModuleSystem(ModuleSystem):  # pylint: disable=abstract-method
    """
    ModuleSystem for testing
    """
    def __init__(self, **kwargs):  # pylint: disable=unused-argument
        # Use a single CourseLocationManager as both id_reader and
        # id_generator unless the caller supplied its own.
        id_manager = CourseLocationManager(kwargs['course_id'])
        kwargs.setdefault('id_reader', id_manager)
        kwargs.setdefault('id_generator', id_manager)
        kwargs.setdefault('services', {}).setdefault('field-data', DictFieldData({}))
        super(TestModuleSystem, self).__init__(**kwargs)

    def handler_url(self, block, handler, suffix='', query='', thirdparty=False):
        # Deterministic fake URL so tests can assert on handler routing.
        return '{usage_id}/{handler}{suffix}?{query}'.format(
            usage_id=unicode(block.scope_ids.usage_id),
            handler=handler,
            suffix=suffix,
            query=query,
        )

    def local_resource_url(self, block, uri):
        # Deterministic fake resource URL, mirroring handler_url above.
        return 'resource/{usage_id}/{uri}'.format(
            usage_id=unicode(block.scope_ids.usage_id),
            uri=uri,
        )

    # Disable XBlockAsides in most tests
    def get_asides(self, block):
        return []

    def __repr__(self):
        """
        Custom hacky repr.
        XBlock.Runtime.render() replaces the _view_name attribute while rendering, which
        causes rendered comparisons of blocks to fail as unequal. So make the _view_name
        attribute None during the base repr - and set it back to original value afterward.
        """
        orig_view_name = None
        if hasattr(self, '_view_name'):
            orig_view_name = self._view_name
        self._view_name = None
        rt_repr = super(TestModuleSystem, self).__repr__()
        self._view_name = orig_view_name
        return rt_repr
def get_test_system(course_id=SlashSeparatedCourseKey('org', 'course', 'run')):
    """
    Construct a test ModuleSystem instance.

    By default, the render_template() method simply returns the repr of the
    context it is passed.  You can override this behavior by monkey patching::

        system = get_test_system()
        system.render_template = my_render_func

    where `my_render_func` is a function of the form my_render_func(template, context).
    """
    # NOTE(review): the default course_id is built once at import time and
    # shared across calls -- assumed safe because course keys are not mutated.
    user = Mock(name='get_test_system.user', is_staff=False)

    descriptor_system = get_test_descriptor_system()

    def get_module(descriptor):
        """Mocks module_system get_module function"""
        # pylint: disable=protected-access
        # Unlike XBlock Runtimes or DescriptorSystems,
        # each XModule is provided with a new ModuleSystem.
        # Construct one for the new XModule.
        module_system = get_test_system()

        # Descriptors can all share a single DescriptorSystem.
        # So, bind to the same one as the current descriptor.
        module_system.descriptor_runtime = descriptor._runtime  # pylint: disable=protected-access

        descriptor.bind_for_student(module_system, user.id)
        return descriptor

    return TestModuleSystem(
        static_url='/static',
        track_function=Mock(name='get_test_system.track_function'),
        get_module=get_module,
        render_template=mock_render_template,
        replace_urls=str,
        user=user,
        get_real_user=lambda __: user,
        filestore=Mock(name='get_test_system.filestore'),
        debug=True,
        hostname="edx.org",
        xqueue={
            'interface': None,
            'callback_url': '/',
            'default_queuename': 'testqueue',
            'waittime': 10,
            'construct_callback': Mock(name='get_test_system.xqueue.construct_callback', side_effect="/"),
        },
        node_path=os.environ.get("NODE_PATH", "/usr/local/lib/node_modules"),
        anonymous_student_id='student',
        course_id=course_id,
        error_descriptor_class=ErrorDescriptor,
        get_user_role=Mock(name='get_test_system.get_user_role', is_staff=False),
        user_location=Mock(name='get_test_system.user_location'),
        descriptor_runtime=descriptor_system,
    )
def get_test_descriptor_system():
    """
    Construct a test DescriptorSystem instance.
    """
    field_data = DictFieldData({})
    descriptor_system = MakoDescriptorSystem(
        load_item=Mock(name='get_test_descriptor_system.load_item'),
        resources_fs=Mock(name='get_test_descriptor_system.resources_fs'),
        error_tracker=Mock(name='get_test_descriptor_system.error_tracker'),
        render_template=mock_render_template,
        mixins=(InheritanceMixin, XModuleMixin),
        field_data=field_data,
        services={'field-data': field_data},
    )
    # Disable XBlockAsides by default, mirroring TestModuleSystem.get_asides.
    descriptor_system.get_asides = lambda block: []
    return descriptor_system
def mock_render_template(*args, **kwargs):
    """
    Pretty-print the args and kwargs.

    Allows us to not depend on any actual template rendering mechanism,
    while still returning a unicode object
    """
    # NOTE: str.decode() exists only on Python 2 byte strings; this module is
    # Python 2 code (pformat returns a ``str``, decoded here to ``unicode``).
    return pprint.pformat((args, kwargs)).decode()
class ModelsTest(unittest.TestCase):
    """Sanity checks for dynamic XModule descriptor class loading."""

    def test_load_class(self):
        # load_class('video') should resolve to the VideoDescriptor class.
        vc = XModuleDescriptor.load_class('video')
        vc_str = "<class 'xmodule.video_module.video_module.VideoDescriptor'>"
        self.assertEqual(str(vc), vc_str)
class LogicTest(unittest.TestCase):
    """Base class for testing xmodule logic."""
    # Subclasses set ``descriptor_class`` to the descriptor type under test
    # and may override ``raw_field_data`` with initial field values.
    descriptor_class = None
    raw_field_data = {}

    def setUp(self):
        super(LogicTest, self).setUp()
        self.system = get_test_system()
        self.descriptor = Mock(name="descriptor", url_name='', category='test')

        self.xmodule_class = self.descriptor_class.module_class
        usage_key = self.system.course_id.make_usage_key(self.descriptor.category, 'test_loc')
        # ScopeIds has 4 fields: user_id, block_type, def_id, usage_id
        scope_ids = ScopeIds(1, self.descriptor.category, usage_key, usage_key)
        self.xmodule = self.xmodule_class(
            self.descriptor, self.system, DictFieldData(self.raw_field_data), scope_ids
        )

    def ajax_request(self, dispatch, data):
        """Call Xmodule.handle_ajax."""
        return json.loads(self.xmodule.handle_ajax(dispatch, data))
def map_references(value, field, actual_course_key):
    """
    Map the references in value to actual_course_key and return value
    """
    if not value:  # if falsey
        return value

    remap = lambda ref: ref.map_into_course(actual_course_key)

    if isinstance(field, Reference):
        return remap(value)
    if isinstance(field, ReferenceList):
        return [remap(entry) for entry in value]
    if isinstance(field, ReferenceValueDict):
        return {name: remap(entry) for name, entry in value.iteritems()}
    # Not a reference-typed field: pass the value through untouched.
    return value
class BulkAssertionError(AssertionError):
    """
    An AssertionError aggregating many captured sub-assertion failures.
    """
    def __init__(self, assertion_errors):
        # Keep the raw list available for callers that want to inspect it.
        self.errors = assertion_errors
        combined = "\n\n".join(self.errors)
        super(BulkAssertionError, self).__init__(
            "The following assertions were raised:\n{}".format(combined)
        )
class _BulkAssertionManager(object):
"""
This provides a facility for making a large number of assertions, and seeing all of
the failures at once, rather than only seeing single failures.
"""
def __init__(self, test_case):
self._assertion_errors = []
self._test_case = test_case
def log_error(self, formatted_exc):
"""
Record ``formatted_exc`` in the set of exceptions captured by this assertion manager.
"""
self._assertion_errors.append(formatted_exc)
def raise_assertion_errors(self):
"""
Raise a BulkAssertionError containing all of the captured AssertionErrors,
if there were any.
"""
if self._assertion_errors:
raise BulkAssertionError(self._assertion_errors)
class BulkAssertionTest(unittest.TestCase):
    """
    This context manager provides a _BulkAssertionManager to assert with,
    and then calls `raise_assertion_errors` at the end of the block to validate all
    of the assertions.
    """

    def setUp(self, *args, **kwargs):
        super(BulkAssertionTest, self).setUp(*args, **kwargs)
        # Use __ to not pollute the namespace of subclasses with what could be a fairly generic name.
        # (Name-mangled to _BulkAssertionTest__manager; holds the active
        # _BulkAssertionManager while inside a bulk_assertions block.)
        self.__manager = None

    @contextmanager
    def bulk_assertions(self):
        """
        A context manager that will capture all assertion failures made by self.assert*
        methods within its context, and raise a single combined assertion error at
        the end of the context.
        """
        if self.__manager:
            # Already inside a bulk_assertions block: nested use is a no-op so
            # that all failures roll up to the outermost context.
            yield
        else:
            try:
                self.__manager = _BulkAssertionManager(self)
                yield
            except Exception:
                # Non-assertion errors abort the block; propagate unchanged
                # without raising the collected assertion failures.
                raise
            else:
                # Clear the manager *before* raising, so the combined error
                # escapes rather than being captured again.
                manager = self.__manager
                self.__manager = None
                manager.raise_assertion_errors()

    @contextmanager
    def _capture_assertion_errors(self):
        """
        A context manager that captures any AssertionError raised within it,
        and, if within a ``bulk_assertions`` context, records the captured
        assertion to the bulk assertion manager. If not within a ``bulk_assertions``
        context, just raises the original exception.
        """
        try:
            # Only wrap the first layer of assert functions by stashing away the manager
            # before executing the assertion.
            manager = self.__manager
            self.__manager = None
            yield
        except AssertionError:  # pylint: disable=broad-except
            if manager is not None:
                # Reconstruct the stack in which the error was thrown (so that the traceback)
                # isn't cut off at `assertion(*args, **kwargs)`.
                exc_type, exc_value, exc_tb = sys.exc_info()
                # Count the number of stack frames before you get to a
                # unittest context (walking up the stack from here).
                relevant_frames = 0
                for frame_record in inspect.stack():
                    # This is the same criterion used by unittest to decide if a
                    # stack frame is relevant to exception printing.
                    frame = frame_record[0]
                    if '__unittest' in frame.f_globals:
                        break
                    relevant_frames += 1
                # Splice the frames above this point together with the frames
                # below the raise site to form one continuous traceback.
                stack_above = traceback.extract_stack()[-relevant_frames:-1]
                stack_below = traceback.extract_tb(exc_tb)
                formatted_stack = traceback.format_list(stack_above + stack_below)
                formatted_exc = traceback.format_exception_only(exc_type, exc_value)
                manager.log_error(
                    "".join(formatted_stack + formatted_exc)
                )
            else:
                # Not inside bulk_assertions: behave like a normal assertion.
                raise
        finally:
            # Restore the stashed manager for subsequent assertions.
            self.__manager = manager

    def _wrap_assertion(self, assertion):
        """
        Wraps an assert* method to capture an immediate exception,
        or to generate a new assertion capturing context (in the case of assertRaises
        and assertRaisesRegexp).
        """
        @wraps(assertion)
        def assert_(*args, **kwargs):
            """
            Execute a captured assertion, and catch any assertion errors raised.
            """
            context = None
            # Run the assertion, and capture any raised assertionErrors
            with self._capture_assertion_errors():
                context = assertion(*args, **kwargs)
            # Handle the assertRaises family of functions by returning
            # a context manager that surrounds the assertRaises
            # with our assertion capturing context manager.
            if context is not None:
                # `nested` here is the Python 2 contextlib helper chaining the
                # two context managers together.
                return nested(self._capture_assertion_errors(), context)
        return assert_

    def __getattribute__(self, name):
        """
        Wrap all assert* methods of this class using self._wrap_assertion,
        to capture all assertion errors in bulk.
        """
        base_attr = super(BulkAssertionTest, self).__getattribute__(name)
        if name.startswith('assert'):
            return self._wrap_assertion(base_attr)
        else:
            return base_attr
class LazyFormat(object):
    """
    A string-like object that defers ``str.format`` work until the value is
    actually rendered in a string context, then caches the rendered result.
    """
    __slots__ = ('template', 'args', 'kwargs', '_message')

    def __init__(self, template, *args, **kwargs):
        self.template = template
        self.args = args
        self.kwargs = kwargs
        # Cache slot; populated on first render.
        self._message = None

    def __unicode__(self):
        # Format lazily, and only once.
        if self._message is None:
            rendered = self.template.format(*self.args, **self.kwargs)
            self._message = rendered
        return self._message

    def __repr__(self):
        return unicode(self)

    def __len__(self):
        return len(unicode(self))

    def __getitem__(self, index):
        return unicode(self)[index]
class CourseComparisonTest(BulkAssertionTest):
    """
    Mixin that has methods for comparing courses for equality.
    """

    def setUp(self):
        super(CourseComparisonTest, self).setUp()
        # (usage_id, field_name) pairs to skip during field comparison.
        self.field_exclusions = set()
        # Asset-dict keys to drop before comparing assets.
        self.ignored_asset_keys = set()

    def exclude_field(self, usage_id, field_name):
        """
        Mark field ``field_name`` of expected block usage ``usage_id`` as ignored
        Args:
            usage_id (:class:`opaque_keys.edx.UsageKey` or ``None``): if ``None``, skip this field in all blocks
            field_name (string): The name of the field to skip
        """
        self.field_exclusions.add((usage_id, field_name))

    def ignore_asset_key(self, key_name):
        """
        Add an asset key to the list of keys to be ignored when comparing assets.
        Args:
            key_name: The name of the key to ignore.
        """
        self.ignored_asset_keys.add(key_name)

    def assertReferenceRelativelyEqual(self, reference_field, expected_block, actual_block):
        """
        Assert that the supplied reference field is identical on the expected_block and actual_block,
        assuming that the references are only relative (that is, comparing only on block_type and block_id,
        not course_key).
        """
        def extract_key(usage_key):
            # Reduce a usage key to its course-independent parts.
            if usage_key is None:
                return None
            else:
                return (usage_key.block_type, usage_key.block_id)
        expected = reference_field.read_from(expected_block)
        actual = reference_field.read_from(actual_block)
        # Normalize single/list/dict reference shapes to comparable tuples.
        if isinstance(reference_field, Reference):
            expected = extract_key(expected)
            actual = extract_key(actual)
        elif isinstance(reference_field, ReferenceList):
            expected = [extract_key(key) for key in expected]
            actual = [extract_key(key) for key in actual]
        elif isinstance(reference_field, ReferenceValueDict):
            expected = {key: extract_key(val) for (key, val) in expected.iteritems()}
            actual = {key: extract_key(val) for (key, val) in actual.iteritems()}
        self.assertEqual(
            expected,
            actual,
            # LazyFormat keeps message construction off the fast (passing) path.
            LazyFormat(
                "Field {} doesn't match between usages {} and {}: {!r} != {!r}",
                reference_field.name,
                expected_block.scope_ids.usage_id,
                actual_block.scope_ids.usage_id,
                expected,
                actual
            )
        )

    def assertBlocksEqualByFields(self, expected_block, actual_block):
        """
        Compare block fields to check for equivalence.
        """
        self.assertEqual(expected_block.fields, actual_block.fields)
        for field in expected_block.fields.values():
            self.assertFieldEqual(field, expected_block, actual_block)

    def assertFieldEqual(self, field, expected_block, actual_block):
        """
        Compare a single block field for equivalence.
        """
        if isinstance(field, (Reference, ReferenceList, ReferenceValueDict)):
            # Reference-typed fields get course-relative comparison.
            self.assertReferenceRelativelyEqual(field, expected_block, actual_block)
        else:
            expected = field.read_from(expected_block)
            actual = field.read_from(actual_block)
            self.assertEqual(
                expected,
                actual,
                LazyFormat(
                    "Field {} doesn't match between usages {} and {}: {!r} != {!r}",
                    field.name,
                    expected_block.scope_ids.usage_id,
                    actual_block.scope_ids.usage_id,
                    expected,
                    actual
                )
            )

    def assertCoursesEqual(self, expected_store, expected_course_key, actual_store, actual_course_key):
        """
        Assert that the courses identified by ``expected_course_key`` in ``expected_store`` and
        ``actual_course_key`` in ``actual_store`` are identical (ignoring differences
        owing to the course_keys being different).
        Any field value mentioned in ``self.field_exclusions`` by the key (usage_id, field_name)
        will be ignored for the purpose of equality checking.
        """
        # compare published
        with expected_store.branch_setting(ModuleStoreEnum.Branch.published_only, expected_course_key):
            with actual_store.branch_setting(ModuleStoreEnum.Branch.published_only, actual_course_key):
                expected_items = expected_store.get_items(expected_course_key, revision=ModuleStoreEnum.RevisionOption.published_only)
                actual_items = actual_store.get_items(actual_course_key, revision=ModuleStoreEnum.RevisionOption.published_only)
                # Guard against vacuously-equal empty courses.
                self.assertGreater(len(expected_items), 0)
                self._assertCoursesEqual(expected_items, actual_items, actual_course_key)
        # if the modulestore supports having a draft branch
        if isinstance(expected_store, ModuleStoreDraftAndPublished):
            with expected_store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, expected_course_key):
                with actual_store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, actual_course_key):
                    # compare draft
                    # Split requires an explicit draft_only revision; old mongo
                    # returns drafts with revision=None under draft_preferred.
                    if expected_store.get_modulestore_type(None) == ModuleStoreEnum.Type.split:
                        revision = ModuleStoreEnum.RevisionOption.draft_only
                    else:
                        revision = None
                    expected_items = expected_store.get_items(expected_course_key, revision=revision)
                    if actual_store.get_modulestore_type(None) == ModuleStoreEnum.Type.split:
                        revision = ModuleStoreEnum.RevisionOption.draft_only
                    else:
                        revision = None
                    actual_items = actual_store.get_items(actual_course_key, revision=revision)
                    self._assertCoursesEqual(expected_items, actual_items, actual_course_key, expect_drafts=True)

    def _assertCoursesEqual(self, expected_items, actual_items, actual_course_key, expect_drafts=False):
        """
        Actual algorithm to compare courses.
        """
        with self.bulk_assertions():
            self.assertEqual(len(expected_items), len(actual_items))

            def map_key(usage_key):
                # Course-independent identity for a block.
                return (usage_key.block_type, usage_key.block_id)
            actual_item_map = {
                map_key(item.location): item
                for item in actual_items
            }
            # Split Mongo and Old-Mongo disagree about what the block_id of courses is, so skip those in
            # this comparison
            self.assertItemsEqual(
                [map_key(item.location) for item in expected_items if item.scope_ids.block_type != 'course'],
                [key for key in actual_item_map.keys() if key[0] != 'course'],
            )
            for expected_item in expected_items:
                actual_item_location = actual_course_key.make_usage_key(expected_item.category, expected_item.location.block_id)
                # split and old mongo use different names for the course root but we don't know which
                # modulestore actual's come from here; so, assume old mongo and if that fails, assume split
                if expected_item.location.category == 'course':
                    actual_item_location = actual_item_location.replace(name=actual_item_location.run)
                actual_item = actual_item_map.get(map_key(actual_item_location))
                # must be split
                if actual_item is None and expected_item.location.category == 'course':
                    actual_item_location = actual_item_location.replace(name='course')
                    actual_item = actual_item_map.get(map_key(actual_item_location))
                # Formatting the message slows down tests of large courses significantly, so only do it if it would be used
                self.assertIn(map_key(actual_item_location), actual_item_map.keys())
                if actual_item is None:
                    # The assertIn above already recorded the failure; move on.
                    continue
                # compare fields
                self.assertEqual(expected_item.fields, actual_item.fields)
                for field_name, field in expected_item.fields.iteritems():
                    if (expected_item.scope_ids.usage_id, field_name) in self.field_exclusions:
                        continue
                    if (None, field_name) in self.field_exclusions:
                        continue
                    # Children are handled specially
                    if field_name == 'children':
                        continue
                    self.assertFieldEqual(field, expected_item, actual_item)
                # compare children
                self.assertEqual(expected_item.has_children, actual_item.has_children)
                if expected_item.has_children:
                    expected_children = [
                        (expected_item_child.location.block_type, expected_item_child.location.block_id)
                        # get_children() rather than children to strip privates from public parents
                        for expected_item_child in expected_item.get_children()
                    ]
                    actual_children = [
                        (item_child.location.block_type, item_child.location.block_id)
                        # get_children() rather than children to strip privates from public parents
                        for item_child in actual_item.get_children()
                    ]
                    self.assertEqual(expected_children, actual_children)

    def assertAssetEqual(self, expected_course_key, expected_asset, actual_course_key, actual_asset):
        """
        Assert that two assets are equal, allowing for differences related to their being from different courses.

        NOTE: mutates both asset dicts (pops keys) as a side effect of comparison.
        """
        for key in self.ignored_asset_keys:
            if key in expected_asset:
                del expected_asset[key]
            if key in actual_asset:
                del actual_asset[key]
        expected_key = expected_asset.pop('asset_key')
        actual_key = actual_asset.pop('asset_key')
        # The two keys should map into each other's course in both directions.
        self.assertEqual(expected_key.map_into_course(actual_course_key), actual_key)
        self.assertEqual(expected_key, actual_key.map_into_course(expected_course_key))
        expected_filename = expected_asset.pop('filename')
        actual_filename = actual_asset.pop('filename')
        # Each filename is the deprecated string form of its own asset key.
        self.assertEqual(expected_key.to_deprecated_string(), expected_filename)
        self.assertEqual(actual_key.to_deprecated_string(), actual_filename)
        # Whatever remains in the dicts must match exactly.
        self.assertEqual(expected_asset, actual_asset)

    def _assertAssetsEqual(self, expected_course_key, expected_assets, actual_course_key, actual_assets):  # pylint: disable=invalid-name
        """
        Private helper method for assertAssetsEqual
        """
        self.assertEqual(len(expected_assets), len(actual_assets))
        actual_assets_map = {asset['asset_key']: asset for asset in actual_assets}
        for expected_item in expected_assets:
            # Look up the actual asset by the expected key mapped into the actual course.
            actual_item = actual_assets_map[expected_item['asset_key'].map_into_course(actual_course_key)]
            self.assertAssetEqual(expected_course_key, expected_item, actual_course_key, actual_item)

    def assertAssetsEqual(self, expected_store, expected_course_key, actual_store, actual_course_key):
        """
        Assert that the course assets identified by ``expected_course_key`` in ``expected_store`` and
        ``actual_course_key`` in ``actual_store`` are identical, allowing for differences related
        to their being from different course keys.
        """
        expected_content, expected_count = expected_store.get_all_content_for_course(expected_course_key)
        actual_content, actual_count = actual_store.get_all_content_for_course(actual_course_key)
        with self.bulk_assertions():
            self.assertEqual(expected_count, actual_count)
            self._assertAssetsEqual(expected_course_key, expected_content, actual_course_key, actual_content)
            # Thumbnails are stored separately from primary content.
            expected_thumbs = expected_store.get_all_content_thumbnails_for_course(expected_course_key)
            actual_thumbs = actual_store.get_all_content_thumbnails_for_course(actual_course_key)
            self._assertAssetsEqual(expected_course_key, expected_thumbs, actual_course_key, actual_thumbs)

    def assertAssetsMetadataEqual(self, expected_modulestore, expected_course_key, actual_modulestore, actual_course_key):
        """
        Assert that the modulestore asset metadata for the ``expected_course_key`` and the ``actual_course_key``
        are equivalent.
        """
        # Sort both result sets the same way so they can be compared index-by-index.
        expected_course_assets = expected_modulestore.get_all_asset_metadata(
            expected_course_key, None, sort=('displayname', ModuleStoreEnum.SortOrder.descending)
        )
        actual_course_assets = actual_modulestore.get_all_asset_metadata(
            actual_course_key, None, sort=('displayname', ModuleStoreEnum.SortOrder.descending)
        )
        self.assertEquals(len(expected_course_assets), len(actual_course_assets))
        for idx, __ in enumerate(expected_course_assets):
            for attr in AssetMetadata.ATTRS_ALLOWED_TO_UPDATE:
                if attr in ('edited_on',):
                    # edited_on is updated upon import.
                    continue
                self.assertEquals(getattr(expected_course_assets[idx], attr), getattr(actual_course_assets[idx], attr))
| agpl-3.0 |
quarckster/cfme_tests | fixtures/datafile.py | 5 | 4048 | import os
import pytest
from fixtures.terminalreporter import reporter
from cfme.utils.datafile import data_path_for_filename, load_data_file
from cfme.utils.path import data_path, log_path
# Collection for storing unique combinations of data file paths
# and filenames for usage reporting after a completed test run
seen_data_files = set()
@pytest.fixture(scope="module")
def datafile(request):
    """datafile(filename, replacements)

    datafile fixture, with templating support

    Args:
        filename: filename to load from the data dir
        replacements: template replacements

    Returns: Path to the loaded datafile

    Usage:
        Given a filename, it will attempt to open the given file from the
        test's corresponding data dir. For example, this:

            datafile('testfile')  # in tests/subdir/test_module_name.py

        would return a file object representing this file:

            /path/to/cfme_tests/data/subdir/test_module_name/testfile

        Given a filename with a leading slash, it will attempt to load the file
        relative to the root of the data dir. For example, this:

            datafile('/common/testfile')  # in tests/subdir/test_module_name.py

        would return a file object representing this file:

            /path/to/cfme_tests/data/common/testfile

        Note that the test module name is not used with the leading slash.

    .. rubric:: Templates:

    This fixture can also handle template replacements. If the datafile
    being loaded is a python template, the dictionary of replacements
    can be passed as the 'replacements' keyword argument. In this case,
    the returned data file will be a NamedTemporaryFile prepopulated
    with the interpolated result from combining the template with
    the replacements mapping.

    * http://docs.python.org/2/library/string.html#template-strings
    * http://docs.python.org/2/library/tempfile.html#tempfile.NamedTemporaryFile
    """
    # The callable object does the actual path resolution and loading;
    # it also records each loaded file for the --udf-report accounting.
    return _FixtureDataFile(request)
def pytest_addoption(parser):
    """Register the ``--udf-report`` flag in the 'cfme' option group."""
    cfme_group = parser.getgroup('cfme')
    cfme_group.addoption(
        '--udf-report',
        action='store_true',
        default=False,
        dest='udf_report',
        help='flag to generate an unused data files report',
    )
def pytest_sessionfinish(session, exitstatus):
    """At the end of the test session, optionally write a report of data files
    that exist on disk but were never loaded via the ``datafile`` fixture.

    The report is only produced when ``--udf-report`` was passed; the stale
    log from a previous run is always removed first.
    """
    udf_log_file = log_path.join('unused_data_files.log')
    if udf_log_file.check():
        # Clean up old udf log if it exists
        udf_log_file.remove()
    if session.config.option.udf_report is False:
        # Short out here if not making a report
        return
    # Output an unused data files log after a test run
    # Walk the entire data dir to enumerate every data file on disk.
    data_files = set()
    for dirpath, dirnames, filenames in os.walk(str(data_path)):
        for filename in filenames:
            filepath = os.path.join(dirpath, filename)
            data_files.add(filepath)
    # Anything never recorded by the datafile fixture is "unused".
    unused_data_files = data_files - seen_data_files
    if unused_data_files:
        # Write the log of unused data files out, minus the data dir prefix
        udf_log = ''.join(
            (line[len(str(data_path)):] + '\n' for line in unused_data_files)
        )
        udf_log_file.write(udf_log + '\n')
        # Throw a notice into the terminal reporter to check the log
        tr = reporter()
        tr.write_line('')
        tr.write_sep(
            '-',
            '%d unused data files after test run, check %s' % (
                len(unused_data_files), udf_log_file.basename
            )
        )
class _FixtureDataFile(object):
def __init__(self, request):
self.base_path = str(request.session.fspath)
self.testmod_path = str(request.fspath)
def __call__(self, filename, replacements=None):
if filename.startswith('/'):
complete_path = data_path_for_filename(
filename.strip('/'), self.base_path)
else:
complete_path = data_path_for_filename(
filename, self.base_path, self.testmod_path)
seen_data_files.add(complete_path)
return load_data_file(complete_path, replacements)
| gpl-2.0 |
hvos234/raspberrypi.home.website | vendor/Adafruit_Python_DHT/Adafruit_DHT/Test.py | 18 | 1571 | # Copyright (c) 2014 Adafruit Industries
# Author: Tony DiCola
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import common
import Test_Driver as driver
def read(sensor, pin):
    """Read humidity and temperature from the DHT test driver.

    Returns a ``(humidity, temperature)`` tuple. On a transient error both
    values are ``None`` and the caller may retry; any other non-success
    result raises ``RuntimeError``.
    """
    # Get a reading from C driver code.
    result, humidity, temp = driver.read(sensor, pin)
    if result in common.TRANSIENT_ERRORS:
        # Signal no result could be obtained, but the caller can retry.
        return (None, None)
    if result != common.DHT_SUCCESS:
        # Some kind of error occured.
        raise RuntimeError('Error calling DHT test driver read: {0}'.format(result))
    return (humidity, temp)
| bsd-3-clause |
claudio-idra/subterfuge | sslstrip/StrippingProxy.py | 108 | 1244 | # Copyright (c) 2004-2009 Moxie Marlinspike
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
from twisted.web.http import HTTPChannel
from ClientRequest import ClientRequest
class StrippingProxy(HTTPChannel):
    '''sslstrip is, at heart, a transparent proxy server that does some unusual things.

    This is the basic proxy server class, where we get callbacks for GET and POST methods.
    We then proxy these out using HTTP or HTTPS depending on what information we have about
    the (connection, client_address) tuple in our cache.
    '''

    # Twisted's HTTPChannel instantiates requestFactory for every incoming
    # HTTP request; ClientRequest carries the actual stripping/proxy logic.
    requestFactory = ClientRequest
dagwieers/ansible | lib/ansible/modules/network/a10/a10_virtual_server.py | 18 | 11184 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Mischa Peters <mpeters@a10networks.com>,
# Eric Chou <ericc@a10networks.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: a10_virtual_server
version_added: 1.8
short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices' virtual servers.
description:
- Manage SLB (Server Load Balancing) virtual server objects on A10 Networks devices via aXAPIv2.
author: "Eric Chou (@ericchou) 2016, Mischa Peters (@mischapeters) 2014"
notes:
- Requires A10 Networks aXAPI 2.1.
extends_documentation_fragment:
- a10
- url
options:
state:
description:
- If the specified virtual server should exist.
choices: ['present', 'absent']
default: present
partition:
version_added: "2.3"
description:
- set active-partition
virtual_server:
description:
- The SLB (Server Load Balancing) virtual server name.
required: true
aliases: ['vip', 'virtual']
virtual_server_ip:
description:
- The SLB virtual server IPv4 address.
aliases: ['ip', 'address']
virtual_server_status:
description:
- The SLB virtual server status, such as enabled or disabled.
default: enable
aliases: ['status']
choices: ['enabled', 'disabled']
virtual_server_ports:
description:
- A list of ports to create for the virtual server. Each list item should be a
dictionary which specifies the C(port:) and C(type:), but can also optionally
specify the C(service_group:) as well as the C(status:). See the examples
below for details. This parameter is required when C(state) is C(present).
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled devices using self-signed certificates.
type: bool
default: 'yes'
'''
EXAMPLES = '''
# Create a new virtual server
- a10_virtual_server:
host: a10.mydomain.com
username: myadmin
password: mypassword
partition: mypartition
virtual_server: vserver1
virtual_server_ip: 1.1.1.1
virtual_server_ports:
- port: 80
protocol: TCP
service_group: sg-80-tcp
- port: 443
protocol: HTTPS
service_group: sg-443-https
- port: 8080
protocol: http
status: disabled
'''
RETURN = '''
content:
description: the full info regarding the slb_virtual
returned: success
type: str
sample: "mynewvirtualserver"
'''
import json
from ansible.module_utils.network.a10.a10 import (axapi_call, a10_argument_spec, axapi_authenticate, axapi_failure,
axapi_enabled_disabled, axapi_get_vport_protocol, AXAPI_VPORT_PROTOCOLS)
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import url_argument_spec
VALID_PORT_FIELDS = ['port', 'protocol', 'service_group', 'status']


def validate_ports(module, ports):
    """Normalize and validate the virtual server port definitions in-place.

    Each entry must define an integer ``port`` and a recognized ``protocol``
    (converted to the internal aXAPI integer); ``status`` is converted to the
    aXAPI integer form (defaulting to enabled) and ``service_group`` is
    defaulted to an empty string. Any validation failure is reported via
    ``module.fail_json``.
    """
    for port_def in ports:
        # Reject any unknown keys up front.
        for field_name in port_def:
            if field_name not in VALID_PORT_FIELDS:
                module.fail_json(msg="invalid port field (%s), must be one of: %s" % (field_name, ','.join(VALID_PORT_FIELDS)))
        # validate the port number is present and an integer
        if 'port' not in port_def:
            module.fail_json(msg="port definitions must define the port field")
        else:
            try:
                port_def['port'] = int(port_def['port'])
            except Exception:
                module.fail_json(msg="port definitions must be integers")
        # validate the port protocol is present, and convert it to
        # the internal API integer value (and validate it)
        if 'protocol' not in port_def:
            module.fail_json(msg="port definitions must define the port protocol (%s)" % ','.join(AXAPI_VPORT_PROTOCOLS))
        else:
            protocol = axapi_get_vport_protocol(port_def['protocol'])
            if not protocol:
                module.fail_json(msg="invalid port protocol, must be one of: %s" % ','.join(AXAPI_VPORT_PROTOCOLS))
            else:
                port_def['protocol'] = protocol
        # convert the status to the internal API integer value
        if 'status' in port_def:
            port_def['status'] = axapi_enabled_disabled(port_def['status'])
        else:
            port_def['status'] = 1
        # ensure the service_group field is at least present
        port_def.setdefault('service_group', '')
def main():
    """Entry point: create, update, or delete an SLB virtual server via aXAPIv2."""
    argument_spec = a10_argument_spec()
    argument_spec.update(url_argument_spec())
    argument_spec.update(
        dict(
            state=dict(type='str', default='present', choices=['present', 'absent']),
            virtual_server=dict(type='str', aliases=['vip', 'virtual'], required=True),
            virtual_server_ip=dict(type='str', aliases=['ip', 'address'], required=True),
            virtual_server_status=dict(type='str', default='enabled', aliases=['status'], choices=['enabled', 'disabled']),
            virtual_server_ports=dict(type='list', required=True),
            # NOTE(review): a list default for a 'str'-typed option looks wrong;
            # default=None would be cleaner — confirm before changing, since it
            # alters the partition name sent to the device.
            partition=dict(type='str', default=[]),
        )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=False
    )
    host = module.params['host']
    username = module.params['username']
    password = module.params['password']
    partition = module.params['partition']
    state = module.params['state']
    write_config = module.params['write_config']
    slb_virtual = module.params['virtual_server']
    slb_virtual_ip = module.params['virtual_server_ip']
    slb_virtual_status = module.params['virtual_server_status']
    slb_virtual_ports = module.params['virtual_server_ports']
    # Defensive check; virtual_server is already required=True in the spec.
    if slb_virtual is None:
        module.fail_json(msg='virtual_server is required')
    # Normalize/validate port definitions (mutates slb_virtual_ports in place).
    validate_ports(module, slb_virtual_ports)
    axapi_base_url = 'https://%s/services/rest/V2.1/?format=json' % host
    session_url = axapi_authenticate(module, axapi_base_url, username, password)
    # Select the requested partition for the rest of this session.
    axapi_call(module, session_url + '&method=system.partition.active', json.dumps({'name': partition}))
    slb_virtual_data = axapi_call(module, session_url + '&method=slb.virtual_server.search', json.dumps({'name': slb_virtual}))
    slb_virtual_exists = not axapi_failure(slb_virtual_data)
    changed = False
    if state == 'present':
        json_post = {
            'virtual_server': {
                'name': slb_virtual,
                'address': slb_virtual_ip,
                'status': axapi_enabled_disabled(slb_virtual_status),
                'vport_list': slb_virtual_ports,
            }
        }
        # before creating/updating we need to validate that any
        # service groups defined in the ports list exist, since
        # the API will still create port definitions for
        # them while indicating a failure occurred
        checked_service_groups = []
        for port in slb_virtual_ports:
            if 'service_group' in port and port['service_group'] not in checked_service_groups:
                # skip blank service group entries
                if port['service_group'] == '':
                    continue
                result = axapi_call(module, session_url + '&method=slb.service_group.search', json.dumps({'name': port['service_group']}))
                if axapi_failure(result):
                    module.fail_json(msg="the service group %s specified in the ports list does not exist" % port['service_group'])
                checked_service_groups.append(port['service_group'])
        if not slb_virtual_exists:
            result = axapi_call(module, session_url + '&method=slb.virtual_server.create', json.dumps(json_post))
            if axapi_failure(result):
                module.fail_json(msg="failed to create the virtual server: %s" % result['response']['err']['msg'])
            changed = True
        else:
            def needs_update(src_ports, dst_ports):
                '''
                Checks to determine if the port definitions of the src_ports
                array are in or different from those in dst_ports. If there is
                a difference, this function returns true, otherwise false.
                '''
                for src_port in src_ports:
                    found = False
                    different = False
                    for dst_port in dst_ports:
                        if src_port['port'] == dst_port['port']:
                            found = True
                            for valid_field in VALID_PORT_FIELDS:
                                if src_port[valid_field] != dst_port[valid_field]:
                                    different = True
                                    break
                        if found or different:
                            break
                    if not found or different:
                        return True
                # every port from the src exists in the dst, and none of them were different
                return False
            defined_ports = slb_virtual_data.get('virtual_server', {}).get('vport_list', [])
            # we check for a needed update both ways, in case ports
            # are missing from either the ones specified by the user
            # or from those on the device
            if needs_update(defined_ports, slb_virtual_ports) or needs_update(slb_virtual_ports, defined_ports):
                result = axapi_call(module, session_url + '&method=slb.virtual_server.update', json.dumps(json_post))
                if axapi_failure(result):
                    module.fail_json(msg="failed to create the virtual server: %s" % result['response']['err']['msg'])
                changed = True
        # if we changed things, get the full info regarding
        # the service group for the return data below
        if changed:
            result = axapi_call(module, session_url + '&method=slb.virtual_server.search', json.dumps({'name': slb_virtual}))
        else:
            result = slb_virtual_data
    elif state == 'absent':
        if slb_virtual_exists:
            result = axapi_call(module, session_url + '&method=slb.virtual_server.delete', json.dumps({'name': slb_virtual}))
            changed = True
        else:
            result = dict(msg="the virtual server was not present")
    # if the config has changed, save the config unless otherwise requested
    if changed and write_config:
        write_result = axapi_call(module, session_url + '&method=system.action.write_memory')
        if axapi_failure(write_result):
            module.fail_json(msg="failed to save the configuration: %s" % write_result['response']['err']['msg'])
    # log out of the session nicely and exit
    axapi_call(module, session_url + '&method=session.close')
    module.exit_json(changed=changed, content=result)
| gpl-3.0 |
redhat-openstack/django | tests/custom_pk/fields.py | 115 | 1628 | import random
import string
from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class MyWrapper(object):
    """Opaque value wrapper used to exercise custom primary-key fields.

    Compares equal both to another ``MyWrapper`` holding the same value and
    to the bare value itself.
    """
    def __init__(self, value):
        self.value = value

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self.value)

    def __str__(self):
        return self.value

    def __eq__(self, other):
        # Unwrap before comparing so MyWrapper('x') == 'x' also holds.
        if isinstance(other, self.__class__):
            other = other.value
        return self.value == other
class MyAutoField(six.with_metaclass(models.SubfieldBase, models.CharField)):
    """CharField-based "auto" key that fills in a random 10-letter value
    (wrapped in ``MyWrapper``) when none is supplied before saving.

    SubfieldBase makes to_python() run on every attribute assignment, so the
    in-memory value is always a MyWrapper instance.
    """
    def __init__(self, *args, **kwargs):
        # Force a fixed width: generated keys are always 10 characters.
        kwargs['max_length'] = 10
        super(MyAutoField, self).__init__(*args, **kwargs)

    def pre_save(self, instance, add):
        # Mimic AutoField: generate and attach a key the first time the
        # instance is saved without one.
        value = getattr(instance, self.attname, None)
        if not value:
            value = MyWrapper(''.join(random.sample(string.ascii_lowercase, 10)))
            setattr(instance, self.attname, value)
        return value

    def to_python(self, value):
        # Falsey DB values stay None; anything else is wrapped.
        if not value:
            return
        if not isinstance(value, MyWrapper):
            value = MyWrapper(value)
        return value

    def get_db_prep_save(self, value, connection):
        # Unwrap to text for storage; falsey values become NULL.
        if not value:
            return
        if isinstance(value, MyWrapper):
            return six.text_type(value)
        return value

    def get_db_prep_value(self, value, connection, prepared=False):
        # Same conversion as get_db_prep_save, used for query parameters.
        if not value:
            return
        if isinstance(value, MyWrapper):
            return six.text_type(value)
        return value
| bsd-3-clause |
degs098/python-social-auth | social/tests/backends/test_xing.py | 92 | 6199 | import json
from social.p3 import urlencode
from social.tests.backends.oauth import OAuth1Test
class XingOAuth1Test(OAuth1Test):
    """OAuth1 login tests for the XING backend.

    The class attributes below are canned HTTP bodies that the OAuth1Test
    harness serves in place of the real XING API during the mocked login
    flow; no network access happens.
    """

    backend_path = 'social.backends.xing.XingOAuth'
    user_data_url = 'https://api.xing.com/v1/users/me.json'
    # Username the pipeline is expected to derive from the profile below.
    expected_username = 'FooBar'
    # Fake access-token exchange response.
    access_token_body = json.dumps({
        'access_token': 'foobar',
        'token_type': 'bearer',
        'user_id': '123456_abcdef'
    })
    # Fake request-token response (standard OAuth1 form-encoded body).
    request_token_body = urlencode({
        'oauth_token_secret': 'foobar-secret',
        'oauth_token': 'foobar',
        'oauth_callback_confirmed': 'true'
    })
    # Fake /users/me.json payload, covering most profile fields the
    # backend may read when building user details.
    user_data_body = json.dumps({
        'users': [{
            'id': '123456_abcdef',
            'first_name': 'Foo',
            'last_name': 'Bar',
            'display_name': 'Foo Bar',
            'page_name': 'Foo_Bar',
            'permalink': 'https://www.xing.com/profile/Foo_Bar',
            'gender': 'm',
            'birth_date': {
                'day': 12,
                'month': 8,
                'year': 1963
            },
            'active_email': 'foo@bar.com',
            'time_zone': {
                'name': 'Europe/Copenhagen',
                'utc_offset': 2.0
            },
            'premium_services': ['SEARCH', 'PRIVATEMESSAGES'],
            'badges': ['PREMIUM', 'MODERATOR'],
            'wants': 'Nothing',
            'haves': 'Skills',
            'interests': 'Foo Foo',
            'organisation_member': 'ACM, GI',
            'languages': {
                'de': 'NATIVE',
                'en': 'FLUENT',
                'fr': None,
                'zh': 'BASIC'
            },
            'private_address': {
                'city': 'Foo',
                'country': 'DE',
                'zip_code': '20357',
                'street': 'Bar',
                'phone': '12|34|1234560',
                'fax': '||',
                'province': 'Foo',
                'email': 'foo@bar.com',
                'mobile_phone': '12|3456|1234567'
            },
            'business_address': {
                'city': 'Foo',
                'country': 'DE',
                'zip_code': '20357',
                'street': 'Bar',
                'phone': '12|34|1234569',
                'fax': '12|34|1234561',
                'province': 'Foo',
                'email': 'foo@bar.com',
                'mobile_phone': '12|345|12345678'
            },
            'web_profiles': {
                'qype': ['http://qype.de/users/foo'],
                'google_plus': ['http://plus.google.com/foo'],
                'blog': ['http://blog.example.org'],
                'homepage': ['http://example.org', 'http://other-example.org']
            },
            'instant_messaging_accounts': {
                'skype': 'foobar',
                'googletalk': 'foobar'
            },
            'professional_experience': {
                'primary_company': {
                    'name': 'XING AG',
                    'title': 'Softwareentwickler',
                    'company_size': '201-500',
                    'tag': None,
                    'url': 'http://www.xing.com',
                    'career_level': 'PROFESSIONAL_EXPERIENCED',
                    'begin_date': '2010-01',
                    'description': None,
                    'end_date': None,
                    'industry': 'AEROSPACE'
                },
                'non_primary_companies': [{
                    'name': 'Ninja Ltd.',
                    'title': 'DevOps',
                    'company_size': None,
                    'tag': 'NINJA',
                    'url': 'http://www.ninja-ltd.co.uk',
                    'career_level': None,
                    'begin_date': '2009-04',
                    'description': None,
                    'end_date': '2010-07',
                    'industry': 'ALTERNATIVE_MEDICINE'
                }, {
                    'name': None,
                    'title': 'Wiss. Mitarbeiter',
                    'company_size': None,
                    'tag': 'OFFIS',
                    'url': 'http://www.uni.de',
                    'career_level': None,
                    'begin_date': '2007',
                    'description': None,
                    'end_date': '2008',
                    'industry': 'APPAREL_AND_FASHION'
                }, {
                    'name': None,
                    'title': 'TEST NINJA',
                    'company_size': '201-500',
                    'tag': 'TESTCOMPANY',
                    'url': None,
                    'career_level': 'ENTRY_LEVEL',
                    'begin_date': '1998-12',
                    'description': None,
                    'end_date': '1999-05',
                    'industry': 'ARTS_AND_CRAFTS'
                }],
                'awards': [{
                    'name': 'Awesome Dude Of The Year',
                    'date_awarded': 2007,
                    'url': None
                }]
            },
            'educational_background': {
                'schools': [{
                    'name': 'Foo University',
                    'degree': 'MSc CE/CS',
                    'notes': None,
                    'subject': None,
                    'begin_date': '1998-08',
                    'end_date': '2005-02'
                }],
                'qualifications': ['TOEFLS', 'PADI AOWD']
            },
            # Adjacent string literals below are implicitly concatenated
            # into single URLs.
            'photo_urls': {
                'large': 'http://www.xing.com/img/users/e/3/d/'
                         'f94ef165a.123456,1.140x185.jpg',
                'mini_thumb': 'http://www.xing.com/img/users/e/3/d/'
                              'f94ef165a.123456,1.18x24.jpg',
                'thumb': 'http://www.xing.com/img/users/e/3/d/'
                         'f94ef165a.123456,1.30x40.jpg',
                'medium_thumb': 'http://www.xing.com/img/users/e/3/d/'
                                'f94ef165a.123456,1.57x75.jpg',
                'maxi_thumb': 'http://www.xing.com/img/users/e/3/d/'
                              'f94ef165a.123456,1.70x93.jpg'
            }
        }]
    })

    def test_login(self):
        # Full OAuth1 dance against the mocked endpoints above.
        self.do_login()

    def test_partial_pipeline(self):
        # Same flow, but interrupted and resumed mid-pipeline.
        self.do_partial_pipeline()
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.