| repo_name (string, 5-100 chars) | path (string, 4-375 chars) | copies (string, 991 classes) | size (string, 4-7 chars) | content (string, 666-1M chars) | license (string, 15 classes) |
|---|---|---|---|---|---|
autodesk-forks/WinObjC | deps/scripts/createlibs.py | 8 | 3857 | import subprocess, sys
BUILD_MSBUILD_LIBS = False
VCVARSALL_PATH = '"C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\vcvarsall.bat"'
MSBUILD_PATH = '"C:\\program files (x86)\\msbuild\\14.0\\bin\\msbuild.exe"'
THIRDPARTY_PATH = '..\\3rdparty\\'
LIBJPEG_PROJ = '..\\3rdparty\\libjpeg\\build\\libjpegWin10\\libjpeg.vcxproj'
LIBXML_PROJ = '..\\3rdparty\\libxml2legacy\\Win10\\libxml2.Win10.vcxproj'
PIXMAN_PROJ = '..\\3rdparty\\pixmanlegacy\\Win10\\pixmanlegacy.Windows10\\pixmanlegacy.Windows10.vcxproj'
OPENAL_PROJ = '..\\3rdparty\\openal-soft-winphone\\winrt.vs2015\\OpenAL\\OpenAL.vcxproj'
CASSOWARY_PROJ = '..\\3rdparty\\cassowary-0.60\\cassowary-0.60-Windows.vcxproj'
LIBOBJC_PROJ = '..\\3rdparty\\libobjc2\\msvc\\libobjc2.vcxproj'
LIBDISPATCH_PROJ = '..\\3rdparty\\libdispatch\\build\\libdispatch.vcxproj'
RELEASE_X86 = '/p:Configuration=Release;Platform=x86'
RELEASE_X64 = '/p:Configuration=Release;Platform=x64'
RELEASE_ARM = '/p:Configuration=Release;Platform=ARM'
DEBUG_X86 = '/p:Configuration=Debug;Platform=x86'
DEBUG_X64 = '/p:Configuration=Debug;Platform=x64'
DEBUG_ARM = '/p:Configuration=Debug;Platform=ARM'
def run():
    if BUILD_MSBUILD_LIBS:
        subprocess.call(MSBUILD_PATH + ' ' + LIBJPEG_PROJ + ' ' + RELEASE_X86)
        subprocess.call(MSBUILD_PATH + ' ' + LIBJPEG_PROJ + ' ' + RELEASE_X64)
        subprocess.call(MSBUILD_PATH + ' ' + LIBJPEG_PROJ + ' ' + RELEASE_ARM)
        subprocess.call(MSBUILD_PATH + ' ' + LIBXML_PROJ + ' ' + RELEASE_X86)
        subprocess.call(MSBUILD_PATH + ' ' + LIBXML_PROJ + ' ' + RELEASE_X64)
        subprocess.call(MSBUILD_PATH + ' ' + LIBXML_PROJ + ' ' + RELEASE_ARM)
        subprocess.call(MSBUILD_PATH + ' ' + PIXMAN_PROJ + ' ' + RELEASE_X86)
        subprocess.call(MSBUILD_PATH + ' ' + PIXMAN_PROJ + ' ' + RELEASE_X64)
        subprocess.call(MSBUILD_PATH + ' ' + PIXMAN_PROJ + ' ' + RELEASE_ARM)
    print "Building freetype2"
    subprocess.call("python freetype2.py")
    print "Building zlib"
    subprocess.call("python zlib.py")
    print "Building libpng"
    subprocess.call("python libpng.py")
    print "Building cairo"
    subprocess.call(VCVARSALL_PATH + ' x86 && cairo.bat x86')
    subprocess.call(VCVARSALL_PATH + ' x86_x64 && cairo.bat x64')
    subprocess.call(VCVARSALL_PATH + ' x86_arm && cairo.bat arm')
    print "Building openssl"
    # Need to update openssl submodule
    #subprocess.call(".\openssl.bat ARM x86 x64")
    if BUILD_MSBUILD_LIBS:
        subprocess.call(MSBUILD_PATH + ' ' + OPENAL_PROJ + ' ' + RELEASE_X86)
        subprocess.call(MSBUILD_PATH + ' ' + OPENAL_PROJ + ' ' + RELEASE_X64)
        subprocess.call(MSBUILD_PATH + ' ' + OPENAL_PROJ + ' ' + RELEASE_ARM)
        subprocess.call(MSBUILD_PATH + ' ' + CASSOWARY_PROJ + ' ' + RELEASE_X86)
        subprocess.call(MSBUILD_PATH + ' ' + CASSOWARY_PROJ + ' ' + RELEASE_ARM)
        subprocess.call(MSBUILD_PATH + ' ' + CASSOWARY_PROJ + ' ' + DEBUG_X86)
        subprocess.call(MSBUILD_PATH + ' ' + CASSOWARY_PROJ + ' ' + DEBUG_ARM)
        subprocess.call(MSBUILD_PATH + ' ' + LIBOBJC_PROJ + ' ' + RELEASE_X86)
        subprocess.call(MSBUILD_PATH + ' ' + LIBOBJC_PROJ + ' ' + RELEASE_ARM)
        subprocess.call(MSBUILD_PATH + ' ' + LIBOBJC_PROJ + ' ' + DEBUG_X86)
        subprocess.call(MSBUILD_PATH + ' ' + LIBOBJC_PROJ + ' ' + DEBUG_ARM)
        subprocess.call(MSBUILD_PATH + ' ' + LIBDISPATCH_PROJ + ' ' + RELEASE_X86)
        subprocess.call(MSBUILD_PATH + ' ' + LIBDISPATCH_PROJ + ' ' + RELEASE_ARM)
        subprocess.call(MSBUILD_PATH + ' ' + LIBDISPATCH_PROJ + ' ' + DEBUG_X86)
        subprocess.call(MSBUILD_PATH + ' ' + LIBDISPATCH_PROJ + ' ' + DEBUG_ARM)

if __name__ == "__main__":
    if len(sys.argv) > 1 and sys.argv[1] in ('1', 'true', 'True'):
        BUILD_MSBUILD_LIBS = True
    run() | mit |
jody-frankowski/ansible | lib/ansible/runner/action_plugins/fail.py | 161 | 1472 | # Copyright 2012, Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import ansible
from ansible import utils
from ansible.runner.return_data import ReturnData
class ActionModule(object):
    ''' Fail with custom message '''

    TRANSFERS_FILES = False

    def __init__(self, runner):
        self.runner = runner

    def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
        # note: the fail module does not need to pay attention to check mode;
        # it always runs.
        args = {}
        if complex_args:
            args.update(complex_args)
        args.update(utils.parse_kv(module_args))
        if 'msg' not in args:
            args['msg'] = 'Failed as requested from task'
        result = dict(failed=True, msg=args['msg'])
        return ReturnData(conn=conn, result=result)
| gpl-3.0 |
NewPresident1/kitsune | kitsune/questions/api.py | 9 | 18088 | from datetime import datetime
import actstream.actions
import django_filters
import json
from django.db.models import Q
from rest_framework import serializers, viewsets, permissions, filters, status
from rest_framework.decorators import action
from rest_framework.response import Response
from taggit.models import Tag
from kitsune.products.api import TopicField
from kitsune.questions.models import (
Question, Answer, QuestionMetaData, AlreadyTakenException,
InvalidUserException, QuestionVote, AnswerVote)
from kitsune.sumo.api import (
DateTimeUTCField, OnlyCreatorEdits, GenericAPIException, SplitSourceField)
from kitsune.tags.utils import add_existing_tag
from kitsune.users.api import ProfileFKSerializer
from kitsune.users.models import Profile
class QuestionMetaDataSerializer(serializers.ModelSerializer):
question = serializers.PrimaryKeyRelatedField(
required=False, write_only=True)
class Meta:
model = QuestionMetaData
fields = ('name', 'value', 'question')
def get_identity(self, obj):
return obj['name']
def restore_object(self, attrs, instance=None):
"""
Given a dictionary of deserialized field values, either update
an existing model instance, or create a new model instance.
"""
if instance is not None:
for key in self.Meta.fields:
setattr(instance, key, attrs.get(key, getattr(instance, key)))
return instance
else:
obj, created = self.Meta.model.objects.get_or_create(
question=attrs['question'], name=attrs['name'],
defaults={'value': attrs['value']})
if not created:
obj.value = attrs['value']
obj.save()
return obj
class QuestionTagSerializer(serializers.ModelSerializer):
question = serializers.PrimaryKeyRelatedField(
required=False, write_only=True)
class Meta:
model = Tag
fields = ('name', 'slug')
class QuestionSerializer(serializers.ModelSerializer):
content = SplitSourceField(read_source='content_parsed', write_source='content')
created = DateTimeUTCField(read_only=True)
creator = serializers.SerializerMethodField('get_creator')
involved = serializers.SerializerMethodField('get_involved_users')
is_solved = serializers.Field(source='is_solved')
is_taken = serializers.Field(source='is_taken')
metadata = QuestionMetaDataSerializer(source='metadata_set', required=False)
num_votes = serializers.Field(source='num_votes')
product = serializers.SlugRelatedField(required=True, slug_field='slug')
tags = QuestionTagSerializer(source='tags', read_only=True)
solution = serializers.PrimaryKeyRelatedField(read_only=True)
solved_by = serializers.SerializerMethodField('get_solved_by')
taken_by = serializers.SerializerMethodField('get_taken_by')
topic = TopicField(required=True)
updated = DateTimeUTCField(read_only=True)
updated_by = serializers.SerializerMethodField('get_updated_by')
class Meta:
model = Question
fields = (
'answers',
'content',
'created',
'creator',
'id',
'involved',
'is_archived',
'is_locked',
'is_solved',
'is_spam',
'is_taken',
'last_answer',
'locale',
'metadata',
'tags',
'num_answers',
'num_votes_past_week',
'num_votes',
'product',
'solution',
'taken_until',
'taken_by',
'title',
'topic',
'updated_by',
'updated',
)
def get_involved_users(self, obj):
involved = set([Profile.objects.get(user=obj.creator)])
involved.update(Profile.objects.get(user=a.creator) for a in obj.answers.all())
return ProfileFKSerializer(involved, many=True).data
def get_solved_by(self, obj):
return ProfileFKSerializer(obj.solution.creator).data if obj.solution else None
def get_creator(self, obj):
return ProfileFKSerializer(Profile.objects.get(user=obj.creator)).data
def get_taken_by(self, obj):
taken_by = Profile.objects.get(user=obj.taken_by) if obj.taken_by else None
return ProfileFKSerializer(taken_by).data if taken_by else None
def get_updated_by(self, obj):
updated_by = Profile.objects.get(user=obj.updated_by) if obj.updated_by else None
return ProfileFKSerializer(updated_by).data if updated_by else None
def validate_creator(self, attrs, source):
user = getattr(self.context.get('request'), 'user', None)
if user and not user.is_anonymous() and attrs.get(source) is None:
attrs['creator'] = user
return attrs
class QuestionFKSerializer(QuestionSerializer):
class Meta:
model = Question
fields = (
'creator',
'id',
'title',
)
class QuestionFilter(django_filters.FilterSet):
product = django_filters.CharFilter(name='product__slug')
creator = django_filters.CharFilter(name='creator__username')
involved = django_filters.MethodFilter(action='filter_involved')
is_solved = django_filters.MethodFilter(action='filter_is_solved')
is_taken = django_filters.MethodFilter(action='filter_is_taken')
metadata = django_filters.MethodFilter(action='filter_metadata')
solved_by = django_filters.MethodFilter(action='filter_solved_by')
taken_by = django_filters.CharFilter(name='taken_by__username')
class Meta(object):
model = Question
fields = [
'creator',
'created',
'involved',
'is_archived',
'is_locked',
'is_solved',
'is_spam',
'is_taken',
'locale',
'num_answers',
'product',
'solved_by',
'taken_by',
'title',
'topic',
'updated',
'updated_by',
]
def filter_involved(self, queryset, username):
# This will remain unevaluated, and become a subquery of the final query.
# Using a subquery instead of a JOIN like Django would normally do
# should be faster in this case.
questions_user_answered = (
Answer.objects.filter(creator__username=username).values('question_id'))
answered_filter = Q(id__in=questions_user_answered)
creator_filter = Q(creator__username=username)
return queryset.filter(creator_filter | answered_filter)
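The comment in `filter_involved` reasons that keeping `questions_user_answered` unevaluated compiles it into an SQL subquery rather than a JOIN. A hypothetical way to confirm this (assumes a configured Django project with these models; the username is made up):

```python
# The Answer lookup stays lazy, so it shows up as `id IN (SELECT ...)`
# in the generated SQL instead of a JOIN.
f = QuestionFilter({'involved': 'jsmith'}, queryset=Question.objects.all())
print(f.qs.query)
```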
def filter_is_taken(self, queryset, value):
field = serializers.BooleanField()
value = field.from_native(value)
# is_taken doesn't exist. Instead, we decide if a question is taken
# based on ``taken_by`` and ``taken_until``.
now = datetime.now()
if value:
# only taken questions
return queryset.filter(~Q(taken_by=None), taken_until__gt=now)
else:
# only not taken questions
return queryset.filter(Q(taken_by=None) | Q(taken_until__lt=now))
def filter_is_solved(self, queryset, value):
field = serializers.BooleanField()
value = field.from_native(value)
solved_filter = Q(solution=None)
if value:
solved_filter = ~solved_filter
return queryset.filter(solved_filter)
def filter_solved_by(self, queryset, username):
question_user_solved = (
Question.objects.filter(solution__creator__username=username).values('id'))
return queryset.filter(id__in=question_user_solved)
def filter_metadata(self, queryset, value):
try:
value = json.loads(value)
except ValueError:
raise GenericAPIException(400, 'metadata must be valid JSON.')
for name, values in value.items():
if not isinstance(values, list):
values = [values]
query = Q()
for v in values:
if v is None:
query = query | ~Q(metadata_set__name=name)
else:
query = query | Q(metadata_set__name=name, metadata_set__value=v)
queryset = queryset.filter(query)
return queryset
class QuestionViewSet(viewsets.ModelViewSet):
serializer_class = QuestionSerializer
queryset = Question.objects.all()
paginate_by = 20
permission_classes = [
OnlyCreatorEdits,
permissions.IsAuthenticatedOrReadOnly,
]
filter_class = QuestionFilter
filter_backends = [
filters.DjangoFilterBackend,
filters.OrderingFilter,
]
ordering_fields = [
'id',
'created',
'last_answer',
'num_answers',
'num_votes_past_week',
'updated',
]
# Default, if not overwritten
ordering = ('-id',)
@action(methods=['POST'])
def solve(self, request, pk=None):
"""Accept an answer as the solution to the question."""
question = self.get_object()
answer_id = request.DATA.get('answer')
try:
answer = Answer.objects.get(pk=answer_id)
except Answer.DoesNotExist:
return Response({'answer': 'This field is required.'},
status=status.HTTP_400_BAD_REQUEST)
question.set_solution(answer, request.user)
return Response(status=status.HTTP_204_NO_CONTENT)
@action(methods=['POST'], permission_classes=[permissions.IsAuthenticated])
def helpful(self, request, pk=None):
question = self.get_object()
if not question.editable:
raise GenericAPIException(403, 'Question not editable')
if question.has_voted(request):
raise GenericAPIException(409, 'Cannot vote twice')
QuestionVote(question=question, creator=request.user).save()
num_votes = QuestionVote.objects.filter(question=question).count()
return Response({'num_votes': num_votes})
@action(methods=['POST'], permission_classes=[permissions.IsAuthenticated])
def follow(self, request, pk=None):
question = self.get_object()
actstream.actions.follow(request.user, question, actor_only=False, send_action=False)
return Response('', status=204)
@action(methods=['POST'], permission_classes=[permissions.IsAuthenticated])
def unfollow(self, request, pk=None):
question = self.get_object()
actstream.actions.unfollow(request.user, question, send_action=False)
return Response('', status=204)
@action(methods=['POST'])
def set_metadata(self, request, pk=None):
data = {}
data.update(request.DATA)
data['question'] = self.get_object().pk
serializer = QuestionMetaDataSerializer(data=data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
else:
return Response(serializer.errors,
status=status.HTTP_400_BAD_REQUEST)
@action(methods=['POST', 'DELETE'])
def delete_metadata(self, request, pk=None):
question = self.get_object()
if 'name' not in request.DATA:
return Response({'name': 'This field is required.'},
status=status.HTTP_400_BAD_REQUEST)
try:
meta = (QuestionMetaData.objects
.get(question=question, name=request.DATA['name']))
meta.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
except QuestionMetaData.DoesNotExist:
raise GenericAPIException(404, 'No matching metadata object found.')
@action(methods=['POST'], permission_classes=[permissions.IsAuthenticatedOrReadOnly])
def take(self, request, pk=None):
question = self.get_object()
field = serializers.BooleanField()
force = field.from_native(request.DATA.get('force', False))
try:
question.take(request.user, force=force)
except InvalidUserException:
raise GenericAPIException(400, 'Question creator cannot take a question.')
except AlreadyTakenException:
raise GenericAPIException(409, 'Conflict: question is already taken.')
return Response(status=204)
@action(methods=['POST'], permission_classes=[permissions.IsAuthenticated])
def add_tags(self, request, pk=None):
question = self.get_object()
if 'tags' not in request.DATA:
return Response({'tags': 'This field is required.'},
status=status.HTTP_400_BAD_REQUEST)
tags = request.DATA['tags']
for tag in tags:
try:
add_existing_tag(tag, question.tags)
except Tag.DoesNotExist:
if request.user.has_perm('taggit.add_tag'):
question.tags.add(tag)
else:
raise GenericAPIException(403, 'You are not authorized to create new tags.')
tags = question.tags.all()
return Response(QuestionTagSerializer(tags).data)
@action(methods=['POST', 'DELETE'], permission_classes=[permissions.IsAuthenticated])
def remove_tags(self, request, pk=None):
question = self.get_object()
if 'tags' not in request.DATA:
return Response({'tags': 'This field is required.'},
status=status.HTTP_400_BAD_REQUEST)
tags = request.DATA['tags']
for tag in tags:
question.tags.remove(tag)
return Response(status=status.HTTP_204_NO_CONTENT)
class AnswerSerializer(serializers.ModelSerializer):
content = SplitSourceField(read_source='content_parsed', write_source='content')
created = DateTimeUTCField(read_only=True)
creator = serializers.SerializerMethodField('get_creator')
num_helpful_votes = serializers.Field(source='num_helpful_votes')
num_unhelpful_votes = serializers.Field(source='num_unhelpful_votes')
updated = DateTimeUTCField(read_only=True)
updated_by = serializers.SerializerMethodField('get_updated_by')
class Meta:
model = Answer
fields = (
'id',
'question',
'content',
'created',
'creator',
'updated',
'updated_by',
'is_spam',
'num_helpful_votes',
'num_unhelpful_votes',
)
def get_creator(self, obj):
return ProfileFKSerializer(Profile.objects.get(user=obj.creator)).data
def get_updated_by(self, obj):
updated_by = Profile.objects.get(user=obj.updated_by) if obj.updated_by else None
return ProfileFKSerializer(updated_by).data if updated_by else None
def validate_creator(self, attrs, source):
user = getattr(self.context.get('request'), 'user', None)
if user and not user.is_anonymous() and attrs.get('creator') is None:
attrs['creator'] = user
return attrs
class AnswerFKSerializer(AnswerSerializer):
class Meta:
model = Answer
fields = (
'id',
'question',
'creator',
)
class AnswerFilter(django_filters.FilterSet):
creator = django_filters.CharFilter(name='creator__username')
question = django_filters.Filter(name='question__id')
class Meta(object):
model = Answer
fields = [
'question',
'creator',
'created',
'updated',
'updated_by',
'is_spam',
]
class AnswerViewSet(viewsets.ModelViewSet):
serializer_class = AnswerSerializer
queryset = Answer.objects.all()
paginate_by = 20
permission_classes = [
OnlyCreatorEdits,
permissions.IsAuthenticatedOrReadOnly,
]
filter_class = AnswerFilter
filter_backends = [
filters.DjangoFilterBackend,
filters.OrderingFilter,
]
filter_fields = [
'question',
'created',
'creator',
'updated',
'updated_by',
]
ordering_fields = [
'id',
'created',
'updated',
]
# Default, if not overwritten
ordering = ('-id',)
def get_pagination_serializer(self, page):
"""
Return a serializer instance to use with paginated data.
"""
class SerializerClass(self.pagination_serializer_class):
class Meta:
object_serializer_class = AnswerSerializer
context = self.get_serializer_context()
return SerializerClass(instance=page, context=context)
@action(methods=['POST'], permission_classes=[permissions.IsAuthenticated])
def helpful(self, request, pk=None):
answer = self.get_object()
if not answer.question.editable:
raise GenericAPIException(403, 'Answer not editable')
if answer.has_voted(request):
raise GenericAPIException(409, 'Cannot vote twice')
AnswerVote(answer=answer, creator=request.user, helpful=True).save()
num_helpful_votes = AnswerVote.objects.filter(answer=answer, helpful=True).count()
num_unhelpful_votes = AnswerVote.objects.filter(answer=answer, helpful=False).count()
return Response({
'num_helpful_votes': num_helpful_votes,
'num_unhelpful_votes': num_unhelpful_votes,
})
@action(methods=['POST'], permission_classes=[permissions.IsAuthenticated])
def follow(self, request, pk=None):
answer = self.get_object()
actstream.actions.follow(request.user, answer, actor_only=False)
return Response('', status=204)
@action(methods=['POST'], permission_classes=[permissions.IsAuthenticated])
def unfollow(self, request, pk=None):
answer = self.get_object()
actstream.actions.unfollow(request.user, answer)
return Response('', status=204)
| bsd-3-clause |
vasumv/pokemon_ai | showdownai/move_predict.py | 1 | 8103 | import random
from data import MOVE_CORRECTIONS, NAME_CORRECTIONS, load_data, correct_name, correct_move
from data import correct_mega
class MovePredictor(object):

    def __init__(self, poke, pokedata):
        self.pokedata = pokedata
        self.poke = poke

    def get_moves(self, known_moves):
        raise NotImplementedError

    def __call__(self, *args):
        return self.get_moves(*args)


class RandomMovePredictor(MovePredictor):

    def __init__(self, poke, pokedata):
        super(RandomMovePredictor, self).__init__(poke, pokedata)
        poke_moves = self.pokedata.poke_moves[correct_mega(correct_name(self.poke))]
        random.shuffle(poke_moves)
        prob = 1.0 / len(poke_moves)
        self.predictions = [(x, prob) for x in poke_moves]

    def get_moves(self, known_moves):
        return self.predictions


class MoveFrequencyPredictor(MovePredictor):

    def __init__(self, poke, pokedata):
        super(MoveFrequencyPredictor, self).__init__(poke, pokedata)
        graph_move = self.pokedata.graph_move
        self.poke_moves = self.pokedata.poke_moves[correct_mega(correct_name(self.poke))]
        self.co = graph_move['cooccurences']
        self.freq = graph_move['frequencies']

    def get_freqs(self, freq):
        probs = {}
        for move in freq:
            prob = freq[move]
            probs[move] = prob
        return probs

    def get_moves(self, known_moves):
        move_freq = sorted(self.get_freqs(self.freq).items(), key=lambda x: -x[1])
        new_move_freq = []
        for move in move_freq:
            if move[0] in self.poke_moves:
                new_move_freq.append(move)
        self.predictions = new_move_freq
        return self.predictions


class MoveCoPredictor(MovePredictor):

    def __init__(self, poke, pokedata):
        super(MoveCoPredictor, self).__init__(poke, pokedata)
        graph_move = self.pokedata.graph_move
        self.poke_moves = self.pokedata.poke_moves[correct_mega(correct_name(self.poke))]
        self.co = graph_move['cooccurences']
        self.freq = graph_move['frequencies']

    def get_freqs(self, freq):
        probs = {}
        for move in freq:
            if move in self.poke_moves:
                prob = freq[move]
                probs[move] = prob
        return probs

    def get_moves(self, known_moves):
        probs = {}
        if len(known_moves) == 0:
            probs = self.get_freqs(self.freq)
        else:
            for move in self.co:
                if move in known_moves:
                    continue
                if move not in self.poke_moves:
                    continue
                prob = 1.0
                for othermove in known_moves:
                    if othermove not in self.co[move]:
                        prob *= 0
                        continue
                    prob *= self.co[move][othermove]
                prob *= self.freq[move]
                # store under the canonical move name; the original indexed
                # probs with a key that had not been set yet
                probs[MOVE_CORRECTIONS.get(move, move)] = prob
        if probs == {}:
            probs = self.get_freqs(self.freq)
        self.predictions = sorted(probs.items(), key=lambda x: -x[1])
        return self.predictions

    def get_moves_assumption_two(self, known_moves):
        probs = {}
        if len(known_moves) == 0:
            probs = self.get_freqs(self.freq)
        else:
            for move in known_moves:
                if move not in self.co:
                    continue
                for othermove in self.co[move]:
                    if othermove not in self.poke_moves:
                        continue
                    if othermove in known_moves:
                        continue
                    prob = self.co[move][othermove]
                    # accumulate under the canonical move name
                    key = MOVE_CORRECTIONS.get(othermove, othermove)
                    if key not in probs:
                        probs[key] = 1
                    probs[key] *= prob
        if probs == {}:
            probs = self.get_freqs(self.freq)
        self.predictions = sorted(probs.items(), key=lambda x: -x[1])
        return self.predictions
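The scoring in `get_moves` treats each candidate as proportional to its base frequency times the product of its cooccurrence weights with the known moves. A toy illustration with made-up numbers:

```python
freq = {'earthquake': 0.4, 'protect': 0.3}
co = {'earthquake': {'swords-dance': 0.9}, 'protect': {'swords-dance': 0.2}}
known_moves = ['swords-dance']

# score(move) = freq[move] * prod(co[move][k] for k in known_moves)
scores = {m: freq[m] * co[m][known_moves[0]] for m in freq}
print(sorted(scores.items(), key=lambda x: -x[1]))
# [('earthquake', 0.36...), ('protect', 0.06...)] -- earthquake ranks first
```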
class PokeFrequencyPredictor(MovePredictor):

    def __init__(self, poke, pokedata):
        super(PokeFrequencyPredictor, self).__init__(poke, pokedata)
        graph_poke = self.pokedata.graph_poke
        self.co = graph_poke['cooccurences']
        self.freq = graph_poke['frequencies']

    def get_freqs(self, freq):
        poke = correct_name(self.poke)
        poke = correct_mega(poke)
        probs = {}
        for move in freq[poke]:
            prob = freq[poke][move]
            probs[move] = prob
        return probs

    def get_moves(self, known_moves):
        poke = correct_name(self.poke)
        poke = correct_mega(poke)
        probs = {}
        if len(known_moves) == 0:
            probs = self.get_freqs(self.freq)
        else:
            for move in self.co[poke]:
                if move in known_moves:
                    continue
                prob = 1.0
                for othermove in known_moves:
                    if othermove not in self.co[poke][move]:
                        prob *= 0
                        continue
                    prob *= self.co[poke][move][othermove]
                prob *= self.freq[poke][move]
                # store under the canonical move name; the original indexed
                # probs with a key that had not been set yet
                probs[MOVE_CORRECTIONS.get(move, move)] = prob
            #else:
                #for move in known_moves:
                    #if move not in self.co[poke]:
                        #continue
                    #for othermove in self.co[poke][move]:
                        #if othermove in MOVE_CORRECTIONS:
                            #probs[MOVE_CORRECTIONS[othermove]] = probs[othermove]
                            #del probs[move]
                        #if othermove in known_moves:
                            #continue
                        #prob = self.co[poke][move][othermove]
                        #if othermove not in probs:
                            #probs[othermove] = 1
                        #probs[othermove] *= prob
        if probs == {}:
            probs = self.get_freqs(self.freq)
        self.predictions = sorted(probs.items(), key=lambda x: -x[1])
        return self.predictions

    def get_moves_assumption_two(self, known_moves):
        poke = correct_name(self.poke)
        poke = correct_mega(poke)
        probs = {}
        if len(known_moves) == 0:
            probs = self.get_freqs(self.freq)
        else:
            for move in known_moves:
                if move not in self.co[poke]:
                    continue
                for othermove in self.co[poke][move]:
                    if othermove in known_moves:
                        continue
                    prob = self.co[poke][move][othermove]
                    # accumulate under the canonical move name
                    key = MOVE_CORRECTIONS.get(othermove, othermove)
                    if key not in probs:
                        probs[key] = 1
                    probs[key] *= prob
        if probs == {}:
            probs = self.get_freqs(self.freq)
        self.predictions = sorted(probs.items(), key=lambda x: -x[1])
        return self.predictions


def create_predictor(name, poke, pokedata):
    return PREDICTORS[name](poke, pokedata)


PREDICTORS = {
    'RandomMovePredictor': RandomMovePredictor,
    'PokeFrequencyPredictor': PokeFrequencyPredictor,
    'MoveFrequencyPredictor': MoveFrequencyPredictor,
    'MoveCoPredictor': MoveCoPredictor,
}

if __name__ == "__main__":
    pokedata = load_data("data")

    def foo(poke, moves):
        return MoveCoPredictor(poke, pokedata)(moves)

    movepredictor = MoveCoPredictor("Charizard", pokedata)
| mit |
jbuberel/gcloud-python | gcloud/storage/test_connection.py | 18 | 1712 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
class TestConnection(unittest2.TestCase):

    def _getTargetClass(self):
        from gcloud.storage.connection import Connection
        return Connection

    def _makeOne(self, *args, **kw):
        return self._getTargetClass()(*args, **kw)

    def test_build_api_url_no_extra_query_params(self):
        conn = self._makeOne()
        URI = '/'.join([
            conn.API_BASE_URL,
            'storage',
            conn.API_VERSION,
            'foo',
        ])
        self.assertEqual(conn.build_api_url('/foo'), URI)

    def test_build_api_url_w_extra_query_params(self):
        from six.moves.urllib.parse import parse_qsl
        from six.moves.urllib.parse import urlsplit
        conn = self._makeOne()
        uri = conn.build_api_url('/foo', {'bar': 'baz'})
        scheme, netloc, path, qs, _ = urlsplit(uri)
        self.assertEqual('%s://%s' % (scheme, netloc), conn.API_BASE_URL)
        self.assertEqual(path,
                         '/'.join(['', 'storage', conn.API_VERSION, 'foo']))
        parms = dict(parse_qsl(qs))
        self.assertEqual(parms['bar'], 'baz')
| apache-2.0 |
ChristopherHogan/numpy | numpy/fft/fftpack.py | 72 | 45497 | """
Discrete Fourier Transforms
Routines in this module:
fft(a, n=None, axis=-1)
ifft(a, n=None, axis=-1)
rfft(a, n=None, axis=-1)
irfft(a, n=None, axis=-1)
hfft(a, n=None, axis=-1)
ihfft(a, n=None, axis=-1)
fftn(a, s=None, axes=None)
ifftn(a, s=None, axes=None)
rfftn(a, s=None, axes=None)
irfftn(a, s=None, axes=None)
fft2(a, s=None, axes=(-2,-1))
ifft2(a, s=None, axes=(-2, -1))
rfft2(a, s=None, axes=(-2,-1))
irfft2(a, s=None, axes=(-2, -1))
i = inverse transform
r = transform of purely real data
h = Hermite transform
n = n-dimensional transform
2 = 2-dimensional transform
(Note: 2D routines are just nD routines with different default
behavior.)
The underlying code for these functions is an f2c-translated and modified
version of the FFTPACK routines.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn']
from numpy.core import (array, asarray, zeros, swapaxes, shape, conjugate,
take, sqrt)
from . import fftpack_lite as fftpack
_fft_cache = {}
_real_fft_cache = {}
def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
work_function=fftpack.cfftf, fft_cache=_fft_cache):
a = asarray(a)
if n is None:
n = a.shape[axis]
if n < 1:
raise ValueError("Invalid number of FFT data points (%d) specified."
% n)
try:
# Thread-safety note: We rely on list.pop() here to atomically
# retrieve-and-remove a wsave from the cache. This ensures that no
# other thread can get the same wsave while we're using it.
wsave = fft_cache.setdefault(n, []).pop()
except (IndexError):
wsave = init_function(n)
if a.shape[axis] != n:
s = list(a.shape)
if s[axis] > n:
index = [slice(None)]*len(s)
index[axis] = slice(0, n)
a = a[index]
else:
index = [slice(None)]*len(s)
index[axis] = slice(0, s[axis])
s[axis] = n
z = zeros(s, a.dtype.char)
z[index] = a
a = z
if axis != -1:
a = swapaxes(a, axis, -1)
r = work_function(a, wsave)
if axis != -1:
r = swapaxes(r, axis, -1)
# As soon as we put wsave back into the cache, another thread could pick it
# up and start using it, so we must not do this until after we're
# completely done using it ourselves.
fft_cache[n].append(wsave)
return r
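The thread-safety comments in `_raw_fft` describe a lock-free cache protocol: `list.pop()` atomically hands a `wsave` buffer to one thread, and the buffer is only appended back once that thread is done with it. A standalone sketch of the same idiom (names are illustrative, not part of NumPy):

```python
cache = {}

def borrow(n, make):
    try:
        return cache.setdefault(n, []).pop()  # atomic retrieve-and-remove
    except IndexError:
        return make(n)                        # cache empty: build a fresh buffer

def give_back(n, buf):
    cache[n].append(buf)                      # only after we are done using buf
```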
def _unitary(norm):
if norm not in (None, "ortho"):
raise ValueError("Invalid norm value %s, should be None or \"ortho\"."
% norm)
return norm is not None
def fft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) with the efficient Fast Fourier Transform (FFT)
algorithm [CT].
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
if `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : for definition of the DFT and conventions used.
ifft : The inverse of `fft`.
fft2 : The two-dimensional FFT.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
fftfreq : Frequency bins for given FFT parameters.
Notes
-----
FFT (Fast Fourier Transform) refers to a way the discrete Fourier
Transform (DFT) can be calculated efficiently, by using symmetries in the
calculated terms. The symmetry is highest when `n` is a power of 2, and
the transform is therefore most efficient for these sizes.
The DFT is defined, with the conventions used in this implementation, in
the documentation for the `numpy.fft` module.
References
----------
.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
machine calculation of complex Fourier series," *Math. Comput.*
19: 297-301.
Examples
--------
>>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
array([ -3.44505240e-16 +1.14383329e-17j,
8.00000000e+00 -5.71092652e-15j,
2.33482938e-16 +1.22460635e-16j,
1.64863782e-15 +1.77635684e-15j,
9.95839695e-17 +2.33482938e-16j,
0.00000000e+00 +1.66837030e-15j,
1.14383329e-17 +1.22460635e-16j,
-1.64863782e-15 +1.77635684e-15j])
>>> import matplotlib.pyplot as plt
>>> t = np.arange(256)
>>> sp = np.fft.fft(np.sin(t))
>>> freq = np.fft.fftfreq(t.shape[-1])
>>> plt.plot(freq, sp.real, freq, sp.imag)
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
In this example, real input has an FFT which is Hermitian, i.e., symmetric
in the real part and anti-symmetric in the imaginary part, as described in
the `numpy.fft` documentation.
"""
a = asarray(a).astype(complex)
if n is None:
n = a.shape[axis]
output = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftf, _fft_cache)
if _unitary(norm):
output *= 1 / sqrt(n)
return output
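With `norm="ortho"` the extra `1/sqrt(n)` factor makes the transform unitary, so signal energy is preserved (Parseval's theorem). A quick check:

```python
import numpy as np

a = np.random.rand(128)
A = np.fft.fft(a, norm="ortho")
assert np.allclose(np.sum(a ** 2), np.sum(np.abs(A) ** 2))
```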
def ifft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier transform computed by `fft`. In other words,
``ifft(fft(a)) == a`` to within numerical accuracy.
For a general description of the algorithm and definitions,
see `numpy.fft`.
The input should be ordered in the same way as is returned by `fft`,
i.e., ``a[0]`` should contain the zero frequency term,
``a[1:n/2+1]`` should contain the positive-frequency terms, and
``a[n/2+1:]`` should contain the negative-frequency terms, in order of
decreasingly negative frequency. See `numpy.fft` for details.
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
See notes about padding issues.
axis : int, optional
Axis over which to compute the inverse DFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
If `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : An introduction, with definitions and general explanations.
fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse
ifft2 : The two-dimensional inverse FFT.
ifftn : The n-dimensional inverse FFT.
Notes
-----
If the input parameter `n` is larger than the size of the input, the input
is padded by appending zeros at the end. Even though this is the common
approach, it might lead to surprising results. If a different padding is
desired, it must be performed before calling `ifft`.
Examples
--------
>>> np.fft.ifft([0, 4, 0, 0])
array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j])
Create and plot a band-limited signal with random phases:
>>> import matplotlib.pyplot as plt
>>> t = np.arange(400)
>>> n = np.zeros((400,), dtype=complex)
>>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,)))
>>> s = np.fft.ifft(n)
>>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--')
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.legend(('real', 'imaginary'))
<matplotlib.legend.Legend object at 0x...>
>>> plt.show()
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = a.shape[axis]
unitary = _unitary(norm)
output = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftb, _fft_cache)
return output * (1 / (sqrt(n) if unitary else n))
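As the docstring states, ``ifft(fft(a)) == a`` to within numerical accuracy; a one-line verification:

```python
import numpy as np

a = np.random.rand(32) + 1j * np.random.rand(32)
assert np.allclose(np.fft.ifft(np.fft.fft(a)), a)
```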
def rfft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform for real input.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) of a real-valued array by means of an efficient algorithm
called the Fast Fourier Transform (FFT).
Parameters
----------
a : array_like
Input array
n : int, optional
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
If `n` is even, the length of the transformed axis is ``(n/2)+1``.
If `n` is odd, the length is ``(n+1)/2``.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
irfft : The inverse of `rfft`.
fft : The one-dimensional FFT of general (complex) input.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
Notes
-----
When the DFT is computed for purely real input, the output is
Hermitian-symmetric, i.e. the negative frequency terms are just the complex
conjugates of the corresponding positive-frequency terms, and the
negative-frequency terms are therefore redundant. This function does not
compute the negative frequency terms, and the length of the transformed
axis of the output is therefore ``n//2 + 1``.
When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains
the zero-frequency term 0*fs, which is real due to Hermitian symmetry.
If `n` is even, ``A[-1]`` contains the term representing both positive
and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely
real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains
the largest positive frequency (fs/2*(n-1)/n), and is complex in the
general case.
If the input `a` contains an imaginary part, it is silently discarded.
Examples
--------
>>> np.fft.fft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j])
>>> np.fft.rfft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j])
Notice how the final element of the `fft` output is the complex conjugate
of the second element, for real input. For `rfft`, this symmetry is
exploited to compute only the non-negative frequency terms.
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
output = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftf,
_real_fft_cache)
if _unitary(norm):
output *= 1 / sqrt(a.shape[axis])
return output
def irfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse of the n-point DFT for real input.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier Transform of real input computed by `rfft`.
In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical
accuracy. (See Notes below for why ``len(a)`` is necessary here.)
The input is expected to be in the form returned by `rfft`, i.e. the
real zero-frequency term followed by the complex positive frequency terms
in order of increasing frequency. Since the discrete Fourier Transform of
real input is Hermitian-symmetric, the negative frequency terms are taken
to be the complex conjugates of the corresponding positive frequency terms.
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
fft : The one-dimensional FFT.
irfft2 : The inverse of the two-dimensional FFT of real input.
irfftn : The inverse of the *n*-dimensional FFT of real input.
Notes
-----
Returns the real valued `n`-point inverse discrete Fourier transform
of `a`, where `a` contains the non-negative frequency terms of a
Hermitian-symmetric sequence. `n` is the length of the result, not the
input.
If you specify an `n` such that `a` must be zero-padded or truncated, the
extra/removed values will be added/removed at high frequencies. One can
thus resample a series to `m` points via Fourier interpolation by:
``a_resamp = irfft(rfft(a), m)``.
Examples
--------
>>> np.fft.ifft([1, -1j, -1, 1j])
array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j])
>>> np.fft.irfft([1, -1j, -1])
array([ 0., 1., 0., 0.])
Notice how the last term in the input to the ordinary `ifft` is the
complex conjugate of the second term, and the output has zero imaginary
part everywhere. When calling `irfft`, the negative frequencies are not
specified, and the output array is purely real.
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = (a.shape[axis] - 1) * 2
unitary = _unitary(norm)
output = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftb,
_real_fft_cache)
return output * (1 / (sqrt(n) if unitary else n))
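The Notes above suggest resampling via Fourier interpolation with ``irfft(rfft(a), m)``. A short sketch; because `irfft` divides by its own output length, the amplitude must be rescaled by ``m/n``:

```python
import numpy as np

n, m = 16, 64
t = np.arange(n) / float(n)
a = np.sin(2 * np.pi * 3 * t)  # band-limited: bin 3 is below the Nyquist bin 8
a_resamp = np.fft.irfft(np.fft.rfft(a), m) * (m / float(n))
assert np.allclose(a_resamp[::m // n], a)  # original samples are recovered
```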
def hfft(a, n=None, axis=-1, norm=None):
"""
Compute the FFT of a signal which has Hermitian symmetry (real spectrum).
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See also
--------
rfft : Compute the one-dimensional FFT for real input.
ihfft : The inverse of `hfft`.
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time domain
and is real in the frequency domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
Examples
--------
>>> signal = np.array([1, 2, 3, 4, 3, 2])
>>> np.fft.fft(signal)
array([ 15.+0.j, -4.+0.j, 0.+0.j, -1.-0.j, 0.+0.j, -4.+0.j])
>>> np.fft.hfft(signal[:4]) # Input first half of signal
array([ 15., -4., 0., -1., 0., -4.])
>>> np.fft.hfft(signal, 6) # Input entire signal and truncate
array([ 15., -4., 0., -1., 0., -4.])
>>> signal = np.array([[1, 1.j], [-1.j, 2]])
>>> np.conj(signal.T) - signal # check Hermitian symmetry
array([[ 0.-0.j, 0.+0.j],
[ 0.+0.j, 0.-0.j]])
>>> freq_spectrum = np.fft.hfft(signal)
>>> freq_spectrum
array([[ 1., 1.],
[ 2., -2.]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = (a.shape[axis] - 1) * 2
unitary = _unitary(norm)
return irfft(conjugate(a), n, axis) * (sqrt(n) if unitary else n)
def ihfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse FFT of a signal which has Hermitian symmetry.
Parameters
----------
a : array_like
Input array.
n : int, optional
Length of the inverse FFT.
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
If `n` is even, the length of the transformed axis is ``(n/2)+1``.
If `n` is odd, the length is ``(n+1)/2``.
See also
--------
hfft, irfft
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time domain
and is real in the frequency domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
Examples
--------
>>> spectrum = np.array([ 15, -4, 0, -1, 0, -4])
>>> np.fft.ifft(spectrum)
array([ 1.+0.j, 2.-0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.-0.j])
>>> np.fft.ihfft(spectrum)
array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
if n is None:
n = a.shape[axis]
unitary = _unitary(norm)
output = conjugate(rfft(a, n, axis))
return output * (1 / (sqrt(n) if unitary else n))
def _cook_nd_args(a, s=None, axes=None, invreal=0):
if s is None:
shapeless = 1
if axes is None:
s = list(a.shape)
else:
s = take(a.shape, axes)
else:
shapeless = 0
s = list(s)
if axes is None:
axes = list(range(-len(s), 0))
if len(s) != len(axes):
raise ValueError("Shape and axes have different lengths.")
if invreal and shapeless:
s[-1] = (a.shape[axes[-1]] - 1) * 2
return s, axes
def _raw_fftnd(a, s=None, axes=None, function=fft, norm=None):
a = asarray(a)
s, axes = _cook_nd_args(a, s, axes)
itl = list(range(len(axes)))
itl.reverse()
for ii in itl:
a = function(a, n=s[ii], axis=axes[ii], norm=norm)
return a
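`_raw_fftnd` simply applies the 1-D transform along each requested axis in turn (last axis first), so `fftn` factors into repeated `fft` calls:

```python
import numpy as np

a = np.random.rand(4, 6)
by_hand = np.fft.fft(np.fft.fft(a, axis=1), axis=0)  # last axis first
assert np.allclose(np.fft.fftn(a), by_hand)
```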
def fftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional discrete Fourier Transform.
This function computes the *N*-dimensional discrete Fourier Transform over
any number of axes in an *M*-dimensional array by means of the Fast Fourier
Transform (FFT).
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the transform over that axis is
performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT.
fft : The one-dimensional FFT, with definitions and conventions used.
rfftn : The *n*-dimensional FFT of real input.
fft2 : The two-dimensional FFT.
fftshift : Shifts zero-frequency terms to centre of array
Notes
-----
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of all axes, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
See `numpy.fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:3, :3, :3][0]
>>> np.fft.fftn(a, axes=(1, 2))
array([[[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 9.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 18.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> np.fft.fftn(a, (2, 2), axes=(0, 1))
array([[[ 2.+0.j, 2.+0.j, 2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[-2.+0.j, -2.+0.j, -2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> import matplotlib.pyplot as plt
>>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12,
... 2 * np.pi * np.arange(200) / 34)
>>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape)
>>> FS = np.fft.fftn(S)
>>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2))
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, fft, norm)
def ifftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the N-dimensional discrete
Fourier Transform over any number of axes in an M-dimensional array by
means of the Fast Fourier Transform (FFT). In other words,
``ifftn(fftn(a)) == a`` to within numerical accuracy.
For a description of the definitions and conventions used, see `numpy.fft`.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fftn`, i.e. it should have the term for zero frequency
in all axes in the low-order corner, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``ifft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the IFFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
ifft : The one-dimensional inverse FFT.
ifft2 : The two-dimensional inverse FFT.
ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
of array.
Notes
-----
See `numpy.fft` for definitions and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifftn` is called.
Examples
--------
>>> a = np.eye(4)
>>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])
Create and plot an image with band-limited frequency content:
>>> import matplotlib.pyplot as plt
>>> n = np.zeros((200,200), dtype=complex)
>>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))
>>> im = np.fft.ifftn(n).real
>>> plt.imshow(im)
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, ifft, norm)
def fft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional discrete Fourier Transform
This function computes the *n*-dimensional discrete Fourier Transform
over any axes in an *M*-dimensional array by means of the
Fast Fourier Transform (FFT). By default, the transform is computed over
the last two axes of the input array, i.e., a 2-dimensional FFT.
Parameters
----------
a : array_like
Input array, can be complex
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifft2 : The inverse two-dimensional FFT.
fft : The one-dimensional FFT.
fftn : The *n*-dimensional FFT.
fftshift : Shifts zero-frequency terms to the center of the array.
For two-dimensional input, swaps first and third quadrants, and second
and fourth quadrants.
Notes
-----
`fft2` is just `fftn` with a different default for `axes`.
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of the transformed axes, the positive frequency terms
in the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
the axes, in order of decreasingly negative frequency.
See `fftn` for details and a plotting example, and `numpy.fft` for
definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:5, :5][0]
>>> np.fft.fft2(a)
array([[ 50.0 +0.j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5+17.20477401j, 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5 +4.0614962j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5 -4.0614962j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5-17.20477401j, 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ]])
"""
return _raw_fftnd(a, s, axes, fft, norm)
def ifft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the 2-dimensional discrete Fourier
Transform over any number of axes in an M-dimensional array by means of
the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a``
to within numerical accuracy. By default, the inverse transform is
computed over the last two axes of the input array.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fft2`, i.e. it should have the term for zero frequency
in the low-order corner of the two axes, the positive frequency terms in
the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
both axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each axis) of the output (``s[0]`` refers to axis 0,
``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
If `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse.
ifftn : The inverse of the *n*-dimensional FFT.
fft : The one-dimensional FFT.
ifft : The one-dimensional inverse FFT.
Notes
-----
`ifft2` is just `ifftn` with a different default for `axes`.
See `ifftn` for details and a plotting example, and `numpy.fft` for
definitions and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifft2` is called.
Examples
--------
>>> a = 4 * np.eye(4)
>>> np.fft.ifft2(a)
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])
"""
return _raw_fftnd(a, s, axes, ifft, norm)
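# Round-trip sketch (illustrative, not part of the docstring above): ifft2
# inverts fft2 to within numerical accuracy, as stated in the docstring.
# >>> import numpy as np
# >>> x = np.random.rand(4, 4)
# >>> np.allclose(np.fft.ifft2(np.fft.fft2(x)), x)
# True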
def rfftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional discrete Fourier Transform for real input.
This function computes the N-dimensional discrete Fourier Transform over
any number of axes in an M-dimensional real array by means of the Fast
Fourier Transform (FFT). By default, all axes are transformed, with the
real transform performed over the last axis, while the remaining
transforms are complex.
Parameters
----------
a : array_like
Input array, taken to be real.
s : sequence of ints, optional
Shape (length along each transformed axis) to use from the input.
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
The final element of `s` corresponds to `n` for ``rfft(x, n)``, while
for the remaining axes, it corresponds to `n` for ``fft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
If `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
The length of the last axis transformed will be ``s[-1]//2+1``,
while the remaining transformed axes will have lengths according to
`s`, or unchanged from the input.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT
of real input.
fft : The one-dimensional FFT, with definitions and conventions used.
rfft : The one-dimensional FFT of real input.
fftn : The n-dimensional FFT.
rfft2 : The two-dimensional FFT of real input.
Notes
-----
The transform for real input is performed over the last transformation
axis, as by `rfft`, then the transform over the remaining axes is
performed as by `fftn`. The order of the output is as for `rfft` for the
final transformation axis, and as for `fftn` for the remaining
transformation axes.
See `fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.ones((2, 2, 2))
>>> np.fft.rfftn(a)
array([[[ 8.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
>>> np.fft.rfftn(a, axes=(2, 0))
array([[[ 4.+0.j, 0.+0.j],
[ 4.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
s, axes = _cook_nd_args(a, s, axes)
a = rfft(a, s[-1], axes[-1], norm)
for ii in range(len(axes)-1):
a = fft(a, s[ii], axes[ii], norm)
return a
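# Shape sketch (illustrative, not part of the docstring above): for real
# input the last transformed axis has length ``s[-1]//2 + 1``, since the
# spectrum is Hermitian-symmetric and the redundant half is dropped.
# >>> import numpy as np
# >>> np.fft.rfftn(np.ones((4, 6))).shape
# (4, 4)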
def rfft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional FFT of a real array.
Parameters
----------
a : array
Input array, taken to be real.
s : sequence of ints, optional
Shape of the FFT (length of each transformed axis).
axes : sequence of ints, optional
Axes over which to compute the FFT. Default is the last two axes.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The result of the real 2-D FFT.
See Also
--------
rfftn : Compute the N-dimensional discrete Fourier Transform for real
input.
Notes
-----
This is really just `rfftn` with different default behavior.
For more details see `rfftn`.
"""
return rfftn(a, s, axes, norm)
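# Equivalence sketch (illustrative, not part of the docstring above):
# rfft2 is rfftn restricted to the last two axes.
# >>> import numpy as np
# >>> x = np.random.rand(2, 4, 6)
# >>> np.allclose(np.fft.rfft2(x), np.fft.rfftn(x, axes=(-2, -1)))
# True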
def irfftn(a, s=None, axes=None, norm=None):
"""
Compute the inverse of the N-dimensional FFT of real input.
This function computes the inverse of the N-dimensional discrete
Fourier Transform for real input over any number of axes in an
M-dimensional array by means of the Fast Fourier Transform (FFT). In
other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical
accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`,
and for the same reason.)
The input should be ordered in the same way as is returned by `rfftn`,
i.e. as for `irfft` for the final transformation axis, and as for `ifftn`
along all the other axes.
Parameters
----------
a : array_like
Input array.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the
number of input points used along this axis, except for the last axis,
where ``s[-1]//2+1`` points of the input are used.
Along any axis, if the shape indicated by `s` is smaller than that of
the input, the input is cropped. If it is larger, the input is padded
with zeros. If `s` is not given, the shape of the input along the
axes specified by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the inverse FFT. If not given, the last
`len(s)` axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
The length of each transformed axis is as given by the corresponding
element of `s`, or the length of the input in every axis except for the
last one if `s` is not given. In the final transformed axis the length
of the output when `s` is not given is ``2*(m-1)`` where ``m`` is the
length of the final transformed axis of the input. To get an odd
number of output points in the final axis, `s` must be specified.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
rfftn : The forward n-dimensional FFT of real input,
of which `irfftn` is the inverse.
fft : The one-dimensional FFT, with definitions and conventions used.
irfft : The inverse of the one-dimensional FFT of real input.
irfft2 : The inverse of the two-dimensional FFT of real input.
Notes
-----
See `fft` for definitions and conventions used.
See `rfft` for definitions and conventions used for real input.
Examples
--------
>>> a = np.zeros((3, 2, 2))
>>> a[0, 0, 0] = 3 * 2 * 2
>>> np.fft.irfftn(a)
array([[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
s, axes = _cook_nd_args(a, s, axes, invreal=1)
for ii in range(len(axes)-1):
a = ifft(a, s[ii], axes[ii], norm)
a = irfft(a, s[-1], axes[-1], norm)
return a
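# Round-trip sketch (illustrative, not part of the docstring above):
# passing ``a.shape`` resolves the length of the last axis, which is
# otherwise assumed to be even (``2*(m-1)``).
# >>> import numpy as np
# >>> x = np.random.rand(3, 5)
# >>> np.allclose(np.fft.irfftn(np.fft.rfftn(x), x.shape), x)
# True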
def irfft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional inverse FFT of a real array.
Parameters
----------
a : array_like
The input array.
s : sequence of ints, optional
Shape (length of each transformed axis) of the inverse FFT.
axes : sequence of ints, optional
The axes over which to compute the inverse FFT.
Default is the last two axes.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The result of the inverse real 2-D FFT.
See Also
--------
irfftn : Compute the inverse of the N-dimensional FFT of real input.
Notes
-----
This is really `irfftn` with different defaults.
For more details see `irfftn`.
"""
return irfftn(a, s, axes, norm)
| bsd-3-clause |
foss-transportationmodeling/rettina-server | .env/lib/python2.7/encodings/mac_romanian.py | 593 | 13917 | """ Python Character Mapping Codec mac_romanian generated from 'MAPPINGS/VENDORS/APPLE/ROMANIAN.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-romanian',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
u'\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE
u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u2020' # 0xA0 -> DAGGER
u'\xb0' # 0xA1 -> DEGREE SIGN
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa7' # 0xA4 -> SECTION SIGN
u'\u2022' # 0xA5 -> BULLET
u'\xb6' # 0xA6 -> PILCROW SIGN
u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u2122' # 0xAA -> TRADE MARK SIGN
u'\xb4' # 0xAB -> ACUTE ACCENT
u'\xa8' # 0xAC -> DIAERESIS
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\u0102' # 0xAE -> LATIN CAPITAL LETTER A WITH BREVE
u'\u0218' # 0xAF -> LATIN CAPITAL LETTER S WITH COMMA BELOW # for Unicode 3.0 and later
u'\u221e' # 0xB0 -> INFINITY
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\xa5' # 0xB4 -> YEN SIGN
u'\xb5' # 0xB5 -> MICRO SIGN
u'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
u'\u2211' # 0xB7 -> N-ARY SUMMATION
u'\u220f' # 0xB8 -> N-ARY PRODUCT
u'\u03c0' # 0xB9 -> GREEK SMALL LETTER PI
u'\u222b' # 0xBA -> INTEGRAL
u'\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR
u'\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR
u'\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA
u'\u0103' # 0xBE -> LATIN SMALL LETTER A WITH BREVE
u'\u0219' # 0xBF -> LATIN SMALL LETTER S WITH COMMA BELOW # for Unicode 3.0 and later
u'\xbf' # 0xC0 -> INVERTED QUESTION MARK
u'\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK
u'\xac' # 0xC2 -> NOT SIGN
u'\u221a' # 0xC3 -> SQUARE ROOT
u'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
u'\u2248' # 0xC5 -> ALMOST EQUAL TO
u'\u2206' # 0xC6 -> INCREMENT
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE
u'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
u'\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE
u'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
u'\u2013' # 0xD0 -> EN DASH
u'\u2014' # 0xD1 -> EM DASH
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u25ca' # 0xD7 -> LOZENGE
u'\xff' # 0xD8 -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\u0178' # 0xD9 -> LATIN CAPITAL LETTER Y WITH DIAERESIS
u'\u2044' # 0xDA -> FRACTION SLASH
u'\u20ac' # 0xDB -> EURO SIGN
u'\u2039' # 0xDC -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\u203a' # 0xDD -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\u021a' # 0xDE -> LATIN CAPITAL LETTER T WITH COMMA BELOW # for Unicode 3.0 and later
u'\u021b' # 0xDF -> LATIN SMALL LETTER T WITH COMMA BELOW # for Unicode 3.0 and later
u'\u2021' # 0xE0 -> DOUBLE DAGGER
u'\xb7' # 0xE1 -> MIDDLE DOT
u'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
u'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2030' # 0xE4 -> PER MILLE SIGN
u'\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xca' # 0xE6 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xcb' # 0xE8 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\uf8ff' # 0xF0 -> Apple logo
u'\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\u0131' # 0xF5 -> LATIN SMALL LETTER DOTLESS I
u'\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u02dc' # 0xF7 -> SMALL TILDE
u'\xaf' # 0xF8 -> MACRON
u'\u02d8' # 0xF9 -> BREVE
u'\u02d9' # 0xFA -> DOT ABOVE
u'\u02da' # 0xFB -> RING ABOVE
u'\xb8' # 0xFC -> CEDILLA
u'\u02dd' # 0xFD -> DOUBLE ACUTE ACCENT
u'\u02db' # 0xFE -> OGONEK
u'\u02c7' # 0xFF -> CARON
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| apache-2.0 |
danielelinaro/BAL | python/pybal/util.py | 1 | 3521 |
from tables import openFile
from pylab import imshow, axis, show, cm
from numpy import histogram, nonzero, zeros, linspace
def readH5file(filename):
"""
Reads data from an H5 file.
Syntax:
data = readH5file(filename)
where filename is the name of the H5 file and data is a vector of
dictionaries that stores the data contained in filename.
Every element of the vector data is a dictionary with the following
keys:
par - the parameter vector of the integration
t - the time vector
x - the state vector
labels - the labels associated to every step of the
integration. The meaning of the labels is as follows:
-10 integration error
-3 equilibrium point
-2 initial conditions
-1 state at the end of transient evolution
0 regular step
+i intersection with the i-th Poincare' section.
Author:
Daniele Linaro
daniele.linaro@unige.it
June 2010
"""
fid = openFile(filename, 'r')
data = []
for node in fid:
if not node._v_name == '/':
try:
pars = getattr(node.attrs,'parameters')
except AttributeError:
pars = []
data.append({'par': pars,'t': node[:,0],'x': node[:,1:-1],'labels': node[:,-1]})
fid.close()
return data
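# Usage sketch (illustrative only; 'results.h5' is a hypothetical file name):
# data = readH5file('results.h5')
# for entry in data:
#     print entry['par'], entry['x'].shape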
def saveH5file(solutions,filename):
"""
Saves data to an H5 file.
Syntax:
saveH5file(solutions,filename)
where solutions is an array of balSolution objects and filename is the name
of the H5 file where data will be saved.
Author:
Daniele Linaro
daniele.linaro@unige.it
June 2010
"""
import tables as tbl
from numpy import zeros, array, reshape, shape
print 'Saving data...'
# open the file
fid = tbl.openFile(filename,mode='w',title='BAL data file')
# create a filter for compression
filter = tbl.Filters(complevel=5, complib='zlib', shuffle=True, fletcher32=False)
# the type of data that will be stored
atom = tbl.Float32Atom()
# save all solutions
for k,s in enumerate(solutions):
# the letter P in the name means that the H5 file has been saved in Python
name = 'P' + str(k+1).zfill(6)
m = len(s.data['t'])
n = len(s.data['x'])/m
node = fid.createCArray('/',name,atom,(m,n+2),filters=filter)
node[0:,0] = array(s.data['t'])
node[0:,1:-1] = reshape(array(s.data['x']),(m,n))
node[0:,-1] = array(s.data['labels'])
if s.parameters != []:
node.attrs.parameters = array(s.parameters)
# close the file
fid.close()
def bif1d(data, coord=0, ap=0, event=1, coeff=[1.4425, -2.4283, 1.9727, -0.0001]):
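"""
Plots a one-dimensional bifurcation diagram from data read with readH5file.
Syntax:
bif1d(data, coord, ap, event, coeff)
where data is the list of dictionaries returned by readH5file, coord is the
index of the state variable shown on the vertical axis, ap is the index of
the bifurcation parameter inside each 'par' vector, event is the label of
the Poincare' section used to sample the trajectories, and coeff contains
the coefficients of the cubic polynomial used to enhance the contrast of
the grey-scale histogram before it is displayed.
"""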
nlevels = 1024
npars = len(data)
pmin = data[0]['par'][ap]
pmax = data[-1]['par'][ap]
i = 0
while len(data[i]['t']) < 3:
i = i+1
idx = nonzero(data[i]['labels'] == event)[0]
coord_min = min(data[i]['x'][idx,coord])
coord_max = max(data[i]['x'][idx,coord])
for k in range(i,npars):
if len(data[k]['t']) > 2:
idx = nonzero(data[k]['labels'] == event)[0]
tmp_min = min(data[k]['x'][idx,coord])
tmp_max = max(data[k]['x'][idx,coord])
if tmp_min < coord_min:
coord_min = tmp_min
if tmp_max > coord_max:
coord_max = tmp_max
x = linspace(pmin, pmax, npars)
y = linspace(coord_min, coord_max, nlevels+1)
bifdiag = zeros([nlevels,npars])
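# Each column of bifdiag is the histogram of the sampled state variable at
# one parameter value; stacking the columns over all parameter values gives
# the grey-scale bifurcation diagram.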
for k,entry in enumerate(data):
if len(entry['t']) == 2:
continue
idx = nonzero(entry['labels'] == event)[0]
bifdiag[:,k] = histogram(entry['x'][idx,coord], y)[0]
nth = 100
bifdiag[bifdiag > nth] = nth
bifdiag = bifdiag / nth
bifdiag = coeff[0]*bifdiag**3 + coeff[1]*bifdiag**2 + coeff[2]*bifdiag + coeff[3]
imshow(bifdiag, cmap=cm.gray, extent=[pmin,pmax,coord_min,coord_max])
axis('tight')
show()
| mit |
numenta/nupic | examples/tm/tm_high_order.py | 15 | 17726 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
A simple tutorial that shows some features of the Temporal Memory.
The following program has the purpose of presenting some
basic properties of the Temporal Memory, in particular when it comes
to how it handles high-order sequences.
"""
import numpy as np
import random
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from nupic.bindings.algorithms import TemporalMemory as TM
def accuracy(current, predicted):
"""
Computes the accuracy of the TM at time-step t based on the prediction
at time-step t-1 and the current active columns at time-step t.
@param current (array) binary vector containing current active columns
@param predicted (array) binary vector containing predicted active columns
@return acc (float) prediction accuracy of the TM at time-step t
"""
acc = 0
if np.count_nonzero(predicted) > 0:
acc = float(np.dot(current, predicted))/float(np.count_nonzero(predicted))
return acc
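# Quick sanity check (illustrative only): when the prediction matches the
# active columns exactly, the accuracy is 1.0.
# >>> accuracy(np.array([1, 0, 1]), np.array([1, 0, 1]))
# 1.0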
def corruptVector(v1, noiseLevel, numActiveCols):
"""
Corrupts a copy of a binary vector by inverting noiseLevel percent of its bits.
@param v1 (array) binary vector whose copy will be corrupted
@param noiseLevel (float) amount of noise to be applied on the new vector
@param numActiveCols (int) number of sparse columns that represent an input
@return v2 (array) corrupted binary vector
"""
size = len(v1)
v2 = np.zeros(size, dtype="uint32")
bitsToSwap = int(noiseLevel * numActiveCols)
# Copy the contents of v1 into v2
for i in range(size):
v2[i] = v1[i]
for _ in range(bitsToSwap):
i = random.randrange(size)
if v2[i] == 1:
v2[i] = 0
else:
v2[i] = 1
return v2
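# Quick sanity check (illustrative only): with noiseLevel=0.0 no bits are
# swapped, so the function returns an identical copy of its input.
# >>> v = np.zeros(8, dtype="uint32"); v[:2] = 1
# >>> np.array_equal(corruptVector(v, 0.0, 2), v)
# True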
def showPredictions():
"""
Shows predictions of the TM when presented with the characters A, B, C, D, X, and
Y without any contextual information, that is, not embedded within a sequence.
"""
for k in range(6):
tm.reset()
print "--- " + "ABCDXY"[k] + " ---"
tm.compute(set(seqT[k][:].nonzero()[0].tolist()), learn=False)
activeColumnsIndices = [tm.columnForCell(i) for i in tm.getActiveCells()]
predictedColumnIndices = [tm.columnForCell(i) for i in tm.getPredictiveCells()]
currentColumns = [1 if i in activeColumnsIndices else 0 for i in range(tm.numberOfColumns())]
predictedColumns = [1 if i in predictedColumnIndices else 0 for i in range(tm.numberOfColumns())]
print("Active cols: " + str(np.nonzero(currentColumns)[0]))
print("Predicted cols: " + str(np.nonzero(predictedColumns)[0]))
print ""
def trainTM(sequence, timeSteps, noiseLevel):
"""
Trains the TM with given sequence for a given number of time steps and level of input
corruption
@param sequence (array) array whose rows are the input characters
@param timeSteps (int) number of time steps in which the TM will be presented with sequence
@param noiseLevel (float) amount of noise to be applied on the characters in the sequence
"""
currentColumns = np.zeros(tm.numberOfColumns(), dtype="uint32")
predictedColumns = np.zeros(tm.numberOfColumns(), dtype="uint32")
ts = 0
for t in range(timeSteps):
tm.reset()
for k in range(4):
v = corruptVector(sequence[k][:], noiseLevel, sparseCols)
tm.compute(set(v[:].nonzero()[0].tolist()), learn=True)
activeColumnsIndices = [tm.columnForCell(i) for i in tm.getActiveCells()]
predictedColumnIndices = [tm.columnForCell(i) for i in tm.getPredictiveCells()]
currentColumns = [1 if i in activeColumnsIndices else 0 for i in range(tm.numberOfColumns())]
acc = accuracy(currentColumns, predictedColumns)
x.append(ts)
y.append(acc)
ts += 1
predictedColumns = [1 if i in predictedColumnIndices else 0 for i in range(tm.numberOfColumns())]
uintType = "uint32"
random.seed(1)
tm = TM(columnDimensions = (2048,),
cellsPerColumn=8,
initialPermanence=0.21,
connectedPermanence=0.3,
minThreshold=15,
maxNewSynapseCount=40,
permanenceIncrement=0.1,
permanenceDecrement=0.1,
activationThreshold=15,
predictedSegmentDecrement=0.01,
)
sparsity = 0.02
sparseCols = int(tm.numberOfColumns() * sparsity)
# We will create a sparse representation of characters A, B, C, D, X, and Y.
# In this particular example we manually construct them, but usually you would
# use the spatial pooler to build these.
seq1 = np.zeros((4, tm.numberOfColumns()), dtype="uint32")
seq1[0, 0:sparseCols] = 1 # Input SDR representing "A"
seq1[1, sparseCols:2*sparseCols] = 1 # Input SDR representing "B"
seq1[2, 2*sparseCols:3*sparseCols] = 1 # Input SDR representing "C"
seq1[3, 3*sparseCols:4*sparseCols] = 1 # Input SDR representing "D"
seq2 = np.zeros((4, tm.numberOfColumns()), dtype="uint32")
seq2[0, 4*sparseCols:5*sparseCols] = 1 # Input SDR representing "X"
seq2[1, sparseCols:2*sparseCols] = 1 # Input SDR representing "B"
seq2[2, 2*sparseCols:3*sparseCols] = 1 # Input SDR representing "C"
seq2[3, 5*sparseCols:6*sparseCols] = 1 # Input SDR representing "Y"
seqT = np.zeros((6, tm.numberOfColumns()), dtype="uint32")
seqT[0, 0:sparseCols] = 1 # Input SDR representing "A"
seqT[1, sparseCols:2*sparseCols] = 1 # Input SDR representing "B"
seqT[2, 2*sparseCols:3*sparseCols] = 1 # Input SDR representing "C"
seqT[3, 3*sparseCols:4*sparseCols] = 1 # Input SDR representing "D"
seqT[4, 4*sparseCols:5*sparseCols] = 1 # Input SDR representing "X"
seqT[5, 5*sparseCols:6*sparseCols] = 1 # Input SDR representing "Y"
# PART 1. Feed the TM with sequence "ABCD". The TM will eventually learn
# the pattern and its prediction accuracy will go to 1.0 (except in-between sequences
# where the TM doesn't output any prediction)
print ""
print "-"*50
print "Part 1. We present the sequence ABCD to the TM. The TM will eventually"
print "will learn the sequence and predict the upcoming characters. This can be"
print "measured by the prediction accuracy in Fig 1."
print "N.B. In-between sequences the accuracy is 0.0 as the TM does not output"
print "any prediction."
print "-"*50
print ""
x = []
y = []
trainTM(seq1, timeSteps=10, noiseLevel=0.0)
plt.ylim([-0.1,1.1])
plt.plot(x, y)
plt.xlabel("Timestep")
plt.ylabel("Prediction Accuracy")
plt.title("Fig. 1: TM learns sequence ABCD")
plt.savefig("figure_1")
plt.close()
print ""
print "-"*50
print "Once the TM has learned the sequence ABCD, we will present the individual"
print "characters to the TM to know its prediction. The TM outputs the columns"
print "that become active upon the presentation of a particular character as well"
print "as the columns predicted in the next time step. Here, you should see that"
print "A predicts B, B predicts C, C predicts D, and D does not output any"
print "prediction."
print "N.B. Here, we are presenting individual characters, that is, a character"
print "deprived of context in a sequence. There is no prediction for characters"
print "X and Y as we have not presented them to the TM in any sequence."
print "-"*50
print ""
showPredictions()
print ""
print "-"*50
print "Part 2. We now present the sequence XBCY to the TM. As expected, the accuracy will"
print "drop until the TM learns the new sequence (Fig 2). What will be the prediction of"
print "the TM if presented with the sequence BC? This would depend on what character"
print "anteceding B. This is an important feature of high-order sequences."
print "-"*50
print ""
x = []
y = []
trainTM(seq2, timeSteps=10, noiseLevel=0.0)
# In this figure you can see how the TM starts making good predictions for particular
# characters (spikes in the plot). Then, it will get half of its predictions right, which
# correspond to the times in which is presented with character C. After some time, it
# will learn correctly the sequence XBCY, and predict its characters accordingly.
plt.ylim([-0.1,1.1])
plt.plot(x, y)
plt.xlabel("Timestep")
plt.ylabel("Prediction Accuracy")
plt.title("Fig. 2: TM learns new sequence XBCY")
plt.savefig("figure_2")
plt.close()
print ""
print "-"*50
print "We will present again each of the characters individually to the TM, that is,"
print "not within any of the two sequences. When presented with character A the TM"
print "predicts B, B predicts C, but this time C outputs a simultaneous prediction of"
print "both D and Y. In order to disambiguate, the TM would require to know if the"
print "preceding characters were AB or XB. When presented with character X the TM"
print "predicts B, whereas Y and D yield no prediction."
print "-"*50
print ""
showPredictions()
# PART 3. Now we will present noisy inputs to the TM. We will add noise to the sequence XBCY
# by corrupting 30% of its bits. We would like to see how the TM responds in the presence of
# noise and how it recovers from it.
print ""
print "-"*50
print "Part 3. We will add noise to the sequence XBCY by corrupting 30% of the bits in the vectors"
print "encoding each character. We would expect to see a decrease in prediction accuracy as the"
print "TM is unable to learn the random noise in the input (Fig 3). However, this decrease is not"
print "significant."
print "-"*50
print ""
x = []
y = []
trainTM(seq2, timeSteps=50, noiseLevel=0.3)
plt.ylim([-0.1,1.1])
plt.plot(x, y)
plt.xlabel("Timestep")
plt.ylabel("Prediction Accuracy")
plt.title("Fig. 3: Accuracy in TM with 30% noise in input")
plt.savefig("figure_3")
plt.close()
print ""
print "-"*50
print "Let's have a look again at the output of the TM when presented with noisy"
print "input (30%). Here, the noise is low that the TM is not affected by it,"
print "which would be the case if we saw 'noisy' columns being predicted when"
print "presented with individual characters. Thus, we could say that the TM exhibits"
print "resilience to noise in its input."
print "-"*50
print ""
showPredictions()
# Let's corrupt the sequence more by adding 50% of noise to each of its characters.
# Here, we would expect to see some 'noisy' columns being predicted when the TM is
# presented with the individual characters.
print ""
print "-"*50
print "Now, we will set noise to be 50% of the bits in the characters X, B, C, and Y."
print "As expected, the accuracy will decrease (Fig 5) and 'noisy' columns will be"
print "predicted by the TM."
print "-"*50
print ""
x = []
y = []
trainTM(seq2, timeSteps=50, noiseLevel=0.5)
print ""
print "-"*50
print "Let's have a look again at the output of the TM when presented with noisy"
print "input. The prediction of some characters (eg. X) now includes columns that"
print "are not related to any other character. This is because the TM tried to learn"
print "the noise in the input patterns."
print "-"*50
print ""
showPredictions()
plt.ylim([-0.1,1.1])
plt.plot(x, y)
plt.xlabel("Timestep")
plt.ylabel("Prediction Accuracy")
plt.title("Fig. 4: Accuracy in TM with 50% noise in input")
plt.savefig("figure_4")
plt.close()
# Will the TM be able to forget the 'noisy' columns learned in the previous step?
# We will present the TM with the original sequence XBCY so it forgets the 'noisy'.
# columns.
x = []
y = []
trainTM(seq2, timeSteps=10, noiseLevel=0.0)
print ""
print "-"*50
print "After presenting the original sequence XBCY to the TM, we would expect to see"
print "the predicted noisy columns from the previous step disappear. We will verify that"
print "by presenting the individual characters to the TM."
print "-"*50
print ""
showPredictions()
# We can see how the prediction accuracy goes back to 1.0 (as before, not in-between sequences)
# when the TM 'forgets' the noisy columns.
plt.ylim([-0.1,1.1])
plt.plot(x, y)
plt.xlabel("Timestep")
plt.ylabel("Prediction Accuracy")
plt.title("Fig. 5: TM forgets noise in sequence XBCY when noise is over")
plt.savefig("figure_5")
plt.close()
# Let's corrupt the sequence even more and add 90% of noise to each of its characters.
# Here, we would expect to see even more of a decrease in accuracy along with more 'noisy'
# columns being predicted.
print ""
print "-"*50
print "We will add more noise to the characters in the sequence XBCY. This time we will"
print "corrupt 90% of its contents. As expected, the accuracy will decrease (Fig 6) and"
print "'noisy' columns will be predicted by the TM."
print "-"*50
print ""
x = []
y = []
trainTM(seq2, timeSteps=50, noiseLevel=0.9)
print ""
print "-"*50
print "Next, we will have a look at the output of the TM when presented with the"
print "individual characters of the sequence. As before, we see 'noisy' predicted"
print "columns emerging as a result of the TM trying to learn the noise."
print "-"*50
print ""
showPredictions()
# In this figure we can observe how the prediction accuracy is affected by the presence
# of noise in the input. However, the accuracy does not drop dramatically even with 90%
# of noise, which implies that the TM exhibits some resilience to noise in its input
# and does not easily forget a well-learned, real pattern.
plt.ylim([-0.1,1.1])
plt.plot(x, y)
plt.xlabel("Timestep")
plt.ylabel("Prediction Accuracy")
plt.title("Fig. 6: Accuracy with 90% noise in input")
plt.savefig("figure_6")
plt.close()
# Let's present the original sequence to the TM in order to make it forget the noisy columns.
# After this, the TM will predict accurately the sequence again, and its predictions will
# not include 'noisy' columns anymore.
x = []
y = []
trainTM(seq2, timeSteps=25, noiseLevel=0.0)
# We will observe how the prediction accuracy gets back to 1.0 (not in-between sequences)
# as the TM is presented with the original sequence.
plt.ylim([-0.1,1.1])
plt.plot(x, y)
plt.xlabel("Timestep")
plt.ylabel("Prediction Accuracy")
plt.title("Fig. 7: When noise is suspended, accuracy is restored")
plt.savefig("figure_7")
plt.close()
# The TM restores its prediction accuracy and it can be seen when presented with the individual characters.
# There's no noisy columns being predicted.
print ""
print "-"*50
print "After presenting noisy input to the TM, we present the original sequence in"
print "order to make it re-learn XBCY. We verify that this was achieved by presenting"
print "the TM with the individual characters and observing its output. Again, we can"
print "see that the 'noisy' columns are not being predicted anymore, and that the"
print "prediction accuracy goes back to 1.0 when the sequence is presented (Fig 7)."
print "-"*50
print ""
showPredictions()
# PART 4. Now, we will present both sequences ABCD and XBCY randomly to the TM.
# For this purpose we will start with a new TM.
# What would be the output of the TM when presented with character D if it has
# been exposed to sequences ABCD and XBCY occurring randomly one after the other?
# If one quarter of the time the TM sees the sequence ABCDABCD, another quarter the
# TM sees ABCDXBCY, another quarter it sees XBCYXBCY, and the last quarter it saw
# XBCYABCD, then the TM would exhibit simultaneous predictions for characters D, Y
# and C.
print ""
print "-"*50
print "Part 4. We will present both sequences ABCD and XBCY randomly to the TM."
print "Here, we might observe simultaneous predictions occurring when the TM is"
print "presented with characters D, Y, and C. For this purpose we will use a"
print "blank TM"
print "NB. Here we will not reset the TM after presenting each sequence with the"
print "purpose of making the TM learn different predictions for D and Y."
print "-"*50
print ""
tm = TM(columnDimensions = (2048,),
cellsPerColumn=8,
initialPermanence=0.21,
connectedPermanence=0.3,
minThreshold=15,
maxNewSynapseCount=40,
permanenceIncrement=0.1,
permanenceDecrement=0.1,
activationThreshold=15,
predictedSegmentDecrement=0.01,
)
for t in range(75):
rnd = random.randrange(2)
for k in range(4):
if rnd == 0:
tm.compute(set(seq1[k][:].nonzero()[0].tolist()), learn=True)
else:
tm.compute(set(seq2[k][:].nonzero()[0].tolist()), learn=True)
print ""
print "-"*50
print "We now have a look at the output of the TM when presented with the individual"
print "characters A, B, C, D, X, and Y. We might observe simultaneous predictions when"
print "presented with character D (predicting A and X), character Y (predicting A and X),"
print "and when presented with character C (predicting D and Y)."
print "N.B. Due to the stochasticity of this script, we might not observe simultaneous"
print "predictions in *all* the aforementioned characters."
print "-"*50
print ""
showPredictions()
print ""
print "-*"*25
print "Scroll up to see the development of this simple"
print "tutorial. Also open the source file to see more"
print "comments regarding each part of the script."
print "All images generated by this script will be saved"
print "in your current working directory."
print "-*"*25
print ""
| agpl-3.0 |
ambyte/Vertaler | src/packages/requests/packages/oreos/monkeys.py | 7 | 25930 | # -*- coding: utf-8 -*-
"""
oreos.monkeys
~~~~~~~~~~~~~
Monkeypatches.
"""
#!/usr/bin/env python
#
####
# Copyright 2000 by Timothy O'Malley <timo@alum.mit.edu>
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software
# and its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Timothy O'Malley not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# Timothy O'Malley DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL Timothy O'Malley BE LIABLE FOR
# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
####
#
# Id: Cookie.py,v 2.29 2000/08/23 05:28:49 timo Exp
# by Timothy O'Malley <timo@alum.mit.edu>
#
# Cookie.py is a Python module for the handling of HTTP
# cookies as a Python dictionary. See RFC 2109 for more
# information on cookies.
#
# The original idea to treat Cookies as a dictionary came from
# Dave Mitchell (davem@magnet.com) in 1995, when he released the
# first version of nscookie.py.
#
####
r"""
Here's a sample session to show how to use this module.
At the moment, this is the only documentation.
The Basics
----------
Importing is easy..
>>> import Cookie
Most of the time you start by creating a cookie. Cookies come in
three flavors, each with slightly different encoding semantics, but
more on that later.
>>> C = Cookie.SimpleCookie()
>>> C = Cookie.SerialCookie()
>>> C = Cookie.SmartCookie()
[Note: Long-time users of Cookie.py will remember using
Cookie.Cookie() to create an Cookie object. Although deprecated, it
is still supported by the code. See the Backward Compatibility notes
for more information.]
Once you've created your Cookie, you can add values just as if it were
a dictionary.
>>> C = Cookie.SmartCookie()
>>> C["fig"] = "newton"
>>> C["sugar"] = "wafer"
>>> C.output()
'Set-Cookie: fig=newton\r\nSet-Cookie: sugar=wafer'
Notice that the printable representation of a Cookie is the
appropriate format for a Set-Cookie: header. This is the
default behavior. You can change the header and printed
attributes by using the .output() function
>>> C = Cookie.SmartCookie()
>>> C["rocky"] = "road"
>>> C["rocky"]["path"] = "/cookie"
>>> print C.output(header="Cookie:")
Cookie: rocky=road; Path=/cookie
>>> print C.output(attrs=[], header="Cookie:")
Cookie: rocky=road
The load() method of a Cookie extracts cookies from a string. In a
CGI script, you would use this method to extract the cookies from the
HTTP_COOKIE environment variable.
>>> C = Cookie.SmartCookie()
>>> C.load("chips=ahoy; vienna=finger")
>>> C.output()
'Set-Cookie: chips=ahoy\r\nSet-Cookie: vienna=finger'
The load() method is darn-tootin smart about identifying cookies
within a string. Escaped quotation marks, nested semicolons, and other
such trickeries do not confuse it.
>>> C = Cookie.SmartCookie()
>>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=\\012;";')
>>> print C
Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=\012;"
Each element of the Cookie also supports all of the RFC 2109
Cookie attributes. Here's an example which sets the Path
attribute.
>>> C = Cookie.SmartCookie()
>>> C["oreo"] = "doublestuff"
>>> C["oreo"]["path"] = "/"
>>> print C
Set-Cookie: oreo=doublestuff; Path=/
Each dictionary element has a 'value' attribute, which gives you
back the value associated with the key.
>>> C = Cookie.SmartCookie()
>>> C["twix"] = "none for you"
>>> C["twix"].value
'none for you'
A Bit More Advanced
-------------------
As mentioned before, there are three different flavors of Cookie
objects, each with different encoding/decoding semantics. This
section briefly discusses the differences.
SimpleCookie
The SimpleCookie expects that all values should be standard strings.
Just to be sure, SimpleCookie invokes the str() builtin to convert
the value to a string, when the values are set dictionary-style.
>>> C = Cookie.SimpleCookie()
>>> C["number"] = 7
>>> C["string"] = "seven"
>>> C["number"].value
'7'
>>> C["string"].value
'seven'
>>> C.output()
'Set-Cookie: number=7\r\nSet-Cookie: string=seven'
SerialCookie
The SerialCookie expects that all values should be serialized using
cPickle (or pickle, if cPickle isn't available). As a result of
serializing, SerialCookie can save almost any Python object to a
value, and recover the exact same object when the cookie has been
returned. (SerialCookie can yield some strange-looking cookie
values, however.)
>>> C = Cookie.SerialCookie()
>>> C["number"] = 7
>>> C["string"] = "seven"
>>> C["number"].value
7
>>> C["string"].value
'seven'
>>> C.output()
'Set-Cookie: number="I7\\012."\r\nSet-Cookie: string="S\'seven\'\\012p1\\012."'
Be warned, however, if SerialCookie cannot de-serialize a value (because
it isn't a valid pickle'd object), IT WILL RAISE AN EXCEPTION.
SmartCookie
The SmartCookie combines aspects of each of the other two flavors.
When setting a value in a dictionary-fashion, the SmartCookie will
serialize (ala cPickle) the value *if and only if* it isn't a
Python string. String objects are *not* serialized. Similarly,
when the load() method parses out values, it attempts to de-serialize
the value. If it fails, then it fallsback to treating the value
as a string.
>>> C = Cookie.SmartCookie()
>>> C["number"] = 7
>>> C["string"] = "seven"
>>> C["number"].value
7
>>> C["string"].value
'seven'
>>> C.output()
'Set-Cookie: number="I7\\012."\r\nSet-Cookie: string=seven'
Backwards Compatibility
-----------------------
In order to keep compatibility with earlier versions of Cookie.py,
it is still possible to use Cookie.Cookie() to create a Cookie. In
fact, this simply returns a SmartCookie.
>>> C = Cookie.Cookie()
>>> print C.__class__.__name__
SmartCookie
Finis.
""" #"
# ^
# |----helps out font-lock
#
# Import our required modules
#
import string
try:
from cPickle import dumps, loads
except ImportError:
from pickle import dumps, loads
import re, warnings
__all__ = ["CookieError","BaseCookie","SimpleCookie","SerialCookie",
"SmartCookie","Cookie"]
_nulljoin = ''.join
_semispacejoin = '; '.join
_spacejoin = ' '.join
#
# Define an exception visible to External modules
#
class CookieError(Exception):
pass
# These quoting routines conform to the RFC2109 specification, which in
# turn references the character definitions from RFC2068. They provide
# a two-way quoting algorithm. Any non-text character is translated
# into a 4 character sequence: a forward-slash followed by the
# three-digit octal equivalent of the character. Any '\' or '"' is
# quoted with a preceding '\' slash.
#
# These are taken from RFC2068 and RFC2109.
# _LegalChars is the list of chars which don't require "'s
# _Translator hash-table for fast quoting
#
_LegalChars = string.ascii_letters + string.digits + "!#$%&'*+-.^_`|~[]_"
_Translator = {
'\000' : '\\000', '\001' : '\\001', '\002' : '\\002',
'\003' : '\\003', '\004' : '\\004', '\005' : '\\005',
'\006' : '\\006', '\007' : '\\007', '\010' : '\\010',
'\011' : '\\011', '\012' : '\\012', '\013' : '\\013',
'\014' : '\\014', '\015' : '\\015', '\016' : '\\016',
'\017' : '\\017', '\020' : '\\020', '\021' : '\\021',
'\022' : '\\022', '\023' : '\\023', '\024' : '\\024',
'\025' : '\\025', '\026' : '\\026', '\027' : '\\027',
'\030' : '\\030', '\031' : '\\031', '\032' : '\\032',
'\033' : '\\033', '\034' : '\\034', '\035' : '\\035',
'\036' : '\\036', '\037' : '\\037',
# Because of the way browsers really handle cookies (as opposed
# to what the RFC says) we also encode , and ;
',' : '\\054', ';' : '\\073',
'"' : '\\"', '\\' : '\\\\',
'\177' : '\\177', '\200' : '\\200', '\201' : '\\201',
'\202' : '\\202', '\203' : '\\203', '\204' : '\\204',
'\205' : '\\205', '\206' : '\\206', '\207' : '\\207',
'\210' : '\\210', '\211' : '\\211', '\212' : '\\212',
'\213' : '\\213', '\214' : '\\214', '\215' : '\\215',
'\216' : '\\216', '\217' : '\\217', '\220' : '\\220',
'\221' : '\\221', '\222' : '\\222', '\223' : '\\223',
'\224' : '\\224', '\225' : '\\225', '\226' : '\\226',
'\227' : '\\227', '\230' : '\\230', '\231' : '\\231',
'\232' : '\\232', '\233' : '\\233', '\234' : '\\234',
'\235' : '\\235', '\236' : '\\236', '\237' : '\\237',
'\240' : '\\240', '\241' : '\\241', '\242' : '\\242',
'\243' : '\\243', '\244' : '\\244', '\245' : '\\245',
'\246' : '\\246', '\247' : '\\247', '\250' : '\\250',
'\251' : '\\251', '\252' : '\\252', '\253' : '\\253',
'\254' : '\\254', '\255' : '\\255', '\256' : '\\256',
'\257' : '\\257', '\260' : '\\260', '\261' : '\\261',
'\262' : '\\262', '\263' : '\\263', '\264' : '\\264',
'\265' : '\\265', '\266' : '\\266', '\267' : '\\267',
'\270' : '\\270', '\271' : '\\271', '\272' : '\\272',
'\273' : '\\273', '\274' : '\\274', '\275' : '\\275',
'\276' : '\\276', '\277' : '\\277', '\300' : '\\300',
'\301' : '\\301', '\302' : '\\302', '\303' : '\\303',
'\304' : '\\304', '\305' : '\\305', '\306' : '\\306',
'\307' : '\\307', '\310' : '\\310', '\311' : '\\311',
'\312' : '\\312', '\313' : '\\313', '\314' : '\\314',
'\315' : '\\315', '\316' : '\\316', '\317' : '\\317',
'\320' : '\\320', '\321' : '\\321', '\322' : '\\322',
'\323' : '\\323', '\324' : '\\324', '\325' : '\\325',
'\326' : '\\326', '\327' : '\\327', '\330' : '\\330',
'\331' : '\\331', '\332' : '\\332', '\333' : '\\333',
'\334' : '\\334', '\335' : '\\335', '\336' : '\\336',
'\337' : '\\337', '\340' : '\\340', '\341' : '\\341',
'\342' : '\\342', '\343' : '\\343', '\344' : '\\344',
'\345' : '\\345', '\346' : '\\346', '\347' : '\\347',
'\350' : '\\350', '\351' : '\\351', '\352' : '\\352',
'\353' : '\\353', '\354' : '\\354', '\355' : '\\355',
'\356' : '\\356', '\357' : '\\357', '\360' : '\\360',
'\361' : '\\361', '\362' : '\\362', '\363' : '\\363',
'\364' : '\\364', '\365' : '\\365', '\366' : '\\366',
'\367' : '\\367', '\370' : '\\370', '\371' : '\\371',
'\372' : '\\372', '\373' : '\\373', '\374' : '\\374',
'\375' : '\\375', '\376' : '\\376', '\377' : '\\377'
}
_idmap = ''.join(chr(x) for x in xrange(256))
def _quote(str, LegalChars=_LegalChars,
idmap=_idmap, translate=string.translate):
#
# If the string does not need to be double-quoted,
# then just return the string. Otherwise, surround
# the string in doublequotes and quote (with a preceding \)
# any special characters.
#
if "" == translate(str, idmap, LegalChars):
return str
else:
return '"' + _nulljoin( map(_Translator.get, str, str) ) + '"'
# end _quote
_OctalPatt = re.compile(r"\\[0-3][0-7][0-7]")
_QuotePatt = re.compile(r"[\\].")
def _unquote(str):
# If there aren't any doublequotes,
# then there can't be any special characters. See RFC 2109.
if len(str) < 2:
return str
if str[0] != '"' or str[-1] != '"':
return str
# We have to assume that we must decode this string.
# Down to work.
# Remove the "s
str = str[1:-1]
# Check for special sequences. Examples:
# \012 --> \n
# \" --> "
#
i = 0
n = len(str)
res = []
while 0 <= i < n:
Omatch = _OctalPatt.search(str, i)
Qmatch = _QuotePatt.search(str, i)
if not Omatch and not Qmatch: # Neither matched
res.append(str[i:])
break
# else:
j = k = -1
if Omatch: j = Omatch.start(0)
if Qmatch: k = Qmatch.start(0)
if Qmatch and ( not Omatch or k < j ): # QuotePatt matched
res.append(str[i:k])
res.append(str[k+1])
i = k+2
else: # OctalPatt matched
res.append(str[i:j])
res.append( chr( int(str[j+1:j+4], 8) ) )
i = j+4
return _nulljoin(res)
# end _unquote
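# Round-trip sketch (illustrative only): _quote wraps strings containing
# characters outside _LegalChars in doublequotes, escaping them as octal
# sequences, and _unquote reverses the transformation.
# >>> _quote('fudge;')
# '"fudge\\073"'
# >>> _unquote(_quote('fudge;'))
# 'fudge;'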
# The _getdate() routine is used to set the expiration time in
# the cookie's HTTP header. By default, _getdate() returns the
# current time in the appropriate "expires" format for a
# Set-Cookie header. The one optional argument is an offset from
# now, in seconds. For example, an offset of -3600 means "one hour ago".
# The offset may be a floating point number.
#
_weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
_monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
def _getdate(future=0, weekdayname=_weekdayname, monthname=_monthname):
from time import gmtime, time
now = time()
year, month, day, hh, mm, ss, wd, y, z = gmtime(now + future)
return "%s, %02d-%3s-%4d %02d:%02d:%02d GMT" % \
(weekdayname[wd], day, monthname[month], year, hh, mm, ss)
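# Usage sketch (illustrative only): _getdate(3600) returns the expiry time
# one hour from now in cookie format, e.g. 'Mon, 01-Jan-2001 01:00:00 GMT'.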
#
# A class to hold ONE key,value pair.
# In a cookie, each such pair may have several attributes.
# so this class is used to keep the attributes associated
# with the appropriate key,value pair.
# This class also includes a coded_value attribute, which
# is used to hold the network representation of the
# value. This is most useful when Python objects are
# pickled for network transit.
#
class Morsel(dict):
# RFC 2109 lists these attributes as reserved:
# path comment domain
# max-age secure version
#
# For historical reasons, these attributes are also reserved:
# expires
#
# This is an extension from Microsoft:
# httponly
#
# This dictionary provides a mapping from the lowercase
# variant on the left to the appropriate traditional
# formatting on the right.
_reserved = { "expires" : "expires",
"path" : "Path",
"comment" : "Comment",
"domain" : "Domain",
"max-age" : "Max-Age",
"secure" : "secure",
"httponly" : "httponly",
"version" : "Version",
}
def __init__(self):
# Set defaults
self.key = self.value = self.coded_value = None
# Set default attributes
for K in self._reserved:
dict.__setitem__(self, K, "")
# end __init__
def __setitem__(self, K, V):
K = K.lower()
if not K in self._reserved:
raise CookieError("Invalid Attribute %s" % K)
dict.__setitem__(self, K, V)
# end __setitem__
def isReservedKey(self, K):
return K.lower() in self._reserved
# end isReservedKey
def set(self, key, val, coded_val,
LegalChars=_LegalChars,
idmap=_idmap, translate=string.translate):
# First we verify that the key isn't a reserved word
# Second we make sure it only contains legal characters
if key.lower() in self._reserved:
raise CookieError("Attempt to set a reserved key: %s" % key)
if "" != translate(key, idmap, LegalChars):
raise CookieError("Illegal key value: %s" % key)
# It's a good key, so save it.
self.key = key
self.value = val
self.coded_value = coded_val
# end set
def output(self, attrs=None, header = "Set-Cookie:"):
return "%s %s" % ( header, self.OutputString(attrs) )
__str__ = output
def __repr__(self):
return '<%s: %s=%s>' % (self.__class__.__name__,
self.key, repr(self.value) )
def js_output(self, attrs=None):
# Print javascript
return """
<script type="text/javascript">
<!-- begin hiding
document.cookie = \"%s\";
// end hiding -->
</script>
""" % ( self.OutputString(attrs).replace('"',r'\"'), )
# end js_output()
def OutputString(self, attrs=None):
# Build up our result
#
result = []
RA = result.append
# First, the key=value pair
RA("%s=%s" % (self.key, self.coded_value))
# Now add any defined attributes
if attrs is None:
attrs = self._reserved
items = self.items()
items.sort()
for K,V in items:
if V == "": continue
if K not in attrs: continue
if K == "expires" and type(V) == type(1):
RA("%s=%s" % (self._reserved[K], _getdate(V)))
elif K == "max-age" and type(V) == type(1):
RA("%s=%d" % (self._reserved[K], V))
elif K == "secure":
RA(str(self._reserved[K]))
elif K == "httponly":
RA(str(self._reserved[K]))
else:
RA("%s=%s" % (self._reserved[K], V))
# Return the result
return _semispacejoin(result)
# end OutputString
# end Morsel class
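# Illustrative sketch, not part of the original module: building one Morsel
# by hand; the cookie name and value here are invented for the example.
def _example_morsel():
    m = Morsel()
    m.set("session", "abc123", "abc123")
    m["path"] = "/"
    m["max-age"] = 3600
    return m.output()   # 'Set-Cookie: session=abc123; Max-Age=3600; Path=/'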
#
# Pattern for finding cookie
#
# This used to be strict parsing based on the RFC2109 and RFC2068
# specifications. I have since discovered that MSIE 3.0x doesn't
# follow the character rules outlined in those specs. As a
# result, the parsing rules here are less strict.
#
_LegalCharsPatt = r"[\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=\[\]\_]"
_CookiePattern = re.compile(
r"(?x)" # This is a Verbose pattern
r"(?P<key>" # Start of group 'key'
""+ _LegalCharsPatt +"+?" # Any word of at least one letter, nongreedy
r")" # End of group 'key'
r"\s*=\s*" # Equal Sign
r"(?P<val>" # Start of group 'val'
r'"(?:[^\\"]|\\.)*"' # Any doublequoted string
r"|" # or
r"\w{3},\s[\w\d-]{9,11}\s[\d:]{8}\sGMT" # Special case for "expires" attr
r"|" # or
""+ _LegalCharsPatt +"*" # Any word or empty string
r")" # End of group 'val'
r"\s*;?" # Probably ending in a semi-colon
)
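# Illustrative sketch, not part of the original module: the pattern is
# applied repeatedly with search(), yielding one key/value pair per match.
def _example_cookie_pattern():
    m = _CookiePattern.search('sid=abc; theme="dark"')
    return m.group('key'), m.group('val')   # ('sid', 'abc')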
# At long last, here is the cookie class.
# Using this class is almost just like using a dictionary.
# See this module's docstring for example usage.
#
class BaseCookie(dict):
# A container class for a set of Morsels
#
def value_decode(self, val):
"""real_value, coded_value = value_decode(STRING)
Called prior to setting a cookie's value from the network
representation. The VALUE is the value read from HTTP
header.
Override this function to modify the behavior of cookies.
"""
return val, val
# end value_decode
def value_encode(self, val):
"""real_value, coded_value = value_encode(VALUE)
Called prior to setting a cookie's value from the dictionary
representation. The VALUE is the value being assigned.
Override this function to modify the behavior of cookies.
"""
strval = str(val)
return strval, strval
# end value_encode
def __init__(self, input=None):
if input: self.load(input)
# end __init__
def __set(self, key, real_value, coded_value):
"""Private method for setting a cookie's value"""
M = self.get(key, Morsel())
M.set(key, real_value, coded_value)
dict.__setitem__(self, key, M)
# end __set
def __setitem__(self, key, value):
"""Dictionary style assignment."""
rval, cval = self.value_encode(value)
self.__set(key, rval, cval)
# end __setitem__
def output(self, attrs=None, header="Set-Cookie:", sep="\015\012"):
"""Return a string suitable for HTTP."""
result = []
items = self.items()
items.sort()
for K,V in items:
result.append( V.output(attrs, header) )
return sep.join(result)
# end output
__str__ = output
def __repr__(self):
L = []
items = self.items()
items.sort()
for K,V in items:
L.append( '%s=%s' % (K,repr(V.value) ) )
return '<%s: %s>' % (self.__class__.__name__, _spacejoin(L))
def js_output(self, attrs=None):
"""Return a string suitable for JavaScript."""
result = []
items = self.items()
items.sort()
for K,V in items:
result.append( V.js_output(attrs) )
return _nulljoin(result)
# end js_output
def load(self, rawdata):
"""Load cookies from a string (presumably HTTP_COOKIE) or
from a dictionary. Loading cookies from a dictionary 'd'
is equivalent to calling:
map(Cookie.__setitem__, d.keys(), d.values())
"""
if type(rawdata) == type(""):
self.__ParseString(rawdata)
else:
# self.update() wouldn't call our custom __setitem__
for k, v in rawdata.items():
self[k] = v
return
# end load()
def __ParseString(self, str, patt=_CookiePattern):
i = 0 # Our starting point
n = len(str) # Length of string
M = None # current morsel
while 0 <= i < n:
# Start looking for a cookie
match = patt.search(str, i)
if not match: break # No more cookies
K,V = match.group("key"), match.group("val")
i = match.end(0)
# Parse the key, value in case it's metainfo
if K[0] == "$":
# We ignore attributes which pertain to the cookie
# mechanism as a whole. See RFC 2109.
# (Does anyone care?)
if M:
M[ K[1:] ] = V
elif K.lower() in Morsel._reserved:
if M:
M[ K ] = _unquote(V)
else:
rval, cval = self.value_decode(V)
self.__set(K, rval, cval)
M = self[K]
# end __ParseString
# end BaseCookie class
class SimpleCookie(BaseCookie):
"""SimpleCookie
SimpleCookie supports strings as cookie values. When setting
the value using the dictionary assignment notation, SimpleCookie
calls the builtin str() to convert the value to a string. Values
received from HTTP are kept as strings.
"""
def value_decode(self, val):
return _unquote( val ), val
def value_encode(self, val):
strval = str(val)
return strval, _quote( strval )
# end SimpleCookie
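# Illustrative sketch, not part of the original module: a SimpleCookie
# round trip from an HTTP_COOKIE-style string back to response headers.
def _example_simplecookie():
    c = SimpleCookie()
    c.load('sid=abc123')
    c['lang'] = 'en'
    # -> 'Set-Cookie: lang=en\r\nSet-Cookie: sid=abc123'
    return c.output()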
class SerialCookie(BaseCookie):
"""SerialCookie
SerialCookie supports arbitrary objects as cookie values. All
values are serialized (using cPickle) before being sent to the
client. All incoming values are assumed to be valid Pickle
representations. IF AN INCOMING VALUE IS NOT IN A VALID PICKLE
FORMAT, THEN AN EXCEPTION WILL BE RAISED.
Note: Large cookie values add overhead because they must be
retransmitted on every HTTP transaction.
Note: HTTP has a 2k limit on the size of a cookie. This class
does not check for this limit, so be careful!!!
"""
def __init__(self, input=None):
warnings.warn("SerialCookie class is insecure; do not use it",
DeprecationWarning)
BaseCookie.__init__(self, input)
# end __init__
def value_decode(self, val):
# This could raise an exception!
return loads( _unquote(val) ), val
def value_encode(self, val):
return val, _quote( dumps(val) )
# end SerialCookie
class SmartCookie(BaseCookie):
"""SmartCookie
SmartCookie supports arbitrary objects as cookie values. If the
object is a string, then it is quoted. If the object is not a
string, however, then SmartCookie will use cPickle to serialize
the object into a string representation.
Note: Large cookie values add overhead because they must be
retransmitted on every HTTP transaction.
Note: HTTP has a 2k limit on the size of a cookie. This class
does not check for this limit, so be careful!!!
"""
def __init__(self, input=None):
warnings.warn("Cookie/SmartCookie class is insecure; do not use it",
DeprecationWarning)
BaseCookie.__init__(self, input)
# end __init__
def value_decode(self, val):
strval = _unquote(val)
try:
return loads(strval), val
except:
return strval, val
def value_encode(self, val):
if type(val) == type(""):
return val, _quote(val)
else:
return val, _quote( dumps(val) )
# end SmartCookie
###########################################################
# Backwards Compatibility: Don't break any existing code!
# We provide Cookie() as an alias for SmartCookie()
Cookie = SmartCookie
#
###########################################################
def _test():
import doctest, Cookie
return doctest.testmod(Cookie)
if __name__ == "__main__":
_test()
#Local Variables:
#tab-width: 4
#end:
| gpl-2.0 |
nicoTrombon/DjangoPolls | env/Lib/encodings/mac_romanian.py | 593 | 13917 | """ Python Character Mapping Codec mac_romanian generated from 'MAPPINGS/VENDORS/APPLE/ROMANIAN.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-romanian',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
u'\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE
u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u2020' # 0xA0 -> DAGGER
u'\xb0' # 0xA1 -> DEGREE SIGN
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa7' # 0xA4 -> SECTION SIGN
u'\u2022' # 0xA5 -> BULLET
u'\xb6' # 0xA6 -> PILCROW SIGN
u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u2122' # 0xAA -> TRADE MARK SIGN
u'\xb4' # 0xAB -> ACUTE ACCENT
u'\xa8' # 0xAC -> DIAERESIS
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\u0102' # 0xAE -> LATIN CAPITAL LETTER A WITH BREVE
u'\u0218' # 0xAF -> LATIN CAPITAL LETTER S WITH COMMA BELOW # for Unicode 3.0 and later
u'\u221e' # 0xB0 -> INFINITY
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\xa5' # 0xB4 -> YEN SIGN
u'\xb5' # 0xB5 -> MICRO SIGN
u'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
u'\u2211' # 0xB7 -> N-ARY SUMMATION
u'\u220f' # 0xB8 -> N-ARY PRODUCT
u'\u03c0' # 0xB9 -> GREEK SMALL LETTER PI
u'\u222b' # 0xBA -> INTEGRAL
u'\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR
u'\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR
u'\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA
u'\u0103' # 0xBE -> LATIN SMALL LETTER A WITH BREVE
u'\u0219' # 0xBF -> LATIN SMALL LETTER S WITH COMMA BELOW # for Unicode 3.0 and later
u'\xbf' # 0xC0 -> INVERTED QUESTION MARK
u'\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK
u'\xac' # 0xC2 -> NOT SIGN
u'\u221a' # 0xC3 -> SQUARE ROOT
u'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
u'\u2248' # 0xC5 -> ALMOST EQUAL TO
u'\u2206' # 0xC6 -> INCREMENT
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE
u'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
u'\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE
u'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
u'\u2013' # 0xD0 -> EN DASH
u'\u2014' # 0xD1 -> EM DASH
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u25ca' # 0xD7 -> LOZENGE
u'\xff' # 0xD8 -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\u0178' # 0xD9 -> LATIN CAPITAL LETTER Y WITH DIAERESIS
u'\u2044' # 0xDA -> FRACTION SLASH
u'\u20ac' # 0xDB -> EURO SIGN
u'\u2039' # 0xDC -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\u203a' # 0xDD -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\u021a' # 0xDE -> LATIN CAPITAL LETTER T WITH COMMA BELOW # for Unicode 3.0 and later
u'\u021b' # 0xDF -> LATIN SMALL LETTER T WITH COMMA BELOW # for Unicode 3.0 and later
u'\u2021' # 0xE0 -> DOUBLE DAGGER
u'\xb7' # 0xE1 -> MIDDLE DOT
u'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
u'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2030' # 0xE4 -> PER MILLE SIGN
u'\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xca' # 0xE6 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xcb' # 0xE8 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\uf8ff' # 0xF0 -> Apple logo
u'\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\u0131' # 0xF5 -> LATIN SMALL LETTER DOTLESS I
u'\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u02dc' # 0xF7 -> SMALL TILDE
u'\xaf' # 0xF8 -> MACRON
u'\u02d8' # 0xF9 -> BREVE
u'\u02d9' # 0xFA -> DOT ABOVE
u'\u02da' # 0xFB -> RING ABOVE
u'\xb8' # 0xFC -> CEDILLA
u'\u02dd' # 0xFD -> DOUBLE ACUTE ACCENT
u'\u02db' # 0xFE -> OGONEK
u'\u02c7' # 0xFF -> CARON
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
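# Illustrative sketch, not part of the generated codec: round-tripping a
# Romanian string through the two tables with the codecs machinery.
def _example_roundtrip():
    data = u'\u0218tefan'   # LATIN CAPITAL LETTER S WITH COMMA BELOW + 'tefan'
    encoded = codecs.charmap_encode(data, 'strict', encoding_table)[0]
    decoded = codecs.charmap_decode(encoded, 'strict', decoding_table)[0]
    return encoded, decoded   # ('\xaftefan', u'\u0218tefan')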
| bsd-3-clause |
vehrka/dhondt | dhondt.py | 1 | 7514 | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import sys
from argparse import ArgumentParser
class dhondt():
"""Class to calculate d'Hondt statistics
:Authors: Pedro Ferrer, Silvia Fuentes
:Date: 2015-07-20
:version: 1.1
The minimum data to be provided is:
+ The number of seats [nseats]
+ The minimum percentage to get into the calculation [minper]
+ A dictionary with the votes of the candidatures [dcandi]
dcandi = {'000001': 51000, '000002': 46000, '000007': 34000, '000006': 29000, 'others': 31000}
CAVEAT LECTOR
+ It doesn't resolve seat ties
+ Always gets rid of a party called 'others'
"""
def __init__(self, nseats, minper, dcandi, census=0, blankv=0, sploitv=0):
self.nseats = nseats
self.minper = minper
self.census = census
self.blankv = blankv
self.sploitv = sploitv
self.dcandi = dcandi.copy()
self.calc()
def __repr__(self):
candidatures = sorted(self.dcandi.items(), key=lambda p: p[1], reverse=True)
return '<dhondt nseats:{0} minper:{1} candi:{2}>'.format(self.nseats, self.minper, candidatures)
@property
def nseats(self):
return self.__nseats
@nseats.setter
def nseats(self, nseats):
if type(nseats) is int and nseats > 0:
self.__nseats = nseats
else:
raise AttributeError('The number of seats value must be an integer greater than 0')
@property
def minper(self):
return self.__minper
@minper.setter
def minper(self, minper):
if type(minper) is float and minper > 0:
self.__minper = minper
else:
raise AttributeError('The minimum percentage value must be a float greater than 0')
@property
def census(self):
return self.__census
@census.setter
def census(self, census):
if type(census) is int:
self.__census = census
else:
raise AttributeError('The census value must be an integer')
@property
def blankv(self):
return self.__blankv
@blankv.setter
def blankv(self, blankv):
if type(blankv) is int:
self.__blankv = blankv
else:
raise AttributeError('The blank votes value must be an integer')
@property
def sploitv(self):
return self.__sploitv
@sploitv.setter
def sploitv(self, sploitv):
if type(sploitv) is int:
self.__sploitv = sploitv
else:
raise AttributeError('The spoilt votes value must be an integer')
@property
def dcandi(self):
return self.__dcandi
@dcandi.setter
def dcandi(self, dcandi):
if type(dcandi) is dict:
self.__dcandi = dcandi.copy()
try:
sum(dcandi.values())
except TypeError:
raise AttributeError('The candidatures votes values must be integers')
else:
raise AttributeError('The candidatures data must be a dictionary')
def __mindata(self):
if self.nseats and self.minper and self.dcandi:
return True
return False
def calc(self):
"""Performs the calculation"""
if not self.__mindata():
sys.exit('Minimum data not set')
vtot = sum(self.dcandi.values())
# # TODO: Finish script with the RESULTS and PARTICIPATION sections
# ncan = len(self.dcandi)
# if self.census < (vtot + self.blankv + self.sploitv):
# bvcensus = False
# self.census = 0
# nabs = 0
# else:
# bvcensus = True
# nabs = self.census - vtot - self.blankv - self.sploitv
# Sort the candidatures in descending number of votes
candidatures = sorted(self.dcandi.items(), key=lambda p: p[1], reverse=True)
minvot = (((vtot + self.blankv) * self.minper) / 100) - 1
# Keep only the candidatures that reached the minimum percentage
candismin = list(filter(lambda p: p[1] > minvot, candidatures))
candivali = list(filter(lambda p: p[0] != 'others', candismin))
# candirest = list(filter(lambda p: p[1] < minvot + 1, candidatures))
# Prepare the lists for the calculations
candinames = [p[0] for p in candivali]
candimaxis = [p[1] for p in candivali]
canditrab = [(p[1], 0) for p in candivali]
# Prepare the dictionaries for the results
self.repre = dict(zip(candinames, [0 for name in candinames]))
self.asigna = dict(zip(candinames, [[maxi] for maxi in candimaxis]))
# Perform the seat calculation
for i in range(self.nseats):
# Find the party with the maximum number of votes in this round
dic01 = dict(zip(candinames, canditrab))
odic01 = sorted(dic01.items(), key=lambda p: p[1][0], reverse=True)
parmax = odic01[0][0]
inparmax = candinames.index(parmax)
maxivotos = candimaxis[inparmax]
nseatsre = canditrab[inparmax][1]
# The d'Hondt step: award the seat, then store the party's next
# quotient, votes / (seats won + 1); nseatsre + 2 because nseatsre
# still holds the seat count from before this award.
canditrab[inparmax] = (maxivotos / (nseatsre + 2), nseatsre + 1)
self.repre[parmax] = nseatsre + 1
# Fill the seat-allocation table dictionary
for j, trab in enumerate(canditrab):
self.asigna[candinames[j]].append(int(trab[0]))
# Remember which party was assigned the seat before the last one;
# also set it on the first pass so penparmax is always defined.
if i == self.nseats - 2 or i == 0:
    penparmax = parmax
# Calculate the votes needed for another seat
self.falta = {}
votult = self.asigna[parmax][-2]
for name in candinames:
votu = self.dcandi[name]
crep = self.repre[name]
if name == parmax:
# The last assigned seat gets the number differently
crepp = self.repre[penparmax]
votp = self.dcandi[penparmax]
vfalta = int(votp / crepp * (crep + 1) - votu)
else:
cvot = self.asigna[name][-1]
vfalta = int((votult - cvot) * (crep + 1))
pfalta = (vfalta / votu) * 100.0
# Stores the number of votes and the percentage over the actual votes
self.falta[name] = (vfalta, pfalta)
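# Illustrative sketch, not part of the original module: allocating six
# seats for a small, invented election (vote counts are made up, and no
# two quotients tie, so the no-tie-breaking caveat above does not apply).
def _example_allocation():
    votes = {'A': 51000, 'B': 46000, 'C': 34000, 'D': 29000}
    result = dhondt(6, 3.0, votes)
    return result.repre   # {'A': 2, 'B': 2, 'C': 1, 'D': 1}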
if __name__ == '__main__':
"""Performs the d'Hondt seats calculation
$ python dhondt.py 21 3.0 "{'a': 100, 'b': 200}"
"""
baseparser = ArgumentParser(description="Performs the d'Hondt seats calculation")
group_min = baseparser.add_argument_group('Minimum data')
group_min.add_argument('nseats', help='Number of seats for the calculation')
group_min.add_argument('minper', help='Minimun percentage of votes to enter in the calculation')
group_min.add_argument('datcan', help='Dictionary with the candidatures data')
args = vars(baseparser.parse_args())
# Get the input data: nseats, minper, dcandi
nseats = int(args['nseats'])
minper = float(args['minper'])
dcandi = dict((k, eval(v)) for (k, v) in [it.split(':') for it in args['datcan'].replace("'", "").strip('{}').split(', ')])
# Performs the dhont calc
result = dhondt(nseats, minper, dcandi)
# Returns data calc
print(result)
print('<seats: {0}>'.format(sorted(result.repre.items(), key=lambda p: p[1], reverse=True)))
| gpl-2.0 |
buildbot/buildbot | master/buildbot/steps/package/rpm/__init__.py | 5 | 1124 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Portions Copyright Buildbot Team Members
# Portions Copyright Steve 'Ashcrow' Milner <smilner+buildbot@redhat.com>
"""
Steps specific to the rpm format.
"""
from buildbot.steps.package.rpm.mock import MockBuildSRPM
from buildbot.steps.package.rpm.mock import MockRebuild
from buildbot.steps.package.rpm.rpmbuild import RpmBuild
from buildbot.steps.package.rpm.rpmlint import RpmLint
__all__ = ['RpmBuild', 'RpmLint', 'MockBuildSRPM', 'MockRebuild']
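# Illustrative sketch, not part of the original module: a typical factory
# wiring for these steps. The specfile keyword is assumed from RpmBuild's
# usual signature and the path is invented.
def _example_steps():
    return [RpmBuild(specfile="package.spec"), RpmLint()]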
| gpl-2.0 |
cyclops8456/vyos-kernel | debian/lib/python/debian_linux/utils.py | 6 | 2683 | import debian, re, textwrap
class SortedDict(dict):
__slots__ = '_list',
def __init__(self, entries = None):
super(SortedDict, self).__init__()
self._list = []
if entries is not None:
for key, value in entries:
self[key] = value
def __delitem__(self, key):
super(SortedDict, self).__delitem__(key)
self._list.remove(key)
def __setitem__(self, key, value):
super(SortedDict, self).__setitem__(key, value)
if key not in self._list:
self._list.append(key)
def iterkeys(self):
for i in iter(self._list):
yield i
def iteritems(self):
for i in iter(self._list):
yield (i, self[i])
def itervalues(self):
for i in iter(self._list):
yield self[i]
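# Illustrative sketch, not part of the original module: unlike a plain
# Python 2 dict, SortedDict preserves insertion order when iterating.
def _example_sorteddict():
    d = SortedDict([('b', 2), ('a', 1)])
    return list(d.iterkeys())   # ['b', 'a'] -- insertion order, not sorted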
class Templates(dict):
def __init__(self, dir = "debian/templates"):
self.dir = dir
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError: pass
ret = self._read(key)
dict.__setitem__(self, key, ret)
return ret
def __setitem__(self, key, value):
raise NotImplementedError()
def _read(self, name):
prefix, id = name.split('.', 1)
f = file("%s/%s.in" % (self.dir, name))
if prefix == 'control':
return self._readControl(f)
return f.read()
def _readControl(self, f):
entries = []
while True:
e = debian.Package()
last = None
lines = []
while True:
line = f.readline()
if not line:
break
line = line.strip('\n')
if not line:
break
if line[0] in ' \t':
if not last:
raise ValueError('Continuation line seen before first header')
lines.append(line.lstrip())
continue
if last:
e[last] = '\n'.join(lines)
i = line.find(':')
if i < 0:
raise ValueError("Not a header, not a continuation: ``%s''" % line)
last = line[:i]
lines = [line[i+1:].lstrip()]
if last:
e[last] = '\n'.join(lines)
if not e:
break
entries.append(e)
return entries
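# Illustrative sketch, not part of the original module: _readControl()
# splits a debian/control-style stream into stanzas, folding continuation
# lines with '\n'. Assumes debian.Package is importable as above.
def _example_read_control():
    from StringIO import StringIO
    data = "Package: linux-image\nDepends: foo,\n bar\n\n"
    entries = Templates()._readControl(StringIO(data))
    return entries[0]['Depends']   # 'foo,\nbar'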
class TextWrapper(textwrap.TextWrapper):
wordsep_re = re.compile(
r'(\s+|' # any whitespace
r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash
| gpl-2.0 |
erkanay/django | django/contrib/gis/gdal/field.py | 105 | 6431 | from ctypes import byref, c_int
from datetime import date, datetime, time
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import OGRException
from django.contrib.gis.gdal.prototypes import ds as capi
from django.utils.encoding import force_text
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_Fld_* routines are relevant here.
class Field(GDALBase):
"""
This class wraps an OGR Field, and needs to be instantiated
from a Feature object.
"""
#### Python 'magic' routines ####
def __init__(self, feat, index):
"""
Initializes on the feature object and the integer index of
the field within the feature.
"""
# Setting the feature pointer and index.
self._feat = feat
self._index = index
# Getting the pointer for this field.
fld_ptr = capi.get_feat_field_defn(feat.ptr, index)
if not fld_ptr:
raise OGRException('Cannot create OGR Field, invalid pointer given.')
self.ptr = fld_ptr
# Setting the class depending upon the OGR Field Type (OFT)
self.__class__ = OGRFieldTypes[self.type]
# OFTReal with no precision should be an OFTInteger.
if isinstance(self, OFTReal) and self.precision == 0:
self.__class__ = OFTInteger
self._double = True
def __str__(self):
"Returns the string representation of the Field."
return str(self.value).strip()
#### Field Methods ####
def as_double(self):
"Retrieves the Field's value as a double (float)."
return capi.get_field_as_double(self._feat.ptr, self._index)
def as_int(self):
"Retrieves the Field's value as an integer."
return capi.get_field_as_integer(self._feat.ptr, self._index)
def as_string(self):
"Retrieves the Field's value as a string."
string = capi.get_field_as_string(self._feat.ptr, self._index)
return force_text(string, encoding=self._feat.encoding, strings_only=True)
def as_datetime(self):
"Retrieves the Field's value as a tuple of date & time components."
yy, mm, dd, hh, mn, ss, tz = [c_int() for i in range(7)]
status = capi.get_field_as_datetime(
self._feat.ptr, self._index, byref(yy), byref(mm), byref(dd),
byref(hh), byref(mn), byref(ss), byref(tz))
if status:
return (yy, mm, dd, hh, mn, ss, tz)
else:
raise OGRException('Unable to retrieve date & time information from the field.')
#### Field Properties ####
@property
def name(self):
"Returns the name of this Field."
name = capi.get_field_name(self.ptr)
return force_text(name, encoding=self._feat.encoding, strings_only=True)
@property
def precision(self):
"Returns the precision of this Field."
return capi.get_field_precision(self.ptr)
@property
def type(self):
"Returns the OGR type of this Field."
return capi.get_field_type(self.ptr)
@property
def type_name(self):
"Return the OGR field type name for this Field."
return capi.get_field_type_name(self.type)
@property
def value(self):
"Returns the value of this Field."
# Default is to get the field as a string.
return self.as_string()
@property
def width(self):
"Returns the width of this Field."
return capi.get_field_width(self.ptr)
### The Field sub-classes for each OGR Field type. ###
class OFTInteger(Field):
_double = False
@property
def value(self):
"Returns an integer contained in this field."
if self._double:
# If this is really from an OFTReal field with no precision,
# read as a double and cast as Python int (to prevent overflow).
return int(self.as_double())
else:
return self.as_int()
@property
def type(self):
"""
GDAL uses OFTReals to represent OFTIntegers in created
shapefiles -- forcing the type here since the underlying field
type may actually be OFTReal.
"""
return 0
class OFTReal(Field):
@property
def value(self):
"Returns a float contained in this field."
return self.as_double()
# String & Binary fields, just subclasses
class OFTString(Field):
pass
class OFTWideString(Field):
pass
class OFTBinary(Field):
pass
# OFTDate, OFTTime, OFTDateTime fields.
class OFTDate(Field):
@property
def value(self):
"Returns a Python `date` object for the OFTDate field."
try:
yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
return date(yy.value, mm.value, dd.value)
except (ValueError, OGRException):
return None
class OFTDateTime(Field):
@property
def value(self):
"Returns a Python `datetime` object for this OFTDateTime field."
# TODO: Adapt timezone information.
# See http://lists.osgeo.org/pipermail/gdal-dev/2006-February/007990.html
# The `tz` variable has values of: 0=unknown, 1=localtime (ambiguous),
# 100=GMT, 104=GMT+1, 80=GMT-5, etc.
try:
yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
return datetime(yy.value, mm.value, dd.value, hh.value, mn.value, ss.value)
except (ValueError, OGRException):
return None
class OFTTime(Field):
@property
def value(self):
"Returns a Python `time` object for this OFTTime field."
try:
yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
return time(hh.value, mn.value, ss.value)
except (ValueError, OGRException):
return None
# List fields are also just subclasses
class OFTIntegerList(Field):
pass
class OFTRealList(Field):
pass
class OFTStringList(Field):
pass
class OFTWideStringList(Field):
pass
# Class mapping dictionary for OFT Types and reverse mapping.
OGRFieldTypes = {
0: OFTInteger,
1: OFTIntegerList,
2: OFTReal,
3: OFTRealList,
4: OFTString,
5: OFTStringList,
6: OFTWideString,
7: OFTWideStringList,
8: OFTBinary,
9: OFTDate,
10: OFTTime,
11: OFTDateTime,
}
ROGRFieldTypes = dict((cls, num) for num, cls in OGRFieldTypes.items())
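# Illustrative sketch, not part of the original module: looking a wrapper
# class up by OGR type code and mapping it back via the reverse table.
def _example_type_lookup():
    cls = OGRFieldTypes[2]            # OFTReal
    return cls, ROGRFieldTypes[cls]   # (OFTReal, 2)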
| bsd-3-clause |
wartman4404/servo | tests/wpt/css-tests/tools/html5lib/html5lib/treewalkers/etree.py | 658 | 4613 | from __future__ import absolute_import, division, unicode_literals
try:
from collections import OrderedDict
except ImportError:
try:
from ordereddict import OrderedDict
except ImportError:
OrderedDict = dict
import gettext
_ = gettext.gettext
import re
from six import text_type
from . import _base
from ..utils import moduleFactoryFactory
tag_regexp = re.compile("{([^}]*)}(.*)")
def getETreeBuilder(ElementTreeImplementation):
ElementTree = ElementTreeImplementation
ElementTreeCommentType = ElementTree.Comment("asd").tag
class TreeWalker(_base.NonRecursiveTreeWalker):
"""Given the particular ElementTree representation, this implementation,
to avoid using recursion, returns "nodes" as tuples with the following
content:
1. The current element
2. The index of the element relative to its parent
3. A stack of ancestor elements
4. A flag "text", "tail" or None to indicate if the current node is a
text node; either the text or tail of the current element (1)
"""
def getNodeDetails(self, node):
if isinstance(node, tuple): # It might be the root Element
elt, key, parents, flag = node
if flag in ("text", "tail"):
return _base.TEXT, getattr(elt, flag)
else:
node = elt
if not(hasattr(node, "tag")):
node = node.getroot()
if node.tag in ("DOCUMENT_ROOT", "DOCUMENT_FRAGMENT"):
return (_base.DOCUMENT,)
elif node.tag == "<!DOCTYPE>":
return (_base.DOCTYPE, node.text,
node.get("publicId"), node.get("systemId"))
elif node.tag == ElementTreeCommentType:
return _base.COMMENT, node.text
else:
assert type(node.tag) == text_type, type(node.tag)
# This is assumed to be an ordinary element
match = tag_regexp.match(node.tag)
if match:
namespace, tag = match.groups()
else:
namespace = None
tag = node.tag
attrs = OrderedDict()
for name, value in list(node.attrib.items()):
match = tag_regexp.match(name)
if match:
attrs[(match.group(1), match.group(2))] = value
else:
attrs[(None, name)] = value
return (_base.ELEMENT, namespace, tag,
attrs, len(node) or node.text)
def getFirstChild(self, node):
if isinstance(node, tuple):
element, key, parents, flag = node
else:
element, key, parents, flag = node, None, [], None
if flag in ("text", "tail"):
return None
else:
if element.text:
return element, key, parents, "text"
elif len(element):
parents.append(element)
return element[0], 0, parents, None
else:
return None
def getNextSibling(self, node):
if isinstance(node, tuple):
element, key, parents, flag = node
else:
return None
if flag == "text":
if len(element):
parents.append(element)
return element[0], 0, parents, None
else:
return None
else:
if element.tail and flag != "tail":
return element, key, parents, "tail"
elif key < len(parents[-1]) - 1:
return parents[-1][key + 1], key + 1, parents, None
else:
return None
def getParentNode(self, node):
if isinstance(node, tuple):
element, key, parents, flag = node
else:
return None
if flag == "text":
if not parents:
return element
else:
return element, key, parents, None
else:
parent = parents.pop()
if not parents:
return parent
else:
return parent, list(parents[-1]).index(parent), parents, None
return locals()
getETreeModule = moduleFactoryFactory(getETreeBuilder)
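# Illustrative sketch, not part of the original module: walking a tiny
# tree with the stdlib ElementTree implementation; the tag names are
# invented and DOCUMENT_ROOT marks the synthetic root described above.
def _example_walk():
    import xml.etree.ElementTree as ET
    TreeWalker = getETreeModule(ET).TreeWalker
    root = ET.fromstring('<DOCUMENT_ROOT><p>hi</p></DOCUMENT_ROOT>')
    return [token for token in TreeWalker(root)]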
| mpl-2.0 |
Beirdo/gjhurlbu-plugin-video-irishtv | aertv.py | 1 | 33191 | # -*- coding: utf-8 -*-
import re
import os
import sys
from time import strftime, strptime
import time, random
if sys.version_info >= (2, 7):
import json as _json
else:
import simplejson as _json
from cookielib import Cookie
from datetime import datetime, timedelta
from urlparse import urljoin
import xbmc
import xbmcgui
import xbmcplugin
import xbmcvfs
import mycgi
import utils
from loggingexception import LoggingException
import rtmp
import HTMLParser
from BeautifulSoup import BeautifulSoup
from provider import Provider
from brightcove import BrightCoveProvider
from irishtvplayer import BasePlayer
urlRoot = u"http://www.aertv.ie"
apiRoot = u"http://api.aertv.ie"
c_brightcove = u"http://c.brightcove.com"
loginCookieName = u"Aertv_login"
domain = u".aertv.ie"
# Default values, only used if we can't get the info from the net
defaultRTMPUrl = u"rtmpe://d-deg-mcdn.magnet.ie/rtplive&"
TIME_FORMAT = u"%Y-%m-%dT%H:%M:%S"
# Exclude channels
excludeChannels = [
u'/#aertv-live',
u'/#aertv-movies',
u'/#aertv-music',
u'/#aertv-sports',
u'/#unravel-travel',
u'/#dail-eireann',
u'/#dctv'
]
# Hard code paths to logos for users who don't want to use EPG
channelToLogo = {
u'rte-one' : u'http://www.aertv.ie/wp-content/uploads/2012/04/rte_one_162x129.png',
u'rte-two' : u'http://www.aertv.ie/wp-content/uploads/2012/04/rte_two_162x129.png',
u'rte-two-hd' : u'http://www.aertv.ie/wp-content/uploads/2012/05/rte_two_hd_162x129.png',
u'tv3' : u'http://www.aertv.ie/wp-content/uploads/2012/01/tv3_alt_162x129.png',
u'tg4' : u'http://www.aertv.ie/wp-content/uploads/2012/04/tg4_162x129.png',
u'3e' : u'http://www.aertv.ie/wp-content/uploads/2012/01/3e_alt_162x129.png',
#u'aertv-live' : u'http://www.aertv.ie/wp-content/uploads/2012/02/aertv_live_162x1291.png',
#u'aertv-movies' : u'http://www.aertv.ie/wp-content/uploads/2012/02/aertv_movies_162x1291.png',
#u'aertv-music' : u'http://www.aertv.ie/wp-content/uploads/2012/01/aertv_music_162x129.png',
#u'aertv-sports' : u'http://www.aertv.ie/wp-content/uploads/2012/04/aertv_sports_162x129.png',
#u'unravel-travel' : u'http://www.aertv.ie/wp-content/uploads/2012/03/unravel_travel_162x1292.png',
#u'dctv' : u'http://www.aertv.ie/wp-content/uploads/2012/04/dctv_162x129.png',
u'bbc-one' : u'http://www.aertv.ie/wp-content/uploads/2012/04/bbc_one_162x129.png',
u'bbc-two' : u'http://www.aertv.ie/wp-content/uploads/2012/04/bbc_two_162x129.png',
u'bbc-three' : u'http://www.aertv.ie/wp-content/uploads/2012/04/bbc_three_162x129.png',
u'bbc-four' : u'http://www.aertv.ie/wp-content/uploads/2012/04/bbc_four_162x129.png',
u'comedy-central' : u'http://www.aertv.ie/wp-content/uploads/2012/01/comedy_central_new_162x129.png',
u'comedy-central-extra' : u'http://www.aertv.ie/wp-content/uploads/2012/01/comedy_central_extra_new_162x129.png',
u'rte-one1' : u'http://www.aertv.ie/wp-content/uploads/2012/04/rte_one+1_162x129.png',
u'rte-news-now' : u'http://www.aertv.ie/wp-content/uploads/2012/04/rte_news_now_162x129.png',
u'bbc-news' : u'http://www.aertv.ie/wp-content/uploads/2012/04/bbc_news_162x129.png',
u'euronews' : u'http://www.aertv.ie/wp-content/uploads/2012/04/euronews_162x129.png',
u'france24' : u'http://www.aertv.ie/wp-content/uploads/2012/04/france24_162x129.png',
u'rt' : u'http://www.aertv.ie/wp-content/uploads/2012/04/russia_today_162x129.png',
u'rtejr' : u'http://www.aertv.ie/wp-content/uploads/2012/04/rte_jr_162x129.png',
u'cbbc' : u'http://www.aertv.ie/wp-content/uploads/2012/04/cbbc_162x129.png',
u'cbeebies' : u'http://www.aertv.ie/wp-content/uploads/2012/04/cbeebies_162x129.png',
u'nickelodeon' : u'http://www.aertv.ie/wp-content/uploads/2012/04/nickelodeon_162x129.png',
u'nicktoons' : u'http://www.aertv.ie/wp-content/uploads/2012/04/nicktoons_162x129.png',
u'nickjnr' : u'http://www.aertv.ie/wp-content/uploads/2012/04/nick_jr_162x129.png',
u'mtv' : u'http://www.aertv.ie/wp-content/uploads/2012/04/mtv_162x129.png',
u'vh1' : u'http://www.aertv.ie/wp-content/uploads/2012/04/VH1_162x129.png',
u'viva' : u'http://www.aertv.ie/wp-content/uploads/2012/04/viva_162x129.png'
#u'dail-eireann' : u'http://www.aertv.ie/wp-content/uploads/2012/04/dail_logo_162x129.png'
}
class AerTVProvider(BrightCoveProvider):
def __init__(self):
super(AerTVProvider, self).__init__()
self.loggedIn = False
self.plus = False
def ShowMe(self):
AERTV_NOTICE = os.path.join( sys.modules[u"__main__"].PROFILE_DATA_FOLDER, u"aertv_notice" )
if (len(sys.modules[u"__main__"].addon.getSetting( u'AerTV_email' )) == 0 or len(sys.modules[u"__main__"].addon.getSetting( u'AerTV_password' )) == 0) and xbmcvfs.exists(AERTV_NOTICE):
return False
return True
def initialise(self, httpManager, baseurl, pluginHandle, addon, language, PROFILE_DATA_FOLDER, RESOURCE_PATH):
super(AerTVProvider, self).initialise(httpManager, baseurl, pluginHandle, addon, language, PROFILE_DATA_FOLDER, RESOURCE_PATH)
self.aertvNoticeFilePath = os.path.join( sys.modules[u"__main__"].PROFILE_DATA_FOLDER, u"aertv_notice" )
if hasattr(sys.modules[u"__main__"], u"opener"):
httpManager.SetOpener(sys.modules[u"__main__"].opener)
if hasattr(sys.modules[u"__main__"], u"cookiejar"):
self.cookiejar = sys.modules[u"__main__"].cookiejar
return self.Login()
def Login(self):
self.log(u"", xbmc.LOGDEBUG)
"""
{
'epg': 'WEB_STD',
'user': {
'decay': 40,
'email': 'email',
'fname': 'fname',
'id': '87354',
'ipicid': 'aertv530916c892b25',
'is_paid_subscriber': 0/1,
'lname': 'lname',
'login': True,
'mailchimp': None,
'packages': [
{
'code': 'WEB_STD',
'desc': 'Free Channel Pack',
'package_id': '1'
},
{
"code":"AERTV_PLUS",
"desc":"Aertv Plus",
"package_id":"6"
}
],
'session': 'YWVydHY1MzA5MTZjODkyYjI0_1393452432',
'status': '1',
'val_code': None
}
}
"""
loginJSON = None
email = self.addon.getSetting( u'AerTV_email' ).decode(u'utf8')
password = self.addon.getSetting( u'AerTV_password' ).decode(u'utf8')
if len(email) == 0 or len(password) == 0:
if not xbmcvfs.exists(self.aertvNoticeFilePath):
file = open(self.aertvNoticeFilePath, u'w')
try:
file.write(" ")
finally:
file.close()
dialog = xbmcgui.Dialog()
dialog.ok(self.language(30105), self.language(30106))
self.addon.openSettings(sys.argv[ 0 ])
email = self.addon.getSetting( u'AerTV_email' ).decode(u'utf8')
password = self.addon.getSetting( u'AerTV_password' ).decode(u'utf8')
if len(email) == 0 or len(password) == 0:
self.log(u"No AerTV login details", xbmc.LOGDEBUG)
return False
try:
loginJSON = self.LoginViaCookie()
except (Exception) as exception:
if not isinstance(exception, LoggingException):
exception = LoggingException.fromException(exception)
# 'AerTV login failed',
exception.addLogMessage(self.language(30101))
exception.process(severity = self.logLevel(xbmc.LOGWARNING))
if loginJSON is None:
try:
values = [{u'api':u'login'},{u'user':email},{u'pass':password}]
loginJSON = self.AttemptLogin(values, logUrl = False)
self.log(u"After weblogin loginJSON is None: " + unicode(loginJSON is None), xbmc.LOGDEBUG)
if loginJSON is None:
# 'AerTV login failed',
exception = LoggingException(self.language(30101))
# "Status Message: %s
exception.process(severity = self.logLevel(xbmc.LOGERROR))
return False
self.log(u"Login successful", xbmc.LOGDEBUG)
sessionId = loginJSON[u'user'][u'session']
days02 = 2*24*60*60
expiry = int(time.time()) + days02
self.log(u"Aertv_login expiry: " + unicode(expiry), xbmc.LOGDEBUG)
sessionCookie = self.MakeCookie(u'Aertv_login', sessionId, domain, expiry )
self.cookiejar.set_cookie(sessionCookie)
self.cookiejar.save()
loginJSON = self.LoginViaCookie()
except (Exception) as exception:
if not isinstance(exception, LoggingException):
exception = LoggingException.fromException(exception)
# Error logging into AerTV
exception.addLogMessage(self.language(30101))
exception.process(severity = self.logLevel(xbmc.LOGERROR))
return False
self.LogLoginInfo(loginJSON)
if len(utils.getDictionaryValue(loginJSON[u'user'], u'packages')) < 2:
# Error logging into AerTV
exception = LoggingException(self.language(30101))
exception.process(severity = self.logLevel(xbmc.LOGERROR))
return False
return True
def LogLoginInfo(self, loginJSON):
self.log(u'is_paid_subscriber: %s' % utils.getDictionaryValue(loginJSON[u'user'], u'is_paid_subscriber'))
self.log(u'login: %s' % utils.getDictionaryValue(loginJSON[u'user'], u'login'))
self.log(u'status: %s' % utils.getDictionaryValue(loginJSON[u'user'], u'status'))
packages = utils.getDictionaryValue(loginJSON[u'user'], u'packages')
if packages:
self.log(u'status: %s' % utils.drepr(packages))
def AttemptLogin(self, values, logUrl = False):
self.log(u"", xbmc.LOGDEBUG)
try:
loginJSONText = None
loginJSON = None
url = self.GetAPIUrl(values)
loginJSONText = self.httpManager.GetWebPageDirect(url, logUrl = logUrl)
loginJSON = _json.loads(loginJSONText)
for key in loginJSON:
self.log(u"loginJSON['%s'] exists" % key, xbmc.LOGDEBUG)
if u'user' in loginJSON:
self.log(u"loginJSON['user']", xbmc.LOGDEBUG)
for key in loginJSON[u'user']:
if key == u'fname' or key == u'lname' or key == 'email':
self.log(u"loginJSON['user']['%s'] exists" % key, xbmc.LOGDEBUG)
else:
self.log(u"loginJSON['user']['%s'] = %s" % (key, utils.drepr(loginJSON[u'user'][key])), xbmc.LOGDEBUG)
# Check for failed login
if loginJSON[u'user'][u'login'] != True:
# Show error message
if u'status' in loginJSON[u'user']:
statusMessage = loginJSON[u'user'][u'status']
else:
statusMessage = u"None"
# 'AerTV login failed',
logException = LoggingException(self.language(30101))
# "Status Message: %s
logException.process(self.language(30102) % statusMessage, u"", xbmc.LOGDEBUG)
return None
self.log(u"AerTV successful login", xbmc.LOGDEBUG)
return loginJSON
except (Exception) as exception:
if not isinstance(exception, LoggingException):
exception = LoggingException.fromException(exception)
if loginJSONText is not None:
msg = u"loginJSONText:\n\n%s\n\n" % loginJSONText
exception.addLogMessage(msg)
if loginJSON is not None:
msg = u"epgJSON:\n\n%s\n\n" % utils.drepr(loginJSON)
exception.addLogMessage(msg)
raise exception
def LoginViaCookie(self):
self.log(u"", xbmc.LOGDEBUG)
loginJSON = None
# List all the cookies with the matching name. Result is either an empty list, or a list with a single item
loginCookieList = [cookie for cookie in self.cookiejar if cookie.name == loginCookieName]
if len(loginCookieList) == 0:
self.log(u"No AerTV_login cookie", xbmc.LOGDEBUG)
return None
loginCookie = loginCookieList[0]
now = time.time()
if loginCookie.is_expired(now):
self.log(u"AerTV_login cookie expired", xbmc.LOGDEBUG)
return None
values = [{u'api':u'cookie'}, {u'login':u'web'}]
loginJSON = self.AttemptLogin(values)
return loginJSON
def GetProviderId(self):
return u"AerTV"
def ExecuteCommand(self, mycgi):
return super(AerTVProvider, self).ExecuteCommand(mycgi)
def ShowRootMenu(self):
self.log(u"", xbmc.LOGDEBUG)
try:
channels = None
epgJSON = None
channels = self.GetAvailableChannels()
if self.addon.getSetting( u'AerTV_show_epg' ) <> u'false':
values = [{u'api':u'epg'}, {u'type':u'basic'}]
url = self.GetAPIUrl(values)
epgJSONText = self.httpManager.GetWebPage(url, 300)
epgJSON = _json.loads(epgJSONText)
return self.ShowEPG(channels, epgJSON)
else:
return self.ShowChannelList(channels)
except (Exception) as exception:
if not isinstance(exception, LoggingException):
exception = LoggingException.fromException(exception)
if epgJSON is not None:
msg=u"epgJSON:\n\n%s\n\n" % utils.drepr(epgJSON)
exception.addLogMessage(msg)
if channels is not None:
msg=u"channels:\n\n%s\n\n" % utils.drepr(channels)
exception.addLogMessage(msg)
# Cannot show root menu
exception.addLogMessage(self.language(30010))
exception.process(severity = self.logLevel(xbmc.LOGERROR))
return False
def GetAvailableChannels(self):
html = self.httpManager.GetWebPage(urlRoot, 300)
soup = BeautifulSoup(html)
channels = []
for channel in soup.findAll('a', 'live-channel'):
if len(channel('span')) > 0 or channel[u'href'] in excludeChannels:
continue
channels.append(channel)
return channels
def GetAPIUrl(self, parameters):
# {'api':'ddl', 'type':'basic'} => www.apiRoot.com/api/ddl/type/basic
url = apiRoot
for keyValue in parameters:
key = next(iter(keyValue))  # each entry is a single-key dict
url = url + u'/' + key + u'/' + keyValue[key]
return url
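def _ExampleAPIUrl(self):
    # Hypothetical helper, not in the original plugin: shows how the
    # ordered list of one-entry dicts flattens into a URL path.
    return self.GetAPIUrl([{u'api': u'epg'}, {u'type': u'basic'}])
    # -> u'http://api.aertv.ie/api/epg/type/basic'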
def ParseCommand(self, mycgi):
self.log(u"", xbmc.LOGDEBUG)
(channel, logo, loggedInStr) = mycgi.Params( u'channel', u'logo', u'loggedIn')
if loggedInStr == u'1':
self.loggedIn = True
if channel <> u'':
if logo == u'' and channel in channelToLogo:
logo = channelToLogo[channel]
return self.PlayVideoWithDialog(self.PlayChannel, (channel, logo))
return False
def ShowChannelList(self, channels):
self.log(u"", xbmc.LOGDEBUG)
listItems = []
for anchor in channels:
try:
playerIndex = anchor[u'href'].find(u'#')
if playerIndex == -1:
continue
slug = anchor[u'href'][playerIndex + 1:]
newLabel = anchor.text
description = newLabel
logo = channelToLogo[slug]
newListItem = xbmcgui.ListItem( label=newLabel )
newListItem.setThumbnailImage(logo)
channelUrl = self.GetURLStart() + u'&channel=' + slug + u'&logo=' + mycgi.URLEscape(logo)
infoLabels = {u'Title': newLabel, u'Plot': description, u'PlotOutline': description}
newListItem.setInfo(u'video', infoLabels)
newListItem.setProperty(u"Video", u"true")
listItems.append( (channelUrl, newListItem, False) )
except (Exception) as exception:
# Problem getting details for a particular channel, show a warning and keep going
if not isinstance(exception, LoggingException):
exception = LoggingException.fromException(exception)
# Error processing channel
message = self.language(30067)
try:
message = message + u" " + anchor.text
#message = message.encode('utf8')
except NameError:
pass
exception.addLogMessage(message)
exception.process(severity = self.logLevel(xbmc.LOGWARNING))
xbmcplugin.addDirectoryItems( handle=self.pluginHandle, items=listItems )
xbmcplugin.endOfDirectory( handle=self.pluginHandle, succeeded=True )
return True
def ShowEPG(self, channels, epgJSON):
self.log(u"", xbmc.LOGDEBUG)
channelDetails = self.ParseEPGData(epgJSON)
listItems = []
for anchor in channels:
try:
playerIndex = anchor[u'href'].find(u'#')
if playerIndex == -1:
continue
slug = anchor[u'href'][playerIndex + 1:]
(label, description, logo) = self.GetListItemDataForSlug(channelDetails, slug)
newLabel = anchor.text + " " + label
newListItem = xbmcgui.ListItem( label=newLabel )
newListItem.setThumbnailImage(logo)
channelUrl = self.GetURLStart() + u'&channel=' + slug + u'&logo=' + mycgi.URLEscape(logo)
infoLabels = {u'Title': newLabel, u'Plot': description, u'PlotOutline': description}
newListItem.setInfo(u'video', infoLabels)
newListItem.setProperty(u"Video", u"true")
listItems.append( (channelUrl, newListItem, False) )
except (Exception) as exception:
# Problem getting details for a particular channel, show a warning and keep going
if not isinstance(exception, LoggingException):
exception = LoggingException.fromException(exception)
# Error processing channel
message = self.language(30067)
try:
message = message + u" " + anchor.text
#message = message.encode('utf8')
except NameError:
pass
exception.addLogMessage(message)
exception.process(severity = self.logLevel(xbmc.LOGWARNING))
xbmcplugin.addDirectoryItems( handle=self.pluginHandle, items=listItems )
xbmcplugin.endOfDirectory( handle=self.pluginHandle, succeeded=True )
return True
def GetTimeCutOffs(self):
offset = int(self.addon.getSetting( u'AerTV_epg_offset' ))
startCutOff = datetime.now() + timedelta(hours=offset)
startCutOff = startCutOff.replace(second=0,microsecond=0)
# round time
if startCutOff.minute > 29:
startRound = startCutOff.replace(minute=30)
else:
startRound = startCutOff.replace(minute=0)
endCutOff = startRound + timedelta(hours=2)
return (startCutOff, endCutOff)
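def _ExampleTimeCutOffs(self):
    # Hypothetical illustration, not in the original plugin: with an
    # offset of 0 and a clock reading 18:43, startCutOff is 18:43:00
    # and endCutOff is 20:30:00 (18:43 rounded down to 18:30, plus
    # two hours).
    return self.GetTimeCutOffs()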
def GetEPGDetails(self, channelEntry, startCutOff, endCutOff):
detail = [channelEntry[u'channel'][u'logo']]
videoCount = 0
self.log(u"startCutOff: %s, endCutOff: %s" % (repr(startCutOff), repr(endCutOff)), xbmc.LOGDEBUG)
for video in channelEntry[u'videos']:
try:
self.log(u"repr(datetime): " + repr(datetime))
self.log(u"video: " + utils.drepr(video))
self.log(u"video['starttime']: " + video[u'starttime'])
try:
startTime = datetime.strptime(video[u'starttime'], TIME_FORMAT)
endTime = datetime.strptime(video[u'endtime'], TIME_FORMAT)
except TypeError:
startTime = datetime.fromtimestamp(time.mktime(time.strptime(video[u'starttime'], TIME_FORMAT)))
endTime = datetime.fromtimestamp(time.mktime(time.strptime(video[u'endtime'], TIME_FORMAT)))
if startTime >= startCutOff and startTime < endCutOff:
self.log(u"startTime >= startCutOff and startTime < endCutOff", xbmc.LOGDEBUG)
videoCount = videoCount + 1
if endTime > endCutOff:
self.log(u"endTime > endCutOff", xbmc.LOGDEBUG)
# Add "Now ... Ends at ..." if count is 0, or "Next..."
detail.append(video)
break
else:
self.log(u"endTime <= endCutOff", xbmc.LOGDEBUG)
# Add Now .../Next ... depending on count
detail.append(video)
elif startTime < startCutOff and endTime > startCutOff:
self.log(u"startTime < startCutOff and endTime > startCutOff", xbmc.LOGDEBUG)
videoCount = videoCount + 1
# Add Now .../Next ... depending on count
detail.append(video)
else:
self.log(u"Ignoring video: " + video[u'name'])
if (videoCount > 1):
break
except (Exception) as exception:
if not isinstance(exception, LoggingException):
exception = LoggingException.fromException(exception)
self.log(u"video: %s" % repr(video))
# Error processing EPG entry
exception.addLogMessage(self.language(30027))
exception.printLogMessages(severity = xbmc.LOGWARNING)
return detail
#TODO Consider breaking the epgJSON processing into a separate class
def ParseEPGData(self, epgJSON):
(startCutOff, endCutOff) = self.GetTimeCutOffs()
channelDetails = {}
# Using slug as the identifier for each channel, create a dictionary that allows details of each channel to be looked up by slug.
for channelEntry in epgJSON[u'data']:
slug = channelEntry[u'channel'][u'slug']
detail = self.GetEPGDetails(channelEntry, startCutOff, endCutOff)
"""
# Now
if len(channelEntry['videos']) > 0:
detail.append(channelEntry['videos'][0])
# Next
if len(channelEntry['videos']) > 1:
detail.append(channelEntry['videos'][1])
"""
channelDetails[slug] = detail
return channelDetails
def GetListItemData(self, detail):
description = ''
if len(detail) == 1:
label = u'Unknown or Off Air'
self.log(repr(detail))
else:
description = detail[1][u'description']
label = detail[1][u'name']
if len(detail) > 2:
# E.g. "Nuacht [18:00 Six One]"
startTime = strptime(detail[2][u'starttime'], TIME_FORMAT)
label = u" " + label + u" [ " + strftime(u"%H:%M", startTime) + u" " + detail[2][u'name'] + u" ]"
else:
# E.g. "Nuacht [ Ends at 18:00 ]"
endTime = strptime(detail[1][u'endtime'], TIME_FORMAT)
label = u" " + label + u" [ Ends at " + strftime(u"%H:%M", endTime) + u" ]"
return label, description, detail[0]
def GetListItemDataForSlug(self, channelDetails, slug):
detail = channelDetails[slug]
return self.GetListItemData(detail)
def PlayChannel(self, channel, logo):
"""
RTE 1 jQuery110109518442715161376_1395526161546(
{
"player":"Get player here from post ",
"data":{
"post_id":"17",
"freebie":true,
"auth":true,
"magonly":false,
"channel":"RT\u00c9 One",
"internal":"off",
"streamname":"",
"free":true,
"videoId":"rte-one",
"publisherId":"1242843906001",
"playerId":"1454761980001",
"playerKey":"AQ~~,AAABIV9E_9E~,lGDQr89oSbKT02RqV22r-E007AitVINH",
"appStreamUrl":"",
"show":"The Saturday Night Show"
},
"flag":"new"
})
"""
try:
jsonData = None
values = [{u'api':u'player'}, {u'type':u'name'}, {u'val':channel}]
url = self.GetAPIUrl(values)
# "Getting channel information"
self.dialog.update(10, self.language(30107))
jsonData = self.httpManager.GetWebPage(url, 20000)
playerJSON=_json.loads(jsonData)
self.log(u"json data:" + unicode(playerJSON))
playerId = playerJSON[u'data'][u'playerId']
publisherId = playerJSON[u'data'][u'publisherId']
playerKey = playerJSON[u'data'][u'playerKey']
viewExperienceUrl = urlRoot + u'/#' + channel
#streamType = unicode(self.addon.getSetting( u'AerTV_stream_type' ))
#self.log(u"Stream type setting: " + streamType)
try:
if self.dialog.iscanceled():
return False
# "Getting stream url"
self.dialog.update(25, self.language(30087))
streamUrl = self.GetStreamUrl(playerKey, viewExperienceUrl, playerId, contentRefId = channel)
self.log(u"streamUrl: %s" % streamUrl)
except (Exception) as exception:
if not isinstance(exception, LoggingException):
exception = LoggingException.fromException(exception)
self.log(u" channel: %s" % channel)
# Error getting rtmp url.
exception.addLogMessage(self.language(30066))
# Cannot play video stream
raise exception
if self.dialog.iscanceled():
return False
# "Getting \"Now Playing\" data
self.dialog.update(35, self.language(30088))
# Set up info for "Now Playing" screen
infoLabels = self.GetInfoLabels(playerJSON)
#RTMP
if streamUrl.upper().startswith(self.language(30081)):
playPathIndex = streamUrl.index(u'&') + 1
playPath = streamUrl[playPathIndex:]
qsData = self.GetQSData(channel, playerId, publisherId, playerKey)
swfUrl = self.GetSwfUrl(qsData)
pageUrl = urlRoot
if u'videoId' in playerJSON[u'data']:
videoId = playerJSON[u'data'][u'videoId']
else:
videoId = playerJSON[u'data'][u'offset'][u'videos'][0]
app = u"rtplive?videoId=%s&lineUpId=&pubId=%s&playerId=%s" % (videoId, publisherId, playerId)
rtmpVar = rtmp.RTMP(rtmp = streamUrl, app = app, swfUrl = swfUrl, playPath = playPath, pageUrl = pageUrl, live = True)
self.AddSocksToRTMP(rtmpVar)
self.Play(infoLabels, logo, rtmpVar)
else:
self.Play(infoLabels, logo, url = streamUrl)
return True
except (Exception) as exception:
if not isinstance(exception, LoggingException):
exception = LoggingException.fromException(exception)
if jsonData is not None:
msg = u"jsonData:\n\n%s\n\n" % jsonData
exception.addLogMessage(msg)
# Error preparing or playing stream
exception.addLogMessage(self.language(30066))
exception.process(severity = self.logLevel(xbmc.LOGERROR))
return False
def GetInfoLabels(self, playerJSON):
infoLabels = None
try:
channel = playerJSON['data']['channel']
label = channel + u" " + playerJSON['data']['show']
description = label
infoLabels = {u'Title': label, u'Plot': description}
except (Exception) as exception:
if not isinstance(exception, LoggingException):
exception = LoggingException.fromException(exception)
# Error getting title and logo info for %s
exception.addLogMessage(self.language(30068) % channel)
exception.process(severity = xbmc.LOGWARNING)
return infoLabels
def GetEpgJSON(self, url):
epgJSONText = self.httpManager.GetWebPage(url, 0)
epgJSON = _json.loads(epgJSONText)
return epgJSON
"""
{
'TTLToken': '',
'URL': u'https://www.aertv.ie/#rte-one',
'contentOverrides': [
{
'contentId': nan,
'contentIds': None,
'contentRefId': u'rte-one',
'contentRefIds': None,
'contentType': 0,
'featuredId': nan,
'featuredRefId': None,
'target': u'videoPlayer'
}
],
'deliveryType': nan,
'experienceId': 1535624864001.0,
'playerKey': u'AQ~~,AAABIV9E_9E~,lGDQr89oSbJf6x1rDuEAWKPqTYfK-JH2'
},
u'a7ef6ffbfba938b174f5044af3343163a0877c48'
"""
def GetAmfClassHash(self, className):
return u'a7ef6ffbfba938b174f5044af3343163a0877c48'
def GetQSData(self, videoPlayer, playerId, publisherId, playerKey):
#TODO Use a default url, in case of exception and log response
qsdata = {}
qsdata[u'width'] = u'100%'
qsdata[u'height'] = u'100%'
qsdata[u'flashID'] = u'aertv'
qsdata[u'playerID'] = playerId
qsdata[u'purl'] = urlRoot
qsdata[u'@videoPlayer'] = videoPlayer
qsdata[u'playerKey'] = playerKey
qsdata[u'publisherID'] = publisherId
qsdata[u'bgcolor'] = u'#FFFFFF'
qsdata[u'isVid'] = u'true'
qsdata[u'isUI'] = u'true'
qsdata[u'autostart'] = u'true'
qsdata[u'wmode'] = u'transparent'
qsdata[u'localizedErrorXML'] = u'https://aertv.ie/wp-content/themes/aertv/aertv-custom-error-messages.xml'
qsdata[u'templateLoadHandler'] = u'liveTemplateLoaded'
qsdata[u'includeAPI'] = u'true'
qsdata[u'debuggerID'] = u''
qsdata[u'isUI'] = u'true'
return qsdata
| gpl-3.0 |
webmasterraj/FogOrNot | flask/lib/python2.7/site-packages/pip/_vendor/distlib/manifest.py | 559 | 13598 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2013 Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""
Class representing the list of files in a distribution.
Equivalent to distutils.filelist, but fixes some problems.
"""
import fnmatch
import logging
import os
import re
from . import DistlibException
from .compat import fsdecode
from .util import convert_path
__all__ = ['Manifest']
logger = logging.getLogger(__name__)
# a \ followed by some spaces + EOL
_COLLAPSE_PATTERN = re.compile('\\\w*\n', re.M)
_COMMENTED_LINE = re.compile('#.*?(?=\n)|\n(?=$)', re.M | re.S)
class Manifest(object):
"""A list of files built by on exploring the filesystem and filtered by
applying various patterns to what we find there.
"""
def __init__(self, base=None):
"""
Initialise an instance.
:param base: The base directory to explore under.
"""
self.base = os.path.abspath(os.path.normpath(base or os.getcwd()))
self.prefix = self.base + os.sep
self.allfiles = None
self.files = set()
#
# Public API
#
def findall(self):
"""Find all files under the base and set ``allfiles`` to the absolute
pathnames of files found.
"""
from stat import S_ISREG, S_ISDIR, S_ISLNK
self.allfiles = allfiles = []
root = self.base
stack = [root]
pop = stack.pop
push = stack.append
while stack:
root = pop()
names = os.listdir(root)
for name in names:
fullname = os.path.join(root, name)
# Avoid excess stat calls -- just one will do, thank you!
stat = os.stat(fullname)
mode = stat.st_mode
if S_ISREG(mode):
allfiles.append(fsdecode(fullname))
elif S_ISDIR(mode) and not S_ISLNK(mode):
push(fullname)
def add(self, item):
"""
Add a file to the manifest.
:param item: The pathname to add. This can be relative to the base.
"""
if not item.startswith(self.prefix):
item = os.path.join(self.base, item)
self.files.add(os.path.normpath(item))
def add_many(self, items):
"""
Add a list of files to the manifest.
:param items: The pathnames to add. These can be relative to the base.
"""
for item in items:
self.add(item)
def sorted(self, wantdirs=False):
"""
Return sorted files in directory order
"""
def add_dir(dirs, d):
dirs.add(d)
logger.debug('add_dir added %s', d)
if d != self.base:
parent, _ = os.path.split(d)
assert parent not in ('', '/')
add_dir(dirs, parent)
result = set(self.files) # make a copy!
if wantdirs:
dirs = set()
for f in result:
add_dir(dirs, os.path.dirname(f))
result |= dirs
return [os.path.join(*path_tuple) for path_tuple in
sorted(os.path.split(path) for path in result)]
def clear(self):
"""Clear all collected files."""
self.files = set()
self.allfiles = []
def process_directive(self, directive):
"""
Process a directive which either adds some files from ``allfiles`` to
``files``, or removes some files from ``files``.
:param directive: The directive to process. This should be in a format
compatible with distutils ``MANIFEST.in`` files:
http://docs.python.org/distutils/sourcedist.html#commands
"""
# Parse the line: split it up, make sure the right number of words
# is there, and return the relevant words. 'action' is always
# defined: it's the first word of the line. Which of the other
# three are defined depends on the action; it'll be either
# patterns, (dir and patterns), or (dirpattern).
action, patterns, thedir, dirpattern = self._parse_directive(directive)
# OK, now we know that the action is valid and we have the
# right number of words on the line for that action -- so we
# can proceed with minimal error-checking.
if action == 'include':
for pattern in patterns:
if not self._include_pattern(pattern, anchor=True):
logger.warning('no files found matching %r', pattern)
elif action == 'exclude':
for pattern in patterns:
found = self._exclude_pattern(pattern, anchor=True)
#if not found:
# logger.warning('no previously-included files '
# 'found matching %r', pattern)
elif action == 'global-include':
for pattern in patterns:
if not self._include_pattern(pattern, anchor=False):
logger.warning('no files found matching %r '
'anywhere in distribution', pattern)
elif action == 'global-exclude':
for pattern in patterns:
found = self._exclude_pattern(pattern, anchor=False)
#if not found:
# logger.warning('no previously-included files '
# 'matching %r found anywhere in '
# 'distribution', pattern)
elif action == 'recursive-include':
for pattern in patterns:
if not self._include_pattern(pattern, prefix=thedir):
logger.warning('no files found matching %r '
'under directory %r', pattern, thedir)
elif action == 'recursive-exclude':
for pattern in patterns:
found = self._exclude_pattern(pattern, prefix=thedir)
#if not found:
# logger.warning('no previously-included files '
# 'matching %r found under directory %r',
# pattern, thedir)
elif action == 'graft':
if not self._include_pattern(None, prefix=dirpattern):
logger.warning('no directories found matching %r',
dirpattern)
elif action == 'prune':
if not self._exclude_pattern(None, prefix=dirpattern):
logger.warning('no previously-included directories found '
'matching %r', dirpattern)
else: # pragma: no cover
# This should never happen, as it should be caught in
# _parse_template_line
raise DistlibException(
'invalid action %r' % action)
#
# Private API
#
def _parse_directive(self, directive):
"""
Validate a directive.
:param directive: The directive to validate.
:return: A tuple of action, patterns, thedir, dir_patterns
"""
words = directive.split()
if len(words) == 1 and words[0] not in ('include', 'exclude',
'global-include',
'global-exclude',
'recursive-include',
'recursive-exclude',
'graft', 'prune'):
# no action given, let's use the default 'include'
words.insert(0, 'include')
action = words[0]
patterns = thedir = dir_pattern = None
if action in ('include', 'exclude',
'global-include', 'global-exclude'):
if len(words) < 2:
raise DistlibException(
'%r expects <pattern1> <pattern2> ...' % action)
patterns = [convert_path(word) for word in words[1:]]
elif action in ('recursive-include', 'recursive-exclude'):
if len(words) < 3:
raise DistlibException(
'%r expects <dir> <pattern1> <pattern2> ...' % action)
thedir = convert_path(words[1])
patterns = [convert_path(word) for word in words[2:]]
elif action in ('graft', 'prune'):
if len(words) != 2:
raise DistlibException(
'%r expects a single <dir_pattern>' % action)
dir_pattern = convert_path(words[1])
else:
raise DistlibException('unknown action %r' % action)
return action, patterns, thedir, dir_pattern
def _include_pattern(self, pattern, anchor=True, prefix=None,
is_regex=False):
"""Select strings (presumably filenames) from 'self.files' that
match 'pattern', a Unix-style wildcard (glob) pattern.
Patterns are not quite the same as implemented by the 'fnmatch'
module: '*' and '?' match non-special characters, where "special"
is platform-dependent: slash on Unix; colon, slash, and backslash on
DOS/Windows; and colon on Mac OS.
If 'anchor' is true (the default), then the pattern match is more
stringent: "*.py" will match "foo.py" but not "foo/bar.py". If
'anchor' is false, both of these will match.
If 'prefix' is supplied, then only filenames starting with 'prefix'
(itself a pattern) and ending with 'pattern', with anything in between
them, will match. 'anchor' is ignored in this case.
If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and
'pattern' is assumed to be either a string containing a regex or a
regex object -- no translation is done, the regex is just compiled
and used as-is.
Selected strings will be added to self.files.
Return True if files are found.
"""
# XXX docstring lying about what the special chars are?
found = False
pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex)
# delayed loading of allfiles list
if self.allfiles is None:
self.findall()
for name in self.allfiles:
if pattern_re.search(name):
self.files.add(name)
found = True
return found
def _exclude_pattern(self, pattern, anchor=True, prefix=None,
is_regex=False):
"""Remove strings (presumably filenames) from 'files' that match
'pattern'.
Other parameters are the same as for 'include_pattern()', above.
The list 'self.files' is modified in place. Return True if files are
found.
        This API is public to allow e.g. exclusion of SCM subdirs when
        packaging source distributions.
"""
found = False
pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex)
for f in list(self.files):
if pattern_re.search(f):
self.files.remove(f)
found = True
return found
def _translate_pattern(self, pattern, anchor=True, prefix=None,
is_regex=False):
"""Translate a shell-like wildcard pattern to a compiled regular
expression.
Return the compiled regex. If 'is_regex' true,
then 'pattern' is directly compiled to a regex (if it's a string)
or just returned as-is (assumes it's a regex object).
"""
if is_regex:
if isinstance(pattern, str):
return re.compile(pattern)
else:
return pattern
if pattern:
pattern_re = self._glob_to_re(pattern)
else:
pattern_re = ''
base = re.escape(os.path.join(self.base, ''))
if prefix is not None:
# ditch end of pattern character
empty_pattern = self._glob_to_re('')
prefix_re = self._glob_to_re(prefix)[:-len(empty_pattern)]
sep = os.sep
if os.sep == '\\':
sep = r'\\'
pattern_re = '^' + base + sep.join((prefix_re,
'.*' + pattern_re))
else: # no prefix -- respect anchor flag
if anchor:
pattern_re = '^' + base + pattern_re
return re.compile(pattern_re)
def _glob_to_re(self, pattern):
"""Translate a shell-like glob pattern to a regular expression.
Return a string containing the regex. Differs from
'fnmatch.translate()' in that '*' does not match "special characters"
(which are platform-specific).
"""
pattern_re = fnmatch.translate(pattern)
# '?' and '*' in the glob pattern become '.' and '.*' in the RE, which
# IMHO is wrong -- '?' and '*' aren't supposed to match slash in Unix,
# and by extension they shouldn't match such "special characters" under
# any OS. So change all non-escaped dots in the RE to match any
# character except the special characters (currently: just os.sep).
sep = os.sep
if os.sep == '\\':
# we're using a regex to manipulate a regex, so we need
# to escape the backslash twice
sep = r'\\\\'
escaped = r'\1[^%s]' % sep
pattern_re = re.sub(r'((?<!\\)(\\\\)*)\.', escaped, pattern_re)
return pattern_re
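# Illustrative usage sketch (not part of distlib): driving the Manifest API
# above with MANIFEST.in-style directives. The patterns below are
# hypothetical and simply exercise process_directive().
if __name__ == '__main__':  # pragma: no cover
    manifest = Manifest()  # defaults to the current working directory
    manifest.process_directive('include *.txt')
    manifest.process_directive('recursive-include docs *.rst')
    manifest.process_directive('prune build')
    for path in manifest.sorted(wantdirs=True):
        print(path)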
| gpl-2.0 |
ekesken/istatistikciadamlazim | openid/yadis/xrires.py | 157 | 4268 | # -*- test-case-name: openid.test.test_xrires -*-
"""XRI resolution.
"""
from urllib import urlencode
from openid import fetchers
from openid.yadis import etxrd
from openid.yadis.xri import toURINormal
from openid.yadis.services import iterServices
DEFAULT_PROXY = 'http://proxy.xri.net/'
class ProxyResolver(object):
"""Python interface to a remote XRI proxy resolver.
"""
def __init__(self, proxy_url=DEFAULT_PROXY):
self.proxy_url = proxy_url
def queryURL(self, xri, service_type=None):
"""Build a URL to query the proxy resolver.
@param xri: An XRI to resolve.
@type xri: unicode
@param service_type: The service type to resolve, if you desire
service endpoint selection. A service type is a URI.
@type service_type: str
@returns: a URL
@returntype: str
"""
# Trim off the xri:// prefix. The proxy resolver didn't accept it
# when this code was written, but that may (or may not) change for
# XRI Resolution 2.0 Working Draft 11.
qxri = toURINormal(xri)[6:]
hxri = self.proxy_url + qxri
args = {
# XXX: If the proxy resolver will ensure that it doesn't return
# bogus CanonicalIDs (as per Steve's message of 15 Aug 2006
# 11:13:42), then we could ask for application/xrd+xml instead,
# which would give us a bit less to process.
'_xrd_r': 'application/xrds+xml',
}
if service_type:
args['_xrd_t'] = service_type
else:
# Don't perform service endpoint selection.
args['_xrd_r'] += ';sep=false'
query = _appendArgs(hxri, args)
return query
def query(self, xri, service_types):
"""Resolve some services for an XRI.
Note: I don't implement any service endpoint selection beyond what
the resolver I'm querying does, so the Services I return may well
include Services that were not of the types you asked for.
May raise fetchers.HTTPFetchingError or L{etxrd.XRDSError} if
the fetching or parsing don't go so well.
@param xri: An XRI to resolve.
@type xri: unicode
@param service_types: A list of services types to query for. Service
types are URIs.
@type service_types: list of str
@returns: tuple of (CanonicalID, Service elements)
@returntype: (unicode, list of C{ElementTree.Element}s)
"""
# FIXME: No test coverage!
services = []
        # Make a separate request to the proxy resolver for each service
# type, as, if it is following Refs, it could return a different
# XRDS for each.
canonicalID = None
for service_type in service_types:
url = self.queryURL(xri, service_type)
response = fetchers.fetch(url)
if response.status not in (200, 206):
# XXX: sucks to fail silently.
# print "response not OK:", response
continue
et = etxrd.parseXRDS(response.body)
canonicalID = etxrd.getCanonicalID(xri, et)
some_services = list(iterServices(et))
services.extend(some_services)
# TODO:
# * If we do get hits for multiple service_types, we're almost
# certainly going to have duplicated service entries and
# broken priority ordering.
return canonicalID, services
def _appendArgs(url, args):
"""Append some arguments to an HTTP query.
"""
# to be merged with oidutil.appendArgs when we combine the projects.
if hasattr(args, 'items'):
args = args.items()
args.sort()
if len(args) == 0:
return url
# According to XRI Resolution section "QXRI query parameters":
#
# """If the original QXRI had a null query component (only a leading
# question mark), or a query component consisting of only question
# marks, one additional leading question mark MUST be added when
# adding any XRI resolution parameters."""
if '?' in url.rstrip('?'):
sep = '&'
else:
sep = '?'
return '%s%s%s' % (url, sep, urlencode(args))
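# Illustrative usage sketch (not part of the library): resolving a
# hypothetical XRI through the default proxy resolver defined above.
# query() performs network fetches, so the sketch is guarded by __main__.
if __name__ == '__main__':
    resolver = ProxyResolver()
    # queryURL only builds the resolver URL; nothing is fetched here.
    print(resolver.queryURL(u'=example.user',
                            'http://specs.openid.net/auth/2.0/signon'))
    # query() fetches and parses an XRDS document per service type.
    canonical_id, services = resolver.query(
        u'=example.user', ['http://specs.openid.net/auth/2.0/signon'])
    print(canonical_id)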
| gpl-3.0 |
f1aky/xadmin | xadmin/plugins/actions.py | 6 | 11229 | from collections import OrderedDict
from django import forms
from django.core.exceptions import PermissionDenied
from django.db import router
from django.http import HttpResponse, HttpResponseRedirect
from django.template import loader
from django.template.response import TemplateResponse
from django.utils.encoding import force_unicode
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _, ungettext
from django.utils.text import capfirst
from django.contrib.admin.utils import get_deleted_objects
from xadmin.plugins.utils import get_context_dict
from xadmin.sites import site
from xadmin.util import model_format_dict, model_ngettext
from xadmin.views import BaseAdminPlugin, ListAdminView
from xadmin.views.base import filter_hook, ModelAdminView
ACTION_CHECKBOX_NAME = '_selected_action'
checkbox = forms.CheckboxInput({'class': 'action-select'}, lambda value: False)
def action_checkbox(obj):
return checkbox.render(ACTION_CHECKBOX_NAME, force_unicode(obj.pk))
action_checkbox.short_description = mark_safe(
'<input type="checkbox" id="action-toggle" />')
action_checkbox.allow_tags = True
action_checkbox.allow_export = False
action_checkbox.is_column = False
class BaseActionView(ModelAdminView):
action_name = None
description = None
icon = 'fa fa-tasks'
model_perm = 'change'
@classmethod
def has_perm(cls, list_view):
return list_view.get_model_perms()[cls.model_perm]
def init_action(self, list_view):
self.list_view = list_view
self.admin_site = list_view.admin_site
@filter_hook
def do_action(self, queryset):
pass
class DeleteSelectedAction(BaseActionView):
action_name = "delete_selected"
description = _(u'Delete selected %(verbose_name_plural)s')
delete_confirmation_template = None
delete_selected_confirmation_template = None
delete_models_batch = True
model_perm = 'delete'
icon = 'fa fa-times'
@filter_hook
def delete_models(self, queryset):
n = queryset.count()
if n:
if self.delete_models_batch:
self.log('delete', _('Batch delete %(count)d %(items)s.') % { "count": n, "items": model_ngettext(self.opts, n) })
queryset.delete()
else:
for obj in queryset:
self.log('delete', '', obj)
obj.delete()
self.message_user(_("Successfully deleted %(count)d %(items)s.") % {
"count": n, "items": model_ngettext(self.opts, n)
}, 'success')
@filter_hook
def do_action(self, queryset):
# Check that the user has delete permission for the actual model
if not self.has_delete_permission():
raise PermissionDenied
using = router.db_for_write(self.model)
# Populate deletable_objects, a data structure of all related objects that
# will also be deleted.
deletable_objects, model_count, perms_needed, protected = get_deleted_objects(
queryset, self.opts, self.user, self.admin_site, using)
# The user has already confirmed the deletion.
# Do the deletion and return a None to display the change list view again.
if self.request.POST.get('post'):
if perms_needed:
raise PermissionDenied
self.delete_models(queryset)
# Return None to display the change list page again.
return None
if len(queryset) == 1:
objects_name = force_unicode(self.opts.verbose_name)
else:
objects_name = force_unicode(self.opts.verbose_name_plural)
if perms_needed or protected:
title = _("Cannot delete %(name)s") % {"name": objects_name}
else:
title = _("Are you sure?")
context = self.get_context()
context.update({
"title": title,
"objects_name": objects_name,
"deletable_objects": [deletable_objects],
'queryset': queryset,
"perms_lacking": perms_needed,
"protected": protected,
"opts": self.opts,
"app_label": self.app_label,
'action_checkbox_name': ACTION_CHECKBOX_NAME,
})
# Display the confirmation page
return TemplateResponse(self.request, self.delete_selected_confirmation_template or
self.get_template_list('views/model_delete_selected_confirm.html'), context)
class ActionPlugin(BaseAdminPlugin):
# Actions
actions = []
actions_selection_counter = True
global_actions = [DeleteSelectedAction]
def init_request(self, *args, **kwargs):
self.actions = self.get_actions()
return bool(self.actions)
def get_list_display(self, list_display):
if self.actions:
list_display.insert(0, 'action_checkbox')
self.admin_view.action_checkbox = action_checkbox
return list_display
def get_list_display_links(self, list_display_links):
if self.actions:
if len(list_display_links) == 1 and list_display_links[0] == 'action_checkbox':
return list(self.admin_view.list_display[1:2])
return list_display_links
def get_context(self, context):
if self.actions and self.admin_view.result_count:
av = self.admin_view
selection_note_all = ungettext('%(total_count)s selected',
'All %(total_count)s selected', av.result_count)
new_context = {
'selection_note': _('0 of %(cnt)s selected') % {'cnt': len(av.result_list)},
'selection_note_all': selection_note_all % {'total_count': av.result_count},
'action_choices': self.get_action_choices(),
'actions_selection_counter': self.actions_selection_counter,
}
context.update(new_context)
return context
def post_response(self, response, *args, **kwargs):
request = self.admin_view.request
av = self.admin_view
# Actions with no confirmation
if self.actions and 'action' in request.POST:
action = request.POST['action']
if action not in self.actions:
msg = _("Items must be selected in order to perform "
"actions on them. No items have been changed.")
av.message_user(msg)
else:
ac, name, description, icon = self.actions[action]
select_across = request.POST.get('select_across', False) == '1'
selected = request.POST.getlist(ACTION_CHECKBOX_NAME)
if not selected and not select_across:
# Reminder that something needs to be selected or nothing will happen
msg = _("Items must be selected in order to perform "
"actions on them. No items have been changed.")
av.message_user(msg)
else:
queryset = av.list_queryset._clone()
if not select_across:
# Perform the action only on the selected objects
queryset = av.list_queryset.filter(pk__in=selected)
response = self.response_action(ac, queryset)
# Actions may return an HttpResponse, which will be used as the
# response from the POST. If not, we'll be a good little HTTP
# citizen and redirect back to the changelist page.
if isinstance(response, HttpResponse):
return response
else:
return HttpResponseRedirect(request.get_full_path())
return response
def response_action(self, ac, queryset):
if isinstance(ac, type) and issubclass(ac, BaseActionView):
action_view = self.get_model_view(ac, self.admin_view.model)
action_view.init_action(self.admin_view)
return action_view.do_action(queryset)
else:
return ac(self.admin_view, self.request, queryset)
def get_actions(self):
if self.actions is None:
return OrderedDict()
actions = [self.get_action(action) for action in self.global_actions]
for klass in self.admin_view.__class__.mro()[::-1]:
class_actions = getattr(klass, 'actions', [])
if not class_actions:
continue
actions.extend(
[self.get_action(action) for action in class_actions])
# get_action might have returned None, so filter any of those out.
actions = filter(None, actions)
# Convert the actions into a OrderedDict keyed by name.
actions = OrderedDict([
(name, (ac, name, desc, icon))
for ac, name, desc, icon in actions
])
return actions
def get_action_choices(self):
"""
Return a list of choices for use in a form object. Each choice is a
tuple (name, description).
"""
choices = []
for ac, name, description, icon in self.actions.itervalues():
choice = (name, description % model_format_dict(self.opts), icon)
choices.append(choice)
return choices
def get_action(self, action):
if isinstance(action, type) and issubclass(action, BaseActionView):
if not action.has_perm(self.admin_view):
return None
return action, getattr(action, 'action_name'), getattr(action, 'description'), getattr(action, 'icon')
elif callable(action):
func = action
action = action.__name__
elif hasattr(self.admin_view.__class__, action):
func = getattr(self.admin_view.__class__, action)
else:
return None
if hasattr(func, 'short_description'):
description = func.short_description
else:
description = capfirst(action.replace('_', ' '))
return func, action, description, getattr(func, 'icon', 'tasks')
# View Methods
def result_header(self, item, field_name, row):
if item.attr and field_name == 'action_checkbox':
item.classes.append("action-checkbox-column")
return item
def result_item(self, item, obj, field_name, row):
if item.field is None and field_name == u'action_checkbox':
item.classes.append("action-checkbox")
return item
# Media
def get_media(self, media):
if self.actions and self.admin_view.result_count:
media = media + self.vendor('xadmin.plugin.actions.js', 'xadmin.plugins.css')
return media
# Block Views
def block_results_bottom(self, context, nodes):
if self.actions and self.admin_view.result_count:
nodes.append(loader.render_to_string('xadmin/blocks/model_list.results_bottom.actions.html',
context=get_context_dict(context)))
site.register_plugin(ActionPlugin, ListAdminView)
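# Illustrative sketch (not part of xadmin): a custom bulk action built on the
# BaseActionView contract above. The 'is_published' model field is
# hypothetical; an admin OptionClass would enable the action by listing its
# name in the 'actions' attribute.
class PublishSelectedAction(BaseActionView):
    action_name = "publish_selected"
    description = _(u'Publish selected %(verbose_name_plural)s')
    model_perm = 'change'
    icon = 'fa fa-check'
    @filter_hook
    def do_action(self, queryset):
        # Bulk-update the selected rows and confirm with a flash message.
        rows = queryset.update(is_published=True)
        self.message_user(_("Successfully published %(count)d items.") % {"count": rows}, 'success')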
| bsd-3-clause |
wilebeast/FireFox-OS | B2G/gecko/testing/marionette/client/marionette/emulator.py | 2 | 19514 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from b2ginstance import B2GInstance
import datetime
from errors import *
from mozdevice import devicemanagerADB, DMError
from mozprocess import ProcessHandlerMixin
import os
import re
import platform
import shutil
import socket
import subprocess
import sys
from telnetlib import Telnet
import tempfile
import time
import traceback
from emulator_battery import EmulatorBattery
from emulator_geo import EmulatorGeo
from emulator_screen import EmulatorScreen
class LogcatProc(ProcessHandlerMixin):
"""Process handler for logcat which saves all output to a logfile.
"""
def __init__(self, logfile, cmd, **kwargs):
self.logfile = logfile
kwargs.setdefault('processOutputLine', []).append(self.log_output)
ProcessHandlerMixin.__init__(self, cmd, **kwargs)
def log_output(self, line):
f = open(self.logfile, 'a')
f.write(line + "\n")
f.flush()
class Emulator(object):
deviceRe = re.compile(r"^emulator-(\d+)(\s*)(.*)$")
_default_res = '320x480'
prefs = {'app.update.enabled': False,
'app.update.staging.enabled': False,
'app.update.service.enabled': False}
def __init__(self, homedir=None, noWindow=False, logcat_dir=None,
arch="x86", emulatorBinary=None, res=None, sdcard=None,
userdata=None):
self.port = None
self.dm = None
self._emulator_launched = False
self.proc = None
self.marionette_port = None
self.telnet = None
self._tmp_sdcard = None
self._tmp_userdata = None
self._adb_started = False
self.remote_user_js = '/data/local/user.js'
self.logcat_dir = logcat_dir
self.logcat_proc = None
self.arch = arch
self.binary = emulatorBinary
self.res = res or self._default_res
self.battery = EmulatorBattery(self)
self.geo = EmulatorGeo(self)
self.screen = EmulatorScreen(self)
self.homedir = homedir
self.sdcard = sdcard
self.noWindow = noWindow
if self.homedir is not None:
self.homedir = os.path.expanduser(homedir)
self.dataImg = userdata
self.copy_userdata = self.dataImg is None
def _check_for_b2g(self):
self.b2g = B2GInstance(homedir=self.homedir, emulator=True)
self.adb = self.b2g.adb_path
self.homedir = self.b2g.homedir
if self.arch not in ("x86", "arm"):
raise Exception("Emulator architecture must be one of x86, arm, got: %s" %
self.arch)
host_dir = "linux-x86"
if platform.system() == "Darwin":
host_dir = "darwin-x86"
host_bin_dir = os.path.join("out", "host", host_dir, "bin")
if self.arch == "x86":
binary = os.path.join(host_bin_dir, "emulator-x86")
kernel = "prebuilts/qemu-kernel/x86/kernel-qemu"
sysdir = "out/target/product/generic_x86"
self.tail_args = []
else:
binary = os.path.join(host_bin_dir, "emulator")
kernel = "prebuilts/qemu-kernel/arm/kernel-qemu-armv7"
sysdir = "out/target/product/generic"
self.tail_args = ["-cpu", "cortex-a8"]
if(self.sdcard):
self.mksdcard = os.path.join(self.homedir, host_bin_dir, "mksdcard")
self.create_sdcard(self.sdcard)
if not self.binary:
self.binary = os.path.join(self.homedir, binary)
self.b2g.check_file(self.binary)
self.kernelImg = os.path.join(self.homedir, kernel)
self.b2g.check_file(self.kernelImg)
self.sysDir = os.path.join(self.homedir, sysdir)
self.b2g.check_file(self.sysDir)
if not self.dataImg:
self.dataImg = os.path.join(self.sysDir, 'userdata.img')
self.b2g.check_file(self.dataImg)
def __del__(self):
if self.telnet:
self.telnet.write('exit\n')
self.telnet.read_all()
@property
def args(self):
qemuArgs = [self.binary,
'-kernel', self.kernelImg,
'-sysdir', self.sysDir,
'-data', self.dataImg]
if self._tmp_sdcard:
qemuArgs.extend(['-sdcard', self._tmp_sdcard])
if self.noWindow:
qemuArgs.append('-no-window')
qemuArgs.extend(['-memory', '512',
'-partition-size', '512',
'-verbose',
'-skin', self.res,
'-gpu', 'on',
'-qemu'] + self.tail_args)
return qemuArgs
@property
def is_running(self):
if self._emulator_launched:
return self.proc is not None and self.proc.poll() is None
else:
return self.port is not None
def check_for_crash(self):
"""
Checks if the emulator has crashed or not. Always returns False if
we've connected to an already-running emulator, since we can't track
the emulator's pid in that case. Otherwise, returns True iff
self.proc is not None (meaning the emulator hasn't been explicitly
closed), and self.proc.poll() is also not None (meaning the emulator
process has terminated).
"""
if (self._emulator_launched and self.proc is not None
and self.proc.poll() is not None):
return True
return False
def check_for_minidumps(self, symbols_path):
return self.b2g.check_for_crashes(symbols_path)
def create_sdcard(self, sdcard):
self._tmp_sdcard = tempfile.mktemp(prefix='sdcard')
sdargs = [self.mksdcard, "-l", "mySdCard", sdcard, self._tmp_sdcard]
sd = subprocess.Popen(sdargs, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
retcode = sd.wait()
if retcode:
raise Exception('unable to create sdcard : exit code %d: %s'
% (retcode, sd.stdout.read()))
return None
def _run_adb(self, args):
args.insert(0, self.adb)
if self.port:
args.insert(1, '-s')
args.insert(2, 'emulator-%d' % self.port)
adb = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
retcode = adb.wait()
if retcode:
raise Exception('adb terminated with exit code %d: %s'
% (retcode, adb.stdout.read()))
return adb.stdout.read()
def _get_telnet_response(self, command=None):
output = []
assert(self.telnet)
if command is not None:
self.telnet.write('%s\n' % command)
while True:
line = self.telnet.read_until('\n')
output.append(line.rstrip())
if line.startswith('OK'):
return output
elif line.startswith('KO:'):
raise Exception('bad telnet response: %s' % line)
def _run_telnet(self, command):
if not self.telnet:
self.telnet = Telnet('localhost', self.port)
self._get_telnet_response()
return self._get_telnet_response(command)
def close(self):
if self.is_running and self._emulator_launched:
self.proc.terminate()
self.proc.wait()
if self._adb_started:
self._run_adb(['kill-server'])
self._adb_started = False
if self.proc:
retcode = self.proc.poll()
self.proc = None
if self._tmp_userdata:
os.remove(self._tmp_userdata)
self._tmp_userdata = None
if self._tmp_sdcard:
os.remove(self._tmp_sdcard)
self._tmp_sdcard = None
return retcode
if self.logcat_proc and self.logcat_proc.proc.poll() is None:
self.logcat_proc.kill()
return 0
def _get_adb_devices(self):
offline = set()
online = set()
output = self._run_adb(['devices'])
for line in output.split('\n'):
m = self.deviceRe.match(line)
if m:
if m.group(3) == 'offline':
offline.add(m.group(1))
else:
online.add(m.group(1))
return (online, offline)
def start_adb(self):
result = self._run_adb(['start-server'])
# We keep track of whether we've started adb or not, so we know
# if we need to kill it.
if 'daemon started successfully' in result:
self._adb_started = True
else:
self._adb_started = False
def wait_for_system_message(self, marionette):
marionette.start_session()
marionette.set_context(marionette.CONTEXT_CHROME)
marionette.set_script_timeout(45000)
        # Telephony APIs won't be available immediately upon emulator
        # boot; we have to wait for the system-message-listener-ready
# message before we'll be able to use them successfully. See
# bug 792647.
print 'waiting for system-message-listener-ready...'
try:
marionette.execute_async_script("""
waitFor(
function() { marionetteScriptFinished(true); },
function() { return isSystemMessageListenerReady(); }
);
""")
except ScriptTimeoutException:
print 'timed out'
# We silently ignore the timeout if it occurs, since
# isSystemMessageListenerReady() isn't available on
# older emulators. 45s *should* be enough of a delay
            # to allow telephony APIs to work.
pass
print 'done'
marionette.set_context(marionette.CONTEXT_CONTENT)
marionette.delete_session()
def connect(self):
self.adb = B2GInstance.check_adb(self.homedir, emulator=True)
self.start_adb()
online, offline = self._get_adb_devices()
now = datetime.datetime.now()
while online == set([]):
time.sleep(1)
if datetime.datetime.now() - now > datetime.timedelta(seconds=60):
raise Exception('timed out waiting for emulator to be available')
online, offline = self._get_adb_devices()
self.port = int(list(online)[0])
self.dm = devicemanagerADB.DeviceManagerADB(adbPath=self.adb,
deviceSerial='emulator-%d' % self.port)
def add_prefs_to_profile(self, prefs=()):
local_user_js = tempfile.mktemp(prefix='localuserjs')
self.dm.getFile(self.remote_user_js, local_user_js)
with open(local_user_js, 'a') as f:
f.write('%s\n' % '\n'.join(prefs))
self.dm.pushFile(local_user_js, self.remote_user_js)
def start(self):
self._check_for_b2g()
self.start_adb()
qemu_args = self.args[:]
if self.copy_userdata:
# Make a copy of the userdata.img for this instance of the emulator to use.
self._tmp_userdata = tempfile.mktemp(prefix='marionette')
shutil.copyfile(self.dataImg, self._tmp_userdata)
qemu_args[qemu_args.index('-data') + 1] = self._tmp_userdata
original_online, original_offline = self._get_adb_devices()
self.proc = subprocess.Popen(qemu_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
online, offline = self._get_adb_devices()
now = datetime.datetime.now()
while online - original_online == set([]):
time.sleep(1)
if datetime.datetime.now() - now > datetime.timedelta(seconds=60):
raise Exception('timed out waiting for emulator to start')
online, offline = self._get_adb_devices()
self.port = int(list(online - original_online)[0])
self._emulator_launched = True
self.dm = devicemanagerADB.DeviceManagerADB(adbPath=self.adb,
deviceSerial='emulator-%d' % self.port)
# bug 802877
time.sleep(10)
self.geo.set_default_location()
self.screen.initialize()
if self.logcat_dir:
self.save_logcat()
# setup DNS fix for networking
self._run_adb(['shell', 'setprop', 'net.dns1', '10.0.2.3'])
def setup(self, marionette, gecko_path=None):
if gecko_path:
self.install_gecko(gecko_path, marionette)
self.wait_for_system_message(marionette)
self.set_prefs(marionette)
def set_prefs(self, marionette):
marionette.start_session()
marionette.set_context(marionette.CONTEXT_CHROME)
for pref in self.prefs:
marionette.execute_script("""
Components.utils.import("resource://gre/modules/Services.jsm");
let argtype = typeof(arguments[1]);
switch(argtype) {
case 'boolean':
Services.prefs.setBoolPref(arguments[0], arguments[1]);
break;
case 'number':
Services.prefs.setIntPref(arguments[0], arguments[1]);
break;
default:
Services.prefs.setCharPref(arguments[0], arguments[1]);
}
""", [pref, self.prefs[pref]])
marionette.delete_session()
def restart_b2g(self):
print 'restarting B2G'
self.dm.shellCheckOutput(['stop', 'b2g'])
time.sleep(10)
self.dm.shellCheckOutput(['start', 'b2g'])
if not self.wait_for_port():
raise TimeoutException("Timeout waiting for marionette on port '%s'" % self.marionette_port)
def install_gecko(self, gecko_path, marionette):
"""
Install gecko into the emulator using adb push. Restart b2g after the
installation.
"""
# See bug 800102. We use this particular method of installing
# gecko in order to avoid an adb bug in which adb will sometimes
# hang indefinitely while copying large files to the system
# partition.
push_attempts = 10
print 'installing gecko binaries...'
        # see bug 809437 for the path that led to this madness
try:
# need to remount so we can write to /system/b2g
self._run_adb(['remount'])
self.dm.removeDir('/data/local/b2g')
self.dm.mkDir('/data/local/b2g')
for root, dirs, files in os.walk(gecko_path):
for filename in files:
rel_path = os.path.relpath(os.path.join(root, filename), gecko_path)
data_local_file = os.path.join('/data/local/b2g', rel_path)
for retry in range(1, push_attempts + 1):
print 'pushing', data_local_file, '(attempt %s of %s)' % (retry, push_attempts)
try:
self.dm.pushFile(os.path.join(root, filename), data_local_file)
break
except DMError:
if retry == push_attempts:
raise
self.dm.shellCheckOutput(['stop', 'b2g'])
for root, dirs, files in os.walk(gecko_path):
for filename in files:
rel_path = os.path.relpath(os.path.join(root, filename), gecko_path)
data_local_file = os.path.join('/data/local/b2g', rel_path)
system_b2g_file = os.path.join('/system/b2g', rel_path)
print 'copying', data_local_file, 'to', system_b2g_file
                    self.dm.shellCheckOutput(['dd',
                                              'if=%s' % data_local_file,
                                              'of=%s' % system_b2g_file])
self.restart_b2g()
except (DMError, MarionetteException):
# Bug 812395 - raise a single exception type for these so we can
# explicitly catch them elsewhere.
# print exception, but hide from mozharness error detection
exc = traceback.format_exc()
exc = exc.replace('Traceback', '_traceback')
print exc
raise InstallGeckoError("unable to restart B2G after installing gecko")
def install_busybox(self, busybox):
self._run_adb(['remount'])
push_attempts = 10
remote_file = "/system/bin/busybox"
for retry in range(1, push_attempts+1):
print 'pushing', remote_file, '(attempt %s of %s)' % (retry, push_attempts)
try:
self.dm.pushFile(busybox, remote_file)
break
except DMError:
if retry == push_attempts:
raise
self._run_adb(['shell', 'cd /system/bin; chmod 555 busybox; for x in `./busybox --list`; do ln -s ./busybox $x; done'])
self.dm._verifyZip()
def rotate_log(self, srclog, index=1):
""" Rotate a logfile, by recursively rotating logs further in the sequence,
deleting the last file if necessary.
"""
destlog = os.path.join(self.logcat_dir, 'emulator-%d.%d.log' % (self.port, index))
if os.access(destlog, os.F_OK):
if index == 3:
os.remove(destlog)
else:
self.rotate_log(destlog, index+1)
shutil.move(srclog, destlog)
def save_logcat(self):
""" Save the output of logcat to a file.
"""
filename = os.path.join(self.logcat_dir, "emulator-%d.log" % self.port)
if os.access(filename, os.F_OK):
self.rotate_log(filename)
cmd = [self.adb, '-s', 'emulator-%d' % self.port, 'logcat']
self.logcat_proc = LogcatProc(filename, cmd)
self.logcat_proc.run()
def setup_port_forwarding(self, remote_port):
""" Set up TCP port forwarding to the specified port on the device,
        using any available local port, and return the local port.
"""
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("",0))
local_port = s.getsockname()[1]
s.close()
output = self._run_adb(['forward',
'tcp:%d' % local_port,
'tcp:%d' % remote_port])
self.marionette_port = local_port
return local_port
def wait_for_port(self, timeout=300):
assert(self.marionette_port)
starttime = datetime.datetime.now()
while datetime.datetime.now() - starttime < datetime.timedelta(seconds=timeout):
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(('localhost', self.marionette_port))
data = sock.recv(16)
sock.close()
if '"from"' in data:
return True
except:
print traceback.format_exc()
time.sleep(1)
return False
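# Illustrative sketch (not part of marionette): the typical lifecycle of the
# Emulator class above. The B2G homedir is a hypothetical path, and a real
# harness would attach a Marionette session to the forwarded port.
if __name__ == '__main__':
    emulator = Emulator(homedir='/path/to/B2G', noWindow=True, arch='x86')
    emulator.start()  # boots the emulator and waits for it to appear in adb
    # 2828 is the port the Marionette server listens on inside the device.
    local_port = emulator.setup_port_forwarding(2828)
    if not emulator.wait_for_port():
        raise TimeoutException('Marionette not reachable on port %d' % local_port)
    print 'Marionette reachable on localhost:%d' % local_port
    emulator.close()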
| apache-2.0 |
apdjustino/DRCOG_Urbansim | synthicity/urbansim/lcmnl.py | 1 | 2370 | from synthicity.urbansim import interaction, mnl
import numpy as np, pandas as pd
import time
GPU = 0
EMTOL = 1e-02
MAXITER = 10000
def prep_cm_data(cmdata, numclasses):
    # Expand the class-membership covariates into one row per
    # (observation, class) pair, with class 0 as the base alternative
    # (its covariate block stays all zeros).
numobs, numvars = cmdata.shape
newcmdata = np.zeros((numobs*numclasses,numvars*(numclasses-1)))
for i in range(cmdata.shape[0]):
for j in range(1,numclasses):
newcmdata[i*numclasses+j,(j-1)*numvars:j*numvars] = cmdata[i]
return newcmdata
def lcmnl_estimate(cmdata,numclasses,csdata,numalts,chosen,maxiter=MAXITER,emtol=EMTOL,\
skipprep=False,csbeta=None,cmbeta=None):
loglik = -999999
if csbeta is None: csbeta = [np.random.rand(csdata.shape[1]) for i in range(numclasses)]
if not skipprep: cmdata = prep_cm_data(cmdata,numclasses)
if cmbeta is None: cmbeta = np.zeros(cmdata.shape[1])
for i in range(maxiter):
print "Running iteration %d" % (i+1)
print time.ctime()
# EXPECTATION
print "Running class membership model"
cmprobs = mnl.mnl_simulate(cmdata,cmbeta,numclasses,GPU=GPU,returnprobs=1)
csprobs = []
for cno in range(numclasses):
tmp = mnl.mnl_simulate(csdata,csbeta[cno],numalts,GPU=GPU,returnprobs=1)
tmp = np.sum(tmp*chosen,axis=1) # keep only chosen probs
csprobs.append(np.reshape(tmp,(-1,1)))
csprobs = np.concatenate(csprobs,axis=1)
h = csprobs * cmprobs
oldloglik = loglik
loglik = np.sum(np.log(np.sum(h,axis=1)))
print "current csbeta", csbeta
print "current cmbeta", cmbeta
print "current loglik", loglik, i+1, "\n\n"
if abs(loglik-oldloglik) < emtol: break
wts = h / np.reshape(np.sum(h,axis=1),(-1,1))
# MAXIMIZATION
for cno in range(numclasses):
print "Estimating class specific model for class %d" % (cno+1)
t1 = time.time()
weights=np.reshape(wts[:,cno],(-1,1))
print weights.shape
fit, results = mnl.mnl_estimate(csdata,chosen,numalts,GPU=GPU,weights=weights,beta=csbeta[cno])
print "Finished in %fs" % (time.time()-t1)
csbeta[cno] = zip(*results)[0]
print "Estimating class membership model"
t1 = time.time()
fit, results = mnl.mnl_estimate(cmdata,None,numclasses,GPU=GPU,weights=wts,lcgrad=True, \
beta=cmbeta,coeffrange=(-1000,1000))
print "Finished in %fs" % (time.time()-t1)
cmbeta = zip(*results)[0]
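# Hypothetical usage sketch (not part of the module): estimating a two-class
# latent class MNL on synthetic data. Shapes follow the conventions in
# lcmnl_estimate above: cmdata holds one row of class-membership covariates
# per observation, csdata stacks numalts alternative rows per observation,
# and chosen one-hot encodes each observation's chosen alternative.
if __name__ == '__main__':
    numobs, numalts, numclasses = 100, 4, 2
    cmdata = np.random.rand(numobs, 3)
    csdata = np.random.rand(numobs * numalts, 5)
    chosen = np.zeros((numobs, numalts))
    chosen[np.arange(numobs), np.random.randint(0, numalts, numobs)] = 1
    lcmnl_estimate(cmdata, numclasses, csdata, numalts, chosen, maxiter=5)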
| agpl-3.0 |
justinslee/Wai-Not-Makahiki | makahiki/apps/widgets/participation/admin.py | 9 | 2043 | """Administrator interface to score_mgr."""
from django.contrib import admin
from apps.managers.challenge_mgr import challenge_mgr
from apps.widgets.participation.models import ParticipationSetting, TeamParticipation
from apps.admin.admin import challenge_designer_site, challenge_manager_site, developer_site
class ParticipationSettingAdmin(admin.ModelAdmin):
"""EnergyGoal administrator interface definition."""
list_display = ["name", ]
list_display_links = ["name", ]
page_text = "There must only be one Participation Setting. You can edit the amount" + \
" of points awarded per player for the various levels of team participation."
def has_add_permission(self, request):
return False
def has_delete_permission(self, request, obj=None):
return False
admin.site.register(ParticipationSetting, ParticipationSettingAdmin)
challenge_designer_site.register(ParticipationSetting, ParticipationSettingAdmin)
challenge_manager_site.register(ParticipationSetting, ParticipationSettingAdmin)
developer_site.register(ParticipationSetting, ParticipationSettingAdmin)
challenge_mgr.register_designer_game_info_model("Participation Game", ParticipationSetting)
class TeamParticipationAdmin(admin.ModelAdmin):
"""EnergyGoal administrator interface definition."""
list_display = ["round_name", "team", "participation", "awarded_percent", "updated_at"]
list_filter = ["round_name"]
def has_add_permission(self, request):
return False
def has_delete_permission(self, request, obj=None):
return False
admin.site.register(TeamParticipation, TeamParticipationAdmin)
challenge_designer_site.register(TeamParticipation, TeamParticipationAdmin)
challenge_manager_site.register(TeamParticipation, TeamParticipationAdmin)
developer_site.register(TeamParticipation, TeamParticipationAdmin)
challenge_mgr.register_developer_game_info_model("Participation Game", ParticipationSetting)
challenge_mgr.register_developer_game_info_model("Participation Game", TeamParticipation)
| mit |
SerCeMan/intellij-community | python/helpers/pycharm/django_manage_commands_provider/_parser/_optparse.py | 80 | 1322 | # coding=utf-8
"""
Fetches arguments from optparse-based Django (< 1.8)
"""
__author__ = 'Ilya.Kazakevich'
from django_manage_commands_provider._parser import _utils
# noinspection PyUnusedLocal
# Parser here by contract
def process_command(dumper, command, parser):
"""
    Fetches arguments and options from command and parser and reports them to dumper.
    :param dumper: dumper to output data to
    :param parser: opt parser to use
    :param command: django command
:type dumper _xml.XmlDumper
:type parser optparse.OptionParser
:type command django.core.management.base.BaseCommand
"""
dumper.set_arguments(str(command.args)) # args should be string, but in some buggy cases it is not
# TODO: support subcommands
for opt in command.option_list:
num_of_args = int(opt.nargs) if opt.nargs else 0
opt_type = None
if num_of_args > 0:
opt_type = _utils.get_opt_type(opt)
        # There is no official way to access this field, so I use a protected one. At least it is public API.
# noinspection PyProtectedMember
dumper.add_command_option(
long_opt_names=opt._long_opts,
short_opt_names=opt._short_opts,
help_text=opt.help,
argument_info=(num_of_args, opt_type) if num_of_args else None)
| apache-2.0 |
geodrinx/gearthview | ext-libs/twisted/web/test/_util.py | 33 | 2475 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
General helpers for L{twisted.web} unit tests.
"""
from twisted.internet.defer import succeed
from twisted.web import server
from twisted.trial.unittest import TestCase
from twisted.python.failure import Failure
from twisted.web._flatten import flattenString
from twisted.web.error import FlattenerError
def _render(resource, request):
result = resource.render(request)
if isinstance(result, str):
request.write(result)
request.finish()
return succeed(None)
elif result is server.NOT_DONE_YET:
if request.finished:
return succeed(None)
else:
return request.notifyFinish()
else:
raise ValueError("Unexpected return value: %r" % (result,))
class FlattenTestCase(TestCase):
"""
A test case that assists with testing L{twisted.web._flatten}.
"""
def assertFlattensTo(self, root, target):
"""
Assert that a root element, when flattened, is equal to a string.
"""
d = flattenString(None, root)
d.addCallback(lambda s: self.assertEqual(s, target))
return d
def assertFlattensImmediately(self, root, target):
"""
Assert that a root element, when flattened, is equal to a string, and
        performs no asynchronous Deferred processing.
This version is more convenient in tests which wish to make multiple
assertions about flattening, since it can be called multiple times
without having to add multiple callbacks.
@return: the result of rendering L{root}, which should be equivalent to
L{target}.
@rtype: L{bytes}
"""
results = []
it = self.assertFlattensTo(root, target)
it.addBoth(results.append)
# Do our best to clean it up if something goes wrong.
self.addCleanup(it.cancel)
if not results:
self.fail("Rendering did not complete immediately.")
result = results[0]
if isinstance(result, Failure):
result.raiseException()
return results[0]
def assertFlatteningRaises(self, root, exn):
"""
Assert flattening a root element raises a particular exception.
"""
d = self.assertFailure(self.assertFlattensTo(root, ''), FlattenerError)
d.addCallback(lambda exc: self.assertIsInstance(exc._exception, exn))
return d
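# Illustrative sketch (not part of twisted): a test case built on
# FlattenTestCase above, flattening a small twisted.web.template tag tree.
class ExampleFlattenTests(FlattenTestCase):
    def test_simpleTag(self):
        from twisted.web.template import tags
        # flattenString yields bytes, so the expected value is a byte string.
        return self.assertFlattensTo(tags.span('hi'), b'<span>hi</span>')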
| gpl-3.0 |
WhireCrow/openwrt-mt7620 | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/test/test_dummy_threading.py | 194 | 1840 | from test import test_support
import unittest
import dummy_threading as _threading
import time
class DummyThreadingTestCase(unittest.TestCase):
class TestThread(_threading.Thread):
def run(self):
global running
global sema
global mutex
# Uncomment if testing another module, such as the real 'threading'
# module.
#delay = random.random() * 2
delay = 0
if test_support.verbose:
print 'task', self.name, 'will run for', delay, 'sec'
sema.acquire()
mutex.acquire()
running += 1
if test_support.verbose:
print running, 'tasks are running'
mutex.release()
time.sleep(delay)
if test_support.verbose:
print 'task', self.name, 'done'
mutex.acquire()
running -= 1
if test_support.verbose:
print self.name, 'is finished.', running, 'tasks are running'
mutex.release()
sema.release()
def setUp(self):
self.numtasks = 10
global sema
sema = _threading.BoundedSemaphore(value=3)
global mutex
mutex = _threading.RLock()
global running
running = 0
self.threads = []
def test_tasks(self):
for i in range(self.numtasks):
t = self.TestThread(name="<thread %d>"%i)
self.threads.append(t)
t.start()
if test_support.verbose:
print 'waiting for all tasks to complete'
for t in self.threads:
t.join()
if test_support.verbose:
print 'all tasks done'
def test_main():
test_support.run_unittest(DummyThreadingTestCase)
if __name__ == '__main__':
test_main()
| gpl-2.0 |
ewmoore/numpy | numpy/oldnumeric/rng.py | 99 | 3754 | # This module re-creates the RNG interface from Numeric
# Replace import RNG with import numpy.oldnumeric.rng as RNG
#
# It is for backwards compatibility only.
__all__ = ['CreateGenerator','ExponentialDistribution','LogNormalDistribution',
'NormalDistribution', 'UniformDistribution', 'error', 'ranf',
'default_distribution', 'random_sample', 'standard_generator']
import numpy.random.mtrand as mt
import math
class error(Exception):
pass
class Distribution(object):
def __init__(self, meth, *args):
self._meth = meth
self._args = args
def density(self,x):
raise NotImplementedError
def __call__(self, x):
return self.density(x)
def _onesample(self, rng):
return getattr(rng, self._meth)(*self._args)
def _sample(self, rng, n):
kwds = {'size' : n}
return getattr(rng, self._meth)(*self._args, **kwds)
class ExponentialDistribution(Distribution):
def __init__(self, lambda_):
if (lambda_ <= 0):
raise error, "parameter must be positive"
Distribution.__init__(self, 'exponential', lambda_)
    def density(self, x):
if x < 0:
return 0.0
else:
lambda_ = self._args[0]
return lambda_*math.exp(-lambda_*x)
class LogNormalDistribution(Distribution):
def __init__(self, m, s):
m = float(m)
s = float(s)
if (s <= 0):
raise error, "standard deviation must be positive"
Distribution.__init__(self, 'lognormal', m, s)
        sn = math.log(1.0 + s*s/(m*m))
self._mn = math.log(m)-0.5*sn
self._sn = math.sqrt(sn)
self._fac = 1.0/math.sqrt(2*math.pi)/self._sn
    def density(self, x):
m,s = self._args
y = (math.log(x)-self._mn)/self._sn
return self._fac*math.exp(-0.5*y*y)/x
class NormalDistribution(Distribution):
def __init__(self, m, s):
m = float(m)
s = float(s)
if (s <= 0):
raise error, "standard deviation must be positive"
Distribution.__init__(self, 'normal', m, s)
self._fac = 1.0/math.sqrt(2*math.pi)/s
    def density(self, x):
m,s = self._args
y = (x-m)/s
return self._fac*math.exp(-0.5*y*y)
class UniformDistribution(Distribution):
def __init__(self, a, b):
a = float(a)
b = float(b)
width = b-a
if (width <=0):
raise error, "width of uniform distribution must be > 0"
Distribution.__init__(self, 'uniform', a, b)
self._fac = 1.0/width
    def density(self, x):
a, b = self._args
if (x < a) or (x >= b):
return 0.0
else:
return self._fac
default_distribution = UniformDistribution(0.0,1.0)
class CreateGenerator(object):
def __init__(self, seed, dist=None):
if seed <= 0:
self._rng = mt.RandomState()
elif seed > 0:
self._rng = mt.RandomState(seed)
if dist is None:
dist = default_distribution
if not isinstance(dist, Distribution):
raise error, "Not a distribution object"
self._dist = dist
def ranf(self):
return self._dist._onesample(self._rng)
def sample(self, n):
return self._dist._sample(self._rng, n)
standard_generator = CreateGenerator(-1)
def ranf():
"ranf() = a random number from the standard generator."
return standard_generator.ranf()
def random_sample(*n):
"""random_sample(n) = array of n random numbers;
random_sample(n1, n2, ...)= random array of shape (n1, n2, ..)"""
if not n:
return standard_generator.ranf()
m = 1
for i in n:
m = m * i
return standard_generator.sample(m).reshape(*n)
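# Illustrative usage sketch (not part of the compatibility module): drawing
# samples through the RNG-style interface defined above.
if __name__ == '__main__':
    gen = CreateGenerator(42, NormalDistribution(0.0, 1.0))
    print gen.ranf()           # a single standard-normal draw
    print gen.sample(5)        # five draws as an array
    print random_sample(2, 3)  # 2x3 uniform array from the standard generator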
| bsd-3-clause |
40223101/2015final2 | static/Brython3.1.0-20150301-090019/Lib/abc.py | 765 | 8057 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) according to PEP 3119."""
from _weakrefset import WeakSet
def abstractmethod(funcobj):
"""A decorator indicating abstract methods.
Requires that the metaclass is ABCMeta or derived from it. A
class that has a metaclass derived from ABCMeta cannot be
instantiated unless all of its abstract methods are overridden.
The abstract methods can be called using any of the normal
'super' call mechanisms.
Usage:
class C(metaclass=ABCMeta):
@abstractmethod
def my_abstract_method(self, ...):
...
"""
funcobj.__isabstractmethod__ = True
return funcobj
class abstractclassmethod(classmethod):
"""
A decorator indicating abstract classmethods.
Similar to abstractmethod.
Usage:
class C(metaclass=ABCMeta):
@abstractclassmethod
def my_abstract_classmethod(cls, ...):
...
'abstractclassmethod' is deprecated. Use 'classmethod' with
'abstractmethod' instead.
"""
__isabstractmethod__ = True
def __init__(self, callable):
callable.__isabstractmethod__ = True
super().__init__(callable)
class abstractstaticmethod(staticmethod):
"""
A decorator indicating abstract staticmethods.
Similar to abstractmethod.
Usage:
class C(metaclass=ABCMeta):
@abstractstaticmethod
def my_abstract_staticmethod(...):
...
'abstractstaticmethod' is deprecated. Use 'staticmethod' with
'abstractmethod' instead.
"""
__isabstractmethod__ = True
def __init__(self, callable):
callable.__isabstractmethod__ = True
super().__init__(callable)
class abstractproperty(property):
"""
A decorator indicating abstract properties.
Requires that the metaclass is ABCMeta or derived from it. A
class that has a metaclass derived from ABCMeta cannot be
instantiated unless all of its abstract properties are overridden.
The abstract properties can be called using any of the normal
'super' call mechanisms.
Usage:
class C(metaclass=ABCMeta):
@abstractproperty
def my_abstract_property(self):
...
This defines a read-only property; you can also define a read-write
abstract property using the 'long' form of property declaration:
class C(metaclass=ABCMeta):
def getx(self): ...
def setx(self, value): ...
x = abstractproperty(getx, setx)
'abstractproperty' is deprecated. Use 'property' with 'abstractmethod'
instead.
"""
__isabstractmethod__ = True
class ABCMeta(type):
"""Metaclass for defining Abstract Base Classes (ABCs).
Use this metaclass to create an ABC. An ABC can be subclassed
directly, and then acts as a mix-in class. You can also register
unrelated concrete classes (even built-in classes) and unrelated
ABCs as 'virtual subclasses' -- these and their descendants will
be considered subclasses of the registering ABC by the built-in
issubclass() function, but the registering ABC won't show up in
their MRO (Method Resolution Order) nor will method
implementations defined by the registering ABC be callable (not
even via super()).
"""
# A global counter that is incremented each time a class is
# registered as a virtual subclass of anything. It forces the
# negative cache to be cleared before its next use.
_abc_invalidation_counter = 0
def __new__(mcls, name, bases, namespace):
cls = super().__new__(mcls, name, bases, namespace)
# Compute set of abstract method names
abstracts = {name
for name, value in namespace.items()
if getattr(value, "__isabstractmethod__", False)}
for base in bases:
for name in getattr(base, "__abstractmethods__", set()):
value = getattr(cls, name, None)
if getattr(value, "__isabstractmethod__", False):
abstracts.add(name)
cls.__abstractmethods__ = frozenset(abstracts)
# Set up inheritance registry
cls._abc_registry = WeakSet()
cls._abc_cache = WeakSet()
cls._abc_negative_cache = WeakSet()
cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
return cls
def register(cls, subclass):
"""Register a virtual subclass of an ABC.
Returns the subclass, to allow usage as a class decorator.
"""
if not isinstance(subclass, type):
raise TypeError("Can only register classes")
if issubclass(subclass, cls):
return subclass # Already a subclass
# Subtle: test for cycles *after* testing for "already a subclass";
# this means we allow X.register(X) and interpret it as a no-op.
if issubclass(cls, subclass):
# This would create a cycle, which is bad for the algorithm below
raise RuntimeError("Refusing to create an inheritance cycle")
cls._abc_registry.add(subclass)
ABCMeta._abc_invalidation_counter += 1 # Invalidate negative cache
return subclass
def _dump_registry(cls, file=None):
"""Debug helper to print the ABC registry."""
print("Class: %s.%s" % (cls.__module__, cls.__name__), file=file)
print("Inv.counter: %s" % ABCMeta._abc_invalidation_counter, file=file)
for name in sorted(cls.__dict__.keys()):
if name.startswith("_abc_"):
value = getattr(cls, name)
print("%s: %r" % (name, value), file=file)
def __instancecheck__(cls, instance):
"""Override for isinstance(instance, cls)."""
# Inline the cache checking
subclass = instance.__class__
if subclass in cls._abc_cache:
return True
subtype = type(instance)
if subtype is subclass:
if (cls._abc_negative_cache_version ==
ABCMeta._abc_invalidation_counter and
subclass in cls._abc_negative_cache):
return False
# Fall back to the subclass check.
return cls.__subclasscheck__(subclass)
return any(cls.__subclasscheck__(c) for c in {subclass, subtype})
def __subclasscheck__(cls, subclass):
"""Override for issubclass(subclass, cls)."""
# Check cache
if subclass in cls._abc_cache:
return True
# Check negative cache; may have to invalidate
if cls._abc_negative_cache_version < ABCMeta._abc_invalidation_counter:
# Invalidate the negative cache
cls._abc_negative_cache = WeakSet()
cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
elif subclass in cls._abc_negative_cache:
return False
# Check the subclass hook
ok = cls.__subclasshook__(subclass)
if ok is not NotImplemented:
assert isinstance(ok, bool)
if ok:
cls._abc_cache.add(subclass)
else:
cls._abc_negative_cache.add(subclass)
return ok
# Check if it's a direct subclass
if cls in getattr(subclass, '__mro__', ()):
cls._abc_cache.add(subclass)
return True
# Check if it's a subclass of a registered class (recursive)
for rcls in cls._abc_registry:
if issubclass(subclass, rcls):
cls._abc_cache.add(subclass)
return True
# Check if it's a subclass of a subclass (recursive)
for scls in cls.__subclasses__():
if issubclass(subclass, scls):
cls._abc_cache.add(subclass)
return True
# No dice; update negative cache
cls._abc_negative_cache.add(subclass)
return False
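# A minimal sketch (not part of the module) of virtual subclassing: once a
# class is registered, issubclass()/isinstance() accept it, but the ABC never
# appears in the registered class's MRO and contributes no method
# implementations.
#
#     class Sized(metaclass=ABCMeta):
#         @abstractmethod
#         def __len__(self): ...
#
#     Sized.register(tuple)
#     assert issubclass(tuple, Sized)
#     assert isinstance((), Sized)
#     assert Sized not in tuple.__mro__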
| gpl-3.0 |
beiko-lab/gengis | bin/Lib/pickletools.py | 13 | 76813 | '''"Executable documentation" for the pickle module.
Extensive comments about the pickle protocols and pickle-machine opcodes
can be found here. Some functions meant for external use:
genops(pickle)
Generate all the opcodes in a pickle, as (opcode, arg, position) triples.
dis(pickle, out=None, memo=None, indentlevel=4)
Print a symbolic disassembly of a pickle.
'''
__all__ = ['dis', 'genops', 'optimize']
# Other ideas:
#
# - A pickle verifier: read a pickle and check it exhaustively for
# well-formedness. dis() does a lot of this already.
#
# - A protocol identifier: examine a pickle and return its protocol number
# (== the highest .proto attr value among all the opcodes in the pickle).
# dis() already prints this info at the end.
#
# - A pickle optimizer: for example, tuple-building code is sometimes more
# elaborate than necessary, catering for the possibility that the tuple
# is recursive. Or lots of times a PUT is generated that's never accessed
# by a later GET.
"""
"A pickle" is a program for a virtual pickle machine (PM, but more accurately
called an unpickling machine). It's a sequence of opcodes, interpreted by the
PM, building an arbitrarily complex Python object.
For the most part, the PM is very simple: there are no looping, testing, or
conditional instructions, no arithmetic and no function calls. Opcodes are
executed once each, from first to last, until a STOP opcode is reached.
The PM has two data areas, "the stack" and "the memo".
Many opcodes push Python objects onto the stack; e.g., INT pushes a Python
integer object on the stack, whose value is gotten from a decimal string
literal immediately following the INT opcode in the pickle bytestream. Other
opcodes take Python objects off the stack. The result of unpickling is
whatever object is left on the stack when the final STOP opcode is executed.
The memo is simply an array of objects, or it can be implemented as a dict
mapping little integers to objects. The memo serves as the PM's "long term
memory", and the little integers indexing the memo are akin to variable
names. Some opcodes pop a stack object into the memo at a given index,
and others push a memo object at a given index onto the stack again.
At heart, that's all the PM has. Subtleties arise for these reasons:
+ Object identity. Objects can be arbitrarily complex, and subobjects
may be shared (for example, the list [a, a] refers to the same object a
twice). It can be vital that unpickling recreate an isomorphic object
graph, faithfully reproducing sharing.
+ Recursive objects. For example, after "L = []; L.append(L)", L is a
list, and L[0] is the same list. This is related to the object identity
point, and some sequences of pickle opcodes are subtle in order to
get the right result in all cases.
+ Things pickle doesn't know everything about. Examples of things pickle
does know everything about are Python's builtin scalar and container
types, like ints and tuples. They generally have opcodes dedicated to
them. For things like module references and instances of user-defined
classes, pickle's knowledge is limited. Historically, many enhancements
have been made to the pickle protocol in order to do a better (faster,
and/or more compact) job on those.
+ Backward compatibility and micro-optimization. As explained below,
pickle opcodes never go away, not even when better ways to do a thing
get invented. The repertoire of the PM just keeps growing over time.
For example, protocol 0 had two opcodes for building Python integers (INT
and LONG), protocol 1 added three more for more-efficient pickling of short
integers, and protocol 2 added two more for more-efficient pickling of
long integers (before protocol 2, the only ways to pickle a Python long
took time quadratic in the number of digits, for both pickling and
unpickling). "Opcode bloat" isn't so much a subtlety as a source of
wearying complication.
Pickle protocols:
For compatibility, the meaning of a pickle opcode never changes. Instead new
pickle opcodes get added, and each version's unpickler can handle all the
pickle opcodes in all protocol versions to date. So old pickles continue to
be readable forever. The pickler can generally be told to restrict itself to
the subset of opcodes available under previous protocol versions too, so that
users can create pickles under the current version readable by older
versions. However, a pickle does not contain its version number embedded
within it. If an older unpickler tries to read a pickle using a later
protocol, the result is most likely an exception due to seeing an unknown (in
the older unpickler) opcode.
The original pickle used what's now called "protocol 0", and what was called
"text mode" before Python 2.3. The entire pickle bytestream is made up of
printable 7-bit ASCII characters, plus the newline character, in protocol 0.
That's why it was called text mode. Protocol 0 is small and elegant, but
sometimes painfully inefficient.
The second major set of additions is now called "protocol 1", and was called
"binary mode" before Python 2.3. This added many opcodes with arguments
consisting of arbitrary bytes, including NUL bytes and unprintable "high bit"
bytes. Binary mode pickles can be substantially smaller than equivalent
text mode pickles, and sometimes faster too; e.g., BININT represents a 4-byte
int as 4 bytes following the opcode, which is cheaper to unpickle than the
(perhaps) 11-character decimal string attached to INT. Protocol 1 also added
a number of opcodes that operate on many stack elements at once (like APPENDS
and SETITEMS), and "shortcut" opcodes (like EMPTY_DICT and EMPTY_TUPLE).
The third major set of additions came in Python 2.3, and is called "protocol
2". This added:
- A better way to pickle instances of new-style classes (NEWOBJ).
- A way for a pickle to identify its protocol (PROTO).
- Time- and space- efficient pickling of long ints (LONG{1,4}).
- Shortcuts for small tuples (TUPLE{1,2,3}).
- Dedicated opcodes for bools (NEWTRUE, NEWFALSE).
- The "extension registry", a vector of popular objects that can be pushed
efficiently by index (EXT{1,2,4}). This is akin to the memo and GET, but
the registry contents are predefined (there's nothing akin to the memo's
PUT).
Another independent change with Python 2.3 is the abandonment of any
pretense that it might be safe to load pickles received from untrusted
parties -- no sufficient security analysis has been done to guarantee
this and there isn't a use case that warrants the expense of such an
analysis.
To this end, all tests for __safe_for_unpickling__ or for
copy_reg.safe_constructors are removed from the unpickling code.
References to these variables in the descriptions below are to be seen
as describing unpickling in Python 2.2 and before.
"""
# Meta-rule: Descriptions are stored in instances of descriptor objects,
# with plain constructors. No meta-language is defined from which
# descriptors could be constructed. If you want, e.g., XML, write a little
# program to generate XML from the objects.
##############################################################################
# Some pickle opcodes have an argument, following the opcode in the
# bytestream. An argument is of a specific type, described by an instance
# of ArgumentDescriptor. These are not to be confused with arguments taken
# off the stack -- ArgumentDescriptor applies only to arguments embedded in
# the opcode stream, immediately following an opcode.
# Represents the number of bytes consumed by an argument delimited by the
# next newline character.
UP_TO_NEWLINE = -1
# Represents the number of bytes consumed by a two-argument opcode where
# the first argument gives the number of bytes in the second argument.
TAKEN_FROM_ARGUMENT1 = -2 # num bytes is 1-byte unsigned int
TAKEN_FROM_ARGUMENT4 = -3 # num bytes is 4-byte signed little-endian int
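# For example (a sketch): SHORT_BINSTRING uses TAKEN_FROM_ARGUMENT1 -- in the
# bytestream 'U\x03abc', 'U' is the opcode, '\x03' says three bytes follow,
# and 'abc' is the string content itself.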
class ArgumentDescriptor(object):
__slots__ = (
# name of descriptor record, also a module global name; a string
'name',
# length of argument, in bytes; an int; UP_TO_NEWLINE and
# TAKEN_FROM_ARGUMENT{1,4} are negative values for variable-length
# cases
'n',
# a function taking a file-like object, reading this kind of argument
# from the object at the current position, advancing the current
# position by n bytes, and returning the value of the argument
'reader',
# human-readable docs for this arg descriptor; a string
'doc',
)
def __init__(self, name, n, reader, doc):
assert isinstance(name, str)
self.name = name
assert isinstance(n, int) and (n >= 0 or
n in (UP_TO_NEWLINE,
TAKEN_FROM_ARGUMENT1,
TAKEN_FROM_ARGUMENT4))
self.n = n
self.reader = reader
assert isinstance(doc, str)
self.doc = doc
from struct import unpack as _unpack
def read_uint1(f):
r"""
>>> import StringIO
>>> read_uint1(StringIO.StringIO('\xff'))
255
"""
data = f.read(1)
if data:
return ord(data)
raise ValueError("not enough data in stream to read uint1")
uint1 = ArgumentDescriptor(
name='uint1',
n=1,
reader=read_uint1,
doc="One-byte unsigned integer.")
def read_uint2(f):
r"""
>>> import StringIO
>>> read_uint2(StringIO.StringIO('\xff\x00'))
255
>>> read_uint2(StringIO.StringIO('\xff\xff'))
65535
"""
data = f.read(2)
if len(data) == 2:
return _unpack("<H", data)[0]
raise ValueError("not enough data in stream to read uint2")
uint2 = ArgumentDescriptor(
name='uint2',
n=2,
reader=read_uint2,
doc="Two-byte unsigned integer, little-endian.")
def read_int4(f):
r"""
>>> import StringIO
>>> read_int4(StringIO.StringIO('\xff\x00\x00\x00'))
255
>>> read_int4(StringIO.StringIO('\x00\x00\x00\x80')) == -(2**31)
True
"""
data = f.read(4)
if len(data) == 4:
return _unpack("<i", data)[0]
raise ValueError("not enough data in stream to read int4")
int4 = ArgumentDescriptor(
name='int4',
n=4,
reader=read_int4,
doc="Four-byte signed integer, little-endian, 2's complement.")
def read_stringnl(f, decode=True, stripquotes=True):
r"""
>>> import StringIO
>>> read_stringnl(StringIO.StringIO("'abcd'\nefg\n"))
'abcd'
>>> read_stringnl(StringIO.StringIO("\n"))
Traceback (most recent call last):
...
ValueError: no string quotes around ''
>>> read_stringnl(StringIO.StringIO("\n"), stripquotes=False)
''
>>> read_stringnl(StringIO.StringIO("''\n"))
''
>>> read_stringnl(StringIO.StringIO('"abcd"'))
Traceback (most recent call last):
...
ValueError: no newline found when trying to read stringnl
Embedded escapes are undone in the result.
>>> read_stringnl(StringIO.StringIO(r"'a\n\\b\x00c\td'" + "\n'e'"))
'a\n\\b\x00c\td'
"""
data = f.readline()
if not data.endswith('\n'):
raise ValueError("no newline found when trying to read stringnl")
data = data[:-1] # lose the newline
if stripquotes:
for q in "'\"":
if data.startswith(q):
if not data.endswith(q):
raise ValueError("strinq quote %r not found at both "
"ends of %r" % (q, data))
data = data[1:-1]
break
else:
raise ValueError("no string quotes around %r" % data)
# I'm not sure when 'string_escape' was added to the std codecs; it's
# crazy not to use it if it's there.
if decode:
data = data.decode('string_escape')
return data
stringnl = ArgumentDescriptor(
name='stringnl',
n=UP_TO_NEWLINE,
reader=read_stringnl,
doc="""A newline-terminated string.
This is a repr-style string, with embedded escapes, and
bracketing quotes.
""")
def read_stringnl_noescape(f):
return read_stringnl(f, decode=False, stripquotes=False)
stringnl_noescape = ArgumentDescriptor(
name='stringnl_noescape',
n=UP_TO_NEWLINE,
reader=read_stringnl_noescape,
doc="""A newline-terminated string.
This is a str-style string, without embedded escapes,
or bracketing quotes. It should consist solely of
printable ASCII characters.
""")
def read_stringnl_noescape_pair(f):
r"""
>>> import StringIO
>>> read_stringnl_noescape_pair(StringIO.StringIO("Queue\nEmpty\njunk"))
'Queue Empty'
"""
return "%s %s" % (read_stringnl_noescape(f), read_stringnl_noescape(f))
stringnl_noescape_pair = ArgumentDescriptor(
name='stringnl_noescape_pair',
n=UP_TO_NEWLINE,
reader=read_stringnl_noescape_pair,
doc="""A pair of newline-terminated strings.
These are str-style strings, without embedded
escapes, or bracketing quotes. They should
consist solely of printable ASCII characters.
The pair is returned as a single string, with
a single blank separating the two strings.
""")
def read_string4(f):
r"""
>>> import StringIO
>>> read_string4(StringIO.StringIO("\x00\x00\x00\x00abc"))
''
>>> read_string4(StringIO.StringIO("\x03\x00\x00\x00abcdef"))
'abc'
>>> read_string4(StringIO.StringIO("\x00\x00\x00\x03abcdef"))
Traceback (most recent call last):
...
ValueError: expected 50331648 bytes in a string4, but only 6 remain
"""
n = read_int4(f)
if n < 0:
raise ValueError("string4 byte count < 0: %d" % n)
data = f.read(n)
if len(data) == n:
return data
raise ValueError("expected %d bytes in a string4, but only %d remain" %
(n, len(data)))
string4 = ArgumentDescriptor(
name="string4",
n=TAKEN_FROM_ARGUMENT4,
reader=read_string4,
doc="""A counted string.
The first argument is a 4-byte little-endian signed int giving
the number of bytes in the string, and the second argument is
that many bytes.
""")
def read_string1(f):
r"""
>>> import StringIO
>>> read_string1(StringIO.StringIO("\x00"))
''
>>> read_string1(StringIO.StringIO("\x03abcdef"))
'abc'
"""
n = read_uint1(f)
assert n >= 0
data = f.read(n)
if len(data) == n:
return data
raise ValueError("expected %d bytes in a string1, but only %d remain" %
(n, len(data)))
string1 = ArgumentDescriptor(
name="string1",
n=TAKEN_FROM_ARGUMENT1,
reader=read_string1,
doc="""A counted string.
The first argument is a 1-byte unsigned int giving the number
of bytes in the string, and the second argument is that many
bytes.
""")
def read_unicodestringnl(f):
r"""
>>> import StringIO
>>> read_unicodestringnl(StringIO.StringIO("abc\uabcd\njunk"))
u'abc\uabcd'
"""
data = f.readline()
if not data.endswith('\n'):
raise ValueError("no newline found when trying to read "
"unicodestringnl")
data = data[:-1] # lose the newline
return unicode(data, 'raw-unicode-escape')
unicodestringnl = ArgumentDescriptor(
name='unicodestringnl',
n=UP_TO_NEWLINE,
reader=read_unicodestringnl,
doc="""A newline-terminated Unicode string.
This is raw-unicode-escape encoded, so consists of
printable ASCII characters, and may contain embedded
escape sequences.
""")
def read_unicodestring4(f):
r"""
>>> import StringIO
>>> s = u'abcd\uabcd'
>>> enc = s.encode('utf-8')
>>> enc
'abcd\xea\xaf\x8d'
>>> n = chr(len(enc)) + chr(0) * 3 # little-endian 4-byte length
>>> t = read_unicodestring4(StringIO.StringIO(n + enc + 'junk'))
>>> s == t
True
>>> read_unicodestring4(StringIO.StringIO(n + enc[:-1]))
Traceback (most recent call last):
...
ValueError: expected 7 bytes in a unicodestring4, but only 6 remain
"""
n = read_int4(f)
if n < 0:
raise ValueError("unicodestring4 byte count < 0: %d" % n)
data = f.read(n)
if len(data) == n:
return unicode(data, 'utf-8')
raise ValueError("expected %d bytes in a unicodestring4, but only %d "
"remain" % (n, len(data)))
unicodestring4 = ArgumentDescriptor(
name="unicodestring4",
n=TAKEN_FROM_ARGUMENT4,
reader=read_unicodestring4,
doc="""A counted Unicode string.
The first argument is a 4-byte little-endian signed int
giving the number of bytes in the string, and the second
argument-- the UTF-8 encoding of the Unicode string --
contains that many bytes.
""")
def read_decimalnl_short(f):
r"""
>>> import StringIO
>>> read_decimalnl_short(StringIO.StringIO("1234\n56"))
1234
>>> read_decimalnl_short(StringIO.StringIO("1234L\n56"))
Traceback (most recent call last):
...
ValueError: trailing 'L' not allowed in '1234L'
"""
s = read_stringnl(f, decode=False, stripquotes=False)
if s.endswith("L"):
raise ValueError("trailing 'L' not allowed in %r" % s)
# It's not necessarily true that the result fits in a Python short int:
# the pickle may have been written on a 64-bit box. There's also a hack
# for True and False here.
if s == "00":
return False
elif s == "01":
return True
try:
return int(s)
except OverflowError:
return long(s)
def read_decimalnl_long(f):
r"""
>>> import StringIO
>>> read_decimalnl_long(StringIO.StringIO("1234\n56"))
Traceback (most recent call last):
...
ValueError: trailing 'L' required in '1234'
Someday the trailing 'L' will probably go away from this output.
>>> read_decimalnl_long(StringIO.StringIO("1234L\n56"))
1234L
>>> read_decimalnl_long(StringIO.StringIO("123456789012345678901234L\n6"))
123456789012345678901234L
"""
s = read_stringnl(f, decode=False, stripquotes=False)
if not s.endswith("L"):
raise ValueError("trailing 'L' required in %r" % s)
return long(s)
decimalnl_short = ArgumentDescriptor(
name='decimalnl_short',
n=UP_TO_NEWLINE,
reader=read_decimalnl_short,
doc="""A newline-terminated decimal integer literal.
This never has a trailing 'L', and the integer fit
in a short Python int on the box where the pickle
was written -- but there's no guarantee it will fit
in a short Python int on the box where the pickle
is read.
""")
decimalnl_long = ArgumentDescriptor(
name='decimalnl_long',
n=UP_TO_NEWLINE,
reader=read_decimalnl_long,
doc="""A newline-terminated decimal integer literal.
This has a trailing 'L', and can represent integers
of any size.
""")
def read_floatnl(f):
r"""
>>> import StringIO
>>> read_floatnl(StringIO.StringIO("-1.25\n6"))
-1.25
"""
s = read_stringnl(f, decode=False, stripquotes=False)
return float(s)
floatnl = ArgumentDescriptor(
name='floatnl',
n=UP_TO_NEWLINE,
reader=read_floatnl,
doc="""A newline-terminated decimal floating literal.
In general this requires 17 significant digits for roundtrip
identity, and pickling then unpickling infinities, NaNs, and
minus zero doesn't work across boxes, or even on a single box
on some platforms (e.g., Windows can't read the strings it
produces for infinities or NaNs).
""")
def read_float8(f):
r"""
>>> import StringIO, struct
>>> raw = struct.pack(">d", -1.25)
>>> raw
'\xbf\xf4\x00\x00\x00\x00\x00\x00'
>>> read_float8(StringIO.StringIO(raw + "\n"))
-1.25
"""
data = f.read(8)
if len(data) == 8:
return _unpack(">d", data)[0]
raise ValueError("not enough data in stream to read float8")
float8 = ArgumentDescriptor(
name='float8',
n=8,
reader=read_float8,
doc="""An 8-byte binary representation of a float, big-endian.
The format is unique to Python, and shared with the struct
module (format string '>d') "in theory" (the struct and cPickle
implementations don't share the code -- they should). It's
strongly related to the IEEE-754 double format, and, in normal
cases, is in fact identical to the big-endian 754 double format.
On other boxes the dynamic range is limited to that of a 754
double, and "add a half and chop" rounding is used to reduce
the precision to 53 bits. However, even on a 754 box,
infinities, NaNs, and minus zero may not be handled correctly
(may not survive roundtrip pickling intact).
""")
# Protocol 2 formats
from pickle import decode_long
def read_long1(f):
r"""
>>> import StringIO
>>> read_long1(StringIO.StringIO("\x00"))
0L
>>> read_long1(StringIO.StringIO("\x02\xff\x00"))
255L
>>> read_long1(StringIO.StringIO("\x02\xff\x7f"))
32767L
>>> read_long1(StringIO.StringIO("\x02\x00\xff"))
-256L
>>> read_long1(StringIO.StringIO("\x02\x00\x80"))
-32768L
"""
n = read_uint1(f)
data = f.read(n)
if len(data) != n:
raise ValueError("not enough data in stream to read long1")
return decode_long(data)
long1 = ArgumentDescriptor(
name="long1",
n=TAKEN_FROM_ARGUMENT1,
reader=read_long1,
doc="""A binary long, little-endian, using 1-byte size.
This first reads one byte as an unsigned size, then reads that
many bytes and interprets them as a little-endian 2's-complement long.
If the size is 0, that's taken as a shortcut for the long 0L.
""")
def read_long4(f):
r"""
>>> import StringIO
>>> read_long4(StringIO.StringIO("\x02\x00\x00\x00\xff\x00"))
255L
>>> read_long4(StringIO.StringIO("\x02\x00\x00\x00\xff\x7f"))
32767L
>>> read_long4(StringIO.StringIO("\x02\x00\x00\x00\x00\xff"))
-256L
>>> read_long4(StringIO.StringIO("\x02\x00\x00\x00\x00\x80"))
-32768L
>>> read_long1(StringIO.StringIO("\x00\x00\x00\x00"))
0L
"""
n = read_int4(f)
if n < 0:
raise ValueError("long4 byte count < 0: %d" % n)
data = f.read(n)
if len(data) != n:
raise ValueError("not enough data in stream to read long4")
return decode_long(data)
long4 = ArgumentDescriptor(
name="long4",
n=TAKEN_FROM_ARGUMENT4,
reader=read_long4,
doc="""A binary representation of a long, little-endian.
This first reads four bytes as a signed size (but requires the
size to be >= 0), then reads that many bytes and interprets them
as a little-endian 2's-complement long. If the size is 0, that's taken
as a shortcut for the long 0L, although LONG1 should really be used
then instead (and in any case where # of bytes < 256).
""")
##############################################################################
# Object descriptors. The stack used by the pickle machine holds objects,
# and in the stack_before and stack_after attributes of OpcodeInfo
# descriptors we need names to describe the various types of objects that can
# appear on the stack.
class StackObject(object):
__slots__ = (
# name of descriptor record, for info only
'name',
# type of object, or tuple of type objects (meaning the object can
# be of any type in the tuple)
'obtype',
# human-readable docs for this kind of stack object; a string
'doc',
)
def __init__(self, name, obtype, doc):
assert isinstance(name, str)
self.name = name
assert isinstance(obtype, type) or isinstance(obtype, tuple)
if isinstance(obtype, tuple):
for contained in obtype:
assert isinstance(contained, type)
self.obtype = obtype
assert isinstance(doc, str)
self.doc = doc
def __repr__(self):
return self.name
pyint = StackObject(
name='int',
obtype=int,
doc="A short (as opposed to long) Python integer object.")
pylong = StackObject(
name='long',
obtype=long,
doc="A long (as opposed to short) Python integer object.")
pyinteger_or_bool = StackObject(
name='int_or_bool',
obtype=(int, long, bool),
doc="A Python integer object (short or long), or "
"a Python bool.")
pybool = StackObject(
name='bool',
obtype=(bool,),
doc="A Python bool object.")
pyfloat = StackObject(
name='float',
obtype=float,
doc="A Python float object.")
pystring = StackObject(
name='str',
obtype=str,
doc="A Python string object.")
pyunicode = StackObject(
name='unicode',
obtype=unicode,
doc="A Python Unicode string object.")
pynone = StackObject(
name="None",
obtype=type(None),
doc="The Python None object.")
pytuple = StackObject(
name="tuple",
obtype=tuple,
doc="A Python tuple object.")
pylist = StackObject(
name="list",
obtype=list,
doc="A Python list object.")
pydict = StackObject(
name="dict",
obtype=dict,
doc="A Python dict object.")
anyobject = StackObject(
name='any',
obtype=object,
doc="Any kind of object whatsoever.")
markobject = StackObject(
name="mark",
obtype=StackObject,
doc="""'The mark' is a unique object.
Opcodes that operate on a variable number of objects
generally don't embed the count of objects in the opcode,
or pull it off the stack. Instead the MARK opcode is used
to push a special marker object on the stack, and then
some other opcodes grab all the objects from the top of
the stack down to (but not including) the topmost marker
object.
""")
stackslice = StackObject(
name="stackslice",
obtype=StackObject,
doc="""An object representing a contiguous slice of the stack.
This is used in conjunction with markobject, to represent all
of the stack following the topmost markobject. For example,
the POP_MARK opcode changes the stack from
[..., markobject, stackslice]
to
[...]
No matter how many objects are on the stack after the topmost
markobject, POP_MARK gets rid of all of them (including the
topmost markobject too).
""")
##############################################################################
# Descriptors for pickle opcodes.
class OpcodeInfo(object):
__slots__ = (
# symbolic name of opcode; a string
'name',
# the code used in a bytestream to represent the opcode; a
# one-character string
'code',
# If the opcode has an argument embedded in the byte string, an
# instance of ArgumentDescriptor specifying its type. Note that
# arg.reader(s) can be used to read and decode the argument from
# the bytestream s, and arg.doc documents the format of the raw
# argument bytes. If the opcode doesn't have an argument embedded
# in the bytestream, arg should be None.
'arg',
# what the stack looks like before this opcode runs; a list
'stack_before',
# what the stack looks like after this opcode runs; a list
'stack_after',
# the protocol number in which this opcode was introduced; an int
'proto',
# human-readable docs for this opcode; a string
'doc',
)
def __init__(self, name, code, arg,
stack_before, stack_after, proto, doc):
assert isinstance(name, str)
self.name = name
assert isinstance(code, str)
assert len(code) == 1
self.code = code
assert arg is None or isinstance(arg, ArgumentDescriptor)
self.arg = arg
assert isinstance(stack_before, list)
for x in stack_before:
assert isinstance(x, StackObject)
self.stack_before = stack_before
assert isinstance(stack_after, list)
for x in stack_after:
assert isinstance(x, StackObject)
self.stack_after = stack_after
assert isinstance(proto, int) and 0 <= proto <= 2
self.proto = proto
assert isinstance(doc, str)
self.doc = doc
I = OpcodeInfo
opcodes = [
# Ways to spell integers.
I(name='INT',
code='I',
arg=decimalnl_short,
stack_before=[],
stack_after=[pyinteger_or_bool],
proto=0,
doc="""Push an integer or bool.
The argument is a newline-terminated decimal literal string.
The intent may have been that this always fit in a short Python int,
but INT can be generated in pickles written on a 64-bit box that
require a Python long on a 32-bit box. The difference between this
and LONG then is that INT skips a trailing 'L', and produces a short
int whenever possible.
Another difference is due to that, when bool was introduced as a
distinct type in 2.3, builtin names True and False were also added to
2.2.2, mapping to ints 1 and 0. For compatibility in both directions,
True gets pickled as INT + "I01\\n", and False as INT + "I00\\n".
Leading zeroes are never produced for a genuine integer. The 2.3
(and later) unpicklers special-case these and return bool instead;
earlier unpicklers ignore the leading "0" and return the int.
"""),
I(name='BININT',
code='J',
arg=int4,
stack_before=[],
stack_after=[pyint],
proto=1,
doc="""Push a four-byte signed integer.
This handles the full range of Python (short) integers on a 32-bit
box, directly as binary bytes (1 for the opcode and 4 for the integer).
If the integer is non-negative and fits in 1 or 2 bytes, pickling via
BININT1 or BININT2 saves space.
"""),
I(name='BININT1',
code='K',
arg=uint1,
stack_before=[],
stack_after=[pyint],
proto=1,
doc="""Push a one-byte unsigned integer.
This is a space optimization for pickling very small non-negative ints,
in range(256).
"""),
I(name='BININT2',
code='M',
arg=uint2,
stack_before=[],
stack_after=[pyint],
proto=1,
doc="""Push a two-byte unsigned integer.
This is a space optimization for pickling small positive ints, in
range(256, 2**16). Integers in range(256) can also be pickled via
BININT2, but BININT1 instead saves a byte.
"""),
I(name='LONG',
code='L',
arg=decimalnl_long,
stack_before=[],
stack_after=[pylong],
proto=0,
doc="""Push a long integer.
The same as INT, except that the literal ends with 'L', and always
unpickles to a Python long. There doesn't seem to be a real purpose
to the trailing 'L'.
Note that LONG takes time quadratic in the number of digits when
unpickling (this is simply due to the nature of decimal->binary
conversion). Proto 2 added linear-time (in C; still quadratic-time
in Python) LONG1 and LONG4 opcodes.
"""),
I(name="LONG1",
code='\x8a',
arg=long1,
stack_before=[],
stack_after=[pylong],
proto=2,
doc="""Long integer using one-byte length.
A more efficient encoding of a Python long; the long1 encoding
says it all."""),
I(name="LONG4",
code='\x8b',
arg=long4,
stack_before=[],
stack_after=[pylong],
proto=2,
doc="""Long integer using found-byte length.
A more efficient encoding of a Python long; the long4 encoding
says it all."""),
# Ways to spell strings (8-bit, not Unicode).
I(name='STRING',
code='S',
arg=stringnl,
stack_before=[],
stack_after=[pystring],
proto=0,
doc="""Push a Python string object.
The argument is a repr-style string, with bracketing quote characters,
and perhaps embedded escapes. The argument extends until the next
newline character.
"""),
I(name='BINSTRING',
code='T',
arg=string4,
stack_before=[],
stack_after=[pystring],
proto=1,
doc="""Push a Python string object.
There are two arguments: the first is a 4-byte little-endian signed int
giving the number of bytes in the string, and the second is that many
bytes, which are taken literally as the string content.
"""),
I(name='SHORT_BINSTRING',
code='U',
arg=string1,
stack_before=[],
stack_after=[pystring],
proto=1,
doc="""Push a Python string object.
There are two arguments: the first is a 1-byte unsigned int giving
the number of bytes in the string, and the second is that many bytes,
which are taken literally as the string content.
"""),
# Ways to spell None.
I(name='NONE',
code='N',
arg=None,
stack_before=[],
stack_after=[pynone],
proto=0,
doc="Push None on the stack."),
# Ways to spell bools, starting with proto 2. See INT for how this was
# done before proto 2.
I(name='NEWTRUE',
code='\x88',
arg=None,
stack_before=[],
stack_after=[pybool],
proto=2,
doc="""True.
Push True onto the stack."""),
I(name='NEWFALSE',
code='\x89',
arg=None,
stack_before=[],
stack_after=[pybool],
proto=2,
doc="""True.
Push False onto the stack."""),
# Ways to spell Unicode strings.
I(name='UNICODE',
code='V',
arg=unicodestringnl,
stack_before=[],
stack_after=[pyunicode],
proto=0, # this may be pure-text, but it's a later addition
doc="""Push a Python Unicode string object.
The argument is a raw-unicode-escape encoding of a Unicode string,
and so may contain embedded escape sequences. The argument extends
until the next newline character.
"""),
I(name='BINUNICODE',
code='X',
arg=unicodestring4,
stack_before=[],
stack_after=[pyunicode],
proto=1,
doc="""Push a Python Unicode string object.
There are two arguments: the first is a 4-byte little-endian signed int
giving the number of bytes in the string. The second is that many
bytes, and is the UTF-8 encoding of the Unicode string.
"""),
# Ways to spell floats.
I(name='FLOAT',
code='F',
arg=floatnl,
stack_before=[],
stack_after=[pyfloat],
proto=0,
doc="""Newline-terminated decimal float literal.
The argument is repr(a_float), and in general requires 17 significant
digits for roundtrip conversion to be an identity (this is so for
IEEE-754 double precision values, which is what Python float maps to
on most boxes).
In general, FLOAT cannot be used to transport infinities, NaNs, or
minus zero across boxes (or even on a single box, if the platform C
library can't read the strings it produces for such things -- Windows
is like that), but may do less damage than BINFLOAT on boxes with
greater precision or dynamic range than IEEE-754 double.
"""),
I(name='BINFLOAT',
code='G',
arg=float8,
stack_before=[],
stack_after=[pyfloat],
proto=1,
doc="""Float stored in binary form, with 8 bytes of data.
This generally requires less than half the space of FLOAT encoding.
In general, BINFLOAT cannot be used to transport infinities, NaNs, or
minus zero, raises an exception if the exponent exceeds the range of
an IEEE-754 double, and retains no more than 53 bits of precision (if
there are more than that, "add a half and chop" rounding is used to
cut it back to 53 significant bits).
"""),
# Ways to build lists.
I(name='EMPTY_LIST',
code=']',
arg=None,
stack_before=[],
stack_after=[pylist],
proto=1,
doc="Push an empty list."),
I(name='APPEND',
code='a',
arg=None,
stack_before=[pylist, anyobject],
stack_after=[pylist],
proto=0,
doc="""Append an object to a list.
Stack before: ... pylist anyobject
Stack after: ... pylist+[anyobject]
although pylist is really extended in-place.
"""),
I(name='APPENDS',
code='e',
arg=None,
stack_before=[pylist, markobject, stackslice],
stack_after=[pylist],
proto=1,
doc="""Extend a list by a slice of stack objects.
Stack before: ... pylist markobject stackslice
Stack after: ... pylist+stackslice
although pylist is really extended in-place.
"""),
I(name='LIST',
code='l',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[pylist],
proto=0,
doc="""Build a list out of the topmost stack slice, after markobject.
All the stack entries following the topmost markobject are placed into
a single Python list, which single list object replaces all of the
stack from the topmost markobject onward. For example,
Stack before: ... markobject 1 2 3 'abc'
Stack after: ... [1, 2, 3, 'abc']
"""),
# Ways to build tuples.
I(name='EMPTY_TUPLE',
code=')',
arg=None,
stack_before=[],
stack_after=[pytuple],
proto=1,
doc="Push an empty tuple."),
I(name='TUPLE',
code='t',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[pytuple],
proto=0,
doc="""Build a tuple out of the topmost stack slice, after markobject.
All the stack entries following the topmost markobject are placed into
a single Python tuple, which single tuple object replaces all of the
stack from the topmost markobject onward. For example,
Stack before: ... markobject 1 2 3 'abc'
Stack after: ... (1, 2, 3, 'abc')
"""),
I(name='TUPLE1',
code='\x85',
arg=None,
stack_before=[anyobject],
stack_after=[pytuple],
proto=2,
doc="""Build a one-tuple out of the topmost item on the stack.
This code pops one value off the stack and pushes a tuple of
length 1 whose one item is that value back onto it. In other
words:
stack[-1] = tuple(stack[-1:])
"""),
I(name='TUPLE2',
code='\x86',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[pytuple],
proto=2,
doc="""Build a two-tuple out of the top two items on the stack.
This code pops two values off the stack and pushes a tuple of
length 2 whose items are those values back onto it. In other
words:
stack[-2:] = [tuple(stack[-2:])]
"""),
I(name='TUPLE3',
code='\x87',
arg=None,
stack_before=[anyobject, anyobject, anyobject],
stack_after=[pytuple],
proto=2,
doc="""Build a three-tuple out of the top three items on the stack.
This code pops three values off the stack and pushes a tuple of
length 3 whose items are those values back onto it. In other
words:
stack[-3:] = [tuple(stack[-3:])]
"""),
# Ways to build dicts.
I(name='EMPTY_DICT',
code='}',
arg=None,
stack_before=[],
stack_after=[pydict],
proto=1,
doc="Push an empty dict."),
I(name='DICT',
code='d',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[pydict],
proto=0,
doc="""Build a dict out of the topmost stack slice, after markobject.
All the stack entries following the topmost markobject are placed into
a single Python dict, which single dict object replaces all of the
stack from the topmost markobject onward. The stack slice alternates
key, value, key, value, .... For example,
Stack before: ... markobject 1 2 3 'abc'
Stack after: ... {1: 2, 3: 'abc'}
"""),
I(name='SETITEM',
code='s',
arg=None,
stack_before=[pydict, anyobject, anyobject],
stack_after=[pydict],
proto=0,
doc="""Add a key+value pair to an existing dict.
Stack before: ... pydict key value
Stack after: ... pydict
where pydict has been modified via pydict[key] = value.
"""),
I(name='SETITEMS',
code='u',
arg=None,
stack_before=[pydict, markobject, stackslice],
stack_after=[pydict],
proto=1,
doc="""Add an arbitrary number of key+value pairs to an existing dict.
The slice of the stack following the topmost markobject is taken as
an alternating sequence of keys and values, added to the dict
immediately under the topmost markobject. Everything at and after the
topmost markobject is popped, leaving the mutated dict at the top
of the stack.
Stack before: ... pydict markobject key_1 value_1 ... key_n value_n
Stack after: ... pydict
where pydict has been modified via pydict[key_i] = value_i for i in
1, 2, ..., n, and in that order.
"""),
# Stack manipulation.
I(name='POP',
code='0',
arg=None,
stack_before=[anyobject],
stack_after=[],
proto=0,
doc="Discard the top stack item, shrinking the stack by one item."),
I(name='DUP',
code='2',
arg=None,
stack_before=[anyobject],
stack_after=[anyobject, anyobject],
proto=0,
doc="Push the top stack item onto the stack again, duplicating it."),
I(name='MARK',
code='(',
arg=None,
stack_before=[],
stack_after=[markobject],
proto=0,
doc="""Push markobject onto the stack.
markobject is a unique object, used by other opcodes to identify a
region of the stack containing a variable number of objects for them
to work on. See markobject.doc for more detail.
"""),
I(name='POP_MARK',
code='1',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[],
proto=1,
doc="""Pop all the stack objects at and above the topmost markobject.
When an opcode using a variable number of stack objects is done,
POP_MARK is used to remove those objects, and to remove the markobject
that delimited their starting position on the stack.
"""),
# Memo manipulation. There are really only two operations (get and put),
# each in all-text, "short binary", and "long binary" flavors.
I(name='GET',
code='g',
arg=decimalnl_short,
stack_before=[],
stack_after=[anyobject],
proto=0,
doc="""Read an object from the memo and push it on the stack.
The index of the memo object to push is given by the newline-terminated
decimal string following. BINGET and LONG_BINGET are space-optimized
versions.
"""),
I(name='BINGET',
code='h',
arg=uint1,
stack_before=[],
stack_after=[anyobject],
proto=1,
doc="""Read an object from the memo and push it on the stack.
The index of the memo object to push is given by the 1-byte unsigned
integer following.
"""),
I(name='LONG_BINGET',
code='j',
arg=int4,
stack_before=[],
stack_after=[anyobject],
proto=1,
doc="""Read an object from the memo and push it on the stack.
The index of the memo object to push is given by the 4-byte signed
little-endian integer following.
"""),
I(name='PUT',
code='p',
arg=decimalnl_short,
stack_before=[],
stack_after=[],
proto=0,
doc="""Store the stack top into the memo. The stack is not popped.
The index of the memo location to write into is given by the newline-
terminated decimal string following. BINPUT and LONG_BINPUT are
space-optimized versions.
"""),
I(name='BINPUT',
code='q',
arg=uint1,
stack_before=[],
stack_after=[],
proto=1,
doc="""Store the stack top into the memo. The stack is not popped.
The index of the memo location to write into is given by the 1-byte
unsigned integer following.
"""),
I(name='LONG_BINPUT',
code='r',
arg=int4,
stack_before=[],
stack_after=[],
proto=1,
doc="""Store the stack top into the memo. The stack is not popped.
The index of the memo location to write into is given by the 4-byte
signed little-endian integer following.
"""),
# Access the extension registry (predefined objects). Akin to the GET
# family.
I(name='EXT1',
code='\x82',
arg=uint1,
stack_before=[],
stack_after=[anyobject],
proto=2,
doc="""Extension code.
This code and the similar EXT2 and EXT4 allow using a registry
of popular objects that are pickled by name, typically classes.
It is envisioned that through a global negotiation and
registration process, third parties can set up a mapping between
ints and object names.
In order to guarantee pickle interchangeability, the extension
code registry ought to be global, although a range of codes may
be reserved for private use.
EXT1 has a 1-byte integer argument. This is used to index into the
extension registry, and the object at that index is pushed on the stack.
"""),
I(name='EXT2',
code='\x83',
arg=uint2,
stack_before=[],
stack_after=[anyobject],
proto=2,
doc="""Extension code.
See EXT1. EXT2 has a two-byte integer argument.
"""),
I(name='EXT4',
code='\x84',
arg=int4,
stack_before=[],
stack_after=[anyobject],
proto=2,
doc="""Extension code.
See EXT1. EXT4 has a four-byte integer argument.
"""),
# Push a class object, or module function, on the stack, via its module
# and name.
I(name='GLOBAL',
code='c',
arg=stringnl_noescape_pair,
stack_before=[],
stack_after=[anyobject],
proto=0,
doc="""Push a global object (module.attr) on the stack.
Two newline-terminated strings follow the GLOBAL opcode. The first is
taken as a module name, and the second as a class name. The class
object module.class is pushed on the stack. More accurately, the
object returned by self.find_class(module, class) is pushed on the
stack, so unpickling subclasses can override this form of lookup.
"""),
# Ways to build objects of classes pickle doesn't know about directly
# (user-defined classes). I despair of documenting this accurately
# and comprehensibly -- you really have to read the pickle code to
# find all the special cases.
I(name='REDUCE',
code='R',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[anyobject],
proto=0,
doc="""Push an object built from a callable and an argument tuple.
The opcode is named to remind of the __reduce__() method.
Stack before: ... callable pytuple
Stack after: ... callable(*pytuple)
The callable and the argument tuple are the first two items returned
by a __reduce__ method. Applying the callable to the argtuple is
supposed to reproduce the original object, or at least get it started.
If the __reduce__ method returns a 3-tuple, the last component is an
argument to be passed to the object's __setstate__, and then the REDUCE
opcode is followed by code to create setstate's argument, and then a
BUILD opcode to apply __setstate__ to that argument.
If type(callable) is not ClassType, REDUCE complains unless the
callable has been registered with the copy_reg module's
safe_constructors dict, or the callable has a magic
'__safe_for_unpickling__' attribute with a true value. I'm not sure
why it does this, but I've sure seen this complaint often enough when
I didn't want to <wink>.
"""),
I(name='BUILD',
code='b',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[anyobject],
proto=0,
doc="""Finish building an object, via __setstate__ or dict update.
Stack before: ... anyobject argument
Stack after: ... anyobject
where anyobject may have been mutated, as follows:
If the object has a __setstate__ method,
anyobject.__setstate__(argument)
is called.
Else the argument must be a dict, the object must have a __dict__, and
the object is updated via
anyobject.__dict__.update(argument)
This may raise RuntimeError in restricted execution mode (which
disallows access to __dict__ directly); in that case, the object
is updated instead via
for k, v in argument.items():
anyobject[k] = v
"""),
I(name='INST',
code='i',
arg=stringnl_noescape_pair,
stack_before=[markobject, stackslice],
stack_after=[anyobject],
proto=0,
doc="""Build a class instance.
This is the protocol 0 version of protocol 1's OBJ opcode.
INST is followed by two newline-terminated strings, giving a
module and class name, just as for the GLOBAL opcode (and see
GLOBAL for more details about that). self.find_class(module, name)
is used to get a class object.
In addition, all the objects on the stack following the topmost
markobject are gathered into a tuple and popped (along with the
topmost markobject), just as for the TUPLE opcode.
Now it gets complicated. If all of these are true:
+ The argtuple is empty (markobject was at the top of the stack
at the start).
+ It's an old-style class object (the type of the class object is
ClassType).
+ The class object does not have a __getinitargs__ attribute.
then we want to create an old-style class instance without invoking
its __init__() method (pickle has waffled on this over the years; not
calling __init__() is current wisdom). In this case, an instance of
an old-style dummy class is created, and then we try to rebind its
__class__ attribute to the desired class object. If this succeeds,
the new instance object is pushed on the stack, and we're done. In
restricted execution mode it can fail (assignment to __class__ is
disallowed), and I'm not really sure what happens then -- it looks
like the code ends up calling the class object's __init__ anyway,
via falling into the next case.
Else (the argtuple is not empty, it's not an old-style class object,
or the class object does have a __getinitargs__ attribute), the code
first insists that the class object have a __safe_for_unpickling__
attribute. Unlike as for the __safe_for_unpickling__ check in REDUCE,
it doesn't matter whether this attribute has a true or false value, it
only matters whether it exists (XXX this is a bug; cPickle
requires the attribute to be true). If __safe_for_unpickling__
doesn't exist, UnpicklingError is raised.
Else (the class object does have a __safe_for_unpickling__ attr),
the class object obtained from INST's arguments is applied to the
argtuple obtained from the stack, and the resulting instance object
is pushed on the stack.
NOTE: checks for __safe_for_unpickling__ went away in Python 2.3.
"""),
I(name='OBJ',
code='o',
arg=None,
stack_before=[markobject, anyobject, stackslice],
stack_after=[anyobject],
proto=1,
doc="""Build a class instance.
This is the protocol 1 version of protocol 0's INST opcode, and is
very much like it. The major difference is that the class object
is taken off the stack, allowing it to be retrieved from the memo
repeatedly if several instances of the same class are created. This
can be much more efficient (in both time and space) than repeatedly
embedding the module and class names in INST opcodes.
Unlike INST, OBJ takes no arguments from the opcode stream. Instead
the class object is taken off the stack, immediately above the
topmost markobject:
Stack before: ... markobject classobject stackslice
Stack after: ... new_instance_object
As for INST, the remainder of the stack above the markobject is
gathered into an argument tuple, and then the logic seems identical,
except that no __safe_for_unpickling__ check is done (XXX this is
a bug; cPickle does test __safe_for_unpickling__). See INST for
the gory details.
NOTE: In Python 2.3, INST and OBJ are identical except for how they
get the class object. That was always the intent; the implementations
had diverged for accidental reasons.
"""),
I(name='NEWOBJ',
code='\x81',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[anyobject],
proto=2,
doc="""Build an object instance.
The stack before should be thought of as containing a class
object followed by an argument tuple (the tuple being the stack
top). Call these cls and args. They are popped off the stack,
and the value returned by cls.__new__(cls, *args) is pushed back
onto the stack.
"""),
# Machine control.
I(name='PROTO',
code='\x80',
arg=uint1,
stack_before=[],
stack_after=[],
proto=2,
doc="""Protocol version indicator.
For protocol 2 and above, a pickle must start with this opcode.
The argument is the protocol version, an int in range(2, 256).
"""),
I(name='STOP',
code='.',
arg=None,
stack_before=[anyobject],
stack_after=[],
proto=0,
doc="""Stop the unpickling machine.
Every pickle ends with this opcode. The object at the top of the stack
is popped, and that's the result of unpickling. The stack should be
empty then.
"""),
# Ways to deal with persistent IDs.
I(name='PERSID',
code='P',
arg=stringnl_noescape,
stack_before=[],
stack_after=[anyobject],
proto=0,
doc="""Push an object identified by a persistent ID.
The pickle module doesn't define what a persistent ID means. PERSID's
argument is a newline-terminated str-style (no embedded escapes, no
bracketing quote characters) string, which *is* "the persistent ID".
The unpickler passes this string to self.persistent_load(). Whatever
object that returns is pushed on the stack. There is no implementation
of persistent_load() in Python's unpickler: it must be supplied by an
unpickler subclass.
"""),
I(name='BINPERSID',
code='Q',
arg=None,
stack_before=[anyobject],
stack_after=[anyobject],
proto=1,
doc="""Push an object identified by a persistent ID.
Like PERSID, except the persistent ID is popped off the stack (instead
of being a string embedded in the opcode bytestream). The persistent
ID is passed to self.persistent_load(), and whatever object that
returns is pushed on the stack. See PERSID for more detail.
"""),
]
del I
# Verify uniqueness of .name and .code members.
name2i = {}
code2i = {}
for i, d in enumerate(opcodes):
if d.name in name2i:
raise ValueError("repeated name %r at indices %d and %d" %
(d.name, name2i[d.name], i))
if d.code in code2i:
raise ValueError("repeated code %r at indices %d and %d" %
(d.code, code2i[d.code], i))
name2i[d.name] = i
code2i[d.code] = i
del name2i, code2i, i, d
##############################################################################
# Build a code2op dict, mapping opcode characters to OpcodeInfo records.
# Also ensure we've got the same stuff as pickle.py, although the
# introspection here is dicey.
code2op = {}
for d in opcodes:
code2op[d.code] = d
del d
def assure_pickle_consistency(verbose=False):
import pickle, re
copy = code2op.copy()
for name in pickle.__all__:
if not re.match("[A-Z][A-Z0-9_]+$", name):
if verbose:
print "skipping %r: it doesn't look like an opcode name" % name
continue
picklecode = getattr(pickle, name)
if not isinstance(picklecode, str) or len(picklecode) != 1:
if verbose:
print ("skipping %r: value %r doesn't look like a pickle "
"code" % (name, picklecode))
continue
if picklecode in copy:
if verbose:
print "checking name %r w/ code %r for consistency" % (
name, picklecode)
d = copy[picklecode]
if d.name != name:
raise ValueError("for pickle code %r, pickle.py uses name %r "
"but we're using name %r" % (picklecode,
name,
d.name))
# Forget this one. Any left over in copy at the end are a problem
# of a different kind.
del copy[picklecode]
else:
raise ValueError("pickle.py appears to have a pickle opcode with "
"name %r and code %r, but we don't" %
(name, picklecode))
if copy:
msg = ["we appear to have pickle opcodes that pickle.py doesn't have:"]
for code, d in copy.items():
msg.append(" name %r with code %r" % (d.name, code))
raise ValueError("\n".join(msg))
assure_pickle_consistency()
del assure_pickle_consistency
##############################################################################
# A pickle opcode generator.
def genops(pickle):
"""Generate all the opcodes in a pickle.
'pickle' is a file-like object, or string, containing the pickle.
Each opcode in the pickle is generated, from the current pickle position,
stopping after a STOP opcode is delivered. A triple is generated for
each opcode:
opcode, arg, pos
opcode is an OpcodeInfo record, describing the current opcode.
If the opcode has an argument embedded in the pickle, arg is its decoded
value, as a Python object. If the opcode doesn't have an argument, arg
is None.
If the pickle has a tell() method, pos was the value of pickle.tell()
before reading the current opcode. If the pickle is a string object,
it's wrapped in a StringIO object, and the latter's tell() result is
used. Else (the pickle doesn't have a tell(), and it's not obvious how
to query its current position) pos is None.
"""
import cStringIO as StringIO
if isinstance(pickle, str):
pickle = StringIO.StringIO(pickle)
if hasattr(pickle, "tell"):
getpos = pickle.tell
else:
getpos = lambda: None
while True:
pos = getpos()
code = pickle.read(1)
opcode = code2op.get(code)
if opcode is None:
if code == "":
raise ValueError("pickle exhausted before seeing STOP")
else:
raise ValueError("at position %s, opcode %r unknown" % (
pos is None and "<unknown>" or pos,
code))
if opcode.arg is None:
arg = None
else:
arg = opcode.arg.reader(pickle)
yield opcode, arg, pos
if code == '.':
assert opcode.name == 'STOP'
break
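# A small usage sketch: the two-byte pickle 'N.' (NONE, then STOP) yields two
# triples, after which generation stops.
#
# >>> for opcode, arg, pos in genops('N.'):
# ...     print opcode.name, arg, pos
# NONE None 0
# STOP None 1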
##############################################################################
# A pickle optimizer.
def optimize(p):
'Optimize a pickle string by removing unused PUT opcodes'
gets = set() # set of args used by a GET opcode
puts = [] # (arg, startpos, stoppos) for the PUT opcodes
prevpos = None # set to pos if previous opcode was a PUT
for opcode, arg, pos in genops(p):
if prevpos is not None:
puts.append((prevarg, prevpos, pos))
prevpos = None
if 'PUT' in opcode.name:
prevarg, prevpos = arg, pos
elif 'GET' in opcode.name:
gets.add(arg)
# Copy the pickle string except for PUTS without a corresponding GET
s = []
i = 0
for arg, start, stop in puts:
j = stop if (arg in gets) else start
s.append(p[i:j])
i = stop
s.append(p[i:])
return ''.join(s)
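# A usage sketch: a protocol 0 list pickle contains a PUT that nothing GETs,
# so optimize() can drop it.
#
# >>> import pickle
# >>> pickle.dumps([1], 0)
# '(lp0\nI1\na.'
# >>> optimize('(lp0\nI1\na.')
# '(lI1\na.'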
##############################################################################
# A symbolic pickle disassembler.
def dis(pickle, out=None, memo=None, indentlevel=4):
"""Produce a symbolic disassembly of a pickle.
'pickle' is a file-like object, or string, containing at least one
pickle. The pickle is disassembled from the current position, through
the first STOP opcode encountered.
Optional arg 'out' is a file-like object to which the disassembly is
printed. It defaults to sys.stdout.
Optional arg 'memo' is a Python dict, used as the pickle's memo. It
may be mutated by dis(), if the pickle contains PUT or BINPUT opcodes.
Passing the same memo object to another dis() call then allows disassembly
to proceed across multiple pickles that were all created by the same
pickler with the same memo. Ordinarily you don't need to worry about this.
Optional arg indentlevel is the number of blanks by which to indent
a new MARK level. It defaults to 4.
In addition to printing the disassembly, some sanity checks are made:
+ All embedded opcode arguments "make sense".
+ Explicit and implicit pop operations have enough items on the stack.
+ When an opcode implicitly refers to a markobject, a markobject is
actually on the stack.
+ A memo entry isn't referenced before it's defined.
+ The markobject isn't stored in the memo.
+ A memo entry isn't redefined.
"""
# Most of the hair here is for sanity checks, but most of it is needed
# anyway to detect when a protocol 0 POP takes a MARK off the stack
# (which in turn is needed to indent MARK blocks correctly).
stack = [] # crude emulation of unpickler stack
if memo is None:
memo = {} # crude emulation of unpickler memo
maxproto = -1 # max protocol number seen
markstack = [] # bytecode positions of MARK opcodes
indentchunk = ' ' * indentlevel
errormsg = None
for opcode, arg, pos in genops(pickle):
if pos is not None:
print >> out, "%5d:" % pos,
line = "%-4s %s%s" % (repr(opcode.code)[1:-1],
indentchunk * len(markstack),
opcode.name)
maxproto = max(maxproto, opcode.proto)
before = opcode.stack_before # don't mutate
after = opcode.stack_after # don't mutate
numtopop = len(before)
# See whether a MARK should be popped.
markmsg = None
if markobject in before or (opcode.name == "POP" and
stack and
stack[-1] is markobject):
assert markobject not in after
if __debug__:
if markobject in before:
assert before[-1] is stackslice
if markstack:
markpos = markstack.pop()
if markpos is None:
markmsg = "(MARK at unknown opcode offset)"
else:
markmsg = "(MARK at %d)" % markpos
# Pop everything at and after the topmost markobject.
while stack[-1] is not markobject:
stack.pop()
stack.pop()
# Stop later code from popping too much.
try:
numtopop = before.index(markobject)
except ValueError:
assert opcode.name == "POP"
numtopop = 0
else:
errormsg = markmsg = "no MARK exists on stack"
# Check for correct memo usage.
if opcode.name in ("PUT", "BINPUT", "LONG_BINPUT"):
assert arg is not None
if arg in memo:
errormsg = "memo key %r already defined" % arg
elif not stack:
errormsg = "stack is empty -- can't store into memo"
elif stack[-1] is markobject:
errormsg = "can't store markobject in the memo"
else:
memo[arg] = stack[-1]
elif opcode.name in ("GET", "BINGET", "LONG_BINGET"):
if arg in memo:
assert len(after) == 1
after = [memo[arg]] # for better stack emulation
else:
errormsg = "memo key %r has never been stored into" % arg
if arg is not None or markmsg:
# make a mild effort to align arguments
line += ' ' * (10 - len(opcode.name))
if arg is not None:
line += ' ' + repr(arg)
if markmsg:
line += ' ' + markmsg
print >> out, line
if errormsg:
# Note that we delayed complaining until the offending opcode
# was printed.
raise ValueError(errormsg)
# Emulate the stack effects.
if len(stack) < numtopop:
raise ValueError("tries to pop %d items from stack with "
"only %d items" % (numtopop, len(stack)))
if numtopop:
del stack[-numtopop:]
if markobject in after:
assert markobject not in before
markstack.append(pos)
stack.extend(after)
print >> out, "highest protocol among opcodes =", maxproto
if stack:
raise ValueError("stack not empty after STOP: %r" % stack)
# For use in the doctest, simply as an example of a class to pickle.
class _Example:
def __init__(self, value):
self.value = value
_dis_test = r"""
>>> import pickle
>>> x = [1, 2, (3, 4), {'abc': u"def"}]
>>> pkl = pickle.dumps(x, 0)
>>> dis(pkl)
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: I INT 1
8: a APPEND
9: I INT 2
12: a APPEND
13: ( MARK
14: I INT 3
17: I INT 4
20: t TUPLE (MARK at 13)
21: p PUT 1
24: a APPEND
25: ( MARK
26: d DICT (MARK at 25)
27: p PUT 2
30: S STRING 'abc'
37: p PUT 3
40: V UNICODE u'def'
45: p PUT 4
48: s SETITEM
49: a APPEND
50: . STOP
highest protocol among opcodes = 0
Try again with a "binary" pickle.
>>> pkl = pickle.dumps(x, 1)
>>> dis(pkl)
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: K BININT1 1
6: K BININT1 2
8: ( MARK
9: K BININT1 3
11: K BININT1 4
13: t TUPLE (MARK at 8)
14: q BINPUT 1
16: } EMPTY_DICT
17: q BINPUT 2
19: U SHORT_BINSTRING 'abc'
24: q BINPUT 3
26: X BINUNICODE u'def'
34: q BINPUT 4
36: s SETITEM
37: e APPENDS (MARK at 3)
38: . STOP
highest protocol among opcodes = 1
Exercise the INST/OBJ/BUILD family.
>>> import pickletools
>>> dis(pickle.dumps(pickletools.dis, 0))
0: c GLOBAL 'pickletools dis'
17: p PUT 0
20: . STOP
highest protocol among opcodes = 0
>>> from pickletools import _Example
>>> x = [_Example(42)] * 2
>>> dis(pickle.dumps(x, 0))
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: ( MARK
6: i INST 'pickletools _Example' (MARK at 5)
28: p PUT 1
31: ( MARK
32: d DICT (MARK at 31)
33: p PUT 2
36: S STRING 'value'
45: p PUT 3
48: I INT 42
52: s SETITEM
53: b BUILD
54: a APPEND
55: g GET 1
58: a APPEND
59: . STOP
highest protocol among opcodes = 0
>>> dis(pickle.dumps(x, 1))
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: ( MARK
5: c GLOBAL 'pickletools _Example'
27: q BINPUT 1
29: o OBJ (MARK at 4)
30: q BINPUT 2
32: } EMPTY_DICT
33: q BINPUT 3
35: U SHORT_BINSTRING 'value'
42: q BINPUT 4
44: K BININT1 42
46: s SETITEM
47: b BUILD
48: h BINGET 2
50: e APPENDS (MARK at 3)
51: . STOP
highest protocol among opcodes = 1
Try "the canonical" recursive-object test.
>>> L = []
>>> T = L,
>>> L.append(T)
>>> L[0] is T
True
>>> T[0] is L
True
>>> L[0][0] is L
True
>>> T[0][0] is T
True
>>> dis(pickle.dumps(L, 0))
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: ( MARK
6: g GET 0
9: t TUPLE (MARK at 5)
10: p PUT 1
13: a APPEND
14: . STOP
highest protocol among opcodes = 0
>>> dis(pickle.dumps(L, 1))
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: h BINGET 0
6: t TUPLE (MARK at 3)
7: q BINPUT 1
9: a APPEND
10: . STOP
highest protocol among opcodes = 1
Note that, in the protocol 0 pickle of the recursive tuple, the disassembler
has to emulate the stack in order to realize that the POP opcode at 16 gets
rid of the MARK at 0.
>>> dis(pickle.dumps(T, 0))
0: ( MARK
1: ( MARK
2: l LIST (MARK at 1)
3: p PUT 0
6: ( MARK
7: g GET 0
10: t TUPLE (MARK at 6)
11: p PUT 1
14: a APPEND
15: 0 POP
16: 0 POP (MARK at 0)
17: g GET 1
20: . STOP
highest protocol among opcodes = 0
>>> dis(pickle.dumps(T, 1))
0: ( MARK
1: ] EMPTY_LIST
2: q BINPUT 0
4: ( MARK
5: h BINGET 0
7: t TUPLE (MARK at 4)
8: q BINPUT 1
10: a APPEND
11: 1 POP_MARK (MARK at 0)
12: h BINGET 1
14: . STOP
highest protocol among opcodes = 1
Try protocol 2.
>>> dis(pickle.dumps(L, 2))
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: h BINGET 0
7: \x85 TUPLE1
8: q BINPUT 1
10: a APPEND
11: . STOP
highest protocol among opcodes = 2
>>> dis(pickle.dumps(T, 2))
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: h BINGET 0
7: \x85 TUPLE1
8: q BINPUT 1
10: a APPEND
11: 0 POP
12: h BINGET 1
14: . STOP
highest protocol among opcodes = 2
"""
_memo_test = r"""
>>> import pickle
>>> from StringIO import StringIO
>>> f = StringIO()
>>> p = pickle.Pickler(f, 2)
>>> x = [1, 2, 3]
>>> p.dump(x)
>>> p.dump(x)
>>> f.seek(0)
>>> memo = {}
>>> dis(f, memo=memo)
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: ( MARK
6: K BININT1 1
8: K BININT1 2
10: K BININT1 3
12: e APPENDS (MARK at 5)
13: . STOP
highest protocol among opcodes = 2
>>> dis(f, memo=memo)
14: \x80 PROTO 2
16: h BINGET 0
18: . STOP
highest protocol among opcodes = 2
"""
__test__ = {'disassembler_test': _dis_test,
'disassembler_memo_test': _memo_test,
}
def _test():
import doctest
return doctest.testmod()
if __name__ == "__main__":
_test()
| gpl-3.0 |
russellb/nova | nova/tests/test_network.py | 1 | 66400 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Rackspace
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mox
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
import nova.policy
from nova import rpc
from nova import test
from nova import utils
from nova.network import manager as network_manager
from nova.tests import fake_network
LOG = logging.getLogger(__name__)
HOST = "testhost"
networks = [{'id': 0,
'uuid': "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
'label': 'test0',
'injected': False,
'multi_host': False,
'cidr': '192.168.0.0/24',
'cidr_v6': '2001:db8::/64',
'gateway_v6': '2001:db8::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'fa0',
'bridge_interface': 'fake_fa0',
'gateway': '192.168.0.1',
'broadcast': '192.168.0.255',
'dns1': '192.168.0.1',
'dns2': '192.168.0.2',
'vlan': None,
'host': HOST,
'project_id': 'fake_project',
'vpn_public_address': '192.168.0.2'},
{'id': 1,
'uuid': "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb",
'label': 'test1',
'injected': False,
'multi_host': False,
'cidr': '192.168.1.0/24',
'cidr_v6': '2001:db9::/64',
'gateway_v6': '2001:db9::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'fa1',
'bridge_interface': 'fake_fa1',
'gateway': '192.168.1.1',
'broadcast': '192.168.1.255',
'dns1': '192.168.0.1',
'dns2': '192.168.0.2',
'vlan': None,
'host': HOST,
'project_id': 'fake_project',
'vpn_public_address': '192.168.1.2'}]
fixed_ips = [{'id': 0,
'network_id': 0,
'address': '192.168.0.100',
'instance_id': 0,
'allocated': False,
'virtual_interface_id': 0,
'floating_ips': []},
{'id': 0,
'network_id': 1,
'address': '192.168.1.100',
'instance_id': 0,
'allocated': False,
'virtual_interface_id': 0,
'floating_ips': []}]
flavor = {'id': 0,
'rxtx_cap': 3}
floating_ip_fields = {'id': 0,
'address': '192.168.10.100',
'pool': 'nova',
'interface': 'eth0',
'fixed_ip_id': 0,
'project_id': None,
'auto_assigned': False}
vifs = [{'id': 0,
'address': 'DE:AD:BE:EF:00:00',
'uuid': '00000000-0000-0000-0000-0000000000000000',
'network_id': 0,
'instance_id': 0},
{'id': 1,
'address': 'DE:AD:BE:EF:00:01',
'uuid': '00000000-0000-0000-0000-0000000000000001',
'network_id': 1,
'instance_id': 0},
{'id': 2,
'address': 'DE:AD:BE:EF:00:02',
'uuid': '00000000-0000-0000-0000-0000000000000002',
'network_id': 2,
'instance_id': 0}]
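# Illustrative sketch (editor-added, not part of the original test module):
# the test cases below share mox's record/replay/verify protocol -- stub out
# a callable, record the expected calls with canned return values, switch to
# replay mode, exercise the code under test, then verify. A minimal
# standalone version using the db module and fixtures above:
def _mox_pattern_example():
    m = mox.Mox()
    m.StubOutWithMock(db, 'network_get')
    db.network_get(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(networks[0])
    m.ReplayAll()
    assert db.network_get(None, 0) == networks[0]
    m.VerifyAll()
    m.UnsetStubs()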
class FlatNetworkTestCase(test.TestCase):
def setUp(self):
super(FlatNetworkTestCase, self).setUp()
self.network = network_manager.FlatManager(host=HOST)
temp = utils.import_object('nova.network.minidns.MiniDNS')
self.network.instance_dns_manager = temp
self.network.instance_dns_domain = ''
self.network.db = db
self.context = context.RequestContext('testuser', 'testproject',
is_admin=False)
def tearDown(self):
super(FlatNetworkTestCase, self).tearDown()
self.network.instance_dns_manager.delete_dns_file()
def test_get_instance_nw_info(self):
fake_get_instance_nw_info = fake_network.fake_get_instance_nw_info
nw_info = fake_get_instance_nw_info(self.stubs, 0, 2)
self.assertFalse(nw_info)
nw_info = fake_get_instance_nw_info(self.stubs, 1, 2)
for i, (nw, info) in enumerate(nw_info):
check = {'bridge': 'fake_br%d' % i,
'cidr': '192.168.%s.0/24' % i,
'cidr_v6': 'DONTCARE',
'id': 'DONTCARE',
'multi_host': False,
'injected': False,
'bridge_interface': 'DONTCARE',
'vlan': None}
self.assertDictMatch(nw, check)
check = {'broadcast': '192.168.%d.255' % i,
'dhcp_server': '192.168.%d.1' % i,
'dns': 'DONTCARE',
'gateway': '192.168.%d.1' % i,
'gateway_v6': 'DONTCARE',
'ip6s': 'DONTCARE',
'ips': 'DONTCARE',
'label': 'test%d' % i,
'mac': 'DE:AD:BE:EF:00:%02x' % i,
'vif_uuid':
'00000000-0000-0000-0000-00000000000000%02d' % i,
'rxtx_cap': 0,
'should_create_vlan': False,
'should_create_bridge': False}
self.assertDictMatch(info, check)
check = [{'enabled': 'DONTCARE',
'ip': 'DONTCARE',
'gateway': 'DONTCARE',
'netmask': 64}]
self.assertDictListMatch(info['ip6s'], check)
num_fixed_ips = len(info['ips'])
check = [{'enabled': 'DONTCARE',
'ip': '192.168.%d.1%02d' % (i, ip_num),
'gateway': 'DONTCARE',
'netmask': '255.255.255.0'}
for ip_num in xrange(num_fixed_ips)]
self.assertDictListMatch(info['ips'], check)
def test_validate_networks(self):
self.mox.StubOutWithMock(db, 'network_get')
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
self.mox.StubOutWithMock(db, "fixed_ip_get_by_address")
requested_networks = [("bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb",
"192.168.1.100")]
db.network_get_all_by_uuids(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(networks)
db.network_get(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(networks[1])
ip = fixed_ips[1].copy()
ip['instance_id'] = None
db.fixed_ip_get_by_address(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(ip)
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_reserved(self):
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
nets = self.network.create_networks(context_admin, 'fake',
'192.168.0.0/24', False, 1,
256, None, None, None, None, None)
self.assertEqual(1, len(nets))
network = nets[0]
self.assertEqual(3, db.network_count_reserved_ips(context_admin,
network['id']))
def test_validate_networks_none_requested_networks(self):
self.network.validate_networks(self.context, None)
def test_validate_networks_empty_requested_networks(self):
requested_networks = []
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_networks_invalid_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [(1, "192.168.0.100.1")]
db.network_get_all_by_uuids(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(networks)
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpInvalid,
self.network.validate_networks, self.context,
requested_networks)
def test_validate_networks_empty_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [(1, "")]
db.network_get_all_by_uuids(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(networks)
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpInvalid,
self.network.validate_networks,
self.context, requested_networks)
def test_validate_networks_none_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [(1, None)]
db.network_get_all_by_uuids(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(networks)
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_add_fixed_ip_instance_without_vpn_requested_networks(self):
self.mox.StubOutWithMock(db, 'network_get')
self.mox.StubOutWithMock(db, 'network_update')
self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
self.mox.StubOutWithMock(db, 'instance_get')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0})
db.instance_get(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({'security_groups':
[{'id': 0}]})
db.instance_get(self.context,
1).AndReturn({'display_name': HOST,
'uuid': 'test-00001'})
db.instance_get(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({'availability_zone': ''})
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn('192.168.0.101')
db.network_get(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(networks[0])
db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.network.add_fixed_ip_to_instance(self.context, 1, HOST,
networks[0]['id'])
def test_mini_dns_driver(self):
zone1 = "example.org"
zone2 = "example.com"
driver = self.network.instance_dns_manager
driver.create_entry("hostone", "10.0.0.1", "A", zone1)
driver.create_entry("hosttwo", "10.0.0.2", "A", zone1)
driver.create_entry("hostthree", "10.0.0.3", "A", zone1)
driver.create_entry("hostfour", "10.0.0.4", "A", zone1)
driver.create_entry("hostfive", "10.0.0.5", "A", zone2)
driver.delete_entry("hostone", zone1)
driver.modify_address("hostfour", "10.0.0.1", zone1)
driver.modify_address("hostthree", "10.0.0.1", zone1)
names = driver.get_entries_by_address("10.0.0.1", zone1)
self.assertEqual(len(names), 2)
self.assertIn('hostthree', names)
self.assertIn('hostfour', names)
names = driver.get_entries_by_address("10.0.0.5", zone2)
self.assertEqual(len(names), 1)
self.assertIn('hostfive', names)
addresses = driver.get_entries_by_name("hosttwo", zone1)
self.assertEqual(len(addresses), 1)
self.assertIn('10.0.0.2', addresses)
self.assertRaises(exception.InvalidInput,
driver.create_entry,
"hostname",
"10.10.10.10",
"invalidtype",
zone1)
def test_instance_dns(self):
fixedip = '192.168.0.101'
self.mox.StubOutWithMock(db, 'network_get')
self.mox.StubOutWithMock(db, 'network_update')
self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
self.mox.StubOutWithMock(db, 'instance_get')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0})
db.instance_get(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({'security_groups':
[{'id': 0}]})
db.instance_get(self.context,
1).AndReturn({'display_name': HOST,
'uuid': 'test-00001'})
db.instance_get(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({'availability_zone': ''})
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(fixedip)
db.network_get(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(networks[0])
db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.network.add_fixed_ip_to_instance(self.context, 1, HOST,
networks[0]['id'])
instance_manager = self.network.instance_dns_manager
addresses = instance_manager.get_entries_by_name(HOST,
self.network.instance_dns_domain)
self.assertEqual(len(addresses), 1)
self.assertEqual(addresses[0], fixedip)
addresses = instance_manager.get_entries_by_name('test-00001',
self.network.instance_dns_domain)
self.assertEqual(len(addresses), 1)
self.assertEqual(addresses[0], fixedip)
class VlanNetworkTestCase(test.TestCase):
def setUp(self):
super(VlanNetworkTestCase, self).setUp()
self.network = network_manager.VlanManager(host=HOST)
self.network.db = db
self.context = context.RequestContext('testuser', 'testproject',
is_admin=False)
def test_vpn_allocate_fixed_ip(self):
self.mox.StubOutWithMock(db, 'fixed_ip_associate')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
db.fixed_ip_associate(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg(),
reserved=True).AndReturn('192.168.0.1')
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0})
self.mox.ReplayAll()
network = dict(networks[0])
network['vpn_private_address'] = '192.168.0.2'
self.network.allocate_fixed_ip(None, 0, network, vpn=True)
def test_vpn_allocate_fixed_ip_no_network_id(self):
network = dict(networks[0])
network['vpn_private_address'] = '192.168.0.2'
network['id'] = None
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.assertRaises(exception.FixedIpNotFoundForNetwork,
self.network.allocate_fixed_ip,
context_admin,
0,
network,
vpn=True)
def test_allocate_fixed_ip(self):
self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'instance_get')
db.instance_get(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({'security_groups':
[{'id': 0}]})
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn('192.168.0.1')
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0})
self.mox.ReplayAll()
network = dict(networks[0])
network['vpn_private_address'] = '192.168.0.2'
self.network.allocate_fixed_ip(self.context, 0, network)
def test_create_networks_too_big(self):
self.assertRaises(ValueError, self.network.create_networks, None,
num_networks=4094, vlan_start=1)
def test_create_networks_too_many(self):
self.assertRaises(ValueError, self.network.create_networks, None,
num_networks=100, vlan_start=1,
cidr='192.168.0.1/24', network_size=100)
def test_validate_networks(self):
def network_get(_context, network_id):
return networks[network_id]
self.stubs.Set(db, 'network_get', network_get)
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
self.mox.StubOutWithMock(db, "fixed_ip_get_by_address")
requested_networks = [("bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb",
"192.168.1.100")]
db.network_get_all_by_uuids(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(networks)
fixed_ips[1]['network_id'] = networks[1]['id']
fixed_ips[1]['instance_id'] = None
db.fixed_ip_get_by_address(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(fixed_ips[1])
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_networks_none_requested_networks(self):
self.network.validate_networks(self.context, None)
def test_validate_networks_empty_requested_networks(self):
requested_networks = []
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_networks_invalid_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [(1, "192.168.0.100.1")]
db.network_get_all_by_uuids(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(networks)
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpInvalid,
self.network.validate_networks, self.context,
requested_networks)
def test_validate_networks_empty_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [(1, "")]
db.network_get_all_by_uuids(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(networks)
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpInvalid,
self.network.validate_networks,
self.context, requested_networks)
def test_validate_networks_none_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [(1, None)]
db.network_get_all_by_uuids(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(networks)
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_floating_ip_owned_by_project(self):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
# raises because floating_ip project_id is None
floating_ip = {'address': '10.0.0.1',
'project_id': None}
self.assertRaises(exception.NotAuthorized,
self.network._floating_ip_owned_by_project,
ctxt,
floating_ip)
# raises because floating_ip project_id is not equal to ctxt project_id
floating_ip = {'address': '10.0.0.1',
'project_id': ctxt.project_id + '1'}
self.assertRaises(exception.NotAuthorized,
self.network._floating_ip_owned_by_project,
ctxt,
floating_ip)
# does not raise (floating ip is owned by ctxt project)
floating_ip = {'address': '10.0.0.1',
'project_id': ctxt.project_id}
self.network._floating_ip_owned_by_project(ctxt, floating_ip)
def test_allocate_floating_ip(self):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
def fake1(*args, **kwargs):
return {'address': '10.0.0.1'}
def fake2(*args, **kwargs):
return 25
def fake3(*args, **kwargs):
return 0
self.stubs.Set(self.network.db, 'floating_ip_allocate_address', fake1)
# this time should raise
self.stubs.Set(self.network.db, 'floating_ip_count_by_project', fake2)
self.assertRaises(exception.QuotaError,
self.network.allocate_floating_ip,
ctxt,
ctxt.project_id)
# this time should not
self.stubs.Set(self.network.db, 'floating_ip_count_by_project', fake3)
self.network.allocate_floating_ip(ctxt, ctxt.project_id)
def test_deallocate_floating_ip(self):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
def fake1(*args, **kwargs):
pass
def fake2(*args, **kwargs):
return {'address': '10.0.0.1', 'fixed_ip_id': 1}
def fake3(*args, **kwargs):
return {'address': '10.0.0.1', 'fixed_ip_id': None}
self.stubs.Set(self.network.db, 'floating_ip_deallocate', fake1)
self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
# this time should raise because floating ip is associated to fixed_ip
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
self.assertRaises(exception.FloatingIpAssociated,
self.network.deallocate_floating_ip,
ctxt,
mox.IgnoreArg())
# this time should not raise
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
self.network.deallocate_floating_ip(ctxt, ctxt.project_id)
def test_associate_floating_ip(self):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
def fake1(*args, **kwargs):
pass
# floating ip that's already associated
def fake2(*args, **kwargs):
return {'address': '10.0.0.1',
'pool': 'nova',
'interface': 'eth0',
'fixed_ip_id': 1}
# floating ip that isn't associated
def fake3(*args, **kwargs):
return {'address': '10.0.0.1',
'pool': 'nova',
'interface': 'eth0',
'fixed_ip_id': None}
# fixed ip with remote host
def fake4(*args, **kwargs):
return {'address': '10.0.0.1',
'pool': 'nova',
'interface': 'eth0',
'network_id': 'blah'}
def fake4_network(*args, **kwargs):
return {'multi_host': False, 'host': 'jibberjabber'}
# fixed ip with local host
def fake5(*args, **kwargs):
return {'address': '10.0.0.1',
'pool': 'nova',
'interface': 'eth0',
'network_id': 'blahblah'}
def fake5_network(*args, **kwargs):
return {'multi_host': False, 'host': 'testhost'}
def fake6(*args, **kwargs):
self.local = False
def fake7(*args, **kwargs):
self.local = True
def fake8(*args, **kwargs):
raise exception.ProcessExecutionError('',
'Cannot find device "em0"\n')
# raises because interface doesn't exist
self.stubs.Set(self.network.db,
'floating_ip_fixed_ip_associate',
fake1)
self.stubs.Set(self.network.db, 'floating_ip_disassociate', fake1)
self.stubs.Set(self.network.driver, 'bind_floating_ip', fake8)
self.assertRaises(exception.NoFloatingIpInterface,
self.network._associate_floating_ip,
ctxt,
mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
# raises because floating_ip is already associated to a fixed_ip
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
self.assertRaises(exception.FloatingIpAssociated,
self.network.associate_floating_ip,
ctxt,
mox.IgnoreArg(),
mox.IgnoreArg())
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
# does not raise and makes call remotely
self.local = True
self.stubs.Set(self.network.db, 'fixed_ip_get_by_address', fake4)
self.stubs.Set(self.network.db, 'network_get', fake4_network)
self.stubs.Set(rpc, 'cast', fake6)
self.network.associate_floating_ip(ctxt, mox.IgnoreArg(),
mox.IgnoreArg())
self.assertFalse(self.local)
# does not raise and makes call locally
self.local = False
self.stubs.Set(self.network.db, 'fixed_ip_get_by_address', fake5)
self.stubs.Set(self.network.db, 'network_get', fake5_network)
self.stubs.Set(self.network, '_associate_floating_ip', fake7)
self.network.associate_floating_ip(ctxt, mox.IgnoreArg(),
mox.IgnoreArg())
self.assertTrue(self.local)
def test_disassociate_floating_ip(self):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
def fake1(*args, **kwargs):
pass
# floating ip that isn't associated
def fake2(*args, **kwargs):
return {'address': '10.0.0.1',
'pool': 'nova',
'interface': 'eth0',
'fixed_ip_id': None}
# floating ip that is associated
def fake3(*args, **kwargs):
return {'address': '10.0.0.1',
'pool': 'nova',
'interface': 'eth0',
'fixed_ip_id': 1}
# fixed ip with remote host
def fake4(*args, **kwargs):
return {'address': '10.0.0.1',
'pool': 'nova',
'interface': 'eth0',
'network_id': 'blah'}
def fake4_network(*args, **kwargs):
return {'multi_host': False,
'host': 'jibberjabber'}
# fixed ip with local host
def fake5(*args, **kwargs):
return {'address': '10.0.0.1',
'pool': 'nova',
'interface': 'eth0',
'network_id': 'blahblah'}
def fake5_network(*args, **kwargs):
return {'multi_host': False, 'host': 'testhost'}
def fake6(*args, **kwargs):
self.local = False
def fake7(*args, **kwargs):
self.local = True
self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
# raises because floating_ip is not associated to a fixed_ip
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
self.assertRaises(exception.FloatingIpNotAssociated,
self.network.disassociate_floating_ip,
ctxt,
mox.IgnoreArg())
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
# does not raise and makes call remotely
self.local = True
self.stubs.Set(self.network.db, 'fixed_ip_get', fake4)
self.stubs.Set(self.network.db, 'network_get', fake4_network)
self.stubs.Set(rpc, 'cast', fake6)
self.network.disassociate_floating_ip(ctxt, mox.IgnoreArg())
self.assertFalse(self.local)
# does not raise and makes call locally
self.local = False
self.stubs.Set(self.network.db, 'fixed_ip_get', fake5)
self.stubs.Set(self.network.db, 'network_get', fake5_network)
self.stubs.Set(self.network, '_disassociate_floating_ip', fake7)
self.network.disassociate_floating_ip(ctxt, mox.IgnoreArg())
self.assertTrue(self.local)
def test_add_fixed_ip_instance_without_vpn_requested_networks(self):
self.mox.StubOutWithMock(db, 'network_get')
self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
self.mox.StubOutWithMock(db, 'instance_get')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0})
db.instance_get(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({'security_groups':
[{'id': 0}],
'availability_zone': ''})
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn('192.168.0.101')
db.network_get(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(networks[0])
self.mox.ReplayAll()
self.network.add_fixed_ip_to_instance(self.context, 1, HOST,
networks[0]['id'])
def test_ip_association_and_allocation_of_other_project(self):
"""Makes sure that we cannot deallocaate or disassociate
a public ip of other project"""
def network_get(_context, network_id):
return networks[network_id]
self.stubs.Set(db, 'network_get', network_get)
context1 = context.RequestContext('user', 'project1')
context2 = context.RequestContext('user', 'project2')
address = '1.2.3.4'
float_addr = db.floating_ip_create(context1.elevated(),
{'address': address,
'project_id': context1.project_id})
instance = db.instance_create(context1,
{'project_id': 'project1'})
fix_addr = db.fixed_ip_associate_pool(context1.elevated(),
1, instance['id'])
# Associate the IP with non-admin user context
self.assertRaises(exception.NotAuthorized,
self.network.associate_floating_ip,
context2,
float_addr,
fix_addr)
# Deallocate address from other project
self.assertRaises(exception.NotAuthorized,
self.network.deallocate_floating_ip,
context2,
float_addr)
# Now associate the address to the actual project
self.network.associate_floating_ip(context1, float_addr, fix_addr)
# Now try disassociating from the other project
self.assertRaises(exception.NotAuthorized,
self.network.disassociate_floating_ip,
context2,
float_addr)
# Clean up the ip addresses
self.network.disassociate_floating_ip(context1, float_addr)
self.network.deallocate_floating_ip(context1, float_addr)
self.network.deallocate_fixed_ip(context1, fix_addr)
db.floating_ip_destroy(context1.elevated(), float_addr)
db.fixed_ip_disassociate(context1.elevated(), fix_addr)
class CommonNetworkTestCase(test.TestCase):
def setUp(self):
super(CommonNetworkTestCase, self).setUp()
self.context = context.RequestContext('fake', 'fake')
def fake_create_fixed_ips(self, context, network_id):
return None
def test_remove_fixed_ip_from_instance(self):
manager = fake_network.FakeNetworkManager()
manager.remove_fixed_ip_from_instance(self.context, 99, '10.0.0.1')
self.assertEquals(manager.deallocate_called, '10.0.0.1')
def test_remove_fixed_ip_from_instance_bad_input(self):
manager = fake_network.FakeNetworkManager()
self.assertRaises(exception.FixedIpNotFoundForSpecificInstance,
manager.remove_fixed_ip_from_instance,
self.context, 99, 'bad input')
def test_validate_cidrs(self):
manager = fake_network.FakeNetworkManager()
nets = manager.create_networks(None, 'fake', '192.168.0.0/24',
False, 1, 256, None, None, None,
None, None)
self.assertEqual(1, len(nets))
cidrs = [str(net['cidr']) for net in nets]
self.assertTrue('192.168.0.0/24' in cidrs)
def test_validate_cidrs_split_exact_in_half(self):
manager = fake_network.FakeNetworkManager()
nets = manager.create_networks(None, 'fake', '192.168.0.0/24',
False, 2, 128, None, None, None,
None, None)
self.assertEqual(2, len(nets))
cidrs = [str(net['cidr']) for net in nets]
self.assertTrue('192.168.0.0/25' in cidrs)
self.assertTrue('192.168.0.128/25' in cidrs)
def test_validate_cidrs_split_cidr_in_use_middle_of_range(self):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
manager.db.network_get_all(ctxt).AndReturn([{'id': 1,
'cidr': '192.168.2.0/24'}])
self.mox.ReplayAll()
nets = manager.create_networks(None, 'fake', '192.168.0.0/16',
False, 4, 256, None, None, None,
None, None)
self.assertEqual(4, len(nets))
cidrs = [str(net['cidr']) for net in nets]
exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
'192.168.4.0/24']
for exp_cidr in exp_cidrs:
self.assertTrue(exp_cidr in cidrs)
self.assertFalse('192.168.2.0/24' in cidrs)
def test_validate_cidrs_smaller_subnet_in_use(self):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
manager.db.network_get_all(ctxt).AndReturn([{'id': 1,
'cidr': '192.168.2.9/25'}])
self.mox.ReplayAll()
# ValueError: requested cidr (192.168.2.0/24) conflicts with
# existing smaller cidr
args = (None, 'fake', '192.168.2.0/24', False, 1, 256, None, None,
None, None, None)
self.assertRaises(ValueError, manager.create_networks, *args)
def test_validate_cidrs_split_smaller_cidr_in_use(self):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
manager.db.network_get_all(ctxt).AndReturn([{'id': 1,
'cidr': '192.168.2.0/25'}])
self.mox.ReplayAll()
nets = manager.create_networks(None, 'fake', '192.168.0.0/16',
False, 4, 256, None, None, None, None,
None)
self.assertEqual(4, len(nets))
cidrs = [str(net['cidr']) for net in nets]
exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
'192.168.4.0/24']
for exp_cidr in exp_cidrs:
self.assertTrue(exp_cidr in cidrs)
self.assertFalse('192.168.2.0/24' in cidrs)
def test_validate_cidrs_split_smaller_cidr_in_use2(self):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
manager.db.network_get_all(ctxt).AndReturn([{'id': 1,
'cidr': '192.168.2.9/29'}])
self.mox.ReplayAll()
nets = manager.create_networks(None, 'fake', '192.168.2.0/24',
False, 3, 32, None, None, None, None,
None)
self.assertEqual(3, len(nets))
cidrs = [str(net['cidr']) for net in nets]
exp_cidrs = ['192.168.2.32/27', '192.168.2.64/27', '192.168.2.96/27']
for exp_cidr in exp_cidrs:
self.assertTrue(exp_cidr in cidrs)
self.assertFalse('192.168.2.0/27' in cidrs)
def test_validate_cidrs_split_all_in_use(self):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
in_use = [{'id': 1, 'cidr': '192.168.2.9/29'},
{'id': 2, 'cidr': '192.168.2.64/26'},
{'id': 3, 'cidr': '192.168.2.128/26'}]
manager.db.network_get_all(ctxt).AndReturn(in_use)
self.mox.ReplayAll()
args = (None, 'fake', '192.168.2.0/24', False, 3, 64, None, None,
None, None, None)
# ValueError: Not enough subnets avail to satisfy requested num_
# networks - some subnets in requested range already
# in use
self.assertRaises(ValueError, manager.create_networks, *args)
def test_validate_cidrs_one_in_use(self):
manager = fake_network.FakeNetworkManager()
args = (None, 'fake', '192.168.0.0/24', False, 2, 256, None, None,
None, None, None)
# ValueError: network_size * num_networks exceeds cidr size
self.assertRaises(ValueError, manager.create_networks, *args)
def test_validate_cidrs_already_used(self):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
manager.db.network_get_all(ctxt).AndReturn([{'id': 1,
'cidr': '192.168.0.0/24'}])
self.mox.ReplayAll()
# ValueError: cidr already in use
args = (None, 'fake', '192.168.0.0/24', False, 1, 256, None, None,
None, None, None)
self.assertRaises(ValueError, manager.create_networks, *args)
def test_validate_cidrs_too_many(self):
manager = fake_network.FakeNetworkManager()
args = (None, 'fake', '192.168.0.0/24', False, 200, 256, None, None,
None, None, None)
# ValueError: Not enough subnets avail to satisfy requested
# num_networks
self.assertRaises(ValueError, manager.create_networks, *args)
def test_validate_cidrs_split_partial(self):
manager = fake_network.FakeNetworkManager()
nets = manager.create_networks(None, 'fake', '192.168.0.0/16',
False, 2, 256, None, None, None, None,
None)
returned_cidrs = [str(net['cidr']) for net in nets]
self.assertTrue('192.168.0.0/24' in returned_cidrs)
self.assertTrue('192.168.1.0/24' in returned_cidrs)
def test_validate_cidrs_conflict_existing_supernet(self):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
fakecidr = [{'id': 1, 'cidr': '192.168.0.0/8'}]
manager.db.network_get_all(ctxt).AndReturn(fakecidr)
self.mox.ReplayAll()
args = (None, 'fake', '192.168.0.0/24', False, 1, 256, None, None,
None, None, None)
# ValueError: requested cidr (192.168.0.0/24) conflicts
# with existing supernet
self.assertRaises(ValueError, manager.create_networks, *args)
def test_create_networks(self):
cidr = '192.168.0.0/24'
manager = fake_network.FakeNetworkManager()
self.stubs.Set(manager, '_create_fixed_ips',
self.fake_create_fixed_ips)
args = [None, 'foo', cidr, None, 1, 256, 'fd00::/48', None, None,
None, None, None]
self.assertTrue(manager.create_networks(*args))
def test_create_networks_cidr_already_used(self):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
fakecidr = [{'id': 1, 'cidr': '192.168.0.0/24'}]
manager.db.network_get_all(ctxt).AndReturn(fakecidr)
self.mox.ReplayAll()
args = [None, 'foo', '192.168.0.0/24', None, 1, 256,
'fd00::/48', None, None, None, None, None]
self.assertRaises(ValueError, manager.create_networks, *args)
def test_create_networks_many(self):
cidr = '192.168.0.0/16'
manager = fake_network.FakeNetworkManager()
self.stubs.Set(manager, '_create_fixed_ips',
self.fake_create_fixed_ips)
args = [None, 'foo', cidr, None, 10, 256, 'fd00::/48', None, None,
None, None, None]
self.assertTrue(manager.create_networks(*args))
def test_get_instance_uuids_by_ip_regex(self):
manager = fake_network.FakeNetworkManager()
_vifs = manager.db.virtual_interface_get_all(None)
fake_context = context.RequestContext('user', 'project')
# Greedily get everything
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '.*'})
self.assertEqual(len(res), len(_vifs))
# Doesn't exist
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '10.0.0.1'})
self.assertFalse(res)
# Get instance 1
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '172.16.0.2'})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_id'], _vifs[1]['instance_id'])
# Get instance 2
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '173.16.0.2'})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_id'], _vifs[2]['instance_id'])
# Get instance 0 and 1
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '172.16.0.*'})
self.assertTrue(res)
self.assertEqual(len(res), 2)
self.assertEqual(res[0]['instance_id'], _vifs[0]['instance_id'])
self.assertEqual(res[1]['instance_id'], _vifs[1]['instance_id'])
# Get instance 1 and 2
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '17..16.0.2'})
self.assertTrue(res)
self.assertEqual(len(res), 2)
self.assertEqual(res[0]['instance_id'], _vifs[1]['instance_id'])
self.assertEqual(res[1]['instance_id'], _vifs[2]['instance_id'])
def test_get_instance_uuids_by_ipv6_regex(self):
manager = fake_network.FakeNetworkManager()
_vifs = manager.db.virtual_interface_get_all(None)
fake_context = context.RequestContext('user', 'project')
# Greedily get everything
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': '.*'})
self.assertEqual(len(res), len(_vifs))
# Doesn't exist
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': '.*1034.*'})
self.assertFalse(res)
# Get instance 1
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': '2001:.*2'})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_id'], _vifs[1]['instance_id'])
# Get instance 2
ip6 = '2001:db8:69:1f:dead:beff:feff:ef03'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': ip6})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_id'], _vifs[2]['instance_id'])
# Get instance 0 and 1
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': '.*ef0[1,2]'})
self.assertTrue(res)
self.assertEqual(len(res), 2)
self.assertEqual(res[0]['instance_id'], _vifs[0]['instance_id'])
self.assertEqual(res[1]['instance_id'], _vifs[1]['instance_id'])
# Get instance 1 and 2
ip6 = '2001:db8:69:1.:dead:beff:feff:ef0.'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': ip6})
self.assertTrue(res)
self.assertEqual(len(res), 2)
self.assertEqual(res[0]['instance_id'], _vifs[1]['instance_id'])
self.assertEqual(res[1]['instance_id'], _vifs[2]['instance_id'])
def test_get_instance_uuids_by_ip(self):
manager = fake_network.FakeNetworkManager()
_vifs = manager.db.virtual_interface_get_all(None)
fake_context = context.RequestContext('user', 'project')
# No regex for you!
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': '.*'})
self.assertFalse(res)
# Doesn't exist
ip = '10.0.0.1'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': ip})
self.assertFalse(res)
# Get instance 1
ip = '172.16.0.2'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': ip})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_id'], _vifs[1]['instance_id'])
# Get instance 2
ip = '173.16.0.2'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': ip})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_id'], _vifs[2]['instance_id'])
def test_get_network(self):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
self.mox.StubOutWithMock(manager.db, 'network_get_all_by_uuids')
manager.db.network_get_all_by_uuids(mox.IgnoreArg(),
mox.IgnoreArg()).\
AndReturn(networks)
self.mox.ReplayAll()
uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
network = manager.get_network(fake_context, uuid)
self.assertEqual(network['uuid'], uuid)
def test_get_network_not_found(self):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
self.mox.StubOutWithMock(manager.db, 'network_get_all_by_uuids')
manager.db.network_get_all_by_uuids(mox.IgnoreArg(),
mox.IgnoreArg()).\
AndReturn([])
self.mox.ReplayAll()
uuid = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
self.assertRaises(exception.NetworkNotFound,
manager.get_network, fake_context, uuid)
def test_get_all_networks(self):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
self.mox.StubOutWithMock(manager.db, 'network_get_all')
manager.db.network_get_all(mox.IgnoreArg()).\
AndReturn(networks)
self.mox.ReplayAll()
output = manager.get_all_networks(fake_context)
self.assertEqual(len(networks), 2)
self.assertEqual(output[0]['uuid'],
'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')
self.assertEqual(output[1]['uuid'],
'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb')
def test_disassociate_network(self):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
self.mox.StubOutWithMock(manager.db, 'network_get_all_by_uuids')
manager.db.network_get_all_by_uuids(mox.IgnoreArg(),
mox.IgnoreArg()).\
AndReturn(networks)
self.mox.ReplayAll()
uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
manager.disassociate_network(fake_context, uuid)
def test_disassociate_network_not_found(self):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
self.mox.StubOutWithMock(manager.db, 'network_get_all_by_uuids')
manager.db.network_get_all_by_uuids(mox.IgnoreArg(),
mox.IgnoreArg()).\
AndReturn([])
self.mox.ReplayAll()
uuid = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
self.assertRaises(exception.NetworkNotFound,
manager.disassociate_network, fake_context, uuid)
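# Illustrative sketch (editor-added, not part of the original test module):
# the validate_cidrs tests above exercise create_networks' subnet splitting.
# The underlying arithmetic -- carving equal-sized subnets out of a
# requested supernet -- looks like this with the netaddr package (assumed
# available here, as nova's network manager uses it):
def _subnet_split_example():
    import netaddr
    supernet = netaddr.IPNetwork('192.168.0.0/16')
    subnets = [str(net) for net in supernet.subnet(24, count=4)]
    assert subnets == ['192.168.0.0/24', '192.168.1.0/24',
                       '192.168.2.0/24', '192.168.3.0/24']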
class TestRPCFixedManager(network_manager.RPCAllocateFixedIP,
network_manager.NetworkManager):
"""Dummy manager that implements RPCAllocateFixedIP"""
class RPCAllocateTestCase(test.TestCase):
"""Tests nova.network.manager.RPCAllocateFixedIP"""
def setUp(self):
super(RPCAllocateTestCase, self).setUp()
self.rpc_fixed = TestRPCFixedManager()
self.context = context.RequestContext('fake', 'fake')
def test_rpc_allocate(self):
"""Test to verify bug 855030 doesn't resurface.
Makes sure _rpc_allocate_fixed_ip returns a value so the call
returns properly and the greenpool completes."""
address = '10.10.10.10'
def fake_allocate(*args, **kwargs):
return address
def fake_network_get(*args, **kwargs):
return {}
self.stubs.Set(self.rpc_fixed, 'allocate_fixed_ip', fake_allocate)
self.stubs.Set(self.rpc_fixed.db, 'network_get', fake_network_get)
rval = self.rpc_fixed._rpc_allocate_fixed_ip(self.context,
'fake_instance',
'fake_network')
self.assertEqual(rval, address)
class TestFloatingIPManager(network_manager.FloatingIP,
network_manager.NetworkManager):
"""Dummy manager that implements FloatingIP"""
class FloatingIPTestCase(test.TestCase):
"""Tests nova.network.manager.FloatingIP"""
def setUp(self):
super(FloatingIPTestCase, self).setUp()
self.network = TestFloatingIPManager()
temp = utils.import_object('nova.network.minidns.MiniDNS')
self.network.floating_dns_manager = temp
self.network.db = db
self.project_id = 'testproject'
self.context = context.RequestContext('testuser', self.project_id,
is_admin=False)
def tearDown(self):
super(FloatingIPTestCase, self).tearDown()
self.network.floating_dns_manager.delete_dns_file()
def test_double_deallocation(self):
instance_ref = db.api.instance_create(self.context,
{"project_id": self.project_id})
# Run it twice so it fails if it does not handle
# instances without fixed networks.
# If either call raises, it does not handle having no addresses.
self.network.deallocate_for_instance(self.context,
instance_id=instance_ref['id'])
self.network.deallocate_for_instance(self.context,
instance_id=instance_ref['id'])
def test_floating_dns_create_conflict(self):
zone = "example.org"
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
self.network.add_dns_entry(self.context, address1, name1, "A", zone)
self.assertRaises(exception.FloatingIpDNSExists,
self.network.add_dns_entry, self.context,
address1, name1, "A", zone)
def test_floating_create_and_get(self):
zone = "example.org"
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
entries = self.network.get_dns_entries_by_address(self.context,
address1, zone)
self.assertFalse(entries)
self.network.add_dns_entry(self.context, address1, name1, "A", zone)
self.network.add_dns_entry(self.context, address1, name2, "A", zone)
entries = self.network.get_dns_entries_by_address(self.context,
address1, zone)
self.assertEquals(len(entries), 2)
self.assertEquals(entries[0], name1)
self.assertEquals(entries[1], name2)
entries = self.network.get_dns_entries_by_name(self.context,
name1, zone)
self.assertEquals(len(entries), 1)
self.assertEquals(entries[0], address1)
def test_floating_dns_delete(self):
zone = "example.org"
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
self.network.add_dns_entry(self.context, address1, name1, "A", zone)
self.network.add_dns_entry(self.context, address1, name2, "A", zone)
self.network.delete_dns_entry(self.context, name1, zone)
entries = self.network.get_dns_entries_by_address(self.context,
address1, zone)
self.assertEquals(len(entries), 1)
self.assertEquals(entries[0], name2)
self.assertRaises(exception.NotFound,
self.network.delete_dns_entry, self.context,
name1, zone)
def test_floating_dns_domains_public(self):
zone1 = "testzone"
domain1 = "example.org"
domain2 = "example.com"
address1 = '10.10.10.10'
entryname = 'testentry'
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.assertRaises(exception.AdminRequired,
self.network.create_public_dns_domain, self.context,
domain1, zone1)
self.network.create_public_dns_domain(context_admin, domain1,
'testproject')
self.network.create_public_dns_domain(context_admin, domain2,
'fakeproject')
domains = self.network.get_dns_domains(self.context)
self.assertEquals(len(domains), 2)
self.assertEquals(domains[0]['domain'], domain1)
self.assertEquals(domains[1]['domain'], domain2)
self.assertEquals(domains[0]['project'], 'testproject')
self.assertEquals(domains[1]['project'], 'fakeproject')
self.network.add_dns_entry(self.context, address1, entryname,
'A', domain1)
entries = self.network.get_dns_entries_by_name(self.context,
entryname, domain1)
self.assertEquals(len(entries), 1)
self.assertEquals(entries[0], address1)
self.assertRaises(exception.AdminRequired,
self.network.delete_dns_domain, self.context,
domain1)
self.network.delete_dns_domain(context_admin, domain1)
self.network.delete_dns_domain(context_admin, domain2)
# Verify that deleting the domain deleted the associated entry
entries = self.network.get_dns_entries_by_name(self.context,
entryname, domain1)
self.assertFalse(entries)
def test_delete_all_by_ip(self):
domain1 = "example.org"
domain2 = "example.com"
address = "10.10.10.10"
name1 = "foo"
name2 = "bar"
def fake_domains(context):
return [{'domain': 'example.org', 'scope': 'public'},
{'domain': 'example.com', 'scope': 'public'},
{'domain': 'test.example.org', 'scope': 'public'}]
self.stubs.Set(self.network, 'get_dns_domains', fake_domains)
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.network.create_public_dns_domain(context_admin, domain1,
'testproject')
self.network.create_public_dns_domain(context_admin, domain2,
'fakeproject')
domains = self.network.get_dns_domains(self.context)
for domain in domains:
self.network.add_dns_entry(self.context, address,
name1, "A", domain['domain'])
self.network.add_dns_entry(self.context, address,
name2, "A", domain['domain'])
entries = self.network.get_dns_entries_by_address(self.context,
address,
domain['domain'])
self.assertEquals(len(entries), 2)
self.network._delete_all_entries_for_ip(self.context, address)
for domain in domains:
entries = self.network.get_dns_entries_by_address(self.context,
address,
domain['domain'])
self.assertFalse(entries)
self.network.delete_dns_domain(context_admin, domain1)
self.network.delete_dns_domain(context_admin, domain2)
class NetworkPolicyTestCase(test.TestCase):
def setUp(self):
super(NetworkPolicyTestCase, self).setUp()
nova.policy.reset()
nova.policy.init()
self.context = context.get_admin_context()
def tearDown(self):
super(NetworkPolicyTestCase, self).tearDown()
nova.policy.reset()
def _set_rules(self, rules):
nova.common.policy.set_brain(nova.common.policy.HttpBrain(rules))
def test_check_policy(self):
self.mox.StubOutWithMock(nova.policy, 'enforce')
target = {
'project_id': self.context.project_id,
'user_id': self.context.user_id,
}
nova.policy.enforce(self.context, 'network:get_all', target)
self.mox.ReplayAll()
network_manager.check_policy(self.context, 'get_all')
self.mox.UnsetStubs()
self.mox.VerifyAll()
class InstanceDNSTestCase(test.TestCase):
"""Tests nova.network.manager instance DNS"""
def setUp(self):
super(InstanceDNSTestCase, self).setUp()
self.network = TestFloatingIPManager()
temp = utils.import_object('nova.network.minidns.MiniDNS')
self.network.instance_dns_manager = temp
temp = utils.import_object('nova.network.dns_driver.DNSDriver')
self.network.floating_dns_manager = temp
self.network.db = db
self.project_id = 'testproject'
self.context = context.RequestContext('testuser', self.project_id,
is_admin=False)
def tearDown(self):
super(InstanceDNSTestCase, self).tearDown()
self.network.instance_dns_manager.delete_dns_file()
def test_dns_domains_private(self):
zone1 = 'testzone'
domain1 = 'example.org'
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.assertRaises(exception.AdminRequired,
self.network.create_private_dns_domain, self.context,
domain1, zone1)
self.network.create_private_dns_domain(context_admin, domain1, zone1)
domains = self.network.get_dns_domains(self.context)
self.assertEquals(len(domains), 1)
self.assertEquals(domains[0]['domain'], domain1)
self.assertEquals(domains[0]['availability_zone'], zone1)
self.assertRaises(exception.AdminRequired,
self.network.delete_dns_domain, self.context,
domain1)
self.network.delete_dns_domain(context_admin, domain1)
domain1 = "example.org"
domain2 = "example.com"
class LdapDNSTestCase(test.TestCase):
"""Tests nova.network.ldapdns.LdapDNS"""
def setUp(self):
super(LdapDNSTestCase, self).setUp()
temp = utils.import_object('nova.network.ldapdns.FakeLdapDNS')
self.driver = temp
self.driver.create_domain(domain1)
self.driver.create_domain(domain2)
def tearDown(self):
super(LdapDNSTestCase, self).tearDown()
self.driver.delete_domain(domain1)
self.driver.delete_domain(domain2)
def test_ldap_dns_domains(self):
domains = self.driver.get_domains()
self.assertEqual(len(domains), 2)
self.assertIn(domain1, domains)
self.assertIn(domain2, domains)
def test_ldap_dns_create_conflict(self):
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
self.driver.create_entry(name1, address1, "A", domain1)
self.assertRaises(exception.FloatingIpDNSExists,
self.driver.create_entry,
name1, address1, "A", domain1)
def test_ldap_dns_create_and_get(self):
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
entries = self.driver.get_entries_by_address(address1, domain1)
self.assertFalse(entries)
self.driver.create_entry(name1, address1, "A", domain1)
self.driver.create_entry(name2, address1, "A", domain1)
entries = self.driver.get_entries_by_address(address1, domain1)
self.assertEquals(len(entries), 2)
self.assertEquals(entries[0], name1)
self.assertEquals(entries[1], name2)
entries = self.driver.get_entries_by_name(name1, domain1)
self.assertEquals(len(entries), 1)
self.assertEquals(entries[0], address1)
def test_ldap_dns_delete(self):
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
self.driver.create_entry(name1, address1, "A", domain1)
self.driver.create_entry(name2, address1, "A", domain1)
entries = self.driver.get_entries_by_address(address1, domain1)
self.assertEquals(len(entries), 2)
self.driver.delete_entry(name1, domain1)
entries = self.driver.get_entries_by_address(address1, domain1)
LOG.debug("entries: %s" % entries)
self.assertEquals(len(entries), 1)
self.assertEquals(entries[0], name2)
self.assertRaises(exception.NotFound,
self.driver.delete_entry,
name1, domain1)
| apache-2.0 |
admetricks/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/bot/layouttestresultsreader_unittest.py | 124 | 6488 | # Copyright (c) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.net.layouttestresults import LayoutTestResults
from webkitpy.common.host_mock import MockHost
from .layouttestresultsreader import LayoutTestResultsReader
class LayoutTestResultsReaderTest(unittest.TestCase):
def test_missing_layout_test_results(self):
host = MockHost()
reader = LayoutTestResultsReader(host, "/mock-results", "/var/logs")
layout_tests_results_path = '/mock-results/full_results.json'
unit_tests_results_path = '/mock-results/webkit_unit_tests_output.xml'
host.filesystem = MockFileSystem({layout_tests_results_path: None,
unit_tests_results_path: None})
# Make sure that our filesystem mock functions as we expect.
self.assertRaises(IOError, host.filesystem.read_text_file, layout_tests_results_path)
self.assertRaises(IOError, host.filesystem.read_text_file, unit_tests_results_path)
# layout_test_results shouldn't raise even if the results.json file is missing.
self.assertIsNone(reader.results())
def test_create_unit_test_results(self):
host = MockHost()
reader = LayoutTestResultsReader(host, "/mock-results", "/var/logs")
unit_tests_results_path = '/mock-results/webkit_unit_tests_output.xml'
no_failures_xml = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="3" failures="0" disabled="0" errors="0" time="11.35" name="AllTests">
<testsuite name="RenderTableCellDeathTest" tests="3" failures="0" disabled="0" errors="0" time="0.677">
<testcase name="CanSetColumn" status="run" time="0.168" classname="RenderTableCellDeathTest" />
<testcase name="CrashIfSettingUnsetColumnIndex" status="run" time="0.129" classname="RenderTableCellDeathTest" />
<testcase name="CrashIfSettingUnsetRowIndex" status="run" time="0.123" classname="RenderTableCellDeathTest" />
</testsuite>
</testsuites>"""
host.filesystem = MockFileSystem({unit_tests_results_path: no_failures_xml})
self.assertEqual(reader._create_unit_test_results(), [])
def test_missing_unit_test_results_path(self):
host = MockHost()
reader = LayoutTestResultsReader(host, "/mock-results", "/var/logs")
reader._create_layout_test_results = lambda: LayoutTestResults([])
reader._create_unit_test_results = lambda: None
# layout_test_results shouldn't raise even if the unit tests xml file is missing.
        self.assertIsNotNone(reader.results())
self.assertEqual(reader.results().failing_tests(), [])
def test_layout_test_results(self):
reader = LayoutTestResultsReader(MockHost(), "/mock-results", "/var/logs")
reader._read_file_contents = lambda path: None
self.assertIsNone(reader.results())
reader._read_file_contents = lambda path: ""
self.assertIsNone(reader.results())
reader._create_layout_test_results = lambda: LayoutTestResults([])
results = reader.results()
self.assertIsNotNone(results)
self.assertEqual(results.failure_limit_count(), 30) # This value matches RunTests.NON_INTERACTIVE_FAILURE_LIMIT_COUNT
def test_archive_last_layout_test_results(self):
host = MockHost()
results_directory = "/mock-results"
reader = LayoutTestResultsReader(host, results_directory, "/var/logs")
patch = host.bugs.fetch_attachment(10001)
host.filesystem = MockFileSystem()
# Should fail because the results_directory does not exist.
expected_logs = "/mock-results does not exist, not archiving.\n"
archive = OutputCapture().assert_outputs(self, reader.archive, [patch], expected_logs=expected_logs)
self.assertIsNone(archive)
host.filesystem.maybe_make_directory(results_directory)
self.assertTrue(host.filesystem.exists(results_directory))
self.assertIsNotNone(reader.archive(patch))
self.assertFalse(host.filesystem.exists(results_directory))
def test_archive_last_layout_test_results_with_relative_path(self):
host = MockHost()
results_directory = "/mock-checkout/layout-test-results"
host.filesystem.maybe_make_directory(results_directory)
host.filesystem.maybe_make_directory('/var/logs')
self.assertTrue(host.filesystem.exists(results_directory))
host.filesystem.chdir('/var')
reader = LayoutTestResultsReader(host, results_directory, 'logs')
patch = host.bugs.fetch_attachment(10001)
        # The results directory was created above, so archiving should succeed.
self.assertIsNotNone(reader.archive(patch))
self.assertEqual(host.workspace.source_path, results_directory)
self.assertEqual(host.workspace.zip_path, '/var/logs/50000-layout-test-results.zip')
| bsd-3-clause |
muthhus/kubernetes | examples/cluster-dns/images/backend/server.py | 468 | 1313 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
PORT_NUMBER = 8000
# This class handles any incoming request.
class HTTPHandler(BaseHTTPRequestHandler):
# Handler for the GET requests
def do_GET(self):
self.send_response(200)
self.send_header('Content-type','text/html')
self.end_headers()
self.wfile.write("Hello World!")
try:
# Create a web server and define the handler to manage the incoming request.
server = HTTPServer(('', PORT_NUMBER), HTTPHandler)
    print 'Started httpserver on port', PORT_NUMBER
server.serve_forever()
except KeyboardInterrupt:
print '^C received, shutting down the web server'
server.socket.close()
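# Illustrative check once the server is running (the URL assumes the default
# PORT_NUMBER above):
#
#     $ curl http://localhost:8000/
#     Hello World!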
| apache-2.0 |
chrisdjscott/Atoman | atoman/plotting/plotDialog.py | 1 | 6491 |
"""
Plot dialog.
@author: Chris Scott
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import sys
import traceback
import logging
from PySide2 import QtGui, QtCore, QtWidgets
import matplotlib
from six.moves import zip
matplotlib.use("Qt5Agg")
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
from matplotlib import rc
from ..visutils.utilities import iconPath
################################################################################
class PlotDialog(QtWidgets.QDialog):
"""
Dialog for displaying a plot.
"""
def __init__(self, parent, mainWindow, dlgTitle, plotType, plotArgs, plotKwargs, settingsDict={}):
super(PlotDialog, self).__init__(parent)
self.parent = parent
self.mainWindow = mainWindow
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
self.setWindowTitle("Plotter - %s" % dlgTitle)
self.setWindowIcon(QtGui.QIcon(iconPath("oxygen/office-chart-bar.png")))
# settings
settings = self.mainWindow.preferences.matplotlibForm
figWidth = settings.figWidth
figHeight = settings.figHeight
figDpi = settings.figDpi
showGrid = settings.showGrid
fontsize = settings.fontsize
tickFontsize = settings.tickFontsize
legendFontsize = settings.legendFontsize
# set dimension of dialog
self.dlgWidth = figWidth * figDpi + 20
self.dlgHeight = figHeight * figDpi + 100
self.resize(self.dlgWidth, self.dlgHeight)
# make size fixed
self.setMinimumSize(self.dlgWidth, self.dlgHeight)
self.setMaximumSize(self.dlgWidth, self.dlgHeight)
# plot widget
self.mainWidget = QtWidgets.QWidget(self)
# setup figure
self.fig = Figure((figWidth, figHeight), dpi=figDpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self.mainWidget)
# axes
self.axes = self.fig.add_subplot(111)
# toolbar
self.mplToolbar = NavigationToolbar(self.canvas, self.mainWidget)
# get plot method
if hasattr(self.axes, plotType):
plotMethod = getattr(self.axes, plotType)
try:
# plot
plotMethod(*plotArgs, **plotKwargs)
# store plot args for later use
self.plotArgs = plotArgs
except Exception as e:
self.mainWindow.displayError("Matplotlib plot failed with following error:\n\n%s" % "".join(traceback.format_exception(*sys.exc_info())))
self.close()
else:
self.mainWindow.displayError("Unrecognised matplotlib plot method:\n\n%s" % plotType)
# show grid
if showGrid:
self.axes.grid(True)
# text size
for tick in self.axes.xaxis.get_major_ticks():
tick.label1.set_fontsize(tickFontsize)
for tick in self.axes.yaxis.get_major_ticks():
tick.label1.set_fontsize(tickFontsize)
# axis labels (if specified!)
if "xlabel" in settingsDict:
self.axes.set_xlabel(settingsDict["xlabel"], fontsize=fontsize)
if "ylabel" in settingsDict:
self.axes.set_ylabel(settingsDict["ylabel"], fontsize=fontsize)
if "title" in settingsDict:
self.axes.set_title(settingsDict["title"], fontsize=fontsize)
# tight layout
self.fig.tight_layout()
# draw canvas
self.canvas.draw()
# write to file button
writeDataButton = QtWidgets.QPushButton("Write csv")
writeDataButton.setAutoDefault(False)
writeDataButton.setDefault(False)
writeDataButton.clicked.connect(self.writeData)
writeDataButton.setToolTip("Write csv file containing plot data")
# close button
closeButton = QtWidgets.QPushButton("Close")
closeButton.clicked.connect(self.accept)
# button box
buttonBox = QtWidgets.QDialogButtonBox()
buttonBox.addButton(writeDataButton, QtWidgets.QDialogButtonBox.ActionRole)
buttonBox.addButton(closeButton, QtWidgets.QDialogButtonBox.AcceptRole)
# layout
vbox = QtWidgets.QVBoxLayout()
vbox.addWidget(self.canvas)
vbox.addWidget(self.mplToolbar)
vbox.addWidget(buttonBox)
self.mainWidget.setLayout(vbox)
def writeData(self):
"""
Write data to csv file
"""
logger = logging.getLogger(__name__)
if hasattr(self, "plotArgs"):
showError = False
plotArgs = list(self.plotArgs)
if len(plotArgs) == 2:
try:
l0 = len(plotArgs[0])
l1 = len(plotArgs[1])
except TypeError:
showError = True
else:
if l0 == l1:
                        filename = QtWidgets.QFileDialog.getSaveFileName(self, 'Save File', '.')[0]
if len(filename):
logger.debug("Writing data to csv file: '%s'", filename)
                            # TODO: use numpy method?
                            with open(filename, "w") as f:
                                for x, y in zip(plotArgs[0], plotArgs[1]):
                                    f.write("%r, %r\n" % (x, y))
else:
showError = True
else:
showError = True
if showError:
self.mainWindow.displayError("Write data not implemented for this type of plot!\n\nFor histograms try selecting 'show as fraction'")
def closeEvent(self, event):
"""
Override close event.
"""
self.done(0)
| mit |
Affix/CouchPotatoServer | libs/rtorrent/lib/xmlrpc/http.py | 180 | 1195 | # Copyright (c) 2013 Chris Lucas, <chris@chrisjlucas.com>
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from rtorrent.compat import xmlrpclib
HTTPServerProxy = xmlrpclib.ServerProxy
| gpl-3.0 |
ewandor/home-assistant | homeassistant/components/device_tracker/ping.py | 10 | 2978 | """
Tracks devices by sending an ICMP echo request (ping).
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.ping/
"""
import logging
import subprocess
import sys
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.device_tracker import (
PLATFORM_SCHEMA, DEFAULT_SCAN_INTERVAL, SOURCE_TYPE_ROUTER)
from homeassistant.helpers.event import track_point_in_utc_time
from homeassistant import util
from homeassistant import const
_LOGGER = logging.getLogger(__name__)
CONF_PING_COUNT = 'count'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(const.CONF_HOSTS): {cv.string: cv.string},
vol.Optional(CONF_PING_COUNT, default=1): cv.positive_int,
})
class Host(object):
"""Host object with ping detection."""
def __init__(self, ip_address, dev_id, hass, config):
"""Initialize the Host pinger."""
self.hass = hass
self.ip_address = ip_address
self.dev_id = dev_id
self._count = config[CONF_PING_COUNT]
if sys.platform == 'win32':
            self._ping_cmd = ['ping', '-n', '1', '-w', '1000', self.ip_address]
else:
self._ping_cmd = ['ping', '-n', '-q', '-c1', '-W1',
self.ip_address]
def ping(self):
"""Send an ICMP echo request and return True if success."""
pinger = subprocess.Popen(self._ping_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
try:
pinger.communicate()
return pinger.returncode == 0
except subprocess.CalledProcessError:
return False
def update(self, see):
"""Update device state by sending one or more ping messages."""
failed = 0
while failed < self._count: # check more times if host is unreachable
if self.ping():
see(dev_id=self.dev_id, source_type=SOURCE_TYPE_ROUTER)
return True
failed += 1
_LOGGER.debug("No response from %s failed=%d", self.ip_address, failed)
def setup_scanner(hass, config, see, discovery_info=None):
"""Set up the Host objects and return the update function."""
hosts = [Host(ip, dev_id, hass, config) for (dev_id, ip) in
config[const.CONF_HOSTS].items()]
interval = timedelta(seconds=len(hosts) * config[CONF_PING_COUNT]) + \
DEFAULT_SCAN_INTERVAL
_LOGGER.info("Started ping tracker with interval=%s on hosts: %s",
interval, ",".join([host.ip_address for host in hosts]))
def update(now):
"""Update all the hosts on every interval time."""
for host in hosts:
host.update(see)
track_point_in_utc_time(hass, update, util.dt.utcnow() + interval)
return True
return update(util.dt.utcnow())
| apache-2.0 |
2014c2g5/Number1 | static/Brython3.1.1-20150328-091302/Lib/_io.py | 629 | 72610 | """
Python implementation of the io module.
"""
import os
import abc
import codecs
import errno
# Import _thread instead of threading to reduce startup cost
try:
from _thread import allocate_lock as Lock
except ImportError:
from _dummy_thread import allocate_lock as Lock
import io
#brython fix me
#from io import (__all__, SEEK_SET, SEEK_CUR, SEEK_END)
SEEK_SET=0
SEEK_CUR=1
SEEK_END=2
valid_seek_flags = {0, 1, 2} # Hardwired values
if hasattr(os, 'SEEK_HOLE'):
valid_seek_flags.add(os.SEEK_HOLE)
valid_seek_flags.add(os.SEEK_DATA)
# open() uses st_blksize whenever we can
DEFAULT_BUFFER_SIZE = 8 * 1024 # bytes
# NOTE: Base classes defined here are registered with the "official" ABCs
# defined in io.py. We don't use real inheritance though, because we don't
# want to inherit the C implementations.
# Rebind for compatibility
BlockingIOError = BlockingIOError
def __open(file, mode="r", buffering=-1, encoding=None, errors=None,
newline=None, closefd=True, opener=None):
r"""Open file and return a stream. Raise IOError upon failure.
file is either a text or byte string giving the name (and the path
if the file isn't in the current working directory) of the file to
be opened or an integer file descriptor of the file to be
wrapped. (If a file descriptor is given, it is closed when the
returned I/O object is closed, unless closefd is set to False.)
mode is an optional string that specifies the mode in which the file is
opened. It defaults to 'r' which means open for reading in text mode. Other
common values are 'w' for writing (truncating the file if it already
exists), 'x' for exclusive creation of a new file, and 'a' for appending
(which on some Unix systems, means that all writes append to the end of the
file regardless of the current seek position). In text mode, if encoding is
not specified the encoding used is platform dependent. (For reading and
writing raw bytes use binary mode and leave encoding unspecified.) The
available modes are:
========= ===============================================================
Character Meaning
--------- ---------------------------------------------------------------
'r' open for reading (default)
'w' open for writing, truncating the file first
'x' create a new file and open it for writing
'a' open for writing, appending to the end of the file if it exists
'b' binary mode
't' text mode (default)
'+' open a disk file for updating (reading and writing)
'U' universal newline mode (for backwards compatibility; unneeded
for new code)
========= ===============================================================
The default mode is 'rt' (open for reading text). For binary random
access, the mode 'w+b' opens and truncates the file to 0 bytes, while
'r+b' opens the file without truncation. The 'x' mode implies 'w' and
raises an `FileExistsError` if the file already exists.
Python distinguishes between files opened in binary and text modes,
even when the underlying operating system doesn't. Files opened in
binary mode (appending 'b' to the mode argument) return contents as
bytes objects without any decoding. In text mode (the default, or when
't' is appended to the mode argument), the contents of the file are
returned as strings, the bytes having been first decoded using a
platform-dependent encoding or using the specified encoding if given.
buffering is an optional integer used to set the buffering policy.
Pass 0 to switch buffering off (only allowed in binary mode), 1 to select
line buffering (only usable in text mode), and an integer > 1 to indicate
the size of a fixed-size chunk buffer. When no buffering argument is
given, the default buffering policy works as follows:
* Binary files are buffered in fixed-size chunks; the size of the buffer
is chosen using a heuristic trying to determine the underlying device's
"block size" and falling back on `io.DEFAULT_BUFFER_SIZE`.
On many systems, the buffer will typically be 4096 or 8192 bytes long.
* "Interactive" text files (files for which isatty() returns True)
use line buffering. Other text files use the policy described above
for binary files.
encoding is the str name of the encoding used to decode or encode the
file. This should only be used in text mode. The default encoding is
platform dependent, but any encoding supported by Python can be
passed. See the codecs module for the list of supported encodings.
errors is an optional string that specifies how encoding errors are to
be handled---this argument should not be used in binary mode. Pass
'strict' to raise a ValueError exception if there is an encoding error
(the default of None has the same effect), or pass 'ignore' to ignore
errors. (Note that ignoring encoding errors can lead to data loss.)
See the documentation for codecs.register for a list of the permitted
encoding error strings.
newline is a string controlling how universal newlines works (it only
applies to text mode). It can be None, '', '\n', '\r', and '\r\n'. It works
as follows:
* On input, if newline is None, universal newlines mode is
enabled. Lines in the input can end in '\n', '\r', or '\r\n', and
these are translated into '\n' before being returned to the
caller. If it is '', universal newline mode is enabled, but line
endings are returned to the caller untranslated. If it has any of
the other legal values, input lines are only terminated by the given
string, and the line ending is returned to the caller untranslated.
* On output, if newline is None, any '\n' characters written are
translated to the system default line separator, os.linesep. If
newline is '', no translation takes place. If newline is any of the
other legal values, any '\n' characters written are translated to
the given string.
    closefd is a bool. If closefd is False, the underlying file descriptor will
be kept open when the file is closed. This does not work when a file name is
given and must be True in that case.
A custom opener can be used by passing a callable as *opener*. The
underlying file descriptor for the file object is then obtained by calling
*opener* with (*file*, *flags*). *opener* must return an open file
descriptor (passing os.open as *opener* results in functionality similar to
passing None).
open() returns a file object whose type depends on the mode, and
through which the standard file operations such as reading and writing
are performed. When open() is used to open a file in a text mode ('w',
'r', 'wt', 'rt', etc.), it returns a TextIOWrapper. When used to open
a file in a binary mode, the returned class varies: in read binary
mode, it returns a BufferedReader; in write binary and append binary
modes, it returns a BufferedWriter, and in read/write mode, it returns
a BufferedRandom.
It is also possible to use a string or bytearray as a file for both
reading and writing. For strings StringIO can be used like a file
opened in a text mode, and for bytes a BytesIO can be used like a file
opened in a binary mode.
"""
if not isinstance(file, (str, bytes, int)):
raise TypeError("invalid file: %r" % file)
if not isinstance(mode, str):
raise TypeError("invalid mode: %r" % mode)
if not isinstance(buffering, int):
raise TypeError("invalid buffering: %r" % buffering)
if encoding is not None and not isinstance(encoding, str):
raise TypeError("invalid encoding: %r" % encoding)
if errors is not None and not isinstance(errors, str):
raise TypeError("invalid errors: %r" % errors)
modes = set(mode)
if modes - set("axrwb+tU") or len(mode) > len(modes):
raise ValueError("invalid mode: %r" % mode)
creating = "x" in modes
reading = "r" in modes
writing = "w" in modes
appending = "a" in modes
updating = "+" in modes
text = "t" in modes
binary = "b" in modes
if "U" in modes:
if creating or writing or appending:
raise ValueError("can't use U and writing mode at once")
reading = True
if text and binary:
raise ValueError("can't have text and binary mode at once")
if creating + reading + writing + appending > 1:
raise ValueError("can't have read/write/append mode at once")
if not (creating or reading or writing or appending):
raise ValueError("must have exactly one of read/write/append mode")
if binary and encoding is not None:
raise ValueError("binary mode doesn't take an encoding argument")
if binary and errors is not None:
raise ValueError("binary mode doesn't take an errors argument")
if binary and newline is not None:
raise ValueError("binary mode doesn't take a newline argument")
raw = FileIO(file,
(creating and "x" or "") +
(reading and "r" or "") +
(writing and "w" or "") +
(appending and "a" or "") +
(updating and "+" or ""),
closefd, opener=opener)
line_buffering = False
if buffering == 1 or buffering < 0 and raw.isatty():
buffering = -1
line_buffering = True
if buffering < 0:
buffering = DEFAULT_BUFFER_SIZE
try:
bs = os.fstat(raw.fileno()).st_blksize
except (os.error, AttributeError):
pass
else:
if bs > 1:
buffering = bs
if buffering < 0:
raise ValueError("invalid buffering size")
if buffering == 0:
if binary:
return raw
raise ValueError("can't have unbuffered text I/O")
if updating:
buffer = BufferedRandom(raw, buffering)
elif creating or writing or appending:
buffer = BufferedWriter(raw, buffering)
elif reading:
buffer = BufferedReader(raw, buffering)
else:
raise ValueError("unknown mode: %r" % mode)
if binary:
return buffer
text = TextIOWrapper(buffer, encoding, errors, newline, line_buffering)
text.mode = mode
return text
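# A short usage sketch of the semantics documented above (illustrative only;
# the file name is an example, and the builtin open() shown here implements
# the same contract as this module's implementation):
#
#     with open("example.txt", "w", encoding="utf-8", newline="\n") as f:
#         f.write("spam\n")          # newline="\n": written untranslated
#     with open("example.txt", "rb") as f:
#         data = f.read()            # binary mode: bytes, no decoding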
class DocDescriptor:
"""Helper for builtins.open.__doc__
"""
def __get__(self, obj, typ):
return (
"open(file, mode='r', buffering=-1, encoding=None, "
"errors=None, newline=None, closefd=True)\n\n" +
open.__doc__)
class OpenWrapper:
"""Wrapper for builtins.open
Trick so that open won't become a bound method when stored
as a class variable (as dbm.dumb does).
See initstdio() in Python/pythonrun.c.
"""
__doc__ = DocDescriptor()
def __new__(cls, *args, **kwargs):
return open(*args, **kwargs)
# In normal operation, both `UnsupportedOperation`s should be bound to the
# same object.
try:
UnsupportedOperation = io.UnsupportedOperation
except AttributeError:
class UnsupportedOperation(ValueError, IOError):
pass
class IOBase(metaclass=abc.ABCMeta):
"""The abstract base class for all I/O classes, acting on streams of
bytes. There is no public constructor.
This class provides dummy implementations for many methods that
derived classes can override selectively; the default implementations
represent a file that cannot be read, written or seeked.
Even though IOBase does not declare read, readinto, or write because
their signatures will vary, implementations and clients should
consider those methods part of the interface. Also, implementations
may raise UnsupportedOperation when operations they do not support are
called.
The basic type used for binary data read from or written to a file is
bytes. bytearrays are accepted too, and in some cases (such as
readinto) needed. Text I/O classes work with str data.
Note that calling any method (even inquiries) on a closed stream is
undefined. Implementations may raise IOError in this case.
IOBase (and its subclasses) support the iterator protocol, meaning
that an IOBase object can be iterated over yielding the lines in a
stream.
IOBase also supports the :keyword:`with` statement. In this example,
fp is closed after the suite of the with statement is complete:
with open('spam.txt', 'r') as fp:
fp.write('Spam and eggs!')
"""
### Internal ###
def _unsupported(self, name):
"""Internal: raise an IOError exception for unsupported operations."""
raise UnsupportedOperation("%s.%s() not supported" %
(self.__class__.__name__, name))
### Positioning ###
def seek(self, pos, whence=0):
"""Change stream position.
Change the stream position to byte offset pos. Argument pos is
interpreted relative to the position indicated by whence. Values
for whence are ints:
* 0 -- start of stream (the default); offset should be zero or positive
* 1 -- current stream position; offset may be negative
* 2 -- end of stream; offset is usually negative
Some operating systems / file systems could provide additional values.
Return an int indicating the new absolute position.
"""
self._unsupported("seek")
def tell(self):
"""Return an int indicating the current stream position."""
return self.seek(0, 1)
def truncate(self, pos=None):
"""Truncate file to size bytes.
Size defaults to the current IO position as reported by tell(). Return
the new size.
"""
self._unsupported("truncate")
### Flush and close ###
def flush(self):
"""Flush write buffers, if applicable.
This is not implemented for read-only and non-blocking streams.
"""
self._checkClosed()
# XXX Should this return the number of bytes written???
__closed = False
def close(self):
"""Flush and close the IO object.
This method has no effect if the file is already closed.
"""
if not self.__closed:
try:
self.flush()
finally:
self.__closed = True
def __del__(self):
"""Destructor. Calls close()."""
# The try/except block is in case this is called at program
# exit time, when it's possible that globals have already been
# deleted, and then the close() call might fail. Since
# there's nothing we can do about such failures and they annoy
# the end users, we suppress the traceback.
try:
self.close()
except:
pass
### Inquiries ###
def seekable(self):
"""Return a bool indicating whether object supports random access.
If False, seek(), tell() and truncate() will raise UnsupportedOperation.
This method may need to do a test seek().
"""
return False
def _checkSeekable(self, msg=None):
"""Internal: raise UnsupportedOperation if file is not seekable
"""
if not self.seekable():
raise UnsupportedOperation("File or stream is not seekable."
if msg is None else msg)
def readable(self):
"""Return a bool indicating whether object was opened for reading.
If False, read() will raise UnsupportedOperation.
"""
return False
def _checkReadable(self, msg=None):
"""Internal: raise UnsupportedOperation if file is not readable
"""
if not self.readable():
raise UnsupportedOperation("File or stream is not readable."
if msg is None else msg)
def writable(self):
"""Return a bool indicating whether object was opened for writing.
If False, write() and truncate() will raise UnsupportedOperation.
"""
return False
def _checkWritable(self, msg=None):
"""Internal: raise UnsupportedOperation if file is not writable
"""
if not self.writable():
raise UnsupportedOperation("File or stream is not writable."
if msg is None else msg)
@property
def closed(self):
"""closed: bool. True iff the file has been closed.
For backwards compatibility, this is a property, not a predicate.
"""
return self.__closed
def _checkClosed(self, msg=None):
"""Internal: raise an ValueError if file is closed
"""
if self.closed:
raise ValueError("I/O operation on closed file."
if msg is None else msg)
### Context manager ###
def __enter__(self): # That's a forward reference
"""Context management protocol. Returns self (an instance of IOBase)."""
self._checkClosed()
return self
def __exit__(self, *args):
"""Context management protocol. Calls close()"""
self.close()
### Lower-level APIs ###
# XXX Should these be present even if unimplemented?
def fileno(self):
"""Returns underlying file descriptor (an int) if one exists.
An IOError is raised if the IO object does not use a file descriptor.
"""
self._unsupported("fileno")
def isatty(self):
"""Return a bool indicating whether this is an 'interactive' stream.
Return False if it can't be determined.
"""
self._checkClosed()
return False
### Readline[s] and writelines ###
def readline(self, limit=-1):
r"""Read and return a line of bytes from the stream.
If limit is specified, at most limit bytes will be read.
Limit should be an int.
The line terminator is always b'\n' for binary files; for text
files, the newlines argument to open can be used to select the line
terminator(s) recognized.
"""
# For backwards compatibility, a (slowish) readline().
if hasattr(self, "peek"):
def nreadahead():
readahead = self.peek(1)
if not readahead:
return 1
n = (readahead.find(b"\n") + 1) or len(readahead)
if limit >= 0:
n = min(n, limit)
return n
else:
def nreadahead():
return 1
if limit is None:
limit = -1
elif not isinstance(limit, int):
raise TypeError("limit must be an integer")
res = bytearray()
while limit < 0 or len(res) < limit:
b = self.read(nreadahead())
if not b:
break
res += b
if res.endswith(b"\n"):
break
return bytes(res)
def __iter__(self):
self._checkClosed()
return self
def __next__(self):
line = self.readline()
if not line:
raise StopIteration
return line
def readlines(self, hint=None):
"""Return a list of lines from the stream.
hint can be specified to control the number of lines read: no more
lines will be read if the total size (in bytes/characters) of all
lines so far exceeds hint.
"""
if hint is None or hint <= 0:
return list(self)
n = 0
lines = []
for line in self:
lines.append(line)
n += len(line)
if n >= hint:
break
return lines
def writelines(self, lines):
self._checkClosed()
for line in lines:
self.write(line)
#fix me brython
#io.IOBase.register(IOBase)
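# The IOBase contract in practice (illustrative comment only; process() is a
# hypothetical callback): every stream in this hierarchy supports iteration
# and the with statement, both implemented above.
#
#     with open("spam.txt", "rb") as fp:    # fp is closed when the suite ends
#         for line in fp:                   # __iter__/__next__ call readline()
#             process(line)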
class RawIOBase(IOBase):
"""Base class for raw binary I/O."""
# The read() method is implemented by calling readinto(); derived
# classes that want to support read() only need to implement
# readinto() as a primitive operation. In general, readinto() can be
# more efficient than read().
# (It would be tempting to also provide an implementation of
# readinto() in terms of read(), in case the latter is a more suitable
# primitive operation, but that would lead to nasty recursion in case
# a subclass doesn't implement either.)
def read(self, n=-1):
"""Read and return up to n bytes, where n is an int.
Returns an empty bytes object on EOF, or None if the object is
set not to block and has no data to read.
"""
if n is None:
n = -1
if n < 0:
return self.readall()
b = bytearray(n.__index__())
n = self.readinto(b)
if n is None:
return None
del b[n:]
return bytes(b)
def readall(self):
"""Read until EOF, using multiple read() call."""
res = bytearray()
while True:
data = self.read(DEFAULT_BUFFER_SIZE)
if not data:
break
res += data
if res:
return bytes(res)
else:
# b'' or None
return data
def readinto(self, b):
"""Read up to len(b) bytes into bytearray b.
Returns an int representing the number of bytes read (0 for EOF), or
None if the object is set not to block and has no data to read.
"""
self._unsupported("readinto")
def write(self, b):
"""Write the given buffer to the IO stream.
Returns the number of bytes written, which may be less than len(b).
"""
self._unsupported("write")
#io.RawIOBase.register(RawIOBase)
#fix me brython
#from _io import FileIO
#RawIOBase.register(FileIO)
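# To illustrate the note above that derived classes only need readinto() as a
# primitive: a minimal raw stream over a fixed payload (an example class, not
# part of the module). read() and readall() then work via RawIOBase.
class _FixedRawIO(RawIOBase):
    """Illustrative only: serves a fixed bytes payload through readinto()."""
    def __init__(self, payload=b"hello"):
        self._payload = payload
        self._pos = 0
    def readable(self):
        return True
    def readinto(self, b):
        chunk = self._payload[self._pos:self._pos + len(b)]
        b[:len(chunk)] = chunk
        self._pos += len(chunk)
        return len(chunk)  # 0 at EOF, so read()/readall() terminate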
class BufferedIOBase(IOBase):
"""Base class for buffered IO objects.
The main difference with RawIOBase is that the read() method
supports omitting the size argument, and does not have a default
implementation that defers to readinto().
In addition, read(), readinto() and write() may raise
BlockingIOError if the underlying raw stream is in non-blocking
mode and not ready; unlike their raw counterparts, they will never
return None.
A typical implementation should not inherit from a RawIOBase
implementation, but wrap one.
"""
def read(self, n=None):
"""Read and return up to n bytes, where n is an int.
If the argument is omitted, None, or negative, reads and
returns all data until EOF.
If the argument is positive, and the underlying raw stream is
not 'interactive', multiple raw reads may be issued to satisfy
the byte count (unless EOF is reached first). But for
interactive raw streams (XXX and for pipes?), at most one raw
read will be issued, and a short result does not imply that
EOF is imminent.
Returns an empty bytes array on EOF.
Raises BlockingIOError if the underlying raw stream has no
data at the moment.
"""
self._unsupported("read")
def read1(self, n=None):
"""Read up to n bytes with at most one read() system call,
where n is an int.
"""
self._unsupported("read1")
def readinto(self, b):
"""Read up to len(b) bytes into bytearray b.
Like read(), this may issue multiple reads to the underlying raw
stream, unless the latter is 'interactive'.
Returns an int representing the number of bytes read (0 for EOF).
Raises BlockingIOError if the underlying raw stream has no
data at the moment.
"""
# XXX This ought to work with anything that supports the buffer API
data = self.read(len(b))
n = len(data)
try:
b[:n] = data
except TypeError as err:
import array
if not isinstance(b, array.array):
raise err
b[:n] = array.array('b', data)
return n
def write(self, b):
"""Write the given bytes buffer to the IO stream.
Return the number of bytes written, which is never less than
len(b).
Raises BlockingIOError if the buffer is full and the
underlying raw stream cannot accept more data at the moment.
"""
self._unsupported("write")
def detach(self):
"""
Separate the underlying raw stream from the buffer and return it.
After the raw stream has been detached, the buffer is in an unusable
state.
"""
self._unsupported("detach")
#fix me brython
#io.BufferedIOBase.register(BufferedIOBase)
class _BufferedIOMixin(BufferedIOBase):
"""A mixin implementation of BufferedIOBase with an underlying raw stream.
This passes most requests on to the underlying raw stream. It
does *not* provide implementations of read(), readinto() or
write().
"""
def __init__(self, raw):
self._raw = raw
### Positioning ###
def seek(self, pos, whence=0):
new_position = self.raw.seek(pos, whence)
if new_position < 0:
raise IOError("seek() returned an invalid position")
return new_position
def tell(self):
pos = self.raw.tell()
if pos < 0:
raise IOError("tell() returned an invalid position")
return pos
def truncate(self, pos=None):
# Flush the stream. We're mixing buffered I/O with lower-level I/O,
# and a flush may be necessary to synch both views of the current
# file state.
self.flush()
if pos is None:
pos = self.tell()
# XXX: Should seek() be used, instead of passing the position
# XXX directly to truncate?
return self.raw.truncate(pos)
### Flush and close ###
def flush(self):
if self.closed:
raise ValueError("flush of closed file")
self.raw.flush()
def close(self):
if self.raw is not None and not self.closed:
try:
# may raise BlockingIOError or BrokenPipeError etc
self.flush()
finally:
self.raw.close()
def detach(self):
if self.raw is None:
raise ValueError("raw stream already detached")
self.flush()
raw = self._raw
self._raw = None
return raw
### Inquiries ###
def seekable(self):
return self.raw.seekable()
def readable(self):
return self.raw.readable()
def writable(self):
return self.raw.writable()
@property
def raw(self):
return self._raw
@property
def closed(self):
return self.raw.closed
@property
def name(self):
return self.raw.name
@property
def mode(self):
return self.raw.mode
def __getstate__(self):
raise TypeError("can not serialize a '{0}' object"
.format(self.__class__.__name__))
def __repr__(self):
clsname = self.__class__.__name__
try:
name = self.name
except AttributeError:
return "<_io.{0}>".format(clsname)
else:
return "<_io.{0} name={1!r}>".format(clsname, name)
### Lower-level APIs ###
def fileno(self):
return self.raw.fileno()
def isatty(self):
return self.raw.isatty()
class BytesIO(BufferedIOBase):
"""Buffered I/O implementation using an in-memory bytes buffer."""
def __init__(self, initial_bytes=None):
buf = bytearray()
if initial_bytes is not None:
buf += initial_bytes
self._buffer = buf
self._pos = 0
def __getstate__(self):
if self.closed:
raise ValueError("__getstate__ on closed file")
return self.__dict__.copy()
def getvalue(self):
"""Return the bytes value (contents) of the buffer
"""
if self.closed:
raise ValueError("getvalue on closed file")
return bytes(self._buffer)
def getbuffer(self):
"""Return a readable and writable view of the buffer.
"""
return memoryview(self._buffer)
def read(self, n=None):
if self.closed:
raise ValueError("read from closed file")
if n is None:
n = -1
if n < 0:
n = len(self._buffer)
if len(self._buffer) <= self._pos:
return b""
newpos = min(len(self._buffer), self._pos + n)
b = self._buffer[self._pos : newpos]
self._pos = newpos
return bytes(b)
def read1(self, n):
"""This is the same as read.
"""
return self.read(n)
def write(self, b):
if self.closed:
raise ValueError("write to closed file")
if isinstance(b, str):
raise TypeError("can't write str to binary stream")
n = len(b)
if n == 0:
return 0
pos = self._pos
if pos > len(self._buffer):
# Inserts null bytes between the current end of the file
# and the new write position.
padding = b'\x00' * (pos - len(self._buffer))
self._buffer += padding
self._buffer[pos:pos + n] = b
self._pos += n
return n
def seek(self, pos, whence=0):
if self.closed:
raise ValueError("seek on closed file")
try:
pos.__index__
except AttributeError as err:
raise TypeError("an integer is required") from err
if whence == 0:
if pos < 0:
raise ValueError("negative seek position %r" % (pos,))
self._pos = pos
elif whence == 1:
self._pos = max(0, self._pos + pos)
elif whence == 2:
self._pos = max(0, len(self._buffer) + pos)
else:
raise ValueError("unsupported whence value")
return self._pos
def tell(self):
if self.closed:
raise ValueError("tell on closed file")
return self._pos
def truncate(self, pos=None):
if self.closed:
raise ValueError("truncate on closed file")
if pos is None:
pos = self._pos
else:
try:
pos.__index__
except AttributeError as err:
raise TypeError("an integer is required") from err
if pos < 0:
raise ValueError("negative truncate position %r" % (pos,))
del self._buffer[pos:]
return pos
def readable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return True
def writable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return True
def seekable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return True
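# A short usage sketch of the in-memory buffer above (illustrative only):
#
#     b = BytesIO(b"abc")
#     b.seek(0, 2)                   # whence=2: seek relative to the end
#     b.write(b"def")
#     assert b.getvalue() == b"abcdef"
#     b.seek(2)
#     assert b.read(2) == b"cd"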
class BufferedReader(_BufferedIOMixin):
"""BufferedReader(raw[, buffer_size])
A buffer for a readable, sequential BaseRawIO object.
The constructor creates a BufferedReader for the given readable raw
stream and buffer_size. If buffer_size is omitted, DEFAULT_BUFFER_SIZE
is used.
"""
def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
"""Create a new buffered reader using the given readable raw IO object.
"""
if not raw.readable():
raise IOError('"raw" argument must be readable.')
_BufferedIOMixin.__init__(self, raw)
if buffer_size <= 0:
raise ValueError("invalid buffer size")
self.buffer_size = buffer_size
self._reset_read_buf()
self._read_lock = Lock()
def _reset_read_buf(self):
self._read_buf = b""
self._read_pos = 0
def read(self, n=None):
"""Read n bytes.
Returns exactly n bytes of data unless the underlying raw IO
stream reaches EOF or if the call would block in non-blocking
mode. If n is negative, read until EOF or until read() would
block.
"""
if n is not None and n < -1:
raise ValueError("invalid number of bytes to read")
with self._read_lock:
return self._read_unlocked(n)
def _read_unlocked(self, n=None):
nodata_val = b""
empty_values = (b"", None)
buf = self._read_buf
pos = self._read_pos
# Special case for when the number of bytes to read is unspecified.
if n is None or n == -1:
self._reset_read_buf()
if hasattr(self.raw, 'readall'):
chunk = self.raw.readall()
if chunk is None:
return buf[pos:] or None
else:
return buf[pos:] + chunk
chunks = [buf[pos:]] # Strip the consumed bytes.
current_size = 0
while True:
# Read until EOF or until read() would block.
try:
chunk = self.raw.read()
except InterruptedError:
continue
if chunk in empty_values:
nodata_val = chunk
break
current_size += len(chunk)
chunks.append(chunk)
return b"".join(chunks) or nodata_val
# The number of bytes to read is specified, return at most n bytes.
avail = len(buf) - pos # Length of the available buffered data.
if n <= avail:
# Fast path: the data to read is fully buffered.
self._read_pos += n
return buf[pos:pos+n]
# Slow path: read from the stream until enough bytes are read,
# or until an EOF occurs or until read() would block.
chunks = [buf[pos:]]
wanted = max(self.buffer_size, n)
while avail < n:
try:
chunk = self.raw.read(wanted)
except InterruptedError:
continue
if chunk in empty_values:
nodata_val = chunk
break
avail += len(chunk)
chunks.append(chunk)
        # n is more than avail only when an EOF occurred or when
# read() would have blocked.
n = min(n, avail)
out = b"".join(chunks)
self._read_buf = out[n:] # Save the extra data in the buffer.
self._read_pos = 0
return out[:n] if out else nodata_val
def peek(self, n=0):
"""Returns buffered bytes without advancing the position.
The argument indicates a desired minimal number of bytes; we
do at most one raw read to satisfy it. We never return more
than self.buffer_size.
"""
with self._read_lock:
return self._peek_unlocked(n)
def _peek_unlocked(self, n=0):
want = min(n, self.buffer_size)
have = len(self._read_buf) - self._read_pos
if have < want or have <= 0:
to_read = self.buffer_size - have
while True:
try:
current = self.raw.read(to_read)
except InterruptedError:
continue
break
if current:
self._read_buf = self._read_buf[self._read_pos:] + current
self._read_pos = 0
return self._read_buf[self._read_pos:]
def read1(self, n):
"""Reads up to n bytes, with at most one read() system call."""
# Returns up to n bytes. If at least one byte is buffered, we
# only return buffered bytes. Otherwise, we do one raw read.
if n < 0:
raise ValueError("number of bytes to read must be positive")
if n == 0:
return b""
with self._read_lock:
self._peek_unlocked(1)
return self._read_unlocked(
min(n, len(self._read_buf) - self._read_pos))
def tell(self):
return _BufferedIOMixin.tell(self) - len(self._read_buf) + self._read_pos
def seek(self, pos, whence=0):
if whence not in valid_seek_flags:
raise ValueError("invalid whence value")
with self._read_lock:
if whence == 1:
pos -= len(self._read_buf) - self._read_pos
pos = _BufferedIOMixin.seek(self, pos, whence)
self._reset_read_buf()
return pos
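# Behaviour sketch for peek()/read() above (illustrative; _FixedRawIO is the
# example raw stream defined earlier in this file): peek() fills the buffer
# without advancing the read position.
#
#     reader = BufferedReader(_FixedRawIO(b"hello"))
#     assert reader.peek(1).startswith(b"h")   # position unchanged
#     assert reader.read(2) == b"he"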
class BufferedWriter(_BufferedIOMixin):
"""A buffer for a writeable sequential RawIO object.
The constructor creates a BufferedWriter for the given writeable raw
stream. If the buffer_size is not given, it defaults to
DEFAULT_BUFFER_SIZE.
"""
def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
if not raw.writable():
raise IOError('"raw" argument must be writable.')
_BufferedIOMixin.__init__(self, raw)
if buffer_size <= 0:
raise ValueError("invalid buffer size")
self.buffer_size = buffer_size
self._write_buf = bytearray()
self._write_lock = Lock()
def write(self, b):
if self.closed:
raise ValueError("write to closed file")
if isinstance(b, str):
raise TypeError("can't write str to binary stream")
with self._write_lock:
# XXX we can implement some more tricks to try and avoid
# partial writes
if len(self._write_buf) > self.buffer_size:
# We're full, so let's pre-flush the buffer. (This may
# raise BlockingIOError with characters_written == 0.)
self._flush_unlocked()
before = len(self._write_buf)
self._write_buf.extend(b)
written = len(self._write_buf) - before
if len(self._write_buf) > self.buffer_size:
try:
self._flush_unlocked()
except BlockingIOError as e:
if len(self._write_buf) > self.buffer_size:
# We've hit the buffer_size. We have to accept a partial
# write and cut back our buffer.
overage = len(self._write_buf) - self.buffer_size
written -= overage
self._write_buf = self._write_buf[:self.buffer_size]
raise BlockingIOError(e.errno, e.strerror, written)
return written
def truncate(self, pos=None):
with self._write_lock:
self._flush_unlocked()
if pos is None:
pos = self.raw.tell()
return self.raw.truncate(pos)
def flush(self):
with self._write_lock:
self._flush_unlocked()
def _flush_unlocked(self):
if self.closed:
raise ValueError("flush of closed file")
while self._write_buf:
try:
n = self.raw.write(self._write_buf)
except InterruptedError:
continue
except BlockingIOError:
raise RuntimeError("self.raw should implement RawIOBase: it "
"should not raise BlockingIOError")
if n is None:
raise BlockingIOError(
errno.EAGAIN,
"write could not complete without blocking", 0)
if n > len(self._write_buf) or n < 0:
raise IOError("write() returned incorrect number of bytes")
del self._write_buf[:n]
def tell(self):
return _BufferedIOMixin.tell(self) + len(self._write_buf)
def seek(self, pos, whence=0):
if whence not in valid_seek_flags:
raise ValueError("invalid whence value")
with self._write_lock:
self._flush_unlocked()
return _BufferedIOMixin.seek(self, pos, whence)
class BufferedRWPair(BufferedIOBase):
"""A buffered reader and writer object together.
A buffered reader object and buffered writer object put together to
form a sequential IO object that can read and write. This is typically
used with a socket or two-way pipe.
reader and writer are RawIOBase objects that are readable and
writeable respectively. If the buffer_size is omitted it defaults to
DEFAULT_BUFFER_SIZE.
"""
# XXX The usefulness of this (compared to having two separate IO
# objects) is questionable.
def __init__(self, reader, writer, buffer_size=DEFAULT_BUFFER_SIZE):
"""Constructor.
The arguments are two RawIO instances.
"""
if not reader.readable():
raise IOError('"reader" argument must be readable.')
if not writer.writable():
raise IOError('"writer" argument must be writable.')
self.reader = BufferedReader(reader, buffer_size)
self.writer = BufferedWriter(writer, buffer_size)
def read(self, n=None):
if n is None:
n = -1
return self.reader.read(n)
def readinto(self, b):
return self.reader.readinto(b)
def write(self, b):
return self.writer.write(b)
def peek(self, n=0):
return self.reader.peek(n)
def read1(self, n):
return self.reader.read1(n)
def readable(self):
return self.reader.readable()
def writable(self):
return self.writer.writable()
def flush(self):
return self.writer.flush()
def close(self):
self.writer.close()
self.reader.close()
def isatty(self):
return self.reader.isatty() or self.writer.isatty()
@property
def closed(self):
return self.writer.closed
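# Usage sketch for BufferedRWPair above (illustrative only): any readable raw
# object plus any writable raw object can be paired; BytesIO stands in for a
# socket or pipe here since it is both readable and writable.
#
#     pair = BufferedRWPair(BytesIO(b"request"), BytesIO())
#     data = pair.read(7)        # b"request", read from the first stream
#     pair.write(b"response")    # buffered; reaches the second on flush()
#     pair.flush()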
class BufferedRandom(BufferedWriter, BufferedReader):
"""A buffered interface to random access streams.
The constructor creates a reader and writer for a seekable stream,
raw, given in the first argument. If the buffer_size is omitted it
defaults to DEFAULT_BUFFER_SIZE.
"""
def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
raw._checkSeekable()
BufferedReader.__init__(self, raw, buffer_size)
BufferedWriter.__init__(self, raw, buffer_size)
def seek(self, pos, whence=0):
if whence not in valid_seek_flags:
raise ValueError("invalid whence value")
self.flush()
if self._read_buf:
# Undo read ahead.
with self._read_lock:
self.raw.seek(self._read_pos - len(self._read_buf), 1)
# First do the raw seek, then empty the read buffer, so that
# if the raw seek fails, we don't lose buffered data forever.
pos = self.raw.seek(pos, whence)
with self._read_lock:
self._reset_read_buf()
if pos < 0:
raise IOError("seek() returned invalid position")
return pos
def tell(self):
if self._write_buf:
return BufferedWriter.tell(self)
else:
return BufferedReader.tell(self)
def truncate(self, pos=None):
if pos is None:
pos = self.tell()
# Use seek to flush the read buffer.
return BufferedWriter.truncate(self, pos)
def read(self, n=None):
if n is None:
n = -1
self.flush()
return BufferedReader.read(self, n)
def readinto(self, b):
self.flush()
return BufferedReader.readinto(self, b)
def peek(self, n=0):
self.flush()
return BufferedReader.peek(self, n)
def read1(self, n):
self.flush()
return BufferedReader.read1(self, n)
def write(self, b):
if self._read_buf:
# Undo readahead
with self._read_lock:
self.raw.seek(self._read_pos - len(self._read_buf), 1)
self._reset_read_buf()
return BufferedWriter.write(self, b)
class TextIOBase(IOBase):
"""Base class for text I/O.
This class provides a character and line based interface to stream
I/O. There is no readinto method because Python's character strings
are immutable. There is no public constructor.
"""
def read(self, n=-1):
"""Read at most n characters from stream, where n is an int.
Read from underlying buffer until we have n characters or we hit EOF.
If n is negative or omitted, read until EOF.
Returns a string.
"""
self._unsupported("read")
def write(self, s):
"""Write string s to stream and returning an int."""
self._unsupported("write")
def truncate(self, pos=None):
"""Truncate size to pos, where pos is an int."""
self._unsupported("truncate")
def readline(self):
"""Read until newline or EOF.
Returns an empty string if EOF is hit immediately.
"""
self._unsupported("readline")
def detach(self):
"""
Separate the underlying buffer from the TextIOBase and return it.
After the underlying buffer has been detached, the TextIO is in an
unusable state.
"""
self._unsupported("detach")
@property
def encoding(self):
"""Subclasses should override."""
return None
@property
def newlines(self):
"""Line endings translated so far.
Only line endings translated during reading are considered.
Subclasses should override.
"""
return None
@property
def errors(self):
"""Error setting of the decoder or encoder.
Subclasses should override."""
return None
#fix me brython
#io.TextIOBase.register(TextIOBase)
class IncrementalNewlineDecoder(codecs.IncrementalDecoder):
r"""Codec used when reading a file in universal newlines mode. It wraps
another incremental decoder, translating \r\n and \r into \n. It also
records the types of newlines encountered. When used with
translate=False, it ensures that the newline sequence is returned in
one piece.
"""
def __init__(self, decoder, translate, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors=errors)
self.translate = translate
self.decoder = decoder
self.seennl = 0
self.pendingcr = False
def decode(self, input, final=False):
# decode input (with the eventual \r from a previous pass)
if self.decoder is None:
output = input
else:
output = self.decoder.decode(input, final=final)
if self.pendingcr and (output or final):
output = "\r" + output
self.pendingcr = False
# retain last \r even when not translating data:
# then readline() is sure to get \r\n in one pass
if output.endswith("\r") and not final:
output = output[:-1]
self.pendingcr = True
# Record which newlines are read
crlf = output.count('\r\n')
cr = output.count('\r') - crlf
lf = output.count('\n') - crlf
self.seennl |= (lf and self._LF) | (cr and self._CR) \
| (crlf and self._CRLF)
if self.translate:
if crlf:
output = output.replace("\r\n", "\n")
if cr:
output = output.replace("\r", "\n")
return output
def getstate(self):
if self.decoder is None:
buf = b""
flag = 0
else:
buf, flag = self.decoder.getstate()
flag <<= 1
if self.pendingcr:
flag |= 1
return buf, flag
def setstate(self, state):
buf, flag = state
self.pendingcr = bool(flag & 1)
if self.decoder is not None:
self.decoder.setstate((buf, flag >> 1))
def reset(self):
self.seennl = 0
self.pendingcr = False
if self.decoder is not None:
self.decoder.reset()
_LF = 1
_CR = 2
_CRLF = 4
@property
def newlines(self):
return (None,
"\n",
"\r",
("\r", "\n"),
"\r\n",
("\n", "\r\n"),
("\r", "\r\n"),
("\r", "\n", "\r\n")
)[self.seennl]
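# Decoding sketch for the class above (illustrative only): with
# translate=True a '\r\n' pair split across two decode() calls is still
# collapsed to '\n', thanks to the pendingcr handling.
#
#     dec = IncrementalNewlineDecoder(decoder=None, translate=True)
#     out = dec.decode("one\r") + dec.decode("\ntwo\r", final=True)
#     assert out == "one\ntwo\n"
#     assert dec.newlines == ("\r", "\r\n")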
class TextIOWrapper(TextIOBase):
r"""Character and line based layer over a BufferedIOBase object, buffer.
encoding gives the name of the encoding that the stream will be
decoded or encoded with. It defaults to locale.getpreferredencoding(False).
errors determines the strictness of encoding and decoding (see the
codecs.register) and defaults to "strict".
newline can be None, '', '\n', '\r', or '\r\n'. It controls the
handling of line endings. If it is None, universal newlines is
enabled. With this enabled, on input, the lines endings '\n', '\r',
or '\r\n' are translated to '\n' before being returned to the
caller. Conversely, on output, '\n' is translated to the system
default line separator, os.linesep. If newline is any other of its
legal values, that newline becomes the newline when the file is read
and it is returned untranslated. On output, '\n' is converted to the
newline.
If line_buffering is True, a call to flush is implied when a call to
write contains a newline character.
"""
_CHUNK_SIZE = 2048
# The write_through argument has no effect here since this
# implementation always writes through. The argument is present only
# so that the signature can match the signature of the C version.
def __init__(self, buffer, encoding=None, errors=None, newline=None,
line_buffering=False, write_through=False):
if newline is not None and not isinstance(newline, str):
raise TypeError("illegal newline type: %r" % (type(newline),))
if newline not in (None, "", "\n", "\r", "\r\n"):
raise ValueError("illegal newline value: %r" % (newline,))
if encoding is None:
try:
encoding = os.device_encoding(buffer.fileno())
except (AttributeError, UnsupportedOperation):
pass
if encoding is None:
try:
import locale
except ImportError:
# Importing locale may fail if Python is being built
encoding = "ascii"
else:
encoding = locale.getpreferredencoding(False)
if not isinstance(encoding, str):
raise ValueError("invalid encoding: %r" % encoding)
if errors is None:
errors = "strict"
else:
if not isinstance(errors, str):
raise ValueError("invalid errors: %r" % errors)
self._buffer = buffer
self._line_buffering = line_buffering
self._encoding = encoding
self._errors = errors
self._readuniversal = not newline
self._readtranslate = newline is None
self._readnl = newline
self._writetranslate = newline != ''
self._writenl = newline or os.linesep
self._encoder = None
self._decoder = None
self._decoded_chars = '' # buffer for text returned from decoder
self._decoded_chars_used = 0 # offset into _decoded_chars for read()
self._snapshot = None # info for reconstructing decoder state
self._seekable = self._telling = self.buffer.seekable()
self._has_read1 = hasattr(self.buffer, 'read1')
self._b2cratio = 0.0
if self._seekable and self.writable():
position = self.buffer.tell()
if position != 0:
try:
self._get_encoder().setstate(0)
except LookupError:
# Sometimes the encoder doesn't exist
pass
# self._snapshot is either None, or a tuple (dec_flags, next_input)
# where dec_flags is the second (integer) item of the decoder state
# and next_input is the chunk of input bytes that comes next after the
# snapshot point. We use this to reconstruct decoder states in tell().
# Naming convention:
# - "bytes_..." for integer variables that count input bytes
# - "chars_..." for integer variables that count decoded characters
def __repr__(self):
result = "<_io.TextIOWrapper"
try:
name = self.name
except AttributeError:
pass
else:
result += " name={0!r}".format(name)
try:
mode = self.mode
except AttributeError:
pass
else:
result += " mode={0!r}".format(mode)
return result + " encoding={0!r}>".format(self.encoding)
@property
def encoding(self):
return self._encoding
@property
def errors(self):
return self._errors
@property
def line_buffering(self):
return self._line_buffering
@property
def buffer(self):
return self._buffer
def seekable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return self._seekable
def readable(self):
return self.buffer.readable()
def writable(self):
return self.buffer.writable()
def flush(self):
self.buffer.flush()
self._telling = self._seekable
def close(self):
if self.buffer is not None and not self.closed:
try:
self.flush()
finally:
self.buffer.close()
@property
def closed(self):
return self.buffer.closed
@property
def name(self):
return self.buffer.name
def fileno(self):
return self.buffer.fileno()
def isatty(self):
return self.buffer.isatty()
def write(self, s):
'Write data, where s is a str'
if self.closed:
raise ValueError("write to closed file")
if not isinstance(s, str):
raise TypeError("can't write %s to text stream" %
s.__class__.__name__)
length = len(s)
haslf = (self._writetranslate or self._line_buffering) and "\n" in s
if haslf and self._writetranslate and self._writenl != "\n":
s = s.replace("\n", self._writenl)
encoder = self._encoder or self._get_encoder()
# XXX What if we were just reading?
b = encoder.encode(s)
self.buffer.write(b)
if self._line_buffering and (haslf or "\r" in s):
self.flush()
self._snapshot = None
if self._decoder:
self._decoder.reset()
return length
def _get_encoder(self):
make_encoder = codecs.getincrementalencoder(self._encoding)
self._encoder = make_encoder(self._errors)
return self._encoder
def _get_decoder(self):
make_decoder = codecs.getincrementaldecoder(self._encoding)
decoder = make_decoder(self._errors)
if self._readuniversal:
decoder = IncrementalNewlineDecoder(decoder, self._readtranslate)
self._decoder = decoder
return decoder
# The following three methods implement an ADT for _decoded_chars.
# Text returned from the decoder is buffered here until the client
# requests it by calling our read() or readline() method.
def _set_decoded_chars(self, chars):
"""Set the _decoded_chars buffer."""
self._decoded_chars = chars
self._decoded_chars_used = 0
def _get_decoded_chars(self, n=None):
"""Advance into the _decoded_chars buffer."""
offset = self._decoded_chars_used
if n is None:
chars = self._decoded_chars[offset:]
else:
chars = self._decoded_chars[offset:offset + n]
self._decoded_chars_used += len(chars)
return chars
def _rewind_decoded_chars(self, n):
"""Rewind the _decoded_chars buffer."""
if self._decoded_chars_used < n:
raise AssertionError("rewind decoded_chars out of bounds")
self._decoded_chars_used -= n
def _read_chunk(self):
"""
Read and decode the next chunk of data from the BufferedReader.
"""
# The return value is True unless EOF was reached. The decoded
# string is placed in self._decoded_chars (replacing its previous
# value). The entire input chunk is sent to the decoder, though
# some of it may remain buffered in the decoder, yet to be
# converted.
if self._decoder is None:
raise ValueError("no decoder")
if self._telling:
# To prepare for tell(), we need to snapshot a point in the
# file where the decoder's input buffer is empty.
dec_buffer, dec_flags = self._decoder.getstate()
# Given this, we know there was a valid snapshot point
# len(dec_buffer) bytes ago with decoder state (b'', dec_flags).
# Read a chunk, decode it, and put the result in self._decoded_chars.
if self._has_read1:
input_chunk = self.buffer.read1(self._CHUNK_SIZE)
else:
input_chunk = self.buffer.read(self._CHUNK_SIZE)
eof = not input_chunk
decoded_chars = self._decoder.decode(input_chunk, eof)
self._set_decoded_chars(decoded_chars)
if decoded_chars:
self._b2cratio = len(input_chunk) / len(self._decoded_chars)
else:
self._b2cratio = 0.0
if self._telling:
# At the snapshot point, len(dec_buffer) bytes before the read,
# the next input to be decoded is dec_buffer + input_chunk.
self._snapshot = (dec_flags, dec_buffer + input_chunk)
return not eof
def _pack_cookie(self, position, dec_flags=0,
bytes_to_feed=0, need_eof=0, chars_to_skip=0):
# The meaning of a tell() cookie is: seek to position, set the
# decoder flags to dec_flags, read bytes_to_feed bytes, feed them
# into the decoder with need_eof as the EOF flag, then skip
# chars_to_skip characters of the decoded result. For most simple
# decoders, tell() will often just give a byte offset in the file.
return (position | (dec_flags<<64) | (bytes_to_feed<<128) |
(chars_to_skip<<192) | bool(need_eof)<<256)
def _unpack_cookie(self, bigint):
rest, position = divmod(bigint, 1<<64)
rest, dec_flags = divmod(rest, 1<<64)
rest, bytes_to_feed = divmod(rest, 1<<64)
need_eof, chars_to_skip = divmod(rest, 1<<64)
return position, dec_flags, bytes_to_feed, need_eof, chars_to_skip
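# Worked example of the cookie layout above: each field occupies its own
# 64-bit slot, so e.g.
#     _pack_cookie(10, dec_flags=3, chars_to_skip=1)
#         == 10 | (3 << 64) | (1 << 192)
# and _unpack_cookie() on that value recovers (10, 3, 0, 0, 1).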
def tell(self):
if not self._seekable:
raise UnsupportedOperation("underlying stream is not seekable")
if not self._telling:
raise IOError("telling position disabled by next() call")
self.flush()
position = self.buffer.tell()
decoder = self._decoder
if decoder is None or self._snapshot is None:
if self._decoded_chars:
# This should never happen.
raise AssertionError("pending decoded text")
return position
# Skip backward to the snapshot point (see _read_chunk).
dec_flags, next_input = self._snapshot
position -= len(next_input)
# How many decoded characters have been used up since the snapshot?
chars_to_skip = self._decoded_chars_used
if chars_to_skip == 0:
# We haven't moved from the snapshot point.
return self._pack_cookie(position, dec_flags)
# Starting from the snapshot position, we will walk the decoder
# forward until it gives us enough decoded characters.
saved_state = decoder.getstate()
try:
# Fast search for an acceptable start point, close to our
# current pos.
# Rationale: calling decoder.decode() has a large overhead
# regardless of chunk size; we want the number of such calls to
# be O(1) in most situations (common decoders, non-crazy input).
# Actually, it will be exactly 1 for fixed-size codecs (all
# 8-bit codecs, also UTF-16 and UTF-32).
skip_bytes = int(self._b2cratio * chars_to_skip)
skip_back = 1
assert skip_bytes <= len(next_input)
while skip_bytes > 0:
decoder.setstate((b'', dec_flags))
# Decode up to the tentative start point
n = len(decoder.decode(next_input[:skip_bytes]))
if n <= chars_to_skip:
b, d = decoder.getstate()
if not b:
# Before pos and no bytes buffered in decoder => OK
dec_flags = d
chars_to_skip -= n
break
# Skip back by buffered amount and reset heuristic
skip_bytes -= len(b)
skip_back = 1
else:
# We're too far ahead, skip back a bit
skip_bytes -= skip_back
skip_back = skip_back * 2
else:
skip_bytes = 0
decoder.setstate((b'', dec_flags))
# Note our initial start point.
start_pos = position + skip_bytes
start_flags = dec_flags
if chars_to_skip == 0:
# We haven't moved from the start point.
return self._pack_cookie(start_pos, start_flags)
# Feed the decoder one byte at a time. As we go, note the
# nearest "safe start point" before the current location
# (a point where the decoder has nothing buffered, so seek()
# can safely start from there and advance to this location).
bytes_fed = 0
need_eof = 0
# Chars decoded since `start_pos`
chars_decoded = 0
for i in range(skip_bytes, len(next_input)):
bytes_fed += 1
chars_decoded += len(decoder.decode(next_input[i:i+1]))
dec_buffer, dec_flags = decoder.getstate()
if not dec_buffer and chars_decoded <= chars_to_skip:
# Decoder buffer is empty, so this is a safe start point.
start_pos += bytes_fed
chars_to_skip -= chars_decoded
start_flags, bytes_fed, chars_decoded = dec_flags, 0, 0
if chars_decoded >= chars_to_skip:
break
else:
# We didn't get enough decoded data; signal EOF to get more.
chars_decoded += len(decoder.decode(b'', final=True))
need_eof = 1
if chars_decoded < chars_to_skip:
raise IOError("can't reconstruct logical file position")
# The returned cookie corresponds to the last safe start point.
return self._pack_cookie(
start_pos, start_flags, bytes_fed, need_eof, chars_to_skip)
finally:
decoder.setstate(saved_state)
def truncate(self, pos=None):
self.flush()
if pos is None:
pos = self.tell()
return self.buffer.truncate(pos)
def detach(self):
if self.buffer is None:
raise ValueError("buffer is already detached")
self.flush()
buffer = self._buffer
self._buffer = None
return buffer
def seek(self, cookie, whence=0):
if self.closed:
raise ValueError("tell on closed file")
if not self._seekable:
raise UnsupportedOperation("underlying stream is not seekable")
if whence == 1: # seek relative to current position
if cookie != 0:
raise UnsupportedOperation("can't do nonzero cur-relative seeks")
# Seeking to the current position should attempt to
# sync the underlying buffer with the current position.
whence = 0
cookie = self.tell()
if whence == 2: # seek relative to end of file
if cookie != 0:
raise UnsupportedOperation("can't do nonzero end-relative seeks")
self.flush()
position = self.buffer.seek(0, 2)
self._set_decoded_chars('')
self._snapshot = None
if self._decoder:
self._decoder.reset()
return position
if whence != 0:
raise ValueError("unsupported whence (%r)" % (whence,))
if cookie < 0:
raise ValueError("negative seek position %r" % (cookie,))
self.flush()
# The strategy of seek() is to go back to the safe start point
# and replay the effect of read(chars_to_skip) from there.
start_pos, dec_flags, bytes_to_feed, need_eof, chars_to_skip = \
self._unpack_cookie(cookie)
# Seek back to the safe start point.
self.buffer.seek(start_pos)
self._set_decoded_chars('')
self._snapshot = None
# Restore the decoder to its state from the safe start point.
if cookie == 0 and self._decoder:
self._decoder.reset()
elif self._decoder or dec_flags or chars_to_skip:
self._decoder = self._decoder or self._get_decoder()
self._decoder.setstate((b'', dec_flags))
self._snapshot = (dec_flags, b'')
if chars_to_skip:
# Just like _read_chunk, feed the decoder and save a snapshot.
input_chunk = self.buffer.read(bytes_to_feed)
self._set_decoded_chars(
self._decoder.decode(input_chunk, need_eof))
self._snapshot = (dec_flags, input_chunk)
# Skip chars_to_skip of the decoded characters.
if len(self._decoded_chars) < chars_to_skip:
raise IOError("can't restore logical file position")
self._decoded_chars_used = chars_to_skip
# Finally, reset the encoder (merely useful for proper BOM handling)
try:
encoder = self._encoder or self._get_encoder()
except LookupError:
# Sometimes the encoder doesn't exist
pass
else:
if cookie != 0:
encoder.setstate(0)
else:
encoder.reset()
return cookie
def read(self, n=None):
self._checkReadable()
if n is None:
n = -1
decoder = self._decoder or self._get_decoder()
try:
n.__index__
except AttributeError as err:
raise TypeError("an integer is required") from err
if n < 0:
# Read everything.
result = (self._get_decoded_chars() +
decoder.decode(self.buffer.read(), final=True))
self._set_decoded_chars('')
self._snapshot = None
return result
else:
# Keep reading chunks until we have n characters to return.
eof = False
result = self._get_decoded_chars(n)
while len(result) < n and not eof:
eof = not self._read_chunk()
result += self._get_decoded_chars(n - len(result))
return result
def __next__(self):
self._telling = False
line = self.readline()
if not line:
self._snapshot = None
self._telling = self._seekable
raise StopIteration
return line
def readline(self, limit=None):
if self.closed:
raise ValueError("read from closed file")
if limit is None:
limit = -1
elif not isinstance(limit, int):
raise TypeError("limit must be an integer")
# Grab all the decoded text (we will rewind any extra bits later).
line = self._get_decoded_chars()
start = 0
# Make the decoder if it doesn't already exist.
if not self._decoder:
self._get_decoder()
pos = endpos = None
while True:
if self._readtranslate:
# Newlines are already translated, only search for \n
pos = line.find('\n', start)
if pos >= 0:
endpos = pos + 1
break
else:
start = len(line)
elif self._readuniversal:
# Universal newline search. Find any of \r, \r\n, \n
# The decoder ensures that \r\n are not split in two pieces
# In C we'd look for these in parallel of course.
nlpos = line.find("\n", start)
crpos = line.find("\r", start)
if crpos == -1:
if nlpos == -1:
# Nothing found
start = len(line)
else:
# Found \n
endpos = nlpos + 1
break
elif nlpos == -1:
# Found lone \r
endpos = crpos + 1
break
elif nlpos < crpos:
# Found \n
endpos = nlpos + 1
break
elif nlpos == crpos + 1:
# Found \r\n
endpos = crpos + 2
break
else:
# Found \r
endpos = crpos + 1
break
else:
# non-universal
pos = line.find(self._readnl)
if pos >= 0:
endpos = pos + len(self._readnl)
break
if limit >= 0 and len(line) >= limit:
endpos = limit # reached length limit
break
# No line ending seen yet - get more data
while self._read_chunk():
if self._decoded_chars:
break
if self._decoded_chars:
line += self._get_decoded_chars()
else:
# end of file
self._set_decoded_chars('')
self._snapshot = None
return line
if limit >= 0 and endpos > limit:
endpos = limit # don't exceed limit
# Rewind _decoded_chars to just after the line ending we found.
self._rewind_decoded_chars(len(line) - endpos)
return line[:endpos]
@property
def newlines(self):
return self._decoder.newlines if self._decoder else None
class StringIO(TextIOWrapper):
"""Text I/O implementation using an in-memory buffer.
The initial_value argument sets the value of object. The newline
argument is like the one of TextIOWrapper's constructor.
"""
def __init__(self, initial_value="", newline="\n"):
super(StringIO, self).__init__(BytesIO(),
encoding="utf-8",
errors="strict",
newline=newline)
# Issue #5645: make universal newlines semantics the same as in the
# C version, even under Windows.
if newline is None:
self._writetranslate = False
if initial_value is not None:
if not isinstance(initial_value, str):
raise TypeError("initial_value must be str or None, not {0}"
.format(type(initial_value).__name__))
initial_value = str(initial_value)
self.write(initial_value)
self.seek(0)
def getvalue(self):
self.flush()
return self.buffer.getvalue().decode(self._encoding, self._errors)
def __repr__(self):
# TextIOWrapper tells the encoding in its repr. In StringIO,
# that's an implementation detail.
return object.__repr__(self)
@property
def errors(self):
return None
@property
def encoding(self):
return None
def detach(self):
# This doesn't make sense on StringIO.
self._unsupported("detach")
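# Illustrative sketch (comment only) of the StringIO semantics above:
#
#     s = StringIO("hello\nworld\n")
#     s.readline()     # -> 'hello\n'
#     s.write("!")     # overwrites at the current position
#     s.getvalue()     # -> 'hello\n!orld\n'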
| gpl-3.0 |
iulian787/spack | var/spack/repos/builtin/packages/systemtap/package.py | 2 | 1226 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Systemtap(AutotoolsPackage):
"""SystemTap provides free software (GPL) infrastructure to
simplify the gathering of information about the running
Linux system. This assists diagnosis of a performance or
functional problem. SystemTap eliminates the need for the
developer to go through the tedious and disruptive instrument,
recompile, install, and reboot sequence that may be otherwise
required to collect data."""
homepage = "https://sourceware.org/systemtap/"
url = "https://sourceware.org/systemtap/ftp/releases/systemtap-4.3.tar.gz"
version('4.3', sha256='f8e206ed654c13a8b42245a342c1b5a4aafdf817c97bf3becbe3c8a43a4489ce')
version('4.2', sha256='0984ebe3162274988252ec35074021dc1e8420d87a8b35f437578562fce08781')
version('4.1', sha256='8efa1ee2b34f1c6b2f33a25313287d59c8ed1b00265e900aea874da8baca1e1d')
depends_on('gettext')
depends_on('elfutils')
def configure_args(self):
args = ['LDFLAGS=-lintl']
return args
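# Typical usage (illustrative): once this package file is available in a
# Spack repo, a pinned version can be built with `spack install systemtap@4.3`.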
| lgpl-2.1 |
openstack/cloudkitty | cloudkitty/rating/pyscripts/db/api.py | 1 | 2934 | # -*- coding: utf-8 -*-
# Copyright 2015 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import abc
from oslo_config import cfg
from oslo_db import api as db_api
from cloudkitty.i18n import _
_BACKEND_MAPPING = {
'sqlalchemy': 'cloudkitty.rating.pyscripts.db.sqlalchemy.api'}
IMPL = db_api.DBAPI.from_config(cfg.CONF,
backend_mapping=_BACKEND_MAPPING,
lazy=True)
def get_instance():
"""Return a DB API instance."""
return IMPL
class NoSuchScript(Exception):
"""Raised when the script doesn't exist."""
def __init__(self, name=None, uuid=None):
super(NoSuchScript, self).__init__(
_("No such script: %(name)s (UUID: %(uuid)s)") % {'name': name,
'uuid': uuid})
self.name = name
self.uuid = uuid
class ScriptAlreadyExists(Exception):
"""Raised when the script already exists."""
def __init__(self, name, uuid):
super(ScriptAlreadyExists, self).__init__(
_("Script %(name)s already exists (UUID: %(uuid)s)") %
{'name': name, 'uuid': uuid})
self.name = name
self.uuid = uuid
class PyScripts(object, metaclass=abc.ABCMeta):
"""Base class for pyscripts configuration."""
@abc.abstractmethod
def get_migration(self):
"""Return a migrate manager.
"""
@abc.abstractmethod
def get_script(self, name=None, uuid=None):
"""Return a script object.
:param name: Filter on a script name.
:param uuid: The uuid of the script to get.
"""
@abc.abstractmethod
def list_scripts(self):
"""Return a UUID list of every scripts available.
"""
@abc.abstractmethod
def create_script(self, name, data):
"""Create a new script.
:param name: Name of the script to create.
:param data: Content of the python script.
"""
@abc.abstractmethod
def update_script(self, uuid, **kwargs):
"""Update a script.
:param uuid UUID of the script to modify.
:param data: Script data.
"""
@abc.abstractmethod
def delete_script(self, name=None, uuid=None):
"""Delete a list.
:param name: Name of the script to delete.
:param uuid: UUID of the script to delete.
"""
| apache-2.0 |
gardner/youtube-dl | youtube_dl/extractor/vice.py | 87 | 1354 | from __future__ import unicode_literals
from .common import InfoExtractor
from .ooyala import OoyalaIE
from ..utils import ExtractorError
class ViceIE(InfoExtractor):
_VALID_URL = r'https?://(?:.+?\.)?vice\.com/(?:[^/]+/)+(?P<id>.+)'
_TESTS = [
{
'url': 'http://www.vice.com/Fringes/cowboy-capitalists-part-1',
'info_dict': {
'id': '43cW1mYzpia9IlestBjVpd23Yu3afAfp',
'ext': 'mp4',
'title': 'VICE_COWBOYCAPITALISTS_PART01_v1_VICE_WM_1080p.mov',
},
'params': {
# Requires ffmpeg (m3u8 manifest)
'skip_download': True,
},
}, {
'url': 'https://news.vice.com/video/experimenting-on-animals-inside-the-monkey-lab',
'only_matching': True,
}
]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
try:
embed_code = self._search_regex(
r'embedCode=([^&\'"]+)', webpage,
'ooyala embed code')
ooyala_url = OoyalaIE._url_for_embed_code(embed_code)
except ExtractorError:
raise ExtractorError('The page doesn\'t contain a video', expected=True)
return self.url_result(ooyala_url, ie='Ooyala')
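# Quick illustrative check (comment only): this extractor claims vice.com
# URLs via the _VALID_URL pattern above, e.g.
#     ViceIE.suitable('http://www.vice.com/Fringes/cowboy-capitalists-part-1')  # True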
| unlicense |
gangadharkadam/vlinkerp | erpnext/accounts/utils.py | 3 | 15725 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import nowdate, cstr, flt, now, getdate, add_months
from frappe import throw, _
from frappe.utils import formatdate
import frappe.desk.reportview
class FiscalYearError(frappe.ValidationError): pass
class BudgetError(frappe.ValidationError): pass
@frappe.whitelist()
def get_fiscal_year(date=None, fiscal_year=None, label="Date", verbose=1, company=None):
return get_fiscal_years(date, fiscal_year, label, verbose, company)[0]
def get_fiscal_years(transaction_date=None, fiscal_year=None, label="Date", verbose=1, company=None):
# if year start date is 2012-04-01, year end date should be 2013-03-31 (hence subdate)
cond = " ifnull(disabled, 0) = 0"
if fiscal_year:
cond += " and fy.name = %(fiscal_year)s"
else:
cond += " and %(transaction_date)s >= fy.year_start_date and %(transaction_date)s <= fy.year_end_date"
if company:
cond += """ and (not exists(select name from `tabFiscal Year Company` fyc where fyc.parent = fy.name)
or exists(select company from `tabFiscal Year Company` fyc where fyc.parent = fy.name and fyc.company=%(company)s ))"""
fy = frappe.db.sql("""select fy.name, fy.year_start_date, fy.year_end_date from `tabFiscal Year` fy
where %s order by fy.year_start_date desc""" % cond, {
"fiscal_year": fiscal_year,
"transaction_date": transaction_date,
"company": company
})
if not fy:
error_msg = _("""{0} {1} not in any active Fiscal Year. For more details check {2}.""").format(label, formatdate(transaction_date), "https://erpnext.com/kb/accounts/fiscal-year-error")
if verbose==1: frappe.msgprint(error_msg)
raise FiscalYearError, error_msg
return fy
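# Illustrative call (values hypothetical): get_fiscal_year("2015-07-14")
# returns the first matching (name, year_start_date, year_end_date) row for an
# active fiscal year covering that date, or raises FiscalYearError otherwise.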
def validate_fiscal_year(date, fiscal_year, label=_("Date"), doc=None):
years = [f[0] for f in get_fiscal_years(date, label=label)]
if fiscal_year not in years:
if doc:
doc.fiscal_year = years[0]
else:
throw(_("{0} '{1}' not in Fiscal Year {2}").format(label, formatdate(date), fiscal_year))
@frappe.whitelist()
def get_balance_on(account=None, date=None, party_type=None, party=None):
if not account and frappe.form_dict.get("account"):
account = frappe.form_dict.get("account")
if not date and frappe.form_dict.get("date"):
date = frappe.form_dict.get("date")
if not party_type and frappe.form_dict.get("party_type"):
party_type = frappe.form_dict.get("party_type")
if not party and frappe.form_dict.get("party"):
party = frappe.form_dict.get("party")
cond = []
if date:
cond.append("posting_date <= '%s'" % date)
else:
# get balance of all entries that exist
date = nowdate()
try:
year_start_date = get_fiscal_year(date, verbose=0)[1]
except FiscalYearError:
if getdate(date) > getdate(nowdate()):
# if fiscal year not found and the date is greater than today
# get fiscal year for today's date and its corresponding year start date
year_start_date = get_fiscal_year(nowdate(), verbose=1)[1]
else:
# this indicates that it is a date older than any existing fiscal year.
# hence, assuming balance as 0.0
return 0.0
if account:
acc = frappe.get_doc("Account", account)
acc.check_permission("read")
# for pl accounts, get balance within a fiscal year
if acc.report_type == 'Profit and Loss':
cond.append("posting_date >= '%s' and voucher_type != 'Period Closing Voucher'" \
% year_start_date)
# different filter for group and ledger - improved performance
if acc.is_group:
cond.append("""exists (
select * from `tabAccount` ac where ac.name = gle.account
and ac.lft >= %s and ac.rgt <= %s
)""" % (acc.lft, acc.rgt))
else:
cond.append("""gle.account = "%s" """ % (account.replace('"', '\\"'), ))
if party_type and party:
cond.append("""gle.party_type = "%s" and gle.party = "%s" """ %
(party_type.replace('"', '\\"'), party.replace('"', '\\"')))
bal = frappe.db.sql("""
SELECT sum(ifnull(debit, 0)) - sum(ifnull(credit, 0))
FROM `tabGL Entry` gle
WHERE %s""" % " and ".join(cond))[0][0]
# if bal is None, return 0
return flt(bal)
@frappe.whitelist()
def add_ac(args=None):
if not args:
args = frappe.local.form_dict
args.pop("cmd")
ac = frappe.new_doc("Account")
ac.update(args)
ac.old_parent = ""
ac.freeze_account = "No"
ac.insert()
return ac.name
@frappe.whitelist()
def add_cc(args=None):
if not args:
args = frappe.local.form_dict
args.pop("cmd")
cc = frappe.new_doc("Cost Center")
cc.update(args)
cc.old_parent = ""
cc.insert()
return cc.name
def reconcile_against_document(args):
"""
Cancel the JV, update the against document, split if required, and resubmit the JV
"""
for d in args:
check_if_jv_modified(d)
validate_allocated_amount(d)
against_fld = {
'Journal Entry' : 'against_jv',
'Sales Invoice' : 'against_invoice',
'Purchase Invoice' : 'against_voucher'
}
d['against_fld'] = against_fld[d['against_voucher_type']]
# cancel JV
jv_obj = frappe.get_doc('Journal Entry', d['voucher_no'])
jv_obj.make_gl_entries(cancel=1, adv_adj=1)
# update ref in JV Detail
update_against_doc(d, jv_obj)
# re-submit JV
jv_obj = frappe.get_doc('Journal Entry', d['voucher_no'])
jv_obj.make_gl_entries(cancel = 0, adv_adj =1)
def check_if_jv_modified(args):
"""
check if there is already a voucher reference
check if amount is same
check if jv is submitted
"""
ret = frappe.db.sql("""
select t2.{dr_or_cr} from `tabJournal Entry` t1, `tabJournal Entry Account` t2
where t1.name = t2.parent and t2.account = %(account)s
and t2.party_type = %(party_type)s and t2.party = %(party)s
and ifnull(t2.against_voucher, '')=''
and ifnull(t2.against_invoice, '')='' and ifnull(t2.against_jv, '')=''
and t1.name = %(voucher_no)s and t2.name = %(voucher_detail_no)s
and t1.docstatus=1 """.format(dr_or_cr = args.get("dr_or_cr")), args)
if not ret:
throw(_("""Payment Entry has been modified after you pulled it. Please pull it again."""))
def validate_allocated_amount(args):
if args.get("allocated_amt") < 0:
throw(_("Allocated amount can not be negative"))
elif args.get("allocated_amt") > args.get("unadjusted_amt"):
throw(_("Allocated amount can not greater than unadusted amount"))
def update_against_doc(d, jv_obj):
"""
Updates the against document reference; splits into rows if only a partial amount is allocated
"""
jv_detail = jv_obj.get("accounts", {"name": d["voucher_detail_no"]})[0]
jv_detail.set(d["dr_or_cr"], d["allocated_amt"])
jv_detail.set(d["against_fld"], d["against_voucher"])
if d['allocated_amt'] < d['unadjusted_amt']:
jvd = frappe.db.sql("""select cost_center, balance, against_account, is_advance
from `tabJournal Entry Account` where name = %s""", d['voucher_detail_no'])
# new entry with balance amount
ch = jv_obj.append("accounts")
ch.account = d['account']
ch.party_type = d["party_type"]
ch.party = d["party"]
ch.cost_center = cstr(jvd[0][0])
ch.balance = flt(jvd[0][1])
ch.set(d['dr_or_cr'], flt(d['unadjusted_amt']) - flt(d['allocated_amt']))
ch.set(d['dr_or_cr']== 'debit' and 'credit' or 'debit', 0)
ch.against_account = cstr(jvd[0][2])
ch.is_advance = cstr(jvd[0][3])
ch.docstatus = 1
# will work as update after submit
jv_obj.flags.ignore_validate_update_after_submit = True
jv_obj.save()
def remove_against_link_from_jv(ref_type, ref_no, against_field):
linked_jv = frappe.db.sql_list("""select parent from `tabJournal Entry Account`
where `%s`=%s and docstatus < 2""" % (against_field, "%s"), (ref_no))
if linked_jv:
frappe.db.sql("""update `tabJournal Entry Account` set `%s`=null,
modified=%s, modified_by=%s
where `%s`=%s and docstatus < 2""" % (against_field, "%s", "%s", against_field, "%s"),
(now(), frappe.session.user, ref_no))
frappe.db.sql("""update `tabGL Entry`
set against_voucher_type=null, against_voucher=null,
modified=%s, modified_by=%s
where against_voucher_type=%s and against_voucher=%s
and voucher_no != ifnull(against_voucher, '')""",
(now(), frappe.session.user, ref_type, ref_no))
frappe.msgprint(_("Journal Entries {0} are un-linked".format("\n".join(linked_jv))))
@frappe.whitelist()
def get_company_default(company, fieldname):
value = frappe.db.get_value("Company", company, fieldname)
if not value:
throw(_("Please set default value {0} in Company {1}").format(frappe.get_meta("Company").get_label(fieldname), company))
return value
def fix_total_debit_credit():
vouchers = frappe.db.sql("""select voucher_type, voucher_no,
sum(debit) - sum(credit) as diff
from `tabGL Entry`
group by voucher_type, voucher_no
having sum(ifnull(debit, 0)) != sum(ifnull(credit, 0))""", as_dict=1)
for d in vouchers:
if abs(d.diff) > 0:
dr_or_cr = d.voucher_type == "Sales Invoice" and "credit" or "debit"
frappe.db.sql("""update `tabGL Entry` set %s = %s + %s
where voucher_type = %s and voucher_no = %s and %s > 0 limit 1""" %
(dr_or_cr, dr_or_cr, '%s', '%s', '%s', dr_or_cr),
(d.diff, d.voucher_type, d.voucher_no))
def get_stock_and_account_difference(account_list=None, posting_date=None):
from erpnext.stock.utils import get_stock_value_on
if not posting_date: posting_date = nowdate()
difference = {}
account_warehouse = dict(frappe.db.sql("""select name, warehouse from tabAccount
where account_type = 'Warehouse' and ifnull(warehouse, '') != ''
and name in (%s)""" % ', '.join(['%s']*len(account_list)), account_list))
for account, warehouse in account_warehouse.items():
account_balance = get_balance_on(account, posting_date)
stock_value = get_stock_value_on(warehouse, posting_date)
if abs(flt(stock_value) - flt(account_balance)) > 0.005:
difference.setdefault(account, flt(stock_value) - flt(account_balance))
return difference
def validate_expense_against_budget(args):
args = frappe._dict(args)
if frappe.db.get_value("Account", {"name": args.account, "report_type": "Profit and Loss"}):
budget = frappe.db.sql("""
select bd.budget_allocated, cc.distribution_id
from `tabCost Center` cc, `tabBudget Detail` bd
where cc.name=bd.parent and cc.name=%s and account=%s and bd.fiscal_year=%s
""", (args.cost_center, args.account, args.fiscal_year), as_dict=True)
if budget and budget[0].budget_allocated:
yearly_action, monthly_action = frappe.db.get_value("Company", args.company,
["yearly_bgt_flag", "monthly_bgt_flag"])
action_for = action = ""
if monthly_action in ["Stop", "Warn"]:
budget_amount = get_allocated_budget(budget[0].distribution_id,
args.posting_date, args.fiscal_year, budget[0].budget_allocated)
args["month_end_date"] = frappe.db.sql("select LAST_DAY(%s)",
args.posting_date)[0][0]
action_for, action = _("Monthly"), monthly_action
elif yearly_action in ["Stop", "Warn"]:
budget_amount = budget[0].budget_allocated
action_for, action = _("Annual"), yearly_action
if action_for:
actual_expense = get_actual_expense(args)
if actual_expense > budget_amount:
frappe.msgprint(_("{0} budget for Account {1} against Cost Center {2} will exceed by {3}").format(
_(action_for), args.account, args.cost_center, cstr(actual_expense - budget_amount)))
if action=="Stop":
raise BudgetError
def get_allocated_budget(distribution_id, posting_date, fiscal_year, yearly_budget):
if distribution_id:
distribution = {}
for d in frappe.db.sql("""select mdp.month, mdp.percentage_allocation
from `tabMonthly Distribution Percentage` mdp, `tabMonthly Distribution` md
where mdp.parent=md.name and md.fiscal_year=%s""", fiscal_year, as_dict=1):
distribution.setdefault(d.month, d.percentage_allocation)
dt = frappe.db.get_value("Fiscal Year", fiscal_year, "year_start_date")
budget_percentage = 0.0
while(dt <= getdate(posting_date)):
if distribution_id:
budget_percentage += distribution.get(getdate(dt).strftime("%B"), 0)
else:
budget_percentage += 100.0/12
dt = add_months(dt, 1)
return yearly_budget * budget_percentage / 100
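# Worked example of the fallback above: with no distribution_id and a
# posting_date in the third month of the fiscal year, the loop runs three
# times, so budget_percentage = 3 * 100/12 = 25.0 and a yearly budget of
# 120000 yields 30000.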
def get_actual_expense(args):
args["condition"] = " and posting_date<='%s'" % args.month_end_date \
if args.get("month_end_date") else ""
return flt(frappe.db.sql("""
select sum(ifnull(debit, 0)) - sum(ifnull(credit, 0))
from `tabGL Entry`
where account='%(account)s' and cost_center='%(cost_center)s'
and fiscal_year='%(fiscal_year)s' and company='%(company)s' %(condition)s
""" % (args))[0][0])
def get_currency_precision(currency=None):
if not currency:
currency = frappe.db.get_value("Company",
frappe.db.get_default("company"), "default_currency", cache=True)
currency_format = frappe.db.get_value("Currency", currency, "number_format", cache=True)
from frappe.utils import get_number_format_info
return get_number_format_info(currency_format)[2]
def get_stock_rbnb_difference(posting_date, company):
stock_items = frappe.db.sql_list("""select distinct item_code
from `tabStock Ledger Entry` where company=%s""", company)
pr_valuation_amount = frappe.db.sql("""
select sum(ifnull(pr_item.valuation_rate, 0) * ifnull(pr_item.qty, 0) * ifnull(pr_item.conversion_factor, 0))
from `tabPurchase Receipt Item` pr_item, `tabPurchase Receipt` pr
where pr.name = pr_item.parent and pr.docstatus=1 and pr.company=%s
and pr.posting_date <= %s and pr_item.item_code in (%s)""" %
('%s', '%s', ', '.join(['%s']*len(stock_items))), tuple([company, posting_date] + stock_items))[0][0]
pi_valuation_amount = frappe.db.sql("""
select sum(ifnull(pi_item.valuation_rate, 0) * ifnull(pi_item.qty, 0) * ifnull(pi_item.conversion_factor, 0))
from `tabPurchase Invoice Item` pi_item, `tabPurchase Invoice` pi
where pi.name = pi_item.parent and pi.docstatus=1 and pi.company=%s
and pi.posting_date <= %s and pi_item.item_code in (%s)""" %
('%s', '%s', ', '.join(['%s']*len(stock_items))), tuple([company, posting_date] + stock_items))[0][0]
# Balance should be
stock_rbnb = flt(pr_valuation_amount, 2) - flt(pi_valuation_amount, 2)
# Balance as per system
stock_rbnb_account = "Stock Received But Not Billed - " + frappe.db.get_value("Company", company, "abbr")
sys_bal = get_balance_on(stock_rbnb_account, posting_date)
# Amount should be credited
return flt(stock_rbnb) + flt(sys_bal)
def get_outstanding_invoices(amount_query, account, party_type, party):
all_outstanding_vouchers = []
outstanding_voucher_list = frappe.db.sql("""
select
voucher_no, voucher_type, posting_date,
ifnull(sum({amount_query}), 0) as invoice_amount
from
`tabGL Entry`
where
account = %s and party_type=%s and party=%s and {amount_query} > 0
group by voucher_type, voucher_no
""".format(amount_query = amount_query), (account, party_type, party), as_dict = True)
for d in outstanding_voucher_list:
payment_amount = frappe.db.sql("""
select ifnull(sum(ifnull({amount_query}, 0)), 0)
from
`tabGL Entry`
where
account = %s and party_type=%s and party=%s and {amount_query} < 0
and against_voucher_type = %s and ifnull(against_voucher, '') = %s
""".format(**{
"amount_query": amount_query
}), (account, party_type, party, d.voucher_type, d.voucher_no))
payment_amount = -1*payment_amount[0][0] if payment_amount else 0
precision = frappe.get_precision("Sales Invoice", "outstanding_amount")
if d.invoice_amount > payment_amount:
all_outstanding_vouchers.append({
'voucher_no': d.voucher_no,
'voucher_type': d.voucher_type,
'posting_date': d.posting_date,
'invoice_amount': flt(d.invoice_amount, precision),
'outstanding_amount': flt(d.invoice_amount - payment_amount, precision)
})
return all_outstanding_vouchers
| agpl-3.0 |
srikantbmandal/ansible | lib/ansible/modules/utilities/logic/assert.py | 56 | 1766 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2012 Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: assert
short_description: Asserts given expressions are true
description:
- This module asserts that given expressions are true with an optional custom message.
version_added: "1.5"
options:
that:
description:
- "A string expression of the same form that can be passed to the 'when' statement"
- "Alternatively, a list of string expressions"
required: true
msg:
description:
- "The customized message used for a failing assertion"
required: false
author:
- "Ansible Core Team"
- "Michael DeHaan"
'''
EXAMPLES = '''
- assert: { that: "ansible_os_family != 'RedHat'" }
- assert:
that:
- "'foo' in some_command_result.stdout"
- "number_of_the_counting == 3"
- assert:
that:
- "my_param <= 100"
- "my_param >= 0"
msg: "'my_param' must be between 0 and 100"
'''
| gpl-3.0 |
TeamEOS/external_chromium_org | tools/perf/benchmarks/browsermark.py | 9 | 2240 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs Browsermark CSS, DOM, WebGL, JS, resize and page load benchmarks.
The Browsermark benchmark suite has five test groups:
a) CSS group: measures your browser's 2D and 3D performance, and finally
executes the CSS Crunch test
b) DOM group: measures a variety of areas, like how well your browser
traverses the Document Object Model tree or how fast it can create dynamic
content
c) General group: measures areas like resize and page load times
d) Graphics group: tests the browser's Graphics Processing Unit power by
measuring WebGL and Canvas performance
e) Javascript group: executes number crunching by doing selected Array and
String operations
Additionally, Browsermark will test your browser's conformance, but
conformance tests are not included in this suite.
"""
import os
from telemetry import test
from telemetry.page import page_measurement
from telemetry.page import page_set
class _BrowsermarkMeasurement(page_measurement.PageMeasurement):
def MeasurePage(self, _, tab, results):
# Select nearest server(North America=1) and start test.
js_start_test = """
for (var i=0; i < $('#continent a').length; i++) {
if (($('#continent a')[i]).getAttribute('data-id') == '1') {
$('#continent a')[i].click();
$('.start_test.enabled').click();
}
}
"""
tab.ExecuteJavaScript(js_start_test)
tab.WaitForJavaScriptExpression(
'window.location.pathname.indexOf("results") != -1', 600)
result = int(tab.EvaluateJavaScript(
'document.getElementsByClassName("score")[0].innerHTML'))
results.Add('Score', 'score', result)
class Browsermark(test.Test):
"""Browsermark suite tests CSS, DOM, resize, page load, WebGL and JS."""
test = _BrowsermarkMeasurement
def CreatePageSet(self, options):
ps = page_set.PageSet(
file_path=os.path.abspath(__file__),
archive_data_file='../page_sets/data/browsermark.json',
make_javascript_deterministic=False)
ps.AddPageWithDefaultRunNavigate('http://browsermark.rightware.com/tests/')
return ps
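# Typically run through Telemetry's benchmark runner, e.g. (illustrative
# invocation; the exact benchmark name is derived from the class name):
#     $ tools/perf/run_benchmark browsermark --browser=release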
| bsd-3-clause |
MungoRae/home-assistant | homeassistant/components/media_player/frontier_silicon.py | 16 | 7318 | """
Support for Frontier Silicon Devices (Medion, Hama, Auna,...).
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.frontier_silicon/
"""
import logging
import voluptuous as vol
from homeassistant.components.media_player import (
SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PREVIOUS_TRACK, SUPPORT_SEEK,
SUPPORT_PLAY_MEDIA, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP, SUPPORT_STOP, SUPPORT_TURN_OFF, SUPPORT_TURN_ON,
SUPPORT_PLAY, SUPPORT_SELECT_SOURCE, MediaPlayerDevice, PLATFORM_SCHEMA,
MEDIA_TYPE_MUSIC)
from homeassistant.const import (
STATE_OFF, STATE_PLAYING, STATE_PAUSED, STATE_UNKNOWN,
CONF_HOST, CONF_PORT, CONF_PASSWORD)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['fsapi==0.0.7']
_LOGGER = logging.getLogger(__name__)
SUPPORT_FRONTIER_SILICON = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | \
SUPPORT_VOLUME_MUTE | SUPPORT_VOLUME_STEP | \
SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | SUPPORT_SEEK | \
SUPPORT_PLAY_MEDIA | SUPPORT_PLAY | SUPPORT_STOP | SUPPORT_TURN_ON | \
SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE
DEFAULT_PORT = 80
DEFAULT_PASSWORD = '1234'
DEVICE_URL = 'http://{0}:{1}/device'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_PASSWORD, default=DEFAULT_PASSWORD): cv.string,
})
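# Example configuration.yaml entry for this platform (host and password
# values are illustrative):
#
#     media_player:
#       - platform: frontier_silicon
#         host: 192.168.1.11
#         port: 80
#         password: "1234"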
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Frontier Silicon platform."""
import requests
if discovery_info is not None:
add_devices(
[FSAPIDevice(discovery_info['ssdp_description'],
DEFAULT_PASSWORD)],
update_before_add=True)
return True
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
password = config.get(CONF_PASSWORD)
try:
add_devices(
[FSAPIDevice(DEVICE_URL.format(host, port), password)],
update_before_add=True)
_LOGGER.debug("FSAPI device %s:%s -> %s", host, port, password)
return True
except requests.exceptions.RequestException:
_LOGGER.error("Could not add the FSAPI device at %s:%s -> %s",
host, port, password)
return False
class FSAPIDevice(MediaPlayerDevice):
"""Representation of a Frontier Silicon device on the network."""
def __init__(self, device_url, password):
"""Initialize the Frontier Silicon API device."""
self._device_url = device_url
self._password = password
self._state = STATE_UNKNOWN
self._name = None
self._title = None
self._artist = None
self._album_name = None
self._mute = None
self._source = None
self._source_list = None
self._media_image_url = None
# Properties
@property
def fs_device(self):
"""
Create a fresh fsapi session.
A new session is created for each request in case someone else
connected to the device in between the updates and invalidated the
existing session (e.g. the UNDOK app).
"""
from fsapi import FSAPI
return FSAPI(self._device_url, self._password)
@property
def should_poll(self):
"""Device should be polled."""
return True
@property
def name(self):
"""Return the device name."""
return self._name
@property
def media_title(self):
"""Title of current playing media."""
return self._title
@property
def media_artist(self):
"""Artist of current playing media, music track only."""
return self._artist
@property
def media_album_name(self):
"""Album name of current playing media, music track only."""
return self._album_name
@property
def media_content_type(self):
"""Content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
def supported_features(self):
"""Flag of media commands that are supported."""
return SUPPORT_FRONTIER_SILICON
@property
def state(self):
"""Return the state of the player."""
return self._state
# source
@property
def source_list(self):
"""List of available input sources."""
return self._source_list
@property
def source(self):
"""Name of the current input source."""
return self._source
@property
def media_image_url(self):
"""Image url of current playing media."""
return self._media_image_url
def update(self):
"""Get the latest date and update device state."""
fs_device = self.fs_device
if not self._name:
self._name = fs_device.friendly_name
if not self._source_list:
self._source_list = fs_device.mode_list
status = fs_device.play_status
self._state = {
'playing': STATE_PLAYING,
'paused': STATE_PAUSED,
'stopped': STATE_OFF,
'unknown': STATE_UNKNOWN,
None: STATE_OFF,
}.get(status, STATE_UNKNOWN)
info_name = fs_device.play_info_name
info_text = fs_device.play_info_text
self._title = ' - '.join(filter(None, [info_name, info_text]))
self._artist = fs_device.play_info_artist
self._album_name = fs_device.play_info_album
self._source = fs_device.mode
self._mute = fs_device.mute
self._media_image_url = fs_device.play_info_graphics
# Management actions
# power control
def turn_on(self):
"""Turn on the device."""
self.fs_device.power = True
def turn_off(self):
"""Turn off the device."""
self.fs_device.power = False
def media_play(self):
"""Send play command."""
self.fs_device.play()
def media_pause(self):
"""Send pause command."""
self.fs_device.pause()
def media_play_pause(self):
"""Send play/pause command."""
if 'playing' in self._state:
self.fs_device.pause()
else:
self.fs_device.play()
def media_stop(self):
"""Send play/pause command."""
self.fs_device.pause()
def media_previous_track(self):
"""Send previous track command (results in rewind)."""
self.fs_device.prev()
def media_next_track(self):
"""Send next track command (results in fast-forward)."""
self.fs_device.next()
# mute
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._mute
def mute_volume(self, mute):
"""Send mute command."""
self.fs_device.mute = mute
# volume
def volume_up(self):
"""Send volume up command."""
self.fs_device.volume += 1
def volume_down(self):
"""Send volume down command."""
self.fs_device.volume -= 1
def set_volume_level(self, volume):
"""Set volume command."""
self.fs_device.volume = volume
def select_source(self, source):
"""Select input source."""
self.fs_device.mode = source
| apache-2.0 |
mandeepdhami/horizon | openstack_dashboard/dashboards/admin/networks/agents/views.py | 31 | 3014 | # Copyright 2014 Kylincloud
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.networks.agents \
import forms as project_forms
class AddView(forms.ModalFormView):
form_class = project_forms.AddDHCPAgent
form_id = "add_dhcp_agent_form"
template_name = 'admin/networks/agents/add.html'
success_url = 'horizon:admin:networks:detail'
failure_url = 'horizon:admin:networks:detail'
submit_url = "horizon:admin:networks:adddhcpagent"
title_and_label = _("Add DHCP Agent")
submit_label = modal_header = page_title = title_and_label
def get_success_url(self):
return reverse(self.success_url,
args=(self.kwargs['network_id'],))
def get_context_data(self, **kwargs):
context = super(AddView, self).get_context_data(**kwargs)
context['network_id'] = self.kwargs['network_id']
args = (self.kwargs['network_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
context['cancel_url'] = reverse(self.failure_url, args=args)
return context
def get_initial(self):
initial = super(AddView, self).get_initial()
agents = self._get_agents()
network_id = self.kwargs['network_id']
try:
network = api.neutron.network_get(self.request, network_id)
initial.update({"network_id": network_id,
"network_name": network.name,
"agents": agents})
return initial
except Exception:
redirect = reverse(self.failure_url,
args=(self.kwargs['network_id'],))
msg = _("Unable to retrieve network.")
exceptions.handle(self.request, msg, redirect=redirect)
@memoized.memoized_method
def _get_agents(self):
try:
return api.neutron.agent_list(self.request,
agent_type='DHCP agent')
except Exception:
redirect = reverse(self.failure_url,
args=(self.kwargs['network_id'],))
msg = _("Unable to retrieve agent list.")
exceptions.handle(self.request, msg, redirect=redirect)
| apache-2.0 |
twitter/pants | src/python/pants/engine/struct.py | 1 | 12732 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
from future.utils import binary_type, text_type
from pants.engine.addressable import addressable, addressable_list
from pants.engine.objects import Serializable, SerializableFactory, Validatable, ValidationError
from pants.util.collections_abc_backport import MutableMapping, MutableSequence
from pants.util.objects import SubclassesOf, SuperclassesOf
def _normalize_utf8_keys(kwargs):
"""When kwargs are passed literally in a source file, their keys are ascii: normalize."""
if any(type(key) is binary_type for key in kwargs.keys()):
# This is to preserve the original dict type for kwargs.
dict_type = type(kwargs)
return dict_type([(text_type(k), v) for k, v in kwargs.items()])
return kwargs
class Struct(Serializable, SerializableFactory, Validatable):
"""A serializable object.
A Struct is composed of basic python builtin types and other high-level Structs.
Structs can carry a name in which case they become addressable and can be reused.
"""
# Fields dealing with inheritance.
_INHERITANCE_FIELDS = {'extends', 'merges'}
# The type alias for an instance overwrites any inherited type_alias field.
_TYPE_ALIAS_FIELD = 'type_alias'
# The field that indicates whether a Struct is abstract (and should thus skip validation).
_ABSTRACT_FIELD = 'abstract'
# Fields that should not be inherited.
_UNINHERITABLE_FIELDS = _INHERITANCE_FIELDS | {_TYPE_ALIAS_FIELD, _ABSTRACT_FIELD}
# Fields that are only intended for consumption by the Struct baseclass.
_INTERNAL_FIELDS = _INHERITANCE_FIELDS | {_ABSTRACT_FIELD}
def __init__(self, abstract=False, extends=None, merges=None, type_alias=None, **kwargs):
"""Creates a new struct data blob.
By default Structs are anonymous (un-named), concrete (not `abstract`), and they neither
inherit nor merge another Struct.
Inheritance is allowed via the `extends` and `merges` channels. An object inherits all
attributes from the object it extends, overwriting any attributes in common with the extended
object with its own. The relationship is an "overlay". For merges, the same rules apply
as for extends, working left to right, such that the rightmost merges attribute will overwrite
any similar attribute from merges to its left where the main object does not itself define the
attribute. The primary difference is in handling of lists and dicts. These are merged and not
over-written; again working from left to right with the main object's collection serving as the
seed when present.
A Struct can be semantically abstract without setting `abstract=True`. The `abstract`
value can serve as documentation, or, for subclasses that provide an implementation for
`validate_concrete`, it allows skipping validation for abstract instances.
:param bool abstract: `True` to mark this struct as abstract, in which case no
validation is performed (see `validate_concrete`); `False` by default.
:param extends: The struct instance to inherit field values from. Any shared fields are
over-written with this instances values.
:type extends: An addressed or concrete struct instance that is a type compatible with
this struct or this structs superclasses.
:param merges: The struct instances to merge this instance's field values with. Merging
is like extension except for containers, which are extended instead of replaced;
ie: any `dict` values are updated with this instances items and any `list` values
are extended with this instances items.
:type merges: An addressed or concrete struct instance that is a type compatible with
this struct or this structs superclasses.
:param **kwargs: The struct parameters.
"""
kwargs = _normalize_utf8_keys(kwargs)
self._kwargs = kwargs
self._kwargs['abstract'] = abstract
self._kwargs[self._TYPE_ALIAS_FIELD] = type_alias
self.extends = extends
self.merges = merges
# Allow for structs that are directly constructed in memory. These can have an
# address directly assigned (vs. inferred from name + source file location) and we only require
# that if they do, their name - if also assigned, matches the address.
if self.address:
target_name, _, config_specifier = self.address.target_name.partition('@')
if self.name and self.name != target_name:
self.report_validation_error('Address and name do not match! address: {}, name: {}'
.format(self.address, self.name))
self._kwargs['name'] = target_name
def kwargs(self):
"""Returns a dict of the kwargs for this Struct which were not interpreted by the baseclass.
This excludes fields like `extends`, `merges`, and `abstract`, which are consumed by
SerializableFactory.create and Validatable.validate.
"""
return {k: v for k, v in self._kwargs.items() if k not in self._INTERNAL_FIELDS}
@property
def name(self):
"""Return the name of this object, if any.
In general structs need not be named, in which case they are generally embedded
objects; ie: attributes values of enclosing named structs. Any top-level
struct object, though, will carry a unique name (in the struct object's enclosing
namespace) that can be used to address it.
:rtype: string
"""
return self._kwargs.get('name')
@property
def address(self):
"""Return the address of this object, if any.
In general structs need not be identified by an address, in which case they are
generally embedded objects; ie: attributes values of enclosing named structs.
Any top-level struct, though, will be identifiable via a unique address.
:rtype: :class:`pants.build_graph.address.Address`
"""
return self._kwargs.get('address')
@property
def type_alias(self):
"""Return the type alias this target was constructed via.
For a target read from a BUILD file, this will be target alias, like 'java_library'.
For a target constructed in memory, this will be the simple class name, like 'JavaLibrary'.
The end result is that the type alias should be the most natural way to refer to this target's
type to the author of the target instance.
:rtype: string
"""
type_alias = self._kwargs.get(self._TYPE_ALIAS_FIELD, None)
return type_alias if type_alias is not None else type(self).__name__
@property
def abstract(self):
"""Return `True` if this object has been marked as abstract.
Abstract objects are not validated. See: `validate_concrete`.
:rtype: bool
"""
return self._kwargs.get('abstract', False)
# It only makes sense to inherit a subset of our own fields (we should not inherit new fields!),
# our superclasses logically provide fields within this constrained set.
# NB: Since `Struct` is at base an ~unconstrained struct, a superclass does allow for
# arbitrary and thus more fields to be defined than a subclass might logically support. We
# accept this hole in a trade for generally expected behavior when `Struct` is subclassed
# in the style of constructors with named parameters representing the full complete set of
# expected parameters leaving **kwargs only for use by 'the system'; ie for `type_alias` and
# `address` plumbing for example.
#
# Of note is the fact that we pass a constraint type and not a concrete constraint value. This
# tells addressable to use `SuperclassesOf([Struct instance's type])`, which is what we
# want. Aka, for `StructSubclassA`, the constraint is
# `SuperclassesOf(StructSubclassA)`.
#
@addressable(SuperclassesOf)
def extends(self):
"""Return the object this object extends, if any.
:rtype: :class:`Serializable`
"""
@addressable_list(SuperclassesOf)
def merges(self):
"""Return the objects this object merges in, if any.
:rtype: list of :class:`Serializable`
"""
def _asdict(self):
return self._kwargs
def _extract_inheritable_attributes(self, serializable):
attributes = serializable._asdict().copy()
# Allow for embedded objects inheriting from addressable objects - they should never inherit an
# address and any top-level object inheriting will have its own address.
attributes.pop('address', None)
# We should never inherit special fields - these are for local book-keeping only.
for field in self._UNINHERITABLE_FIELDS:
attributes.pop(field, None)
return attributes
def create(self):
if not (self.extends or self.merges):
return self
# Filter out the attributes that we will consume below for inheritance.
attributes = {k: v for k, v in self._asdict().items()
if k not in self._INHERITANCE_FIELDS and v is not None}
if self.extends:
for k, v in self._extract_inheritable_attributes(self.extends).items():
attributes.setdefault(k, v)
if self.merges:
def merge(attrs):
for k, v in attrs.items():
if isinstance(v, MutableMapping):
mapping = attributes.get(k, {})
mapping.update(v)
attributes[k] = mapping
elif isinstance(v, MutableSequence):
sequence = attributes.get(k, [])
sequence.extend(v)
attributes[k] = sequence
else:
attributes.setdefault(k, v)
for merged in self.merges:
merge(self._extract_inheritable_attributes(merged))
struct_type = type(self)
return struct_type(**attributes)
def validate(self):
if not self.abstract:
self.validate_concrete()
def report_validation_error(self, message):
"""Raises a properly identified validation error.
:param string message: An error message describing the validation error.
:raises: :class:`pants.engine.objects.ValidationError`
"""
raise ValidationError(self.address, message)
def validate_concrete(self):
"""Subclasses can override to implement validation logic.
The object will be in a fully hydrated state, and it is guaranteed to be concrete, i.e.
not `abstract`. If an error is found in the struct's fields, a validation error should
be raised by calling `report_validation_error`.
:raises: :class:`pants.engine.objects.ValidationError`
"""
def __getattr__(self, item):
if item in self._kwargs:
return self._kwargs[item]
# NB: This call ensures that the default missing attribute behavior happens.
# Without it, AttributeErrors inside @property methods will be misattributed.
return object.__getattribute__(self, item)
def _key(self):
def hashable(value):
if isinstance(value, dict):
return tuple(sorted((k, hashable(v)) for k, v in value.items()))
elif isinstance(value, list):
return tuple(hashable(v) for v in value)
elif isinstance(value, set):
return tuple(sorted(hashable(v) for v in value))
else:
return value
return tuple(sorted((k, hashable(v)) for k, v in self._kwargs.items()
if k not in self._INHERITANCE_FIELDS))
def __hash__(self):
return hash(self._key())
def __eq__(self, other):
return isinstance(other, Struct) and self._key() == other._key()
def __ne__(self, other):
return not (self == other)
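# Sketch (hypothetical fields): equality and hashing are structural, driven by
# `_key()`, which ignores the bookkeeping entries in `_INHERITANCE_FIELDS`:
#
#   Struct(a=1, b=[2, 3]) == Struct(a=1, b=[2, 3])   # True (and hashes match)
#   Struct(a=1) == Struct(a=2)                       # False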
def __repr__(self):
classname = type(self).__name__
if self.address:
return '{classname}(address={address})'.format(classname=classname,
address=self.address.reference())
else:
return '{classname}({args})'.format(classname=classname,
args=', '.join(sorted('{}={!r}'.format(k, v)
for k, v in self._kwargs.items()
if v)))
class StructWithDeps(Struct):
"""A subclass of Struct with dependencies."""
def __init__(self, dependencies=None, **kwargs):
"""
:param list dependencies: The direct dependencies of this struct.
"""
# TODO: enforce the type of variants using the Addressable framework.
super(StructWithDeps, self).__init__(**kwargs)
self.dependencies = dependencies
@addressable_list(SubclassesOf(Struct))
def dependencies(self):
"""The direct dependencies of this target.
:rtype: list
"""
| apache-2.0 |
rbberger/lammps | tools/i-pi/ipi/engine/thermostats.py | 8 | 34424 | """Contains the classes that deal with constant temperature dynamics.
Copyright (C) 2013, Joshua More and Michele Ceriotti
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Contains the algorithms which propagate the thermostatting steps in the constant
temperature ensembles. Includes the new GLE thermostat, which can be used to
run PI+GLE dynamics, reducing the number of path integral beads required.
Classes:
Thermostat: Base thermostat class with the generic methods and attributes.
ThermoLangevin: Holds the algorithms for a Langevin thermostat.
ThermoPILE_L: Holds the algorithms for a path-integral Langevin equation
thermostat, with a thermostat coupled directly to the
centroid coordinate of each bead.
ThermoPILE_G: Holds the algorithms for a path-integral Langevin equation
thermostat, with a thermostat coupled to the kinetic energy for
the entire system.
ThermoSVR: Holds the algorithms for a stochastic velocity rescaling
thermostat.
ThermoGLE: Holds the algorithms for a generalized Langevin equation
thermostat.
ThermoNMGLE: Holds the algorithms for a generalized Langevin equation
thermostat in the normal mode representation.
ThermoNMGLEG: Holds the algorithms for a generalized Langevin equation
thermostat in the normal mode representation, with kinetic energy as
well as potential energy sampling optimization.
"""
__all__ = ['Thermostat', 'ThermoLangevin', 'ThermoPILE_L', 'ThermoPILE_G',
'ThermoSVR', 'ThermoGLE', 'ThermoNMGLE', 'ThermoNMGLEG']
import numpy as np
from ipi.utils.depend import *
from ipi.utils.units import *
from ipi.utils.mathtools import matrix_exp, stab_cholesky, root_herm
from ipi.utils.prng import Random
from ipi.utils.messages import verbosity, warning, info
from ipi.engine.beads import Beads
from ipi.engine.normalmodes import NormalModes
class Thermostat(dobject):
"""Base thermostat class.
Gives the standard methods and attributes needed in all the thermostat
classes.
Attributes:
prng: A pseudo random number generator object.
ndof: The number of degrees of freedom that the thermostat will be
attached to.
Depend objects:
dt: The time step used in the algorithms. Depends on the simulation dt.
temp: The simulation temperature. Higher than the system temperature by
a factor of the number of beads. Depends on the simulation temp.
ethermo: The total energy exchanged with the bath due to the thermostat.
p: The momentum vector that the thermostat is coupled to. Depends on the
beads p object.
m: The mass vector associated with p. Depends on the beads m object.
sm: The square root of the mass vector.
"""
def __init__(self, temp = 1.0, dt = 1.0, ethermo=0.0):
"""Initializes Thermostat.
Args:
temp: The simulation temperature. Defaults to 1.0.
dt: The simulation time step. Defaults to 1.0.
ethermo: The initial heat energy transferred to the bath.
Defaults to 0.0. Will be non-zero if the thermostat is
initialized from a checkpoint file.
"""
dset(self,"temp", depend_value(name='temp', value=temp))
dset(self,"dt", depend_value(name='dt', value=dt))
dset(self,"ethermo",depend_value(name='ethermo',value=ethermo))
def bind(self, beads=None, atoms=None, pm=None, prng=None, fixdof=None):
"""Binds the appropriate degrees of freedom to the thermostat.
This takes an object with degrees of freedom, and makes their momentum
and mass vectors members of the thermostat. It also then creates the
objects that will hold the data needed in the thermostat algorithms
and the dependency network.
Args:
beads: An optional beads object to take the mass and momentum vectors
from.
atoms: An optional atoms object to take the mass and momentum vectors
from.
pm: An optional tuple containing a single momentum value and its
conjugate mass.
prng: An optional pseudo random number generator object. Defaults to
Random().
fixdof: An optional integer which can specify the number of constraints
applied to the system. Defaults to zero.
Raises:
TypeError: Raised if no appropriate degree of freedom or object
containing a momentum vector is specified for
the thermostat to couple to.
"""
if prng is None:
warning("Initializing thermostat from standard random PRNG", verbosity.medium)
self.prng = Random()
else:
self.prng = prng
if beads is not None:
dset(self,"p",beads.p.flatten())
dset(self,"m",beads.m3.flatten())
elif atoms is not None:
dset(self,"p",dget(atoms, "p"))
dset(self,"m",dget(atoms, "m3"))
elif pm is not None:
dset(self,"p",pm[0])
dset(self,"m",pm[1])
else:
raise TypeError("Thermostat.bind expects either Beads, Atoms, NormalModes, or a (p,m) tuple to bind to")
if fixdof is None:
self.ndof = len(self.p)
else:
self.ndof = float(len(self.p) - fixdof)
dset(self, "sm",
depend_array(name="sm", value=np.zeros(len(dget(self,"m"))),
func=self.get_sm, dependencies=[dget(self,"m")]))
def get_sm(self):
"""Retrieves the square root of the mass matrix.
Returns:
A vector of the square root of the mass matrix with one value for
each degree of freedom.
"""
return np.sqrt(self.m)
def step(self):
"""Dummy thermostat step."""
pass
class ThermoLangevin(Thermostat):
"""Represents a langevin thermostat.
Depend objects:
tau: Thermostat damping time scale. Larger values give a less strongly
coupled thermostat.
T: Coefficient of the diffusive contribution of the thermostat, i.e. the
drift back towards equilibrium. Depends on tau and the time step.
S: Coefficient of the stochastic contribution of the thermostat, i.e.
the uncorrelated Gaussian noise. Depends on T and the temperature.
"""
def get_T(self):
"""Calculates the coefficient of the overall drift of the velocities."""
return np.exp(-0.5*self.dt/self.tau)
def get_S(self):
"""Calculates the coefficient of the white noise."""
return np.sqrt(Constants.kb*self.temp*(1 - self.T**2))
def __init__(self, temp = 1.0, dt = 1.0, tau = 1.0, ethermo=0.0):
"""Initializes ThermoLangevin.
Args:
temp: The simulation temperature. Defaults to 1.0.
dt: The simulation time step. Defaults to 1.0.
tau: The thermostat damping timescale. Defaults to 1.0.
ethermo: The initial heat energy transferred to the bath.
Defaults to 0.0. Will be non-zero if the thermostat is
initialized from a checkpoint file.
"""
super(ThermoLangevin,self).__init__(temp, dt, ethermo)
dset(self,"tau",depend_value(value=tau,name='tau'))
dset(self,"T",
depend_value(name="T",func=self.get_T,
dependencies=[dget(self,"tau"), dget(self,"dt")]))
dset(self,"S",
depend_value(name="S",func=self.get_S,
dependencies=[dget(self,"temp"), dget(self,"T")]))
def step(self):
"""Updates the bound momentum vector with a langevin thermostat."""
p = depstrip(self.p).copy()
sm = depstrip(self.sm)
p /= sm
self.ethermo += np.dot(p,p)*0.5
p *= self.T
p += self.S*self.prng.gvec(len(p))
self.ethermo -= np.dot(p,p)*0.5
p *= sm
self.p = p
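# Sketch (not part of i-PI): the step above is an Ornstein-Uhlenbeck update,
# p' = T*p + S*xi with T = exp(-dt/(2*tau)) and S = sqrt(kT*(1 - T**2)), whose
# stationary distribution is the Maxwell distribution. A standalone numpy check:
#
#   import numpy as np
#   kT, tau, dt = 1.0, 0.5, 0.1
#   T = np.exp(-0.5*dt/tau); S = np.sqrt(kT*(1 - T**2))
#   p = np.random.normal(0.0, np.sqrt(kT), 100000)  # mass-scaled momenta
#   for _ in range(100):
#       p = T*p + S*np.random.normal(size=p.shape)
#   print(p.var())  # remains ~= kT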
class ThermoPILE_L(Thermostat):
"""Represents a PILE thermostat with a local centroid thermostat.
Attributes:
_thermos: The list of the different thermostats for all the ring polymer
normal modes.
nm: A normal modes object to attach the thermostat to.
prng: Random number generator used in the stochastic integration
algorithms.
Depend objects:
tau: Centroid thermostat damping time scale. Larger values give a
less strongly coupled centroid thermostat.
tauk: Thermostat damping time scale for the non-centroid normal modes.
Depends on the ring polymer spring constant, and thus the simulation
temperature.
pilescale: A float used to reduce the intensity of the PILE thermostat if
required.
"""
def __init__(self, temp = 1.0, dt = 1.0, tau = 1.0, ethermo=0.0, scale=1.0):
"""Initializes ThermoPILE_L.
Args:
temp: The simulation temperature. Defaults to 1.0.
dt: The simulation time step. Defaults to 1.0.
tau: The centroid thermostat damping timescale. Defaults to 1.0.
ethermo: The initial conserved energy quantity. Defaults to 0.0. Will
be non-zero if the thermostat is initialized from a checkpoint file.
scale: A float used to reduce the intensity of the PILE thermostat if
required.
Raises:
TypeError: Raised if the thermostat is used with any object other than
a beads object, so that we make sure that the objects needed for the
normal mode transformation exist.
"""
super(ThermoPILE_L,self).__init__(temp,dt,ethermo)
dset(self,"tau",depend_value(value=tau,name='tau'))
dset(self,"pilescale",depend_value(value=scale,name='pilescale'))
def bind(self, nm=None, prng=None, bindcentroid=True, fixdof=None):
"""Binds the appropriate degrees of freedom to the thermostat.
This takes a beads object with degrees of freedom, and makes its momentum
and mass vectors members of the thermostat. It also then creates the
objects that will hold the data needed in the thermostat algorithms
and the dependency network.
Gives the interface for both the PILE_L and PILE_G thermostats, which
only differ in their treatment of the centroid coordinate momenta.
Args:
nm: An optional normal mode object to take the mass and momentum
vectors from.
prng: An optional pseudo random number generator object. Defaults to
Random().
bindcentroid: An optional boolean which decides whether a Langevin
thermostat is attached to the centroid mode of each atom
separately, or the total kinetic energy. Defaults to True, which
gives a thermostat bound to each centroid momentum.
fixdof: An optional integer which can specify the number of constraints
applied to the system. Defaults to zero.
Raises:
TypeError: Raised if no appropriate degree of freedom or object
containing a momentum vector is specified for
the thermostat to couple to.
"""
if nm is None or not type(nm) is NormalModes:
raise TypeError("ThermoPILE_L.bind expects a NormalModes argument to bind to")
if prng is None:
self.prng = Random()
else:
self.prng = prng
prev_ethermo = self.ethermo
# creates a set of thermostats to be applied to individual normal modes
self._thermos = [ ThermoLangevin(temp=1, dt=1, tau=1) for b in range(nm.nbeads) ]
# optionally does not bind the centroid, so we can re-use all of this
# in the PILE_G case
if not bindcentroid:
self._thermos[0] = None
self.nm = nm
dset(self,"tauk",
depend_array(name="tauk", value=np.zeros(nm.nbeads-1,float),
func=self.get_tauk, dependencies=[dget(self,"pilescale"), dget(nm,"dynomegak")] ) )
# must pipe all the dependencies in such a way that values for the nm thermostats
# are automatically updated based on the "master" thermostat
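# NB: make_taugetter is a factory so each lambda captures its own value of k;
# a bare "lambda: self.tauk[it-1]" defined inside the loop below would
# late-bind "it" and leave every non-centroid thermostat reading the same
# (final) tauk entry.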
def make_taugetter(k):
return lambda: self.tauk[k-1]
it = 0
for t in self._thermos:
if t is None:
it += 1
continue
if it > 0:
fixdof = None # only the centroid thermostat may have constraints
# bind thermostat t to the it-th bead
t.bind(pm=(nm.pnm[it,:],nm.dynm3[it,:]),prng=self.prng, fixdof=fixdof)
# pipes temp and dt
deppipe(self,"temp", t, "temp")
deppipe(self,"dt", t, "dt")
# for tau it is slightly more complex
if it == 0:
deppipe(self,"tau", t, "tau")
else:
# Here we manually connect _thermos[i].tau to tauk[i].
# Simple and clear.
dget(t,"tau").add_dependency(dget(self,"tauk"))
dget(t,"tau")._func = make_taugetter(it)
dget(self,"ethermo").add_dependency(dget(t,"ethermo"))
it += 1
# since the ethermo will be "delegated" to the normal modes thermostats,
# one has to split
# any previously-stored value between the sub-thermostats
if bindcentroid:
for t in self._thermos:
t.ethermo = prev_ethermo/nm.nbeads
dget(self,"ethermo")._func = self.get_ethermo;
# if we are not binding the centroid just yet, this bit of the piping
# is delegated to the function which is actually calling this
def get_tauk(self):
"""Computes the thermostat damping time scale for the non-centroid
normal modes.
Returns:
An array with the damping time scales for the non-centroid modes.
"""
# Also include an optional scaling factor to reduce the intensity of NM thermostats
return np.array([ self.pilescale/(2*self.nm.dynomegak[k]) for k in range(1,len(self._thermos)) ])
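# Sketch (hypothetical numbers): with pilescale = 1.0 and normal-mode
# frequencies dynomegak = [0.0, 2.0, 4.0], this yields
# tauk = [1.0/(2*2.0), 1.0/(2*4.0)] = [0.25, 0.125] - each non-centroid mode
# is damped on the timescale of its own oscillation.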
def get_ethermo(self):
"""Computes the total energy transferred to the heat bath for all the
thermostats.
"""
et = 0.0
for t in self._thermos:
et += t.ethermo
return et
def step(self):
"""Updates the bound momentum vector with a PILE thermostat."""
# super-cool! just loop over the thermostats! it's as easy as that!
for t in self._thermos:
t.step()
class ThermoSVR(Thermostat):
"""Represents a stochastic velocity rescaling thermostat.
Depend objects:
tau: Centroid thermostat damping time scale. Larger values give a
less strongly coupled centroid thermostat.
K: Scaling factor for the total kinetic energy. Depends on the
temperature.
et: Parameter determining the strength of the thermostat coupling.
Depends on tau and the time step.
"""
def get_et(self):
"""Calculates the damping term in the propagator."""
return np.exp(-0.5*self.dt/self.tau)
def get_K(self):
"""Calculates the average kinetic energy per degree of freedom."""
return Constants.kb*self.temp*0.5
def __init__(self, temp = 1.0, dt = 1.0, tau = 1.0, ethermo=0.0):
"""Initializes ThermoSVR.
Args:
temp: The simulation temperature. Defaults to 1.0.
dt: The simulation time step. Defaults to 1.0.
tau: The thermostat damping timescale. Defaults to 1.0.
ethermo: The initial conserved energy quantity. Defaults to 0.0. Will
be non-zero if the thermostat is initialized from a checkpoint file.
"""
super(ThermoSVR,self).__init__(temp,dt,ethermo)
dset(self,"tau",depend_value(value=tau,name='tau'))
dset(self,"et",
depend_value(name="et",func=self.get_et,
dependencies=[dget(self,"tau"), dget(self,"dt")]))
dset(self,"K",
depend_value(name="K",func=self.get_K, dependencies=[dget(self,"temp")]))
def step(self):
"""Updates the bound momentum vector with a stochastic velocity rescaling
thermostat. See G Bussi, D Donadio, M Parrinello,
Journal of Chemical Physics 126, 014101 (2007)
"""
K = np.dot(depstrip(self.p),depstrip(self.p)/depstrip(self.m))*0.5
# rescaling is undefined if the KE is zero
if K == 0.0:
return
# gets the stochastic term (basically a Gamma distribution for the kinetic energy)
r1 = self.prng.g
if (self.ndof-1)%2 == 0:
rg = 2.0*self.prng.gamma((self.ndof-1)/2)
else:
rg = 2.0*self.prng.gamma((self.ndof-2)/2) + self.prng.g**2
alpha2 = self.et + self.K/K*(1 - self.et)*(r1**2 + rg) + 2.0*r1*np.sqrt(self.K/K*self.et*(1 - self.et))
alpha = np.sqrt(alpha2)
if (r1 + np.sqrt(2*K/self.K*self.et/(1 - self.et))) < 0:
alpha *= -1
self.ethermo += K*(1 - alpha2)
self.p *= alpha
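# Sketch (not part of i-PI): the propagator above needs the sum of (ndof - 1)
# squared unit Gaussians; it is drawn in O(1) from a Gamma distribution
# (2*Gamma(n/2) is chi-squared with n degrees of freedom) instead of summing
# ndof - 1 samples. A standalone numpy equivalent for odd n:
#
#   import numpy as np
#   rng = np.random.default_rng(0)
#   n = 301  # hypothetical ndof - 1
#   rg = 2.0*rng.gamma((n - 1)/2) + rng.standard_normal()**2
#   # rg has the same distribution as (rng.standard_normal(n)**2).sum()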
class ThermoPILE_G(ThermoPILE_L):
"""Represents a PILE thermostat with a global centroid thermostat.
Simply replaces the Langevin thermostat for the centroid normal mode with
a global velocity rescaling thermostat.
"""
def __init__(self, temp = 1.0, dt = 1.0, tau = 1.0, ethermo=0.0, scale = 1.0):
"""Initializes ThermoPILE_G.
Args:
temp: The simulation temperature. Defaults to 1.0.
dt: The simulation time step. Defaults to 1.0.
tau: The centroid thermostat damping timescale. Defaults to 1.0.
ethermo: The initial conserved energy quantity. Defaults to 0.0. Will
be non-zero if the thermostat is initialized from a checkpoint file.
scale: A float used to reduce the intensity of the PILE thermostat if
required.
"""
super(ThermoPILE_G,self).__init__(temp,dt,tau,ethermo)
dset(self,"pilescale",depend_value(value=scale,name='pilescale'))
def bind(self, nm=None, prng=None, fixdof=None):
"""Binds the appropriate degrees of freedom to the thermostat.
This takes a beads object with degrees of freedom, and makes its momentum
and mass vectors members of the thermostat. It also then creates the
objects that will hold the data needed in the thermostat algorithms
and the dependency network.
Uses the PILE_L bind interface, with bindcentroid set to false so we can
specify that thermostat separately, by binding a global
thermostat to the centroid mode.
Args:
nm: An optional normal modes object to take the mass and momentum
vectors from.
prng: An optional pseudo random number generator object. Defaults to
Random().
fixdof: An optional integer which can specify the number of constraints
applied to the system. Defaults to zero.
"""
# first binds as a local PILE, then substitutes the thermostat on the centroid
prev_ethermo = self.ethermo
super(ThermoPILE_G,self).bind(nm=nm,prng=prng,bindcentroid=False, fixdof=fixdof)
#centroid thermostat
self._thermos[0] = ThermoSVR(temp=1, dt=1, tau=1)
t = self._thermos[0]
t.bind(pm=(nm.pnm[0,:],nm.dynm3[0,:]),prng=self.prng, fixdof=fixdof)
deppipe(self,"temp", t, "temp")
deppipe(self,"dt", t, "dt")
deppipe(self,"tau", t, "tau")
dget(self,"ethermo").add_dependency(dget(t,"ethermo"))
# splits any previous ethermo between the thermostats, and finishes to bind ethermo to the sum function
for t in self._thermos:
t.ethermo = prev_ethermo/nm.nbeads
dget(self,"ethermo")._func = self.get_ethermo;
class ThermoGLE(Thermostat):
"""Represents a GLE thermostat.
This is similar to a Langevin thermostat, in that it uses Gaussian random
numbers to simulate a heat bath acting on the system, but simulates a
non-Markovian system by using a Markovian formulation in an extended phase
space. This allows for a much greater degree of flexibility, and this
thermostat, properly fitted, can give an approximation to the correct
quantum ensemble even for a classical, 1-bead simulation. More reasonably,
using this thermostat allows for a far smaller number of replicas of the
system to be used, as the convergence of the properties
of the system is accelerated with respect to the number of beads when PI+GLE
are used in combination. (See M. Ceriotti, D. E. Manolopoulos, M. Parrinello,
J. Chem. Phys. 134, 084104 (2011)).
Attributes:
ns: The number of auxiliary degrees of freedom.
s: An array holding all the momenta, including the ones for the
auxiliary degrees of freedom.
Depend objects:
A: Drift matrix giving the damping time scales for all the different
degrees of freedom.
C: Static covariance matrix.
Satisfies A.C + C.transpose(A) = B.transpose(B), where B is the
diffusion matrix, giving the strength of the coupling of the system
with the heat bath, and thus the size of the stochastic
contribution of the thermostat.
T: Matrix for the diffusive contribution of the thermostat, i.e. the
drift back towards equilibrium. Depends on A and the time step.
S: Matrix for the stochastic contribution of the thermostat, i.e.
the uncorrelated Gaussian noise. Depends on C and T.
"""
def get_T(self):
"""Calculates the matrix for the overall drift of the velocities."""
return matrix_exp(-0.5*self.dt*self.A)
def get_S(self):
"""Calculates the matrix for the colored noise."""
SST = Constants.kb*(self.C - np.dot(self.T,np.dot(self.C,self.T.T)))
# Uses a symmetric decomposition rather than Cholesky, since it is more stable
return root_herm(SST)
def get_C(self):
"""Calculates C from temp (if C is not set explicitly)"""
rC = np.identity(self.ns + 1,float)*self.temp
return rC[:]
def __init__(self, temp = 1.0, dt = 1.0, A = None, C = None, ethermo=0.0):
"""Initializes ThermoGLE.
Args:
temp: The simulation temperature. Defaults to 1.0.
dt: The simulation time step. Defaults to 1.0.
A: An optional matrix giving the drift matrix. Defaults to a single
value of 1.0.
C: An optional matrix giving the covariance matrix. Defaults to an
identity matrix times temperature with the same dimensions as the
total number of degrees of freedom in the system.
ethermo: The initial heat energy transferred to the bath.
Defaults to 0.0. Will be non-zero if the thermostat is
initialized from a checkpoint file.
"""
super(ThermoGLE,self).__init__(temp,dt,ethermo)
if A is None:
A = np.identity(1,float)
dset(self,"A",depend_value(value=A.copy(),name='A'))
self.ns = len(self.A) - 1
# now, this is tricky. if C is taken from temp, then we want it to be updated
# as a depend of temp. Otherwise, we want it to be an independent beast.
if C is None:
C = np.identity(self.ns+1,float)*self.temp
dset(self,"C",
depend_value(name='C', func=self.get_C,
dependencies=[dget(self,"temp")]))
else:
dset(self,"C",depend_value(value=C.copy(),name='C'))
dset(self,"T",
depend_value(name="T",func=self.get_T,
dependencies=[dget(self,"A"), dget(self,"dt")]))
dset(self,"S",
depend_value(name="S",func=self.get_S,
dependencies=[dget(self,"C"), dget(self,"T")]))
self.s = np.zeros(0)
def bind(self, beads=None, atoms=None, pm=None, prng=None, fixdof=None):
"""Binds the appropriate degrees of freedom to the thermostat.
This takes an object with degrees of freedom, and makes their momentum
and mass vectors members of the thermostat. It also then creates the
objects that will hold the data needed in the thermostat algorithms
and the dependency network.
Args:
beads: An optional beads object to take the mass and momentum vectors
from.
atoms: An optional atoms object to take the mass and momentum vectors
from.
pm: An optional tuple containing a single momentum value and its
conjugate mass.
prng: An optional pseudo random number generator object. Defaults to
Random().
fixdof: An optional integer which can specify the number of constraints
applied to the system. Defaults to zero.
Raises:
TypeError: Raised if no appropriate degree of freedom or object
containing a momentum vector is specified for
the thermostat to couple to.
"""
super(ThermoGLE,self).bind(beads,atoms,pm,prng,fixdof)
# allocates, initializes or restarts an array of s's
if self.s.shape != (self.ns + 1, len(dget(self,"m"))):
if len(self.s) > 0:
warning("Mismatch in GLE s array size on restart, will reinitialize to free particle.", verbosity.low)
self.s = np.zeros((self.ns + 1, len(dget(self,"m"))))
# Initializes the s vector in the free-particle limit
info(" GLE additional DOFs initialized to the free-particle limit.", verbosity.low)
SC = stab_cholesky(self.C*Constants.kb)
self.s[:] = np.dot(SC, self.prng.gvec(self.s.shape))
else:
info("GLE additional DOFs initialized from input.", verbosity.medium)
def step(self):
"""Updates the bound momentum vector with a GLE thermostat"""
p = depstrip(self.p).copy()
self.s[0,:] = p/depstrip(self.sm)
self.ethermo += np.dot(self.s[0],self.s[0])*0.5
self.s[:] = np.dot(self.T,self.s) + np.dot(self.S,self.prng.gvec(self.s.shape))
self.ethermo -= np.dot(self.s[0],self.s[0])*0.5
self.p = self.s[0]*self.sm
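# Sketch (not part of i-PI): the same update for a single physical DOF with one
# auxiliary momentum, in plain numpy. A and C below are hypothetical; the only
# requirements are that A is stable and C is the target covariance.
#
#   import numpy as np
#   from scipy.linalg import expm, sqrtm
#   kT, dt = 1.0, 0.1
#   A = np.array([[1.0, 0.5], [-0.5, 2.0]])   # drift matrix
#   C = kT*np.eye(2)                          # classical FDT covariance
#   T = expm(-0.5*dt*A)
#   S = np.real(sqrtm(C - np.dot(T, np.dot(C, T.T))))  # T C T^T + S S^T = C
#   s = np.sqrt(kT)*np.random.normal(size=2)  # [p/sqrt(m), auxiliary momentum]
#   s = np.dot(T, s) + np.dot(S, np.random.normal(size=2))  # one step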
class ThermoNMGLE(Thermostat):
"""Represents a 'normal-modes' GLE thermostat.
An extension to the GLE thermostat which is applied in the
normal modes representation, and which allows a different GLE to be used
for each normal mode.
Attributes:
ns: The number of auxiliary degrees of freedom.
nb: The number of beads.
s: An array holding all the momenta, including the ones for the
auxiliary degrees of freedom.
Depend objects:
A: Drift matrix giving the damping time scales for all the different
degrees of freedom (must contain nb terms).
C: Static covariance matrix.
Satisfies A.C + C.transpose(A) = B.transpose(B), where B is the
diffusion matrix, giving the strength of the coupling of the system
with the heat bath, and thus the size of the stochastic
contribution of the thermostat.
"""
def get_C(self):
"""Calculates C from temp (if C is not set explicitly)."""
rv = np.ndarray((self.nb, self.ns+1, self.ns+1), float)
for b in range(0,self.nb):
rv[b] = np.identity(self.ns + 1,float)*self.temp
return rv[:]
def __init__(self, temp = 1.0, dt = 1.0, A = None, C = None, ethermo=0.0):
"""Initializes ThermoGLE.
Args:
temp: The simulation temperature. Defaults to 1.0.
dt: The simulation time step. Defaults to 1.0.
A: An optional matrix giving the drift matrix. Defaults to a single
value of 1.0.
C: An optional matrix giving the covariance matrix. Defaults to an
identity matrix times temperature with the same dimensions as the
total number of degrees of freedom in the system.
ethermo: The initial heat energy transferred to the bath.
Defaults to 0.0. Will be non-zero if the thermostat is
initialized from a checkpoint file.
"""
super(ThermoNMGLE,self).__init__(temp,dt,ethermo)
if A is None:
A = np.identity(1,float)
dset(self,"A",depend_value(value=A.copy(),name='A'))
self.nb = len(self.A)
self.ns = len(self.A[0]) - 1
# now, this is tricky. if C is taken from temp, then we want it to be
# updated as a depend of temp.
# Otherwise, we want it to be an independent beast.
if C is None:
dset(self,"C",depend_value(name='C', func=self.get_C, dependencies=[dget(self,"temp")]))
else:
dset(self,"C",depend_value(value=C.copy(),name='C'))
def bind(self, nm=None, prng=None, fixdof=None):
"""Binds the appropriate degrees of freedom to the thermostat.
This takes an object with degrees of freedom, and makes their momentum
and mass vectors members of the thermostat. It also then creates the
objects that will hold the data needed in the thermostat algorithms
and the dependency network. Actually, this specific thermostat requires
being called on a beads object.
Args:
nm: An optional normal modes object to take the mass and momentum
vectors from.
prng: An optional pseudo random number generator object. Defaults to
Random().
fixdof: An optional integer which can specify the number of constraints
applied to the system. Defaults to zero.
Raises:
TypeError: Raised if no beads object is specified for
the thermostat to couple to.
"""
if nm is None or not type(nm) is NormalModes:
raise TypeError("ThermoNMGLE.bind expects a NormalModes argument to bind to")
if prng is None:
self.prng = Random()
else:
self.prng = prng
if (nm.nbeads != self.nb):
raise IndexError("The parameters in nm_gle options correspond to a bead number "+str(self.nb)+ " which does not match the number of beads in the path" + str(nm.nbeads) )
# allocates, initializes or restarts an array of s's
if self.s.shape != (self.nb, self.ns + 1, nm.natoms*3):
if len(self.s) > 0:
warning("Mismatch in GLE s array size on restart, will reinitialize to free particle.", verbosity.low)
self.s = np.zeros((self.nb, self.ns + 1, nm.natoms*3))
# Initializes the s vector in the free-particle limit
info(" GLE additional DOFs initialized to the free-particle limit.", verbosity.low)
for b in range(self.nb):
SC = stab_cholesky(self.C[b]*Constants.kb)
self.s[b] = np.dot(SC, self.prng.gvec(self.s[b].shape))
else:
info("GLE additional DOFs initialized from input.", verbosity.medium)
prev_ethermo = self.ethermo
# creates a set of thermostats to be applied to individual normal modes
self._thermos = [ThermoGLE(temp=1, dt=1, A=self.A[b], C=self.C[b]) for b in range(self.nb)]
# must pipe all the dependencies in such a way that values for the nm
# thermostats are automatically updated based on the "master" thermostat
def make_Agetter(k):
return lambda: self.A[k]
def make_Cgetter(k):
return lambda: self.C[k]
it = 0
for t in self._thermos:
t.s = self.s[it] # gets the s's as a slice of self.s
t.bind(pm=(nm.pnm[it,:],nm.dynm3[it,:]), prng=self.prng) # bind thermostat t to the it-th normal mode
# pipes temp and dt
deppipe(self,"temp", t, "temp")
deppipe(self,"dt", t, "dt")
# here we pipe the A and C of individual NM to the "master" arrays
dget(t,"A").add_dependency(dget(self,"A"))
dget(t,"A")._func = make_Agetter(it)
dget(t,"C").add_dependency(dget(self,"C"))
dget(t,"C")._func = make_Cgetter(it)
dget(self,"ethermo").add_dependency(dget(t,"ethermo"))
it += 1
# since the ethermo will be "delegated" to the normal modes thermostats,
# one has to split
# any previously-stored value between the sub-thermostats
for t in self._thermos:
t.ethermo = prev_ethermo/self.nb
dget(self,"ethermo")._func = self.get_ethermo;
def step(self):
"""Updates the thermostat in NM representation by looping over the
individual DOFs.
"""
for t in self._thermos:
t.step()
def get_ethermo(self):
"""Computes the total energy transferred to the heat bath for all the nm
thermostats.
"""
et = 0.0
for t in self._thermos:
et += t.ethermo
return et
class ThermoNMGLEG(ThermoNMGLE):
"""Represents a 'normal-modes' GLE thermostat + SVR.
An extension to the above NMGLE thermostat which also adds a stochastic velocity
rescaling to the centroid.
Depend objects:
tau: Thermostat damping time scale. Larger values give a less strongly
coupled thermostat.
"""
def __init__(self, temp = 1.0, dt = 1.0, A = None, C = None, tau=1.0, ethermo=0.0):
super(ThermoNMGLEG,self).__init__(temp, dt, A, C, ethermo)
dset(self,"tau",depend_value(value=tau,name='tau'))
def bind(self, nm=None, prng=None, fixdof=None):
"""Binds the appropriate degrees of freedom to the thermostat.
This takes an object with degrees of freedom, and makes their momentum
and mass vectors members of the thermostat. It also then creates the
objects that will hold the data needed in the thermostat algorithms
and the dependency network. Actually, this specific thermostat requires
being called on a beads object.
Args:
nm: An optional normal modes object to take the mass and momentum
vectors from.
prng: An optional pseudo random number generator object. Defaults to
Random().
fixdof: An optional integer which can specify the number of constraints
applied to the system. Defaults to zero.
"""
super(ThermoNMGLEG,self).bind(nm, prng, fixdof)
t = ThermoSVR(self.temp, self.dt, self.tau)
t.bind(pm=(nm.pnm[0,:],nm.dynm3[0,:]), prng=self.prng) # bind global thermostat to centroid
# pipes temp and dt
deppipe(self,"temp", t, "temp")
deppipe(self,"dt", t, "dt")
deppipe(self,"tau", t, "tau")
dget(self,"ethermo").add_dependency(dget(t,"ethermo"))
self._thermos.append(t)
| gpl-2.0 |
xuhaibahmad/PELL | pell/actions/email_action.py | 1 | 1686 | import smtplib
import time
from pell.assistant.text_to_speech import talk, listen
import sys
import os
class EmailAction:
def __init__(self, command, subject=None, message=None, receiver=None):
self.command = command
self.subject = subject
self.message = message
self.receiver = receiver
self.email = os.getenv("gmail_email")
self.password = os.getenv("gmail_password")
def execute(self):
# Ask for recipient only when not provided in the command
if self.receiver is None:
talk('Who do you want to send it to?')
time.sleep(3)
self.receiver = listen()
# Ask for subject only when not provided in the command
if self.subject is None:
talk('What is the subject?')
time.sleep(3)
self.subject = listen()
# Ask for message only when not provided in the command
if self.message is None:
talk('What should I say?')
time.sleep(3)
self.message = listen()
content = 'Subject: {}\n\n{}'.format(self.subject, self.message)
mail = smtplib.SMTP('smtp.gmail.com', 587)
mail.ehlo()
mail.starttls()
mail.login(self.email, self.password)
# send message
mail.sendmail(self.email, self.receiver, content)
# end mail connection
mail.close()
talk('Email sent.')
sys.exit('Email sent.')
if __name__ == "__main__":
test_email = os.getenv("test_email")
EmailAction(
"Send email to myself",
"PELL",
"This is a test message from PELL",
test_email
).execute()
| gpl-3.0 |
alexandru-g/kernel_htc_m8_gpe | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
Y_OFFSET = 100
RECT_HEIGHT = 100
RECT_SPACE = 50
EVENT_MARKING_WIDTH = 5
def __init__(self, sched_tracer, title, parent = None, id = -1):
wx.Frame.__init__(self, parent, id, title)
(self.screen_width, self.screen_height) = wx.GetDisplaySize()
self.screen_width -= 10
self.screen_height -= 10
self.zoom = 0.5
self.scroll_scale = 20
self.sched_tracer = sched_tracer
self.sched_tracer.set_root_win(self)
(self.ts_start, self.ts_end) = sched_tracer.interval()
self.update_width_virtual()
self.nr_rects = sched_tracer.nr_rectangles() + 1
self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
# whole window panel
self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
# scrollable container
self.scroll = wx.ScrolledWindow(self.panel)
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
self.scroll.EnableScrolling(True, True)
self.scroll.SetFocus()
# scrollable drawing area
self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Fit()
self.Fit()
self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
self.txt = None
self.Show(True)
def us_to_px(self, val):
return val / (10 ** 3) * self.zoom
def px_to_us(self, val):
return (val / self.zoom) * (10 ** 3)
def scroll_start(self):
(x, y) = self.scroll.GetViewStart()
return (x * self.scroll_scale, y * self.scroll_scale)
def scroll_start_us(self):
(x, y) = self.scroll_start()
return self.px_to_us(x)
def paint_rectangle_zone(self, nr, color, top_color, start, end):
offset_px = self.us_to_px(start - self.ts_start)
width_px = self.us_to_px(end - self.ts_start)
offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
width_py = RootFrame.RECT_HEIGHT
dc = self.dc
if top_color is not None:
(r, g, b) = top_color
top_color = wx.Colour(r, g, b)
brush = wx.Brush(top_color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
width_py -= RootFrame.EVENT_MARKING_WIDTH
offset_py += RootFrame.EVENT_MARKING_WIDTH
(r, g, b) = color
color = wx.Colour(r, g, b)
brush = wx.Brush(color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
def update_rectangles(self, dc, start, end):
start += self.ts_start
end += self.ts_start
self.sched_tracer.fill_zone(start, end)
def on_paint(self, event):
dc = wx.PaintDC(self.scroll_panel)
self.dc = dc
width = min(self.width_virtual, self.screen_width)
(x, y) = self.scroll_start()
start = self.px_to_us(x)
end = self.px_to_us(x + width)
self.update_rectangles(dc, start, end)
def rect_from_ypixel(self, y):
y -= RootFrame.Y_OFFSET
rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
return -1
return rect
def update_summary(self, txt):
if self.txt:
self.txt.Destroy()
self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
def on_mouse_down(self, event):
(x, y) = event.GetPositionTuple()
rect = self.rect_from_ypixel(y)
if rect == -1:
return
t = self.px_to_us(x) + self.ts_start
self.sched_tracer.mouse_down(rect, t)
def update_width_virtual(self):
self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
def __zoom(self, x):
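# x is the time offset (in us) of the viewport's left edge before the zoom;
# recomputing xpos from it after update_width_virtual keeps that timestamp
# anchored at the left edge across zoom levels.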
self.update_width_virtual()
(xpos, ypos) = self.scroll.GetViewStart()
xpos = self.us_to_px(x) / self.scroll_scale
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
self.Refresh()
def zoom_in(self):
x = self.scroll_start_us()
self.zoom *= 2
self.__zoom(x)
def zoom_out(self):
x = self.scroll_start_us()
self.zoom /= 2
self.__zoom(x)
def on_key_press(self, event):
key = event.GetRawKeyCode()
if key == ord("+"):
self.zoom_in()
return
if key == ord("-"):
self.zoom_out()
return
key = event.GetKeyCode()
(x, y) = self.scroll.GetViewStart()
if key == wx.WXK_RIGHT:
self.scroll.Scroll(x + 1, y)
elif key == wx.WXK_LEFT:
self.scroll.Scroll(x - 1, y)
elif key == wx.WXK_DOWN:
self.scroll.Scroll(x, y + 1)
elif key == wx.WXK_UP:
self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
nickng/hexacopter-python | robovero/lpc17xx_pinsel.py | 2 | 4573 | """Pin select client library functions. Find implementation details in LPC17xx
CMSIS-Compliant Standard Peripheral Firmware Driver Library documentation.
"""
from internals import robocaller, cstruct
__author__ = "Neil MacMunn"
__credits__ = ["Neil MacMunn", "NXP MCU SW Application Team"]
__maintainer__ = "Neil MacMunn"
__email__ = "neil@gumstix.com"
__copyright__ = "Copyright 2011, Gumstix Inc"
__license__ = "BSD 2-Clause"
__version__ = "0.1"
# Macros define for PORT Selection
# PORT 0
PINSEL_PORT_0 = ((0))
# PORT 1
PINSEL_PORT_1 = ((1))
# PORT 2
PINSEL_PORT_2 = ((2))
# PORT 3
PINSEL_PORT_3 = ((3))
# PORT 4
PINSEL_PORT_4 = ((4))
# Macros define for Pin Function selection
# default function
PINSEL_FUNC_0 = ((0))
# first alternate function
PINSEL_FUNC_1 = ((1))
# second alternate function
PINSEL_FUNC_2 = ((2))
# third or reserved alternate function
PINSEL_FUNC_3 = ((3))
# Macros define for Pin Number of Port
# Pin 0
PINSEL_PIN_0 = ((0))
# Pin 1
PINSEL_PIN_1 = ((1))
# Pin 2
PINSEL_PIN_2 = ((2))
# Pin 3
PINSEL_PIN_3 = ((3))
# Pin 4
PINSEL_PIN_4 = ((4))
# Pin 5
PINSEL_PIN_5 = ((5))
# Pin 6
PINSEL_PIN_6 = ((6))
# Pin 7
PINSEL_PIN_7 = ((7))
# Pin 8
PINSEL_PIN_8 = ((8))
# Pin 9
PINSEL_PIN_9 = ((9))
# Pin 10
PINSEL_PIN_10 = ((10))
# Pin 11
PINSEL_PIN_11 = ((11))
# Pin 12
PINSEL_PIN_12 = ((12))
# Pin 13
PINSEL_PIN_13 = ((13))
# Pin 14
PINSEL_PIN_14 = ((14))
# Pin 15
PINSEL_PIN_15 = ((15))
# Pin 16
PINSEL_PIN_16 = ((16))
# Pin 17
PINSEL_PIN_17 = ((17))
# Pin 18
PINSEL_PIN_18 = ((18))
# Pin 19
PINSEL_PIN_19 = ((19))
# Pin 20
PINSEL_PIN_20 = ((20))
# Pin 21
PINSEL_PIN_21 = ((21))
# Pin 22
PINSEL_PIN_22 = ((22))
# Pin 23
PINSEL_PIN_23 = ((23))
# Pin 24
PINSEL_PIN_24 = ((24))
# Pin 25
PINSEL_PIN_25 = ((25))
# Pin 26
PINSEL_PIN_26 = ((26))
# Pin 27
PINSEL_PIN_27 = ((27))
# Pin 28
PINSEL_PIN_28 = ((28))
# Pin 29
PINSEL_PIN_29 = ((29))
# Pin 30
PINSEL_PIN_30 = ((30))
# Pin 31
PINSEL_PIN_31 = ((31))
# Macros define for Pin mode
# Internal pull-up resistor
PINSEL_PINMODE_PULLUP = ((0))
# Tri-state
PINSEL_PINMODE_TRISTATE = ((2))
# Internal pull-down resistor
PINSEL_PINMODE_PULLDOWN = ((3))
# Pin is in the normal (not open drain) mode
PINSEL_PINMODE_NORMAL = ((0))
# Pin is in the open drain mode
PINSEL_PINMODE_OPENDRAIN = ((1))
# Macros define for I2C mode
# The standard drive mode
PINSEL_I2C_Normal_Mode = ((0))
# Fast Mode Plus drive mode
PINSEL_I2C_Fast_Mode = ((1))
class PINSEL_CFG_Type(cstruct):
'''Pin configuration structure.
Portnum: Port Number, should be PINSEL_PORT_x, where x should be in range
from 0 to 4
Pinnum: Pin Number, should be PINSEL_PIN_x, where x should be in range from 0
to 31
Funcnum: Function Number, should be PINSEL_FUNC_x, where x should be in range
from 0 to 3
Pinmode: Pin Mode, should be:
PINSEL_PINMODE_PULLUP: Internal pull-up resistor
PINSEL_PINMODE_TRISTATE: Tri-state
PINSEL_PINMODE_PULLDOWN: Internal pull-down resistor
OpenDrain: OpenDrain mode, should be:
PINSEL_PINMODE_NORMAL: Pin is in the normal (not open drain) mode
PINSEL_PINMODE_OPENDRAIN: Pin is in the open drain mode
ptr: LPC1769 memory address where structure is stored. Use this in place of
the C reference operator (&).
'''
pass
def PINSEL_SetI2C0Pins(i2cPinMode, filterSlewRateEnable):
'''Setup I2C0 pins.
i2cPinMode: I2C pin mode, should be one of the following:
PINSEL_I2C_Normal_Mode : The standard drive mode
PINSEL_I2C_Fast_Mode : Fast Mode Plus drive mode
filterSlewRateEnable: should be:
ENABLE: Enable filter and slew rate.
DISABLE: Disable filter and slew rate.
'''
return robocaller("PINSEL_SetI2C0Pins", "void", i2cPinMode, filterSlewRateEnable)
def PINSEL_ConfigPin(PinCfg):
'''Configure Pin corresponding to specified parameters passed in the PinCfg.
PinCfg: Pointer to a PINSEL_CFG_Type structure that contains the configuration
information for the specified pin.
'''
return robocaller("PINSEL_ConfigPin", "void", PinCfg)
def PINSEL_ConfigTraceFunc(NewState):
'''Configure trace function.
NewState: should be one of the following:
ENABLE : Enable Trace Function
DISABLE : Disable Trace Function
'''
return robocaller("PINSEL_ConfigTraceFunc", "void", NewState)
| bsd-2-clause |
TrimBiggs/calico | calico/felix/test/test_frules.py | 1 | 21852 | # -*- coding: utf-8 -*-
# Copyright 2015 Metaswitch Networks
# Copyright 2015 Cisco Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
felix.test.test_frules
~~~~~~~~~~~~~~~~~~~~~~~~~
Tests of iptables rules generation function.
"""
import logging
from mock import Mock, patch, call
from netaddr import IPAddress
from calico.felix import frules
from calico.felix.fiptables import IptablesUpdater
from calico.felix.futils import FailedSystemCall, IPV4
from calico.felix.test.base import BaseTestCase, load_config
_log = logging.getLogger(__name__)
class TestRules(BaseTestCase):
@patch("calico.felix.futils.check_call", autospec=True)
@patch("calico.felix.frules.devices", autospec=True)
@patch("calico.felix.frules.HOSTS_IPSET_V4", autospec=True)
def test_install_global_rules(self, m_ipset, m_devices, m_check_call):
m_devices.interface_exists.return_value = False
m_devices.interface_up.return_value = False
m_set_ips = m_devices.set_interface_ips
env_dict = {
"FELIX_ETCDADDR": "localhost:4001",
"FELIX_HOSTNAME": "myhost",
"FELIX_INTERFACEPREFIX": "tap",
"FELIX_METADATAADDR": "123.0.0.1",
"FELIX_METADATAPORT": "1234",
"FELIX_IPINIPENABLED": "True",
"FELIX_IPINIPMTU": "1480",
"FELIX_DEFAULTENDPOINTTOHOSTACTION": "RETURN"
}
config = load_config("felix_missing.cfg", env_dict=env_dict)
config.IP_IN_IP_ADDR = IPAddress("10.0.0.1")
m_v4_upd = Mock(spec=IptablesUpdater)
m_v6_upd = Mock(spec=IptablesUpdater)
m_v6_raw_upd = Mock(spec=IptablesUpdater)
m_v4_nat_upd = Mock(spec=IptablesUpdater)
m_v6_nat_upd = Mock(spec=IptablesUpdater)
frules.install_global_rules(config, m_v4_upd, m_v4_nat_upd,
ip_version=4)
frules.install_global_rules(config, m_v6_upd, m_v6_nat_upd,
ip_version=6, raw_updater=m_v6_raw_upd)
self.assertEqual(
m_v4_nat_upd.ensure_rule_inserted.mock_calls,
[
call("POSTROUTING --out-interface tunl0 "
"-m addrtype ! --src-type LOCAL --limit-iface-out "
"-m addrtype --src-type LOCAL "
"-j MASQUERADE",
async=False),
call("PREROUTING --jump felix-PREROUTING", async=False),
call("POSTROUTING --jump felix-POSTROUTING", async=False)
]
)
m_v4_upd.ensure_rule_inserted.assert_has_calls([
call("INPUT --jump felix-INPUT", async=False),
call("FORWARD --jump felix-FORWARD", async=False)
]
)
expected_chains = {
'felix-FIP-DNAT': [],
'felix-FIP-SNAT': [],
'felix-PREROUTING': [
'--append felix-PREROUTING --jump felix-FIP-DNAT',
'--append felix-PREROUTING --protocol tcp --dport 80 --destination '
'169.254.169.254/32 --jump DNAT --to-destination 123.0.0.1:1234'
],
'felix-POSTROUTING': [
'--append felix-POSTROUTING --jump felix-FIP-SNAT'
]
}
m_v4_nat_upd.rewrite_chains.assert_called_once_with(
expected_chains,
{'felix-PREROUTING': set(['felix-FIP-DNAT']),
'felix-POSTROUTING': set(['felix-FIP-SNAT'])},
async=False
)
expected_chains = {
'felix-INPUT': [
'--append felix-INPUT ! --in-interface tap+ --jump RETURN',
'--append felix-INPUT --match conntrack --ctstate INVALID --jump DROP',
'--append felix-INPUT --match conntrack --ctstate RELATED,ESTABLISHED --jump ACCEPT',
'--append felix-INPUT --protocol tcp --destination 123.0.0.1 --dport 1234 --jump ACCEPT',
'--append felix-INPUT --protocol udp --sport 68 --dport 67 --jump ACCEPT',
'--append felix-INPUT --protocol udp --dport 53 --jump ACCEPT',
'--append felix-INPUT --jump felix-FROM-ENDPOINT'
],
'felix-FORWARD': [
'--append felix-FORWARD --in-interface tap+ --match conntrack --ctstate INVALID --jump DROP',
'--append felix-FORWARD --out-interface tap+ --match conntrack --ctstate INVALID --jump DROP',
'--append felix-FORWARD --in-interface tap+ --match conntrack --ctstate RELATED,ESTABLISHED --jump RETURN',
'--append felix-FORWARD --out-interface tap+ --match conntrack --ctstate RELATED,ESTABLISHED --jump RETURN',
'--append felix-FORWARD --jump felix-FROM-ENDPOINT --in-interface tap+',
'--append felix-FORWARD --jump felix-TO-ENDPOINT --out-interface tap+',
'--append felix-FORWARD --jump ACCEPT --in-interface tap+',
'--append felix-FORWARD --jump ACCEPT --out-interface tap+'
]
}
m_v4_upd.rewrite_chains.assert_called_once_with(
expected_chains,
{'felix-INPUT': set(['felix-FROM-ENDPOINT']),
'felix-FORWARD': set(['felix-FROM-ENDPOINT',
'felix-TO-ENDPOINT'])},
async=False
)
self.assertEqual(
m_v6_nat_upd.ensure_rule_inserted.mock_calls,
[
call("PREROUTING --jump felix-PREROUTING", async=False),
call("POSTROUTING --jump felix-POSTROUTING", async=False),
]
)
m_v6_upd.ensure_rule_inserted.assert_has_calls([
call("INPUT --jump felix-INPUT", async=False),
call("FORWARD --jump felix-FORWARD", async=False)
]
)
expected_chains = {
'felix-FIP-DNAT': [],
'felix-FIP-SNAT': [],
'felix-PREROUTING': [
'--append felix-PREROUTING --jump felix-FIP-DNAT'
],
'felix-POSTROUTING': [
'--append felix-POSTROUTING --jump felix-FIP-SNAT'
]
}
m_v6_nat_upd.rewrite_chains.assert_called_once_with(
expected_chains, {
'felix-PREROUTING': set(['felix-FIP-DNAT']),
'felix-POSTROUTING': set(['felix-FIP-SNAT'])
}, async=False
)
m_v6_raw_upd.rewrite_chains.assert_called_once_with(
{'felix-PREROUTING': [
'--append felix-PREROUTING --jump DROP -m comment '
'--comment "IPv6 rpfilter failed"'
]},
{
'felix-PREROUTING': {}
},
async=False
)
m_ipset.ensure_exists.assert_called_once_with()
self.assertEqual(
m_check_call.mock_calls,
[
call(["ip", "tunnel", "add", "tunl0", "mode", "ipip"]),
call(["ip", "link", "set", "tunl0", "mtu", "1480"]),
call(["ip", "link", "set", "tunl0", "up"]),
]
)
self.assertEqual(
m_set_ips.mock_calls,
[call(IPV4, "tunl0", set([IPAddress("10.0.0.1")]))]
)
@patch("calico.felix.futils.check_call", autospec=True)
@patch("calico.felix.frules.devices", autospec=True)
@patch("calico.felix.frules.HOSTS_IPSET_V4", autospec=True)
def test_install_global_ipip_disabled(self, m_ipset, m_devices, m_check_call):
m_devices.interface_exists.return_value = False
m_devices.interface_up.return_value = False
m_set_ips = m_devices.set_interface_ips
env_dict = {
"FELIX_ETCDADDR": "localhost:4001",
"FELIX_HOSTNAME": "myhost",
"FELIX_INTERFACEPREFIX": "tap",
"FELIX_METADATAADDR": "123.0.0.1",
"FELIX_METADATAPORT": "1234",
"FELIX_IPINIPENABLED": "false",
"FELIX_IPINIPMTU": "1480",
"FELIX_DEFAULTENDPOINTTOHOSTACTION": "RETURN"
}
config = load_config("felix_missing.cfg", env_dict=env_dict)
m_v4_upd = Mock(spec=IptablesUpdater)
m_v6_upd = Mock(spec=IptablesUpdater)
m_v6_raw_upd = Mock(spec=IptablesUpdater)
m_v6_nat_upd = Mock(spec=IptablesUpdater)
m_v4_nat_upd = Mock(spec=IptablesUpdater)
frules.install_global_rules(config, m_v4_upd, m_v4_nat_upd,
ip_version=4)
frules.install_global_rules(config, m_v6_upd, m_v6_nat_upd,
ip_version=6, raw_updater=m_v6_raw_upd)
self.assertEqual(
m_v4_nat_upd.ensure_rule_inserted.mock_calls,
[call("PREROUTING --jump felix-PREROUTING", async=False),
call("POSTROUTING --jump felix-POSTROUTING", async=False)]
)
m_v4_upd.ensure_rule_inserted.assert_has_calls([
call("INPUT --jump felix-INPUT", async=False),
call("FORWARD --jump felix-FORWARD", async=False)
]
)
self.assertEqual(
m_v4_nat_upd.ensure_rule_removed.mock_calls,
[call("POSTROUTING --out-interface tunl0 "
"-m addrtype ! --src-type LOCAL --limit-iface-out "
"-m addrtype --src-type LOCAL "
"-j MASQUERADE",
async=False)]
)
m_v6_raw_upd.ensure_rule_inserted.assert_called_once_with(
'PREROUTING --in-interface tap+ --match rpfilter --invert --jump '
'felix-PREROUTING',
async=False,
)
m_v6_raw_upd.rewrite_chains.assert_called_once_with(
{'felix-PREROUTING': [
'--append felix-PREROUTING --jump DROP -m comment '
'--comment "IPv6 rpfilter failed"'
]},
{
'felix-PREROUTING': {}
},
async=False
)
self.assertFalse(m_ipset.ensure_exists.called)
self.assertFalse(m_check_call.called)
self.assertFalse(m_set_ips.called)
expected_chains = {
'felix-FIP-DNAT': [],
'felix-FIP-SNAT': [],
'felix-PREROUTING': [
'--append felix-PREROUTING --jump felix-FIP-DNAT',
'--append felix-PREROUTING --protocol tcp --dport 80 --destination '
'169.254.169.254/32 --jump DNAT --to-destination 123.0.0.1:1234'
],
'felix-POSTROUTING': [
'--append felix-POSTROUTING --jump felix-FIP-SNAT'
]
}
m_v4_nat_upd.rewrite_chains.assert_called_once_with(
expected_chains,
{'felix-PREROUTING': set(['felix-FIP-DNAT']),
'felix-POSTROUTING': set(['felix-FIP-SNAT'])},
async=False
)
expected_chains = {
'felix-INPUT': [
'--append felix-INPUT ! --in-interface tap+ --jump RETURN',
'--append felix-INPUT --match conntrack --ctstate INVALID --jump DROP',
'--append felix-INPUT --match conntrack --ctstate RELATED,ESTABLISHED --jump ACCEPT',
'--append felix-INPUT --protocol tcp --destination 123.0.0.1 --dport 1234 --jump ACCEPT',
'--append felix-INPUT --protocol udp --sport 68 --dport 67 --jump ACCEPT',
'--append felix-INPUT --protocol udp --dport 53 --jump ACCEPT',
'--append felix-INPUT --jump felix-FROM-ENDPOINT'
],
'felix-FORWARD': [
'--append felix-FORWARD --in-interface tap+ --match conntrack --ctstate INVALID --jump DROP',
'--append felix-FORWARD --out-interface tap+ --match conntrack --ctstate INVALID --jump DROP',
'--append felix-FORWARD --in-interface tap+ --match conntrack --ctstate RELATED,ESTABLISHED --jump RETURN',
'--append felix-FORWARD --out-interface tap+ --match conntrack --ctstate RELATED,ESTABLISHED --jump RETURN',
'--append felix-FORWARD --jump felix-FROM-ENDPOINT --in-interface tap+',
'--append felix-FORWARD --jump felix-TO-ENDPOINT --out-interface tap+',
'--append felix-FORWARD --jump ACCEPT --in-interface tap+',
'--append felix-FORWARD --jump ACCEPT --out-interface tap+'
]
}
m_v4_upd.rewrite_chains.assert_called_once_with(
expected_chains,
{'felix-INPUT': set(['felix-FROM-ENDPOINT']),
'felix-FORWARD': set(['felix-FROM-ENDPOINT',
'felix-TO-ENDPOINT'])},
async=False
)
@patch("calico.felix.futils.check_call", autospec=True)
@patch("calico.felix.frules.devices", autospec=True)
@patch("calico.felix.frules.HOSTS_IPSET_V4", autospec=True)
def test_install_global_no_ipv6(self, m_ipset, m_devices, m_check_call):
m_devices.interface_exists.return_value = False
m_devices.interface_up.return_value = False
m_set_ips = m_devices.set_interface_ips
env_dict = {
"FELIX_ETCDADDR": "localhost:4001",
"FELIX_HOSTNAME": "myhost",
"FELIX_INTERFACEPREFIX": "tap",
"FELIX_METADATAADDR": "123.0.0.1",
"FELIX_METADATAPORT": "1234",
"FELIX_IPINIPENABLED": "false",
"FELIX_IPINIPMTU": "1480",
"FELIX_DEFAULTENDPOINTTOHOSTACTION": "RETURN"
}
config = load_config("felix_missing.cfg", env_dict=env_dict)
m_v4_upd = Mock(spec=IptablesUpdater)
m_v4_nat_upd = Mock(spec=IptablesUpdater)
frules.install_global_rules(config, m_v4_upd, m_v4_nat_upd,
ip_version=4)
self.assertEqual(
m_v4_nat_upd.ensure_rule_inserted.mock_calls,
[call("PREROUTING --jump felix-PREROUTING", async=False),
call("POSTROUTING --jump felix-POSTROUTING", async=False)]
)
m_v4_upd.ensure_rule_inserted.assert_has_calls([
call("INPUT --jump felix-INPUT", async=False),
call("FORWARD --jump felix-FORWARD", async=False)
]
)
self.assertEqual(
m_v4_nat_upd.ensure_rule_removed.mock_calls,
[call("POSTROUTING --out-interface tunl0 "
"-m addrtype ! --src-type LOCAL --limit-iface-out "
"-m addrtype --src-type LOCAL "
"-j MASQUERADE",
async=False)]
)
self.assertFalse(m_ipset.ensure_exists.called)
self.assertFalse(m_check_call.called)
self.assertFalse(m_set_ips.called)
"""
expected_chains = {
'felix-FIP-DNAT': [],
'felix-FIP-SNAT': [],
'felix-PREROUTING': [
'--append felix-PREROUTING --jump felix-FIP-DNAT',
'--append felix-PREROUTING --protocol tcp --dport 80 --destination '
'169.254.169.254/32 --jump DNAT --to-destination 123.0.0.1:1234'
],
'felix-POSTROUTING': [
'--append felix-POSTROUTING --jump felix-FIP-SNAT'
]
}
expected_chains_2 = {
'felix-INPUT': [
'--append felix-INPUT ! --in-interface tap+ --jump RETURN',
'--append felix-INPUT --match conntrack --ctstate INVALID --jump DROP',
'--append felix-INPUT --match conntrack --ctstate RELATED,ESTABLISHED --jump ACCEPT',
'--append felix-INPUT --protocol tcp --destination 123.0.0.1 --dport 1234 --jump ACCEPT',
'--append felix-INPUT --protocol udp --sport 68 --dport 67 --jump ACCEPT',
'--append felix-INPUT --protocol udp --dport 53 --jump ACCEPT',
'--append felix-INPUT --jump felix-FROM-ENDPOINT'
],
'felix-FORWARD': [
'--append felix-FORWARD --in-interface tap+ --match conntrack --ctstate INVALID --jump DROP',
'--append felix-FORWARD --out-interface tap+ --match conntrack --ctstate INVALID --jump DROP',
'--append felix-FORWARD --in-interface tap+ --match conntrack --ctstate RELATED,ESTABLISHED --jump RETURN',
'--append felix-FORWARD --out-interface tap+ --match conntrack --ctstate RELATED,ESTABLISHED --jump RETURN',
'--append felix-FORWARD --jump felix-FROM-ENDPOINT --in-interface tap+',
'--append felix-FORWARD --jump felix-TO-ENDPOINT --out-interface tap+',
'--append felix-FORWARD --jump ACCEPT --in-interface tap+',
'--append felix-FORWARD --jump ACCEPT --out-interface tap+'
]
}
m_v4_upd.rewrite_chains.has_calls(
[call(expected_chains,
{'felix-PREROUTING': set(['felix-FIP-DNAT']),
'felix-POSTROUTING': set(['felix-FIP-SNAT'])},
async=False),
call(expected_chains_2,
{'felix-INPUT': set(['felix-FROM-ENDPOINT']),
'felix-FORWARD': set(['felix-FROM-ENDPOINT',
'felix-TO-ENDPOINT'])},
async=False)]
)
"""
expected_chains = {
'felix-FIP-DNAT': [],
'felix-FIP-SNAT': [],
'felix-PREROUTING': [
'--append felix-PREROUTING --jump felix-FIP-DNAT',
'--append felix-PREROUTING --protocol tcp --dport 80 --destination '
'169.254.169.254/32 --jump DNAT --to-destination 123.0.0.1:1234'
],
'felix-POSTROUTING': [
'--append felix-POSTROUTING --jump felix-FIP-SNAT'
]
}
m_v4_nat_upd.rewrite_chains.assert_called_once_with(
expected_chains,
{'felix-PREROUTING': set(['felix-FIP-DNAT']),
'felix-POSTROUTING': set(['felix-FIP-SNAT'])},
async=False
)
expected_chains = {
'felix-INPUT': [
'--append felix-INPUT ! --in-interface tap+ --jump RETURN',
'--append felix-INPUT --match conntrack --ctstate INVALID --jump DROP',
'--append felix-INPUT --match conntrack --ctstate RELATED,ESTABLISHED --jump ACCEPT',
'--append felix-INPUT --protocol tcp --destination 123.0.0.1 --dport 1234 --jump ACCEPT',
'--append felix-INPUT --protocol udp --sport 68 --dport 67 --jump ACCEPT',
'--append felix-INPUT --protocol udp --dport 53 --jump ACCEPT',
'--append felix-INPUT --jump felix-FROM-ENDPOINT'
],
'felix-FORWARD': [
'--append felix-FORWARD --in-interface tap+ --match conntrack --ctstate INVALID --jump DROP',
'--append felix-FORWARD --out-interface tap+ --match conntrack --ctstate INVALID --jump DROP',
'--append felix-FORWARD --in-interface tap+ --match conntrack --ctstate RELATED,ESTABLISHED --jump RETURN',
'--append felix-FORWARD --out-interface tap+ --match conntrack --ctstate RELATED,ESTABLISHED --jump RETURN',
'--append felix-FORWARD --jump felix-FROM-ENDPOINT --in-interface tap+',
'--append felix-FORWARD --jump felix-TO-ENDPOINT --out-interface tap+',
'--append felix-FORWARD --jump ACCEPT --in-interface tap+',
'--append felix-FORWARD --jump ACCEPT --out-interface tap+'
]
}
m_v4_upd.rewrite_chains.assert_called_once_with(
expected_chains,
{'felix-INPUT': set(['felix-FROM-ENDPOINT']),
'felix-FORWARD': set(['felix-FROM-ENDPOINT',
'felix-TO-ENDPOINT'])},
async=False
)
def test_install_global_rules_retries_ipip(self):
m_config = Mock()
m_config.IFACE_PREFIX = "tap"
m_config.IP_IN_IP_ENABLED = True
with patch("calico.felix.frules._configure_ipip_device") as m_ipip:
m_ipip.side_effect = FailedSystemCall("", [], 1, "", "")
self.assertRaises(FailedSystemCall,
frules.install_global_rules,
m_config, None, None, 4)
self.assertEqual(m_ipip.mock_calls,
[
call(m_config),
call(m_config)
])
def test_load_nf_conntrack(self):
with patch("calico.felix.futils.check_call", autospec=True) as m_call:
frules.load_nf_conntrack()
m_call.assert_called_once_with(["conntrack", "-S"])
def test_load_nf_conntrack_fail(self):
with patch("calico.felix.futils.check_call", autospec=True) as m_call:
m_call.side_effect = FailedSystemCall(message="bad call",
args=["conntrack", "-S"],
retcode=1,
stdout="", stderr="")
frules.load_nf_conntrack() # Exception should be caught
m_call.assert_called_once_with(["conntrack", "-S"])
| apache-2.0 |
QuanZag/tornado | tornado/util.py | 56 | 12922 | """Miscellaneous utility functions and classes.
This module is used internally by Tornado. It is not necessarily expected
that the functions and classes defined here will be useful to other
applications, but they are documented here in case they are.
The one public-facing part of this module is the `Configurable` class
and its `~Configurable.configure` method, which becomes a part of the
interface of its subclasses, including `.AsyncHTTPClient`, `.IOLoop`,
and `.Resolver`.
"""
from __future__ import absolute_import, division, print_function, with_statement
import array
import os
import sys
import zlib
try:
xrange # py2
except NameError:
xrange = range # py3
# inspect.getargspec() raises DeprecationWarnings in Python 3.5.
# The two functions have compatible interfaces for the parts we need.
try:
from inspect import getfullargspec as getargspec # py3
except ImportError:
from inspect import getargspec # py2
class ObjectDict(dict):
"""Makes a dictionary behave like an object, with attribute-style access.
"""
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, value):
self[name] = value
class GzipDecompressor(object):
"""Streaming gzip decompressor.
The interface is like that of `zlib.decompressobj` (without some of the
optional arguments), but it understands gzip headers and checksums.
"""
def __init__(self):
# Magic parameter makes zlib module understand gzip header
# http://stackoverflow.com/questions/1838699/how-can-i-decompress-a-gzip-stream-with-zlib
# This works on cpython and pypy, but not jython.
self.decompressobj = zlib.decompressobj(16 + zlib.MAX_WBITS)
def decompress(self, value, max_length=None):
"""Decompress a chunk, returning newly-available data.
Some data may be buffered for later processing; `flush` must
be called when there is no more input data to ensure that
all data was processed.
If ``max_length`` is given, some input data may be left over
in ``unconsumed_tail``; you must retrieve this value and pass
it back to a future call to `decompress` if it is not empty.
"""
return self.decompressobj.decompress(value, max_length)
@property
def unconsumed_tail(self):
"""Returns the unconsumed portion left over
"""
return self.decompressobj.unconsumed_tail
def flush(self):
"""Return any remaining buffered data not yet returned by decompress.
Also checks for errors such as truncated input.
No other methods may be called on this object after `flush`.
"""
return self.decompressobj.flush()
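# Editor's illustrative sketch (not part of the original module): a minimal
# end-to-end use of GzipDecompressor. The gzip payload is produced with
# zlib's gzip-container mode (the same 16 + MAX_WBITS trick used above), so
# the example needs no extra dependencies.
def _gzip_decompressor_demo():
    compressor = zlib.compressobj(9, zlib.DEFLATED, 16 + zlib.MAX_WBITS)
    payload = compressor.compress(b"hello world") + compressor.flush()
    decompressor = GzipDecompressor()
    data = decompressor.decompress(payload, 64)
    assert decompressor.unconsumed_tail == b""  # 64 bytes was enough here
    data += decompressor.flush()  # flush() also detects truncated input
    assert data == b"hello world"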
# Fake unicode literal support: Python 3.2 doesn't have the u'' marker for
# literal strings, and alternative solutions like "from __future__ import
# unicode_literals" have other problems (see PEP 414). u() can be applied
# to ascii strings that include \u escapes (but they must not contain
# literal non-ascii characters).
if not isinstance(b'', type('')):
def u(s):
return s
unicode_type = str
basestring_type = str
else:
def u(s):
return s.decode('unicode_escape')
# These names don't exist in py3, so use noqa comments to disable
# warnings in flake8.
unicode_type = unicode # noqa
basestring_type = basestring # noqa
def import_object(name):
"""Imports an object by name.
import_object('x') is equivalent to 'import x'.
import_object('x.y.z') is equivalent to 'from x.y import z'.
>>> import tornado.escape
>>> import_object('tornado.escape') is tornado.escape
True
>>> import_object('tornado.escape.utf8') is tornado.escape.utf8
True
>>> import_object('tornado') is tornado
True
>>> import_object('tornado.missing_module')
Traceback (most recent call last):
...
ImportError: No module named missing_module
"""
if isinstance(name, unicode_type) and str is not unicode_type:
# On python 2 a byte string is required.
name = name.encode('utf-8')
if name.count('.') == 0:
return __import__(name, None, None)
parts = name.split('.')
obj = __import__('.'.join(parts[:-1]), None, None, [parts[-1]], 0)
try:
return getattr(obj, parts[-1])
except AttributeError:
raise ImportError("No module named %s" % parts[-1])
# Deprecated alias that was used before we dropped py25 support.
# Left here in case anyone outside Tornado is using it.
bytes_type = bytes
if sys.version_info > (3,):
exec("""
def raise_exc_info(exc_info):
raise exc_info[1].with_traceback(exc_info[2])
def exec_in(code, glob, loc=None):
if isinstance(code, str):
code = compile(code, '<string>', 'exec', dont_inherit=True)
exec(code, glob, loc)
""")
else:
exec("""
def raise_exc_info(exc_info):
raise exc_info[0], exc_info[1], exc_info[2]
def exec_in(code, glob, loc=None):
if isinstance(code, basestring):
# exec(string) inherits the caller's future imports; compile
# the string first to prevent that.
code = compile(code, '<string>', 'exec', dont_inherit=True)
exec code in glob, loc
""")
def errno_from_exception(e):
"""Provides the errno from an Exception object.
In some cases the ``errno`` attribute is not set, so we pull the
errno out of ``args`` instead; but if an ``Exception`` is instantiated
without any args, indexing into ``args`` would fail. This function
abstracts all of that behavior to give you a safe way to get the
errno.
"""
if hasattr(e, 'errno'):
return e.errno
elif e.args:
return e.args[0]
else:
return None
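# Editor's illustrative sketch (not part of the original module): the three
# cases handled above.
def _errno_demo():
    assert errno_from_exception(IOError(13, "Permission denied")) == 13
    assert errno_from_exception(Exception(13)) == 13  # errno only in args[0]
    assert errno_from_exception(Exception()) is None  # no args at all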
class Configurable(object):
"""Base class for configurable interfaces.
A configurable interface is an (abstract) class whose constructor
acts as a factory function for one of its implementation subclasses.
The implementation subclass as well as optional keyword arguments to
its initializer can be set globally at runtime with `configure`.
By using the constructor as the factory method, the interface
looks like a normal class, `isinstance` works as usual, etc. This
pattern is most useful when the choice of implementation is likely
to be a global decision (e.g. when `~select.epoll` is available,
always use it instead of `~select.select`), or when a
previously-monolithic class has been split into specialized
subclasses.
Configurable subclasses must define the class methods
`configurable_base` and `configurable_default`, and use the instance
method `initialize` instead of ``__init__``.
"""
__impl_class = None
__impl_kwargs = None
def __new__(cls, *args, **kwargs):
base = cls.configurable_base()
init_kwargs = {}
if cls is base:
impl = cls.configured_class()
if base.__impl_kwargs:
init_kwargs.update(base.__impl_kwargs)
else:
impl = cls
init_kwargs.update(kwargs)
instance = super(Configurable, cls).__new__(impl)
# initialize vs __init__ chosen for compatibility with AsyncHTTPClient
# singleton magic. If we get rid of that we can switch to __init__
# here too.
instance.initialize(*args, **init_kwargs)
return instance
@classmethod
def configurable_base(cls):
"""Returns the base class of a configurable hierarchy.
This will normally return the class in which it is defined (which is
*not* necessarily the same as the ``cls`` classmethod parameter).
"""
raise NotImplementedError()
@classmethod
def configurable_default(cls):
"""Returns the implementation class to be used if none is configured."""
raise NotImplementedError()
def initialize(self):
"""Initialize a `Configurable` subclass instance.
Configurable classes should use `initialize` instead of ``__init__``.
.. versionchanged:: 4.2
Now accepts positional arguments in addition to keyword arguments.
"""
@classmethod
def configure(cls, impl, **kwargs):
"""Sets the class to use when the base class is instantiated.
Keyword arguments will be saved and added to the arguments passed
to the constructor. This can be used to set global defaults for
some parameters.
"""
base = cls.configurable_base()
if isinstance(impl, (unicode_type, bytes)):
impl = import_object(impl)
if impl is not None and not issubclass(impl, cls):
raise ValueError("Invalid subclass of %s" % cls)
base.__impl_class = impl
base.__impl_kwargs = kwargs
@classmethod
def configured_class(cls):
"""Returns the currently configured class."""
base = cls.configurable_base()
if cls.__impl_class is None:
base.__impl_class = cls.configurable_default()
return base.__impl_class
@classmethod
def _save_configuration(cls):
base = cls.configurable_base()
return (base.__impl_class, base.__impl_kwargs)
@classmethod
def _restore_configuration(cls, saved):
base = cls.configurable_base()
base.__impl_class = saved[0]
base.__impl_kwargs = saved[1]
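# Editor's illustrative sketch (not part of the original module): a minimal
# Configurable hierarchy. The Transport/PlainTransport/SecureTransport names
# are hypothetical and exist only to show how configurable_base,
# configurable_default, initialize, and configure() fit together.
def _configurable_demo():
    class Transport(Configurable):
        @classmethod
        def configurable_base(cls):
            return Transport

        @classmethod
        def configurable_default(cls):
            return PlainTransport

        def initialize(self, timeout=None):
            self.timeout = timeout

    class PlainTransport(Transport):
        pass

    class SecureTransport(Transport):
        pass

    # Instantiating the base yields the default implementation...
    assert isinstance(Transport(), PlainTransport)
    # ...until configure() swaps it in, with saved constructor kwargs.
    Transport.configure(SecureTransport, timeout=30)
    t = Transport()
    assert isinstance(t, SecureTransport) and t.timeout == 30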
class ArgReplacer(object):
"""Replaces one value in an ``args, kwargs`` pair.
Inspects the function signature to find an argument by name
whether it is passed by position or keyword. For use in decorators
and similar wrappers.
"""
def __init__(self, func, name):
self.name = name
try:
self.arg_pos = getargspec(func).args.index(self.name)
except ValueError:
# Not a positional parameter
self.arg_pos = None
def get_old_value(self, args, kwargs, default=None):
"""Returns the old value of the named argument without replacing it.
Returns ``default`` if the argument is not present.
"""
if self.arg_pos is not None and len(args) > self.arg_pos:
return args[self.arg_pos]
else:
return kwargs.get(self.name, default)
def replace(self, new_value, args, kwargs):
"""Replace the named argument in ``args, kwargs`` with ``new_value``.
Returns ``(old_value, args, kwargs)``. The returned ``args`` and
``kwargs`` objects may not be the same as the input objects, or
the input objects may be mutated.
If the named argument was not found, ``new_value`` will be added
to ``kwargs`` and None will be returned as ``old_value``.
"""
if self.arg_pos is not None and len(args) > self.arg_pos:
# The arg to replace is passed positionally
old_value = args[self.arg_pos]
args = list(args) # *args is normally a tuple
args[self.arg_pos] = new_value
else:
# The arg to replace is either omitted or passed by keyword.
old_value = kwargs.get(self.name)
kwargs[self.name] = new_value
return old_value, args, kwargs
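# Editor's illustrative sketch (not part of the original module): replacing
# a "timeout" argument regardless of how the caller passed it.
def _arg_replacer_demo():
    def connect(host, port, timeout=None):
        return host, port, timeout

    replacer = ArgReplacer(connect, "timeout")
    # Passed by keyword (or omitted): the replacement lands in kwargs.
    old, args, kwargs = replacer.replace(5, ("example.com", 80), {})
    assert old is None and kwargs == {"timeout": 5}
    # Passed positionally: the replacement lands in args.
    old, args, kwargs = replacer.replace(5, ("example.com", 80, 1), {})
    assert old == 1 and args[2] == 5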
def timedelta_to_seconds(td):
"""Equivalent to td.total_seconds() (introduced in python 2.7)."""
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / float(10 ** 6)
def _websocket_mask_python(mask, data):
"""Websocket masking function.
`mask` is a `bytes` object of length 4; `data` is a `bytes` object of any length.
Returns a `bytes` object of the same length as `data` with the mask applied
as specified in section 5.3 of RFC 6455.
This pure-python implementation may be replaced by an optimized version when available.
"""
mask = array.array("B", mask)
unmasked = array.array("B", data)
for i in xrange(len(data)):
unmasked[i] = unmasked[i] ^ mask[i % 4]
if hasattr(unmasked, 'tobytes'):
# tostring was deprecated in py32. It hasn't been removed,
# but since we turn on deprecation warnings in our tests
# we need to use the right one.
return unmasked.tobytes()
else:
return unmasked.tostring()
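# Editor's illustrative sketch (not part of the original module): XOR
# masking is an involution, so applying the same 4-byte mask twice restores
# the original payload (RFC 6455, section 5.3).
def _websocket_mask_demo():
    mask = b"\x01\x02\x03\x04"
    payload = b"hello websocket"
    masked = _websocket_mask_python(mask, payload)
    assert masked != payload
    assert _websocket_mask_python(mask, masked) == payload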
if (os.environ.get('TORNADO_NO_EXTENSION') or
os.environ.get('TORNADO_EXTENSION') == '0'):
# These environment variables exist to make it easier to do performance
# comparisons; they are not guaranteed to remain supported in the future.
_websocket_mask = _websocket_mask_python
else:
try:
from tornado.speedups import websocket_mask as _websocket_mask
except ImportError:
if os.environ.get('TORNADO_EXTENSION') == '1':
raise
_websocket_mask = _websocket_mask_python
def doctests():
import doctest
return doctest.DocTestSuite()
| apache-2.0 |
Eforcers/inbox-cleaner | src/lib/gdata/tlslite/integration/TLSSocketServerMixIn.py | 320 | 2203 | """TLS Lite + SocketServer."""
from gdata.tlslite.TLSConnection import TLSConnection
class TLSSocketServerMixIn:
"""
This class can be mixed in with any L{SocketServer.TCPServer} to
add TLS support.
To use this class, define a new class that inherits from it and
some L{SocketServer.TCPServer} (with the mix-in first). Then
implement the handshake() method, doing some sort of server
handshake on the connection argument. If the handshake method
returns True, the RequestHandler will be triggered. Below is a
complete example of a threaded HTTPS server::
from SocketServer import *
from BaseHTTPServer import *
from SimpleHTTPServer import *
from tlslite.api import *
s = open("./serverX509Cert.pem").read()
x509 = X509()
x509.parse(s)
certChain = X509CertChain([x509])
s = open("./serverX509Key.pem").read()
privateKey = parsePEMKey(s, private=True)
sessionCache = SessionCache()
class MyHTTPServer(ThreadingMixIn, TLSSocketServerMixIn,
HTTPServer):
def handshake(self, tlsConnection):
try:
tlsConnection.handshakeServer(certChain=certChain,
privateKey=privateKey,
sessionCache=sessionCache)
tlsConnection.ignoreAbruptClose = True
return True
except TLSError, error:
print "Handshake failure:", str(error)
return False
httpd = MyHTTPServer(('localhost', 443), SimpleHTTPRequestHandler)
httpd.serve_forever()
"""
def finish_request(self, sock, client_address):
tlsConnection = TLSConnection(sock)
if self.handshake(tlsConnection) == True:
self.RequestHandlerClass(tlsConnection, client_address, self)
tlsConnection.close()
#Implement this method to do some form of handshaking. Return True
#if the handshake finishes properly and the request is authorized.
def handshake(self, tlsConnection):
raise NotImplementedError()
| mit |
tylerjereddy/diffusion_analysis_MD_simulations | conf.py | 1 | 8751 | # -*- coding: utf-8 -*-
#
# diffusion_analysis_MD_simulations documentation build configuration file, created by
# sphinx-quickstart on Mon Sep 22 11:53:18 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import mock
MOCK_MODULES = ['numpy','scipy','scipy.optimize','scipy.spatial.distance']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
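# Mocking the scientific dependencies lets sphinx-autodoc import the project
# modules on documentation build machines where numpy/scipy are not
# installed; only signatures and docstrings are needed to build the docs.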
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('numpydoc'))
sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'numpydoc'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'diffusion_analysis_MD_simulations'
copyright = u'2014, Tyler Reddy'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'diffusion_analysis_MD_simulationsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'diffusion_analysis_MD_simulations.tex', u'diffusion\\_analysis\\_MD\\_simulations Documentation',
u'Tyler Reddy', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'diffusion_analysis_md_simulations', u'diffusion_analysis_MD_simulations Documentation',
[u'Tyler Reddy'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'diffusion_analysis_MD_simulations', u'diffusion_analysis_MD_simulations Documentation',
u'Tyler Reddy', 'diffusion_analysis_MD_simulations', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit |
aeklant/scipy | scipy/sparse/tests/test_sparsetools.py | 5 | 10084 | import sys
import os
import gc
import threading
import numpy as np
from numpy.testing import assert_equal, assert_, assert_allclose
from scipy.sparse import (_sparsetools, coo_matrix, csr_matrix, csc_matrix,
bsr_matrix, dia_matrix)
from scipy.sparse.sputils import supported_dtypes, matrix
from scipy._lib._testutils import check_free_memory
import pytest
from pytest import raises as assert_raises
def test_exception():
assert_raises(MemoryError, _sparsetools.test_throw_error)
def test_threads():
# Smoke test for parallel threaded execution; doesn't actually
# check that code runs in parallel, but just that it produces
# expected results.
nthreads = 10
niter = 100
n = 20
a = csr_matrix(np.ones([n, n]))
bres = []
class Worker(threading.Thread):
def run(self):
b = a.copy()
for j in range(niter):
_sparsetools.csr_plus_csr(n, n,
a.indptr, a.indices, a.data,
a.indptr, a.indices, a.data,
b.indptr, b.indices, b.data)
bres.append(b)
threads = [Worker() for _ in range(nthreads)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
for b in bres:
assert_(np.all(b.toarray() == 2))
def test_regression_std_vector_dtypes():
# Regression test for gh-3780, checking the std::vector typemaps
# in sparsetools.cxx are complete.
for dtype in supported_dtypes:
ad = matrix([[1, 2], [3, 4]]).astype(dtype)
a = csr_matrix(ad, dtype=dtype)
# getcol is one function using std::vector typemaps, and should not fail
assert_equal(a.getcol(0).todense(), ad[:,0])
@pytest.mark.slow
def test_nnz_overflow():
# Regression test for gh-7230 / gh-7871, checking that coo_todense
# with nnz > int32max doesn't overflow.
nnz = np.iinfo(np.int32).max + 1
# Ensure ~20 GB of RAM is free to run this test.
check_free_memory((4 + 4 + 1) * nnz / 1e6 + 0.5)
# Use nnz duplicate entries to keep the dense version small.
row = np.zeros(nnz, dtype=np.int32)
col = np.zeros(nnz, dtype=np.int32)
data = np.zeros(nnz, dtype=np.int8)
data[-1] = 4
s = coo_matrix((data, (row, col)), shape=(1, 1), copy=False)
# Sums nnz duplicates to produce a 1x1 array containing 4.
d = s.toarray()
assert_allclose(d, [[4]])
@pytest.mark.skipif(not (sys.platform.startswith('linux') and np.dtype(np.intp).itemsize >= 8),
reason="test requires 64-bit Linux")
class TestInt32Overflow(object):
"""
Some of the sparsetools routines use dense 2D matrices whose
total size is not bounded by the nnz of the sparse matrix. These
routines used to suffer from int32 wraparounds; here, we try to
check that the wraparounds don't occur any more.
"""
# choose n large enough
n = 50000
def setup_method(self):
assert self.n**2 > np.iinfo(np.int32).max
# check there's enough memory even if everything is run at the
# same time
try:
parallel_count = int(os.environ.get('PYTEST_XDIST_WORKER_COUNT', '1'))
except ValueError:
parallel_count = np.inf
check_free_memory(3000 * parallel_count)
def teardown_method(self):
gc.collect()
def test_coo_todense(self):
# Check *_todense routines (cf. gh-2179)
#
# All of them in the end call coo_matrix.todense
n = self.n
i = np.array([0, n-1])
j = np.array([0, n-1])
data = np.array([1, 2], dtype=np.int8)
m = coo_matrix((data, (i, j)))
r = m.todense()
assert_equal(r[0,0], 1)
assert_equal(r[-1,-1], 2)
del r
gc.collect()
@pytest.mark.slow
def test_matvecs(self):
# Check *_matvecs routines
n = self.n
i = np.array([0, n-1])
j = np.array([0, n-1])
data = np.array([1, 2], dtype=np.int8)
m = coo_matrix((data, (i, j)))
b = np.ones((n, n), dtype=np.int8)
for sptype in (csr_matrix, csc_matrix, bsr_matrix):
m2 = sptype(m)
r = m2.dot(b)
assert_equal(r[0,0], 1)
assert_equal(r[-1,-1], 2)
del r
gc.collect()
del b
gc.collect()
@pytest.mark.slow
def test_dia_matvec(self):
# Check: huge dia_matrix _matvec
n = self.n
data = np.ones((n, n), dtype=np.int8)
offsets = np.arange(n)
m = dia_matrix((data, offsets), shape=(n, n))
v = np.ones(m.shape[1], dtype=np.int8)
r = m.dot(v)
assert_equal(r[0], np.int8(n))
del data, offsets, m, v, r
gc.collect()
_bsr_ops = [pytest.param("matmat", marks=pytest.mark.xslow),
pytest.param("matvecs", marks=pytest.mark.xslow),
"matvec",
"diagonal",
"sort_indices",
pytest.param("transpose", marks=pytest.mark.xslow)]
@pytest.mark.slow
@pytest.mark.parametrize("op", _bsr_ops)
def test_bsr_1_block(self, op):
# Check: huge bsr_matrix (1-block)
#
# The point here is that indices inside a block may overflow.
def get_matrix():
n = self.n
data = np.ones((1, n, n), dtype=np.int8)
indptr = np.array([0, 1], dtype=np.int32)
indices = np.array([0], dtype=np.int32)
m = bsr_matrix((data, indices, indptr), blocksize=(n, n), copy=False)
del data, indptr, indices
return m
gc.collect()
try:
getattr(self, "_check_bsr_" + op)(get_matrix)
finally:
gc.collect()
@pytest.mark.slow
@pytest.mark.parametrize("op", _bsr_ops)
def test_bsr_n_block(self, op):
# Check: huge bsr_matrix (n-block)
#
# The point here is that while indices within a block don't
# overflow, accumulators across many blocks may.
def get_matrix():
n = self.n
data = np.ones((n, n, 1), dtype=np.int8)
indptr = np.array([0, n], dtype=np.int32)
indices = np.arange(n, dtype=np.int32)
m = bsr_matrix((data, indices, indptr), blocksize=(n, 1), copy=False)
del data, indptr, indices
return m
gc.collect()
try:
getattr(self, "_check_bsr_" + op)(get_matrix)
finally:
gc.collect()
def _check_bsr_matvecs(self, m):
m = m()
n = self.n
# _matvecs
r = m.dot(np.ones((n, 2), dtype=np.int8))
assert_equal(r[0,0], np.int8(n))
def _check_bsr_matvec(self, m):
m = m()
n = self.n
# _matvec
r = m.dot(np.ones((n,), dtype=np.int8))
assert_equal(r[0], np.int8(n))
def _check_bsr_diagonal(self, m):
m = m()
n = self.n
# _diagonal
r = m.diagonal()
assert_equal(r, np.ones(n))
def _check_bsr_sort_indices(self, m):
# _sort_indices
m = m()
m.sort_indices()
def _check_bsr_transpose(self, m):
# _transpose
m = m()
m.transpose()
def _check_bsr_matmat(self, m):
m = m()
n = self.n
# _bsr_matmat
m2 = bsr_matrix(np.ones((n, 2), dtype=np.int8), blocksize=(m.blocksize[1], 2))
m.dot(m2) # shouldn't SIGSEGV
del m2
# _bsr_matmat
m2 = bsr_matrix(np.ones((2, n), dtype=np.int8), blocksize=(2, m.blocksize[0]))
m2.dot(m) # shouldn't SIGSEGV
@pytest.mark.skip(reason="64-bit indices in sparse matrices not available")
def test_csr_matmat_int64_overflow():
n = 3037000500
assert n**2 > np.iinfo(np.int64).max
# the test would take crazy amounts of memory
check_free_memory(n * (8*2 + 1) * 3 / 1e6)
# int64 overflow
data = np.ones((n,), dtype=np.int8)
indptr = np.arange(n+1, dtype=np.int64)
indices = np.zeros(n, dtype=np.int64)
a = csr_matrix((data, indices, indptr))
b = a.T
assert_raises(RuntimeError, a.dot, b)
def test_upcast():
a0 = csr_matrix([[np.pi, np.pi*1j], [3, 4]], dtype=complex)
b0 = np.array([256+1j, 2**32], dtype=complex)
for a_dtype in supported_dtypes:
for b_dtype in supported_dtypes:
msg = "(%r, %r)" % (a_dtype, b_dtype)
if np.issubdtype(a_dtype, np.complexfloating):
a = a0.copy().astype(a_dtype)
else:
a = a0.real.copy().astype(a_dtype)
if np.issubdtype(b_dtype, np.complexfloating):
b = b0.copy().astype(b_dtype)
else:
b = b0.real.copy().astype(b_dtype)
if not (a_dtype == np.bool_ and b_dtype == np.bool_):
c = np.zeros((2,), dtype=np.bool_)
assert_raises(ValueError, _sparsetools.csr_matvec,
2, 2, a.indptr, a.indices, a.data, b, c)
if ((np.issubdtype(a_dtype, np.complexfloating) and
not np.issubdtype(b_dtype, np.complexfloating)) or
(not np.issubdtype(a_dtype, np.complexfloating) and
np.issubdtype(b_dtype, np.complexfloating))):
c = np.zeros((2,), dtype=np.float64)
assert_raises(ValueError, _sparsetools.csr_matvec,
2, 2, a.indptr, a.indices, a.data, b, c)
c = np.zeros((2,), dtype=np.result_type(a_dtype, b_dtype))
_sparsetools.csr_matvec(2, 2, a.indptr, a.indices, a.data, b, c)
assert_allclose(c, np.dot(a.toarray(), b), err_msg=msg)
def test_endianness():
d = np.ones((3,4))
offsets = [-1,0,1]
a = dia_matrix((d.astype('<f8'), offsets), (4, 4))
b = dia_matrix((d.astype('>f8'), offsets), (4, 4))
v = np.arange(4)
assert_allclose(a.dot(v), [1, 3, 6, 5])
assert_allclose(b.dot(v), [1, 3, 6, 5])
| bsd-3-clause |
m0re4u/LeRoT-SCLP | scripts/get_online_perf_latex.py | 3 | 2388 | #!/usr/bin/env python
# This file is part of Lerot.
#
# Lerot is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Lerot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Lerot. If not, see <http://www.gnu.org/licenses/>.
from include import *
from math import sqrt
def get_significance(mean_1, mean_2, std_1, std_2, n):
significance = ""
ste_1 = std_1 / sqrt(n)
ste_2 = std_2 / sqrt(n)
t = (mean_1 - mean_2) / sqrt(ste_1 ** 2 + ste_2 ** 2)
#print t
if mean_1 > mean_2: # treatment is worse than baseline
# values used are for 120 degrees of freedom (http://changingminds.org/
# explanations/research/analysis/t-test_table.htm)
if abs(t) >= 2.62:
significance = "\dubbelneer"
elif abs(t) >= 1.98:
significance = "\enkelneer"
else:
if abs(t) >= 2.62:
significance = "\dubbelop"
elif abs(t) >= 1.98:
significance = "\enkelop"
return significance
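# Editor's illustrative sketch (not part of the original script): with
# n = 125 runs per arm, a mean gap of 0.04 at std 0.12 gives
# |t| = 0.04 / sqrt(2 * (0.12 / sqrt(125))**2) ~= 2.64, just past the
# two-sided p < 0.01 cutoff (2.62) used above, so the treatment is marked
# significantly better.
def _significance_demo():
    assert get_significance(0.50, 0.54, 0.12, 0.12, 125) == "\dubbelop"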
def get_percent_improvement(mean_1, mean_2):
return (mean_2 - mean_1) / mean_1 * 100.0
perf = []
max_perf = 0
n = 125
baseline = 1 # usually: 0 for BI, 1 for TD
files = sys.argv[1:]
for input_file in files:
fh = open(input_file, "r")
lines = fh.readlines()
last_line = lines[-1]
(_, _, _, mean, std) = last_line.split()
mean = float(mean)
std = float(std)
if mean > max_perf:
max_perf = mean
perf.append((mean, std))
for i in range(len(perf)):
significance = get_significance(perf[baseline][0], perf[i][0], perf[baseline][1],
perf[i][1], n)
if perf[i][0] == max_perf:
print " & \\textbf{%.2f} & %s" % (perf[i][0], significance),
else:
print " & %.2f & %s" % (perf[i][0], significance),
print "%% baseline vs. last 2: %.1f%% %.1f%%" % (get_percent_improvement(perf[baseline][0], perf[-2][0]), get_percent_improvement(perf[baseline][0], perf[-1][0]))
#print ""
| gpl-3.0 |
jschneier/flask-admin | flask_admin/form/rules.py | 23 | 10905 | from jinja2 import Markup
from flask_admin._compat import string_types
from flask_admin import helpers
class BaseRule(object):
"""
Base form rule. All form formatting rules should derive from `BaseRule`.
"""
def __init__(self):
self.parent = None
self.rule_set = None
def configure(self, rule_set, parent):
"""
Configure rule and assign to rule set.
:param rule_set:
Rule set
:param parent:
Parent rule (if any)
"""
self.parent = parent
self.rule_set = rule_set
return self
@property
def visible_fields(self):
"""
A list of visible fields for the given rule.
"""
return []
def __call__(self, form, form_opts=None, field_args={}):
"""
Render rule.
:param form:
Form object
:param form_opts:
Form options
:param field_args:
Optional arguments that should be passed to template or the field
"""
raise NotImplementedError()
class NestedRule(BaseRule):
"""
Nested rule. Can contain child rules and render them.
"""
def __init__(self, rules=[], separator=''):
"""
Constructor.
:param rules:
Child rule list
:param separator:
Default separator between rules when rendering them.
"""
super(NestedRule, self).__init__()
self.rules = list(rules)
self.separator = separator
def configure(self, rule_set, parent):
"""
Configure rule.
:param rule_set:
Rule set
:param parent:
Parent rule (if any)
"""
self.rules = rule_set.configure_rules(self.rules, self)
return super(NestedRule, self).configure(rule_set, parent)
@property
def visible_fields(self):
"""
Return visible fields for all child rules.
"""
visible_fields = []
for rule in self.rules:
for field in rule.visible_fields:
visible_fields.append(field)
return visible_fields
def __iter__(self):
"""
Return rules.
"""
return iter(self.rules)
def __call__(self, form, form_opts=None, field_args={}):
"""
Render all children.
:param form:
Form object
:param form_opts:
Form options
:param field_args:
Optional arguments that should be passed to template or the field
"""
result = []
for r in self.rules:
result.append(r(form, form_opts, field_args))
return Markup(self.separator.join(result))
class Text(BaseRule):
"""
Render text (or HTML snippet) from string.
"""
def __init__(self, text, escape=True):
"""
Constructor.
:param text:
Text to render
:param escape:
Should text be escaped or not. Default is `True`.
"""
super(Text, self).__init__()
self.text = text
self.escape = escape
def __call__(self, form, form_opts=None, field_args={}):
if self.escape:
return self.text
return Markup(self.text)
class HTML(Text):
"""
Shortcut for `Text` rule with `escape` set to `False`.
"""
def __init__(self, html):
super(HTML, self).__init__(html, escape=False)
class Macro(BaseRule):
"""
Render macro by its name from current Jinja2 context.
"""
def __init__(self, macro_name, **kwargs):
"""
Constructor.
:param macro_name:
Macro name
:param kwargs:
Default macro parameters
"""
super(Macro, self).__init__()
self.macro_name = macro_name
self.default_args = kwargs
def _resolve(self, context, name):
"""
Resolve macro in a Jinja2 context
:param context:
Jinja2 context
:param name:
Macro name. May be full path (with dots)
"""
parts = name.split('.')
try:
field = context.resolve(parts[0])
except AttributeError:
raise Exception('Your template is missing '
'"{% set render_ctx = h.resolve_ctx() %}"')
if not field:
return None
for p in parts[1:]:
field = getattr(field, p, None)
if not field:
return field
return field
def __call__(self, form, form_opts=None, field_args={}):
"""
Render macro rule.
:param form:
Form object
:param form_opts:
Form options
:param field_args:
Optional arguments that should be passed to the macro
"""
context = helpers.get_render_ctx()
macro = self._resolve(context, self.macro_name)
if not macro:
raise ValueError('Cannot find macro %s in current context.' % self.macro_name)
opts = dict(self.default_args)
opts.update(field_args)
return macro(**opts)
class Container(Macro):
"""
Render container around child rule.
"""
def __init__(self, macro_name, child_rule, **kwargs):
"""
Constructor.
:param macro_name:
Macro name that will be used as a container
:param child_rule:
Child rule to be rendered inside of container
:param kwargs:
Container macro arguments
"""
super(Container, self).__init__(macro_name, **kwargs)
self.child_rule = child_rule
def configure(self, rule_set, parent):
"""
Configure rule.
:param rule_set:
Rule set
:param parent:
Parent rule (if any)
"""
self.child_rule.configure(rule_set, self)
return super(Container, self).configure(rule_set, parent)
@property
def visible_fields(self):
return self.child_rule.visible_fields
def __call__(self, form, form_opts=None, field_args={}):
"""
Render container.
:param form:
Form object
:param form_opts:
Form options
:param field_args:
Optional arguments that should be passed to template or the field
"""
context = helpers.get_render_ctx()
def caller(**kwargs):
return context.call(self.child_rule, form, form_opts, kwargs)
args = dict(field_args)
args['caller'] = caller
return super(Container, self).__call__(form, form_opts, args)
class Field(Macro):
"""
Form field rule.
"""
def __init__(self, field_name, render_field='lib.render_field'):
"""
Constructor.
:param field_name:
Field name to render
:param render_field:
Macro that will be used to render the field.
"""
super(Field, self).__init__(render_field)
self.field_name = field_name
@property
def visible_fields(self):
return [self.field_name]
def __call__(self, form, form_opts=None, field_args={}):
"""
Render field.
:param form:
Form object
:param form_opts:
Form options
:param field_args:
Optional arguments that should be passed to template or the field
"""
field = getattr(form, self.field_name, None)
if field is None:
raise ValueError('Form %s does not have field %s' % (form, self.field_name))
opts = {}
if form_opts:
opts.update(form_opts.widget_args.get(self.field_name, {}))
opts.update(field_args)
params = {
'form': form,
'field': field,
'kwargs': opts
}
return super(Field, self).__call__(form, form_opts, params)
class Header(Macro):
"""
Render header text.
"""
def __init__(self, text, header_macro='lib.render_header'):
"""
Constructor.
:param text:
Text to render
:param header_macro:
Header rendering macro
"""
super(Header, self).__init__(header_macro, text=text)
class FieldSet(NestedRule):
"""
Field set with header.
"""
def __init__(self, rules, header=None, separator=''):
"""
Constructor.
:param rules:
Child rules
:param header:
Header text
:param separator:
Child rule separator
"""
if header:
rule_set = [Header(header)] + list(rules)
else:
rule_set = list(rules)
super(FieldSet, self).__init__(rule_set, separator=separator)
class RuleSet(object):
"""
Rule set.
"""
def __init__(self, view, rules):
"""
Constructor.
:param view:
Administrative view
:param rules:
Rule list
"""
self.view = view
self.rules = self.configure_rules(rules)
@property
def visible_fields(self):
visible_fields = []
for rule in self.rules:
for field in rule.visible_fields:
visible_fields.append(field)
return visible_fields
def convert_string(self, value):
"""
Convert string to rule.
Override this method to change default behavior.
"""
return Field(value)
def configure_rules(self, rules, parent=None):
"""
Configure all rules recursively - bind them to current RuleSet and
convert string references to `Field` rules.
:param rules:
Rule list
:param parent:
Parent rule (if any)
"""
result = []
for r in rules:
if isinstance(r, string_types):
result.append(self.convert_string(r).configure(self, parent))
else:
try:
result.append(r.configure(self, parent))
except AttributeError:
raise TypeError('Could not convert "%s" to rule' % repr(r))
return result
def __iter__(self):
"""
Iterate through registered rules.
"""
for r in self.rules:
yield r
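# Editor's illustrative sketch (not part of the original module): building a
# RuleSet directly. In practice a rule list like this is assigned to a
# ModelView's form_create_rules / form_edit_rules hooks, which construct the
# RuleSet for you; plain strings are turned into Field rules by
# convert_string().
def _rule_set_demo():
    rules = RuleSet(view=None, rules=[
        FieldSet(("first_name", "last_name"), header="Name"),
        "email",
    ])
    assert rules.visible_fields == ["first_name", "last_name", "email"]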
| bsd-3-clause |
greenoaktree/MissionPlanner | Lib/site-packages/scipy/signal/filter_design.py | 53 | 63381 | """Filter design.
"""
import types
import warnings
import numpy
from numpy import atleast_1d, poly, polyval, roots, real, asarray, allclose, \
resize, pi, absolute, logspace, r_, sqrt, tan, log10, arctan, arcsinh, \
cos, exp, cosh, arccosh, ceil, conjugate, zeros, sinh
from numpy import mintypecode
from scipy import special, optimize
from scipy.misc import comb
class BadCoefficients(UserWarning):
pass
abs = absolute
def findfreqs(num, den, N):
ep = atleast_1d(roots(den))+0j
tz = atleast_1d(roots(num))+0j
if len(ep) == 0:
ep = atleast_1d(-1000)+0j
ez = r_['-1',numpy.compress(ep.imag >=0, ep,axis=-1), numpy.compress((abs(tz) < 1e5) & (tz.imag >=0),tz,axis=-1)]
integ = abs(ez) < 1e-10
hfreq = numpy.around(numpy.log10(numpy.max(3*abs(ez.real + integ)+1.5*ez.imag))+0.5)
lfreq = numpy.around(numpy.log10(0.1*numpy.min(abs(real(ez+integ))+2*ez.imag))-0.5)
w = logspace(lfreq, hfreq, N)
return w
def freqs(b, a, worN=None, plot=None):
"""
Compute frequency response of analog filter.
Given the numerator (b) and denominator (a) of a filter compute its
frequency response::
H(w) = (b[0]*(jw)**(nb-1) + b[1]*(jw)**(nb-2) + ... + b[nb-1]) /
(a[0]*(jw)**(na-1) + a[1]*(jw)**(na-2) + ... + a[na-1])
Parameters
----------
b : ndarray
Numerator of a linear filter.
a : ndarray
Denominator of a linear filter.
worN : {None, int}, optional
If None, then compute at 200 frequencies around the interesting parts
of the response curve (determined by pole-zero locations). If a single
integer, then compute at that many frequencies. Otherwise, compute the
response at frequencies given in worN.
plot : callable
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqz`.
Returns
-------
w : ndarray
The frequencies at which h was computed.
h : ndarray
The frequency response.
See Also
--------
freqz : Compute the frequency response of a digital filter.
Notes
-----
Using Matplotlib's "plot" function as the callable for `plot` produces
unexpected results, as this plots the real part of the complex transfer
function, not the magnitude.
"""
if worN is None:
w = findfreqs(b,a,200)
elif isinstance(worN, types.IntType):
N = worN
w = findfreqs(b,a,N)
else:
w = worN
w = atleast_1d(w)
s = 1j*w
h = polyval(b, s) / polyval(a, s)
if not plot is None:
plot(w, h)
return w, h
def freqz(b, a=1, worN=None, whole=0, plot=None):
"""
Compute the frequency response of a digital filter.
Given the numerator ``b`` and denominator ``a`` of a digital filter compute
its frequency response::
H(e^jw) = B(e^jw) / A(e^jw)
= (b[0] + b[1]*e^-jw + ... + b[m]*e^-jmw) /
(a[0] + a[1]*e^-jw + ... + a[n]*e^-jnw)
Parameters
----------
b : ndarray
numerator of a linear filter
a : ndarray
denominator of a linear filter
worN : {None, int}, optional
If None, then compute at 512 frequencies around the unit circle.
If a single integer, then compute at that many frequencies.
Otherwise, compute the response at frequencies given in worN
whole : bool, optional
Normally, frequencies are computed from 0 to pi (the upper half of the
unit circle). If `whole` is True, compute frequencies from 0 to 2*pi.
plot : callable
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqz`.
Returns
-------
w : ndarray
The frequencies at which h was computed.
h : ndarray
The frequency response.
Notes
-----
Using Matplotlib's "plot" function as the callable for `plot` produces
unexpected results, as this plots the real part of the complex transfer
function, not the magnitude.
Examples
--------
>>> b = firwin(80, 0.5, window=('kaiser', 8))
>>> w, h = freqz(b)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.title('Digital filter frequency response')
>>> ax1 = fig.add_subplot(111)
>>> plt.semilogy(w, np.abs(h), 'b')
>>> plt.ylabel('Amplitude (dB)', color='b')
>>> plt.xlabel('Frequency (rad/sample)')
>>> plt.grid()
>>> plt.legend()
>>> ax2 = ax1.twinx()
>>> angles = np.unwrap(np.angle(h))
>>> plt.plot(w, angles, 'g')
>>> plt.ylabel('Angle (radians)', color='g')
>>> plt.show()
"""
b, a = map(atleast_1d, (b,a))
if whole:
lastpoint = 2*pi
else:
lastpoint = pi
if worN is None:
N = 512
w = numpy.arange(0,lastpoint,lastpoint/N)
elif isinstance(worN, types.IntType):
N = worN
w = numpy.arange(0,lastpoint,lastpoint/N)
else:
w = worN
w = atleast_1d(w)
zm1 = exp(-1j*w)
h = polyval(b[::-1], zm1) / polyval(a[::-1], zm1)
if not plot is None:
plot(w, h)
return w, h
def tf2zpk(b, a):
"""Return zero, pole, gain (z,p,k) representation from a numerator,
denominator representation of a linear filter.
Parameters
----------
b : ndarray
Numerator polynomial.
a : ndarray
Denominator polynomial.
Returns
-------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
If some values of b are too close to 0, they are removed. In that case, a
BadCoefficients warning is emitted.
"""
b,a = normalize(b,a)
b = (b+0.0) / a[0]
a = (a+0.0) / a[0]
k = b[0]
b /= b[0]
z = roots(b)
p = roots(a)
return z, p, k
def zpk2tf(z, p, k):
"""Return polynomial transfer function representation from zeros
and poles
Parameters
----------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
Returns
-------
b : ndarray
Numerator polynomial.
a : ndarray
Denominator polynomial.
"""
z = atleast_1d(z)
k = atleast_1d(k)
if len(z.shape) > 1:
temp = poly(z[0])
b = zeros((z.shape[0], z.shape[1]+1), temp.dtype.char)
if len(k) == 1:
k = [k[0]]*z.shape[0]
for i in range(z.shape[0]):
b[i] = k[i] * poly(z[i])
else:
b = k * poly(z)
a = atleast_1d(poly(p))
return b, a
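# Editor's illustrative sketch (not part of the original module): tf2zpk and
# zpk2tf are inverses up to normalization, here for
# H(s) = (s + 1) / (s**2 + 3*s + 2).
def _zpk_roundtrip_demo():
    z, p, k = tf2zpk([1, 1], [1, 3, 2])
    b, a = zpk2tf(z, p, k)
    assert allclose(b, [1, 1]) and allclose(a, [1, 3, 2])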
def normalize(b, a):
"""Normalize polynomial representation of a transfer function.
If values of b are too close to 0, they are removed. In that case, a
BadCoefficients warning is emitted.
"""
b,a = map(atleast_1d,(b,a))
if len(a.shape) != 1:
raise ValueError("Denominator polynomial must be rank-1 array.")
if len(b.shape) > 2:
raise ValueError("Numerator polynomial must be rank-1 or rank-2 array.")
if len(b.shape) == 1:
b = asarray([b],b.dtype.char)
while a[0] == 0.0 and len(a) > 1:
a = a[1:]
outb = b * (1.0) / a[0]
outa = a * (1.0) / a[0]
if allclose(outb[:,0], 0, rtol=1e-14):
warnings.warn("Badly conditioned filter coefficients (numerator): the "
"results may be meaningless", BadCoefficients)
while allclose(outb[:,0], 0, rtol=1e-14) and (outb.shape[-1] > 1):
outb = outb[:,1:]
if outb.shape[0] == 1:
outb = outb[0]
return outb, outa
def lp2lp(b, a, wo=1.0):
"""Return a low-pass filter with cutoff frequency `wo`
from a low-pass filter prototype with unity cutoff frequency.
"""
a,b = map(atleast_1d,(a,b))
try:
wo = float(wo)
except TypeError:
wo = float(wo[0])
d = len(a)
n = len(b)
M = max((d,n))
pwo = pow(wo,numpy.arange(M-1,-1,-1))
start1 = max((n-d,0))
start2 = max((d-n,0))
b = b * pwo[start1]/pwo[start2:]
a = a * pwo[start1]/pwo[start1:]
return normalize(b, a)
def lp2hp(b, a, wo=1.0):
"""Return a high-pass filter with cutoff frequency `wo`
from a low-pass filter prototype with unity cutoff frequency.
"""
a,b = map(atleast_1d,(a,b))
try:
wo = float(wo)
except TypeError:
wo = float(wo[0])
d = len(a)
n = len(b)
if wo != 1:
pwo = pow(wo,numpy.arange(max((d,n))))
else:
pwo = numpy.ones(max((d,n)),b.dtype.char)
if d >= n:
outa = a[::-1] * pwo
outb = resize(b,(d,))
outb[n:] = 0.0
outb[:n] = b[::-1] * pwo[:n]
else:
outb = b[::-1] * pwo
outa = resize(a,(n,))
outa[d:] = 0.0
outa[:d] = a[::-1] * pwo[:d]
return normalize(outb, outa)
def lp2bp(b, a, wo=1.0, bw=1.0):
"""Return a band-pass filter with center frequency `wo` and bandwidth `bw`
from a low-pass filter prototype with unity cutoff frequency.
"""
a,b = map(atleast_1d,(a,b))
D = len(a) - 1
N = len(b) - 1
artype = mintypecode((a,b))
ma = max([N,D])
Np = N + ma
Dp = D + ma
bprime = numpy.zeros(Np+1,artype)
aprime = numpy.zeros(Dp+1,artype)
wosq = wo*wo
for j in range(Np+1):
val = 0.0
for i in range(0,N+1):
for k in range(0,i+1):
if ma-i+2*k == j:
val += comb(i,k)*b[N-i]*(wosq)**(i-k) / bw**i
bprime[Np-j] = val
for j in range(Dp+1):
val = 0.0
for i in range(0,D+1):
for k in range(0,i+1):
if ma-i+2*k == j:
val += comb(i,k)*a[D-i]*(wosq)**(i-k) / bw**i
aprime[Dp-j] = val
return normalize(bprime, aprime)
def lp2bs(b, a, wo=1, bw=1):
"""Return a band-stop filter with center frequency `wo` and bandwidth `bw`
from a low-pass filter prototype with unity cutoff frequency.
"""
a,b = map(atleast_1d,(a,b))
D = len(a) - 1
N = len(b) - 1
artype = mintypecode((a,b))
M = max([N,D])
Np = M + M
Dp = M + M
bprime = numpy.zeros(Np+1,artype)
aprime = numpy.zeros(Dp+1,artype)
wosq = wo*wo
for j in range(Np+1):
val = 0.0
for i in range(0,N+1):
for k in range(0,M-i+1):
if i+2*k == j:
val += comb(M-i,k)*b[N-i]*(wosq)**(M-i-k) * bw**i
bprime[Np-j] = val
for j in range(Dp+1):
val = 0.0
for i in range(0,D+1):
for k in range(0,M-i+1):
if i+2*k == j:
val += comb(M-i,k)*a[D-i]*(wosq)**(M-i-k) * bw**i
aprime[Dp-j] = val
return normalize(bprime, aprime)
def bilinear(b, a, fs=1.0):
"""Return a digital filter from an analog filter using the bilinear transform.
The bilinear transform substitutes ``2*fs*(z-1) / (z+1)`` for ``s``.
"""
fs = float(fs)
a,b = map(atleast_1d,(a,b))
D = len(a) - 1
N = len(b) - 1
artype = float
M = max([N,D])
Np = M
Dp = M
bprime = numpy.zeros(Np+1,artype)
aprime = numpy.zeros(Dp+1,artype)
for j in range(Np+1):
val = 0.0
for i in range(N+1):
for k in range(i+1):
for l in range(M-i+1):
if k+l == j:
val += comb(i,k)*comb(M-i,l)*b[N-i]*pow(2*fs,i)*(-1)**k
bprime[j] = real(val)
for j in range(Dp+1):
val = 0.0
for i in range(D+1):
for k in range(i+1):
for l in range(M-i+1):
if k+l == j:
val += comb(i,k)*comb(M-i,l)*a[D-i]*pow(2*fs,i)*(-1)**k
aprime[j] = real(val)
return normalize(bprime, aprime)
def iirdesign(wp, ws, gpass, gstop, analog=0, ftype='ellip', output='ba'):
"""Complete IIR digital and analog filter design.
Given passband and stopband frequencies and gains construct an analog or
digital IIR filter of minimum order for a given basic type. Return the
output in numerator, denominator ('ba') or pole-zero ('zpk') form.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies, normalized from 0 to 1 (1
corresponds to pi radians / sample). For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : int, optional
Non-zero to design an analog filter (in this case `wp` and `ws` are in
radians / second).
ftype : str, optional
The type of IIR filter to design:
- elliptic : 'ellip'
- Butterworth : 'butter',
- Chebyshev I : 'cheby1',
- Chebyshev II: 'cheby2',
- Bessel : 'bessel'
output : ['ba', 'zpk'], optional
Type of output: numerator/denominator ('ba') or pole-zero ('zpk').
Default is 'ba'.
Returns
-------
b, a :
Numerator and denominator of the IIR filter. Only returned if
``output='ba'``.
z, p, k :
Zeros, poles, and gain of the IIR filter. Only returned if
``output='zpk'``.
"""
try:
ordfunc = filter_dict[ftype][1]
except KeyError:
raise ValueError("Invalid IIR filter type: %s" % ftype)
except IndexError:
raise ValueError("%s does not have order selection use iirfilter function." % ftype)
wp = atleast_1d(wp)
ws = atleast_1d(ws)
band_type = 2*(len(wp)-1)
band_type +=1
if wp[0] >= ws[0]:
band_type += 1
btype = {1:'lowpass', 2:'highpass', 3:'bandstop', 4:'bandpass'}[band_type]
N, Wn = ordfunc(wp, ws, gpass, gstop, analog=analog)
return iirfilter(N, Wn, rp=gpass, rs=gstop, analog=analog, btype=btype, ftype=ftype, output=output)
def iirfilter(N, Wn, rp=None, rs=None, btype='band', analog=0, ftype='butter', output='ba'):
"""IIR digital and analog filter design given order and critical points.
Design an Nth order lowpass digital or analog filter and return the filter
coefficients in (B,A) (numerator, denominator) or (Z,P,K) form.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
rp : float, optional
For Chebyshev and elliptic filters provides the maximum ripple
in the passband.
rs : float, optional
For chebyshev and elliptic filters provides the minimum attenuation in
the stop band.
btype : str, optional
The type of filter (lowpass, highpass, bandpass, bandstop).
Default is bandpass.
analog : int, optional
Non-zero to return an analog filter, otherwise a digital filter is
returned.
ftype : str, optional
The type of IIR filter to design:
- elliptic : 'ellip'
- Butterworth : 'butter',
- Chebyshev I : 'cheby1',
- Chebyshev II: 'cheby2',
- Bessel : 'bessel'
output : ['ba', 'zpk'], optional
Type of output: numerator/denominator ('ba') or pole-zero ('zpk').
Default is 'ba'.
See Also
--------
buttord, cheb1ord, cheb2ord, ellipord
"""
ftype, btype, output = [x.lower() for x in (ftype, btype, output)]
Wn = asarray(Wn)
try:
btype = band_dict[btype]
except KeyError:
raise ValueError("%s is an invalid bandtype for filter." % btype)
try:
typefunc = filter_dict[ftype][0]
except KeyError:
raise ValueError("%s is not a valid basic iir filter." % ftype)
if output not in ['ba', 'zpk']:
raise ValueError("%s is not a valid output form." % output)
#pre-warp frequencies for digital filter design
if not analog:
fs = 2.0
warped = 2*fs*tan(pi*Wn/fs)
else:
warped = Wn
# convert to low-pass prototype
if btype in ['lowpass', 'highpass']:
wo = warped
else:
bw = warped[1] - warped[0]
wo = sqrt(warped[0]*warped[1])
# Get analog lowpass prototype
if typefunc in [buttap, besselap]:
z, p, k = typefunc(N)
elif typefunc == cheb1ap:
if rp is None:
raise ValueError("passband ripple (rp) must be provided to design a Chebyshev I filter.")
z, p, k = typefunc(N, rp)
elif typefunc == cheb2ap:
if rs is None:
raise ValueError("stopband atteunatuion (rs) must be provided to design an Chebyshev II filter.")
z, p, k = typefunc(N, rs)
else: # Elliptic filters
if rs is None or rp is None:
raise ValueError("Both rp and rs must be provided to design an elliptic filter.")
z, p, k = typefunc(N, rp, rs)
b, a = zpk2tf(z,p,k)
# transform to lowpass, bandpass, highpass, or bandstop
if btype == 'lowpass':
b, a = lp2lp(b,a,wo=wo)
elif btype == 'highpass':
b, a = lp2hp(b,a,wo=wo)
elif btype == 'bandpass':
b, a = lp2bp(b,a,wo=wo,bw=bw)
else: # 'bandstop'
b, a = lp2bs(b,a,wo=wo,bw=bw)
# Find discrete equivalent if necessary
if not analog:
b, a = bilinear(b, a, fs=fs)
# Transform to proper out type (pole-zero, state-space, numer-denom)
if output == 'zpk':
return tf2zpk(b,a)
else:
return b,a
def butter(N, Wn, btype='low', analog=0, output='ba'):
"""Butterworth digital and analog filter design.
Design an Nth order lowpass digital or analog Butterworth filter and return
the filter coefficients in (B,A) or (Z,P,K) form.
See also
--------
buttord.
"""
return iirfilter(N, Wn, btype=btype, analog=analog, output=output, ftype='butter')
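# Illustrative sketch (not in the original source): a 3rd-order digital
# Butterworth lowpass with cutoff at 0.3 of the Nyquist frequency:
#
#     b, a = butter(3, 0.3)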
def cheby1(N, rp, Wn, btype='low', analog=0, output='ba'):
"""Chebyshev type I digital and analog filter design.
Design an Nth order lowpass digital or analog Chebyshev type I filter and
return the filter coefficients in (B,A) or (Z,P,K) form.
See also
--------
cheb1ord.
"""
return iirfilter(N, Wn, rp=rp, btype=btype, analog=analog, output=output, ftype='cheby1')
def cheby2(N, rs, Wn, btype='low', analog=0, output='ba'):
"""Chebyshev type I digital and analog filter design.
Design an Nth order lowpass digital or analog Chebyshev type I filter and
return the filter coefficients in (B,A) or (Z,P,K) form.
See also
--------
cheb2ord.
"""
return iirfilter(N, Wn, rs=rs, btype=btype, analog=analog, output=output, ftype='cheby2')
def ellip(N, rp, rs, Wn, btype='low', analog=0, output='ba'):
"""Elliptic (Cauer) digital and analog filter design.
Design an Nth order lowpass digital or analog elliptic filter and return
the filter coefficients in (B,A) or (Z,P,K) form.
See also
--------
ellipord.
"""
return iirfilter(N, Wn, rs=rs, rp=rp, btype=btype, analog=analog, output=output, ftype='elliptic')
def bessel(N, Wn, btype='low', analog=0, output='ba'):
"""Bessel digital and analog filter design.
Design an Nth order lowpass digital or analog Bessel filter and return the
filter coefficients in (B,A) or (Z,P,K) form.
"""
return iirfilter(N, Wn, btype=btype, analog=analog, output=output, ftype='bessel')
def maxflat():
pass
def yulewalk():
pass
def band_stop_obj(wp, ind, passb, stopb, gpass, gstop, type):
"""Band Stop Objective Function for order minimization.
Returns the non-integer order for an analog band stop filter.
Parameters
----------
wp : float
Edge of passband `passb`.
ind : int
Index specifying which `passb` edge to vary (0 or 1).
passb : array_like
Two element sequence of fixed passband edges.
stopb : array_like
Two element sequence of fixed stopband edges.
gpass : float
Amount of ripple in the passband in dB.
gstop : float
Amount of attenuation in stopband in dB.
type : ['butter', 'cheby', 'ellip']
Type of filter.
Returns
-------
n : scalar
Filter order (possibly non-integer).
"""
passbC = passb.copy()
passbC[ind] = wp
nat = stopb*(passbC[0]-passbC[1]) / (stopb**2 - passbC[0]*passbC[1])
nat = min(abs(nat))
if type == 'butter':
GSTOP = 10**(0.1*abs(gstop))
GPASS = 10**(0.1*abs(gpass))
n = (log10((GSTOP-1.0)/(GPASS-1.0)) / (2*log10(nat)))
elif type == 'cheby':
GSTOP = 10**(0.1*abs(gstop))
GPASS = 10**(0.1*abs(gpass))
n = arccosh(sqrt((GSTOP-1.0)/(GPASS-1.0))) / arccosh(nat)
elif type == 'ellip':
GSTOP = 10**(0.1*gstop)
GPASS = 10**(0.1*gpass)
arg1 = sqrt( (GPASS-1.0) / (GSTOP-1.0) )
arg0 = 1.0 / nat
d0 = special.ellipk([arg0**2, 1-arg0**2])
d1 = special.ellipk([arg1**2, 1-arg1**2])
n = (d0[0]*d1[1] / (d0[1]*d1[0]))
else:
raise ValueError("Incorrect type: %s" % type)
return n
def buttord(wp, ws, gpass, gstop, analog=0):
"""Butterworth filter order selection.
Return the order of the lowest order digital Butterworth filter that loses
no more than `gpass` dB in the passband and has at least `gstop` dB
attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies, normalized from 0 to 1 (1
corresponds to pi radians / sample). For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : int, optional
Non-zero to design an analog filter (in this case `wp` and `ws` are in
radians / second).
Returns
-------
ord : int
The lowest order for a Butterworth filter which meets specs.
wn : ndarray or float
The Butterworth natural frequency (i.e. the "3dB frequency"). Should
be used with `butter` to give filter results.
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2*(len(wp)-1)
filter_type += 1
if wp[0] >= ws[0]:
filter_type += 1
# Pre-warp frequencies
if not analog:
passb = tan(wp*pi/2.0)
stopb = tan(ws*pi/2.0)
else:
passb = wp*1.0
stopb = ws*1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0]-1e-12,
args=(0,passb,stopb,gpass,gstop,'butter'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1]+1e-12, passb[1],
args=(1,passb,stopb,gpass,gstop,'butter'),
disp=0)
passb[1] = wp1
nat = (stopb * (passb[0]-passb[1])) / (stopb**2 - passb[0]*passb[1])
elif filter_type == 4: # pass
nat = (stopb**2 - passb[0]*passb[1]) / (stopb* (passb[0]-passb[1]))
nat = min(abs(nat))
GSTOP = 10**(0.1*abs(gstop))
GPASS = 10**(0.1*abs(gpass))
ord = int(ceil( log10((GSTOP-1.0)/(GPASS-1.0)) / (2*log10(nat))))
# Find the Butterworth natural frequency W0 (or the "3dB frequency")
# to give exactly gstop at nat. W0 will be between 1 and nat
try:
W0 = nat / ( ( 10**(0.1*abs(gstop))-1)**(1.0/(2.0*ord)))
except ZeroDivisionError:
W0 = nat
print "Warning, order is zero...check input parametegstop."
# now convert this frequency back from lowpass prototype
# to the original analog filter
if filter_type == 1: # low
WN = W0*passb
elif filter_type == 2: # high
WN = passb / W0
elif filter_type == 3: # stop
WN = numpy.zeros(2,float)
WN[0] = ((passb[1] - passb[0]) + sqrt((passb[1] - passb[0])**2 + \
4*W0**2 * passb[0] * passb[1])) / (2*W0)
WN[1] = ((passb[1] - passb[0]) - sqrt((passb[1] - passb[0])**2 + \
4*W0**2 * passb[0] * passb[1])) / (2*W0)
WN = numpy.sort(abs(WN))
elif filter_type == 4: # pass
W0 = numpy.array([-W0, W0],float)
WN = -W0 * (passb[1]-passb[0]) / 2.0 + sqrt(W0**2 / 4.0 * \
(passb[1]-passb[0])**2 + \
passb[0]*passb[1])
WN = numpy.sort(abs(WN))
else:
raise ValueError("Bad type: %s" % filter_type)
if not analog:
wn = (2.0/pi)*arctan(WN)
else:
wn = WN
if len(wn) == 1:
wn = wn[0]
return ord, wn
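# Illustrative sketch (not in the original source): select the minimal
# order for a lowpass spec, then design the filter. The spec values here
# are assumptions chosen for the example.
#
#     N, Wn = buttord(wp=0.2, ws=0.3, gpass=3, gstop=40)
#     b, a = butter(N, Wn)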
def cheb1ord(wp, ws, gpass, gstop, analog=0):
"""Chebyshev type I filter order selection.
Return the order of the lowest order digital Chebyshev Type I filter that
loses no more than `gpass` dB in the passband and has at least `gstop` dB
attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies, normalized from 0 to 1 (1
corresponds to pi radians / sample). For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : int, optional
Non-zero to design an analog filter (in this case `wp` and `ws` are in
radians / second).
Returns
-------
ord : int
The lowest order for a Chebyshev type I filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`cheby1` to give filter results.
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2*(len(wp)-1)
if wp[0] < ws[0]:
filter_type += 1
else:
filter_type += 2
# Pre-warp frequencies
if not analog:
passb = tan(pi*wp/2.)
stopb = tan(pi*ws/2.)
else:
passb = wp*1.0
stopb = ws*1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0]-1e-12,
args=(0,passb,stopb,gpass,gstop,'cheby'), disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1]+1e-12, passb[1],
args=(1,passb,stopb,gpass,gstop,'cheby'), disp=0)
passb[1] = wp1
nat = (stopb * (passb[0]-passb[1])) / (stopb**2 - passb[0]*passb[1])
elif filter_type == 4: # pass
nat = (stopb**2 - passb[0]*passb[1]) / (stopb* (passb[0]-passb[1]))
nat = min(abs(nat))
GSTOP = 10**(0.1*abs(gstop))
GPASS = 10**(0.1*abs(gpass))
ord = int(ceil(arccosh(sqrt((GSTOP-1.0) / (GPASS-1.0))) / arccosh(nat)))
# Natural frequencies are just the passband edges
if not analog:
wn = (2.0/pi)*arctan(passb)
else:
wn = passb
if len(wn) == 1:
wn = wn[0]
return ord, wn
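# Illustrative sketch (not in the original source): note that cheby1 also
# needs the passband ripple rp (here equal to gpass) at design time.
#
#     N, Wn = cheb1ord(0.2, 0.3, gpass=3, gstop=40)
#     b, a = cheby1(N, 3, Wn)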
def cheb2ord(wp, ws, gpass, gstop, analog=0):
"""Chebyshev type II filter order selection.
Return the order of the lowest order digital Chebyshev Type II filter that
loses no more than `gpass` dB in the passband and has at least `gstop` dB
attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies, normalized from 0 to 1 (1
corresponds to pi radians / sample). For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : int, optional
Non-zero to design an analog filter (in this case `wp` and `ws` are in
radians / second).
Returns
-------
ord : int
The lowest order for a Chebyshev type II filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`cheby2` to give filter results.
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2*(len(wp)-1)
if wp[0] < ws[0]:
filter_type += 1
else:
filter_type += 2
# Pre-warp frequencies
if not analog:
passb = tan(pi*wp/2.0)
stopb = tan(pi*ws/2.0)
else:
passb = wp*1.0
stopb = ws*1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0]-1e-12,
args=(0,passb,stopb,gpass,gstop,'cheby'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1]+1e-12, passb[1],
args=(1,passb,stopb,gpass,gstop,'cheby'),
disp=0)
passb[1] = wp1
nat = (stopb * (passb[0]-passb[1])) / (stopb**2 - passb[0]*passb[1])
elif filter_type == 4: # pass
nat = (stopb**2 - passb[0]*passb[1]) / (stopb* (passb[0]-passb[1]))
nat = min(abs(nat))
GSTOP = 10**(0.1*abs(gstop))
GPASS = 10**(0.1*abs(gpass))
ord = int(ceil(arccosh(sqrt((GSTOP-1.0) / (GPASS-1.0))) / arccosh(nat)))
# Find frequency where analog response is -gpass dB.
# Then convert back from low-pass prototype to the original filter.
new_freq = cosh(1.0/ord * arccosh(sqrt((GSTOP-1.0)/(GPASS-1.0))))
new_freq = 1.0 / new_freq
if filter_type == 1:
nat = passb / new_freq
elif filter_type == 2:
nat = passb * new_freq
elif filter_type == 3:
nat = numpy.zeros(2,float)
nat[0] = new_freq / 2.0 * (passb[0]-passb[1]) + \
sqrt(new_freq**2 * (passb[1]-passb[0])**2 / 4.0 + \
passb[1] * passb[0])
nat[1] = passb[1] * passb[0] / nat[0]
elif filter_type == 4:
nat = numpy.zeros(2,float)
nat[0] = 1.0/(2.0*new_freq) * (passb[0] - passb[1]) + \
sqrt((passb[1]-passb[0])**2 / (4.0*new_freq**2) + \
passb[1] * passb[0])
nat[1] = passb[0] * passb[1] / nat[0]
if not analog:
wn = (2.0/pi)*arctan(nat)
else:
wn = nat
if len(wn) == 1:
wn = wn[0]
return ord, wn
def ellipord(wp, ws, gpass, gstop, analog=0):
"""Elliptic (Cauer) filter order selection.
Return the order of the lowest order digital elliptic filter that loses no
more than gpass dB in the passband and has at least gstop dB attenuation in
the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies, normalized from 0 to 1 (1
corresponds to pi radians / sample). For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : int, optional
Non-zero to design an analog filter (in this case `wp` and `ws` are in
radians / second).
Returns
-------
ord : int
The lowest order for an Elliptic (Cauer) filter that meets specs.
wn : ndarray or float
The elliptic natural frequency (the "3dB frequency") for use with
`ellip` to give filter results.
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2*(len(wp)-1)
filter_type += 1
if wp[0] >= ws[0]:
filter_type += 1
# Pre-warp frequencies
if analog:
passb = wp*1.0
stopb = ws*1.0
else:
passb = tan(wp*pi/2.0)
stopb = tan(ws*pi/2.0)
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0]-1e-12,
args=(0,passb,stopb,gpass,gstop,'ellip'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1]+1e-12, passb[1],
args=(1,passb,stopb,gpass,gstop,'ellip'),
disp=0)
passb[1] = wp1
nat = (stopb * (passb[0]-passb[1])) / (stopb**2 - passb[0]*passb[1])
elif filter_type == 4: # pass
nat = (stopb**2 - passb[0]*passb[1]) / (stopb* (passb[0]-passb[1]))
nat = min(abs(nat))
GSTOP = 10**(0.1*gstop)
GPASS = 10**(0.1*gpass)
arg1 = sqrt( (GPASS-1.0) / (GSTOP-1.0) )
arg0 = 1.0 / nat
d0 = special.ellipk([arg0**2, 1-arg0**2])
d1 = special.ellipk([arg1**2, 1-arg1**2])
ord = int(ceil(d0[0]*d1[1] / (d0[1]*d1[0])))
if not analog:
wn = arctan(passb)*2.0/pi
else:
wn = passb
if len(wn) == 1:
wn = wn[0]
return ord, wn
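# Illustrative sketch (not in the original source): elliptic design needs
# both ripple parameters, so pass gpass as rp and gstop as rs to ellip.
#
#     N, Wn = ellipord(0.2, 0.3, gpass=3, gstop=40)
#     b, a = ellip(N, 3, 40, Wn)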
def buttap(N):
"""Return (z,p,k) zero, pole, gain for analog prototype of an Nth
order Butterworth filter."""
z = []
n = numpy.arange(1,N+1)
p = numpy.exp(1j*(2*n-1)/(2.0*N)*pi)*1j
k = 1
return z, p, k
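# Sanity check one might run (illustrative, not part of the module): the
# analog Butterworth prototype poles all lie on the unit circle in the
# left half-plane.
#
#     z, p, k = buttap(4)
#     assert numpy.allclose(abs(p), 1.0) and (p.real < 0).all()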
def cheb1ap(N, rp):
"""Return (z,p,k) zero, pole, gain for Nth order Chebyshev type I lowpass
analog filter prototype with `rp` decibels of ripple in the passband.
"""
z = []
eps = numpy.sqrt(10**(0.1*rp)-1.0)
n = numpy.arange(1,N+1)
mu = 1.0/N * numpy.log((1.0+numpy.sqrt(1+eps*eps)) / eps)
theta = pi/2.0 * (2*n-1.0)/N
p = -numpy.sinh(mu)*numpy.sin(theta) + 1j*numpy.cosh(mu)*numpy.cos(theta)
k = numpy.prod(-p,axis=0).real
if N % 2 == 0:
k = k / sqrt((1+eps*eps))
return z, p, k
def cheb2ap(N, rs):
"""Return (z,p,k) zero, pole, gain for Nth order Chebyshev type II lowpass
analog filter prototype with `rs` decibels of ripple in the stopband.
"""
de = 1.0/sqrt(10**(0.1*rs)-1)
mu = arcsinh(1.0/de)/N
if N % 2:
m = N - 1
n = numpy.concatenate((numpy.arange(1,N-1,2),numpy.arange(N+2,2*N,2)))
else:
m = N
n = numpy.arange(1,2*N,2)
z = conjugate(1j / cos(n*pi/(2.0*N)))
p = exp(1j*(pi*numpy.arange(1,2*N,2)/(2.0*N) + pi/2.0))
p = sinh(mu) * p.real + 1j*cosh(mu)*p.imag
p = 1.0 / p
k = (numpy.prod(-p,axis=0)/numpy.prod(-z,axis=0)).real
return z, p, k
EPSILON = 2e-16
def vratio(u, ineps, mp):
[s,c,d,phi] = special.ellipj(u,mp)
ret = abs(ineps - s/c)
return ret
def kratio(m, k_ratio):
m = float(m)
if m < 0:
m = 0.0
if m > 1:
m = 1.0
if abs(m) > EPSILON and (abs(m) + EPSILON) < 1:
k = special.ellipk([m,1-m])
r = k[0] / k[1] - k_ratio
elif abs(m) > EPSILON:
r = -k_ratio
else:
r = 1e20
return abs(r)
def ellipap(N, rp, rs):
"""Return (z,p,k) zeros, poles, and gain of an Nth order normalized
prototype elliptic analog lowpass filter with `rp` decibels of ripple in
the passband and a stopband `rs` decibels down.
References
----------
Lutova, Tosic, and Evans, "Filter Design for Signal Processing", Chapters 5
and 12.
"""
if N == 1:
p = -sqrt(1.0/(10**(0.1*rp)-1.0))
k = -p
z = []
return z, p, k
eps = numpy.sqrt(10**(0.1*rp)-1)
ck1 = eps / numpy.sqrt(10**(0.1*rs)-1)
ck1p = numpy.sqrt(1-ck1*ck1)
if ck1p == 1:
raise ValueError("Cannot design a filter with given rp and rs specifications.")
wp = 1
val = special.ellipk([ck1*ck1,ck1p*ck1p])
if abs(1-ck1p*ck1p) < EPSILON:
krat = 0
else:
krat = N*val[0] / val[1]
m = optimize.fmin(kratio, [0.5], args=(krat,), maxfun=250, maxiter=250,
disp=0)
if m < 0 or m > 1:
m = optimize.fminbound(kratio, 0, 1, args=(krat,), maxfun=250,
maxiter=250, disp=0)
capk = special.ellipk(m)
ws = wp / sqrt(m)
m1 = 1-m
j = numpy.arange(1-N%2,N,2)
jj = len(j)
[s,c,d,phi] = special.ellipj(j*capk/N,m*numpy.ones(jj))
snew = numpy.compress(abs(s) > EPSILON, s,axis=-1)
z = 1.0 / (sqrt(m)*snew)
z = 1j*z
z = numpy.concatenate((z,conjugate(z)))
r = optimize.fmin(vratio, special.ellipk(m), args=(1./eps, ck1p*ck1p),
maxfun=250, maxiter=250, disp=0)
v0 = capk * r / (N*val[0])
[sv,cv,dv,phi] = special.ellipj(v0,1-m)
p = -(c*d*sv*cv + 1j*s*dv) / (1-(d*sv)**2.0)
if N % 2:
newp = numpy.compress(abs(p.imag) > EPSILON*numpy.sqrt(numpy.sum(p*numpy.conjugate(p),axis=0).real), p,axis=-1)
p = numpy.concatenate((p,conjugate(newp)))
else:
p = numpy.concatenate((p,conjugate(p)))
k = (numpy.prod(-p,axis=0) / numpy.prod(-z,axis=0)).real
if N % 2 == 0:
k = k / numpy.sqrt((1+eps*eps))
return z, p, k
def besselap(N):
"""Return (z,p,k) zero, pole, gain for analog prototype of an Nth order
Bessel filter."""
z = []
k = 1
if N == 0:
p = []
elif N == 1:
p = [-1]
elif N == 2:
p = [-.8660254037844386467637229+.4999999999999999999999996*1j,
-.8660254037844386467637229-.4999999999999999999999996*1j]
elif N == 3:
p = [-.9416000265332067855971980,
-.7456403858480766441810907-.7113666249728352680992154*1j,
-.7456403858480766441810907+.7113666249728352680992154*1j]
elif N == 4:
p = [-.6572111716718829545787781-.8301614350048733772399715*1j,
-.6572111716718829545787788+.8301614350048733772399715*1j,
-.9047587967882449459642637-.2709187330038746636700923*1j,
-.9047587967882449459642624+.2709187330038746636700926*1j]
elif N == 5:
p = [-.9264420773877602247196260,
-.8515536193688395541722677-.4427174639443327209850002*1j,
-.8515536193688395541722677+.4427174639443327209850002*1j,
-.5905759446119191779319432-.9072067564574549539291747*1j,
-.5905759446119191779319432+.9072067564574549539291747*1j]
elif N == 6:
p = [-.9093906830472271808050953-.1856964396793046769246397*1j,
-.9093906830472271808050953+.1856964396793046769246397*1j,
-.7996541858328288520243325-.5621717346937317988594118*1j,
-.7996541858328288520243325+.5621717346937317988594118*1j,
-.5385526816693109683073792-.9616876881954277199245657*1j,
-.5385526816693109683073792+.9616876881954277199245657*1j]
elif N == 7:
p = [-.9194871556490290014311619,
-.8800029341523374639772340-.3216652762307739398381830*1j,
-.8800029341523374639772340+.3216652762307739398381830*1j,
-.7527355434093214462291616-.6504696305522550699212995*1j,
-.7527355434093214462291616+.6504696305522550699212995*1j,
-.4966917256672316755024763-1.002508508454420401230220*1j,
-.4966917256672316755024763+1.002508508454420401230220*1j]
elif N == 8:
p = [-.9096831546652910216327629-.1412437976671422927888150*1j,
-.9096831546652910216327629+.1412437976671422927888150*1j,
-.8473250802359334320103023-.4259017538272934994996429*1j,
-.8473250802359334320103023+.4259017538272934994996429*1j,
-.7111381808485399250796172-.7186517314108401705762571*1j,
-.7111381808485399250796172+.7186517314108401705762571*1j,
-.4621740412532122027072175-1.034388681126901058116589*1j,
-.4621740412532122027072175+1.034388681126901058116589*1j]
elif N == 9:
p = [-.9154957797499037686769223,
-.8911217017079759323183848-.2526580934582164192308115*1j,
-.8911217017079759323183848+.2526580934582164192308115*1j,
-.8148021112269012975514135-.5085815689631499483745341*1j,
-.8148021112269012975514135+.5085815689631499483745341*1j,
-.6743622686854761980403401-.7730546212691183706919682*1j,
-.6743622686854761980403401+.7730546212691183706919682*1j,
-.4331415561553618854685942-1.060073670135929666774323*1j,
-.4331415561553618854685942+1.060073670135929666774323*1j]
elif N == 10:
p = [-.9091347320900502436826431-.1139583137335511169927714*1j,
-.9091347320900502436826431+.1139583137335511169927714*1j,
-.8688459641284764527921864-.3430008233766309973110589*1j,
-.8688459641284764527921864+.3430008233766309973110589*1j,
-.7837694413101441082655890-.5759147538499947070009852*1j,
-.7837694413101441082655890+.5759147538499947070009852*1j,
-.6417513866988316136190854-.8175836167191017226233947*1j,
-.6417513866988316136190854+.8175836167191017226233947*1j,
-.4083220732868861566219785-1.081274842819124562037210*1j,
-.4083220732868861566219785+1.081274842819124562037210*1j]
elif N == 11:
p = [-.9129067244518981934637318,
-.8963656705721166099815744-.2080480375071031919692341*1j,
-.8963656705721166099815744+.2080480375071031919692341*1j,
-.8453044014712962954184557-.4178696917801248292797448*1j,
-.8453044014712962954184557+.4178696917801248292797448*1j,
-.7546938934722303128102142-.6319150050721846494520941*1j,
-.7546938934722303128102142+.6319150050721846494520941*1j,
-.6126871554915194054182909-.8547813893314764631518509*1j,
-.6126871554915194054182909+.8547813893314764631518509*1j,
-.3868149510055090879155425-1.099117466763120928733632*1j,
-.3868149510055090879155425+1.099117466763120928733632*1j]
elif N == 12:
p = [-.9084478234140682638817772-95506365213450398415258360.0e-27*1j,
-.9084478234140682638817772+95506365213450398415258360.0e-27*1j,
-.8802534342016826507901575-.2871779503524226723615457*1j,
-.8802534342016826507901575+.2871779503524226723615457*1j,
-.8217296939939077285792834-.4810212115100676440620548*1j,
-.8217296939939077285792834+.4810212115100676440620548*1j,
-.7276681615395159454547013-.6792961178764694160048987*1j,
-.7276681615395159454547013+.6792961178764694160048987*1j,
-.5866369321861477207528215-.8863772751320727026622149*1j,
-.5866369321861477207528215+.8863772751320727026622149*1j,
-.3679640085526312839425808-1.114373575641546257595657*1j,
-.3679640085526312839425808+1.114373575641546257595657*1j]
elif N == 13:
p = [-.9110914665984182781070663,
-.8991314665475196220910718-.1768342956161043620980863*1j,
-.8991314665475196220910718+.1768342956161043620980863*1j,
-.8625094198260548711573628-.3547413731172988997754038*1j,
-.8625094198260548711573628+.3547413731172988997754038*1j,
-.7987460692470972510394686-.5350752120696801938272504*1j,
-.7987460692470972510394686+.5350752120696801938272504*1j,
-.7026234675721275653944062-.7199611890171304131266374*1j,
-.7026234675721275653944062+.7199611890171304131266374*1j,
-.5631559842430199266325818-.9135900338325109684927731*1j,
-.5631559842430199266325818+.9135900338325109684927731*1j,
-.3512792323389821669401925-1.127591548317705678613239*1j,
-.3512792323389821669401925+1.127591548317705678613239*1j]
elif N == 14:
p = [-.9077932138396487614720659-82196399419401501888968130.0e-27*1j,
-.9077932138396487614720659+82196399419401501888968130.0e-27*1j,
-.8869506674916445312089167-.2470079178765333183201435*1j,
-.8869506674916445312089167+.2470079178765333183201435*1j,
-.8441199160909851197897667-.4131653825102692595237260*1j,
-.8441199160909851197897667+.4131653825102692595237260*1j,
-.7766591387063623897344648-.5819170677377608590492434*1j,
-.7766591387063623897344648+.5819170677377608590492434*1j,
-.6794256425119233117869491-.7552857305042033418417492*1j,
-.6794256425119233117869491+.7552857305042033418417492*1j,
-.5418766775112297376541293-.9373043683516919569183099*1j,
-.5418766775112297376541293+.9373043683516919569183099*1j,
-.3363868224902037330610040-1.139172297839859991370924*1j,
-.3363868224902037330610040+1.139172297839859991370924*1j]
elif N == 15:
p = [-.9097482363849064167228581,
-.9006981694176978324932918-.1537681197278439351298882*1j,
-.9006981694176978324932918+.1537681197278439351298882*1j,
-.8731264620834984978337843-.3082352470564267657715883*1j,
-.8731264620834984978337843+.3082352470564267657715883*1j,
-.8256631452587146506294553-.4642348752734325631275134*1j,
-.8256631452587146506294553+.4642348752734325631275134*1j,
-.7556027168970728127850416-.6229396358758267198938604*1j,
-.7556027168970728127850416+.6229396358758267198938604*1j,
-.6579196593110998676999362-.7862895503722515897065645*1j,
-.6579196593110998676999362+.7862895503722515897065645*1j,
-.5224954069658330616875186-.9581787261092526478889345*1j,
-.5224954069658330616875186+.9581787261092526478889345*1j,
-.3229963059766444287113517-1.149416154583629539665297*1j,
-.3229963059766444287113517+1.149416154583629539665297*1j]
elif N == 16:
p = [-.9072099595087001356491337-72142113041117326028823950.0e-27*1j,
-.9072099595087001356491337+72142113041117326028823950.0e-27*1j,
-.8911723070323647674780132-.2167089659900576449410059*1j,
-.8911723070323647674780132+.2167089659900576449410059*1j,
-.8584264231521330481755780-.3621697271802065647661080*1j,
-.8584264231521330481755780+.3621697271802065647661080*1j,
-.8074790293236003885306146-.5092933751171800179676218*1j,
-.8074790293236003885306146+.5092933751171800179676218*1j,
-.7356166304713115980927279-.6591950877860393745845254*1j,
-.7356166304713115980927279+.6591950877860393745845254*1j,
-.6379502514039066715773828-.8137453537108761895522580*1j,
-.6379502514039066715773828+.8137453537108761895522580*1j,
-.5047606444424766743309967-.9767137477799090692947061*1j,
-.5047606444424766743309967+.9767137477799090692947061*1j,
-.3108782755645387813283867-1.158552841199330479412225*1j,
-.3108782755645387813283867+1.158552841199330479412225*1j]
elif N == 17:
p = [-.9087141161336397432860029,
-.9016273850787285964692844-.1360267995173024591237303*1j,
-.9016273850787285964692844+.1360267995173024591237303*1j,
-.8801100704438627158492165-.2725347156478803885651973*1j,
-.8801100704438627158492165+.2725347156478803885651973*1j,
-.8433414495836129204455491-.4100759282910021624185986*1j,
-.8433414495836129204455491+.4100759282910021624185986*1j,
-.7897644147799708220288138-.5493724405281088674296232*1j,
-.7897644147799708220288138+.5493724405281088674296232*1j,
-.7166893842372349049842743-.6914936286393609433305754*1j,
-.7166893842372349049842743+.6914936286393609433305754*1j,
-.6193710717342144521602448-.8382497252826992979368621*1j,
-.6193710717342144521602448+.8382497252826992979368621*1j,
-.4884629337672704194973683-.9932971956316781632345466*1j,
-.4884629337672704194973683+.9932971956316781632345466*1j,
-.2998489459990082015466971-1.166761272925668786676672*1j,
-.2998489459990082015466971+1.166761272925668786676672*1j]
elif N == 18:
p = [-.9067004324162775554189031-64279241063930693839360680.0e-27*1j,
-.9067004324162775554189031+64279241063930693839360680.0e-27*1j,
-.8939764278132455733032155-.1930374640894758606940586*1j,
-.8939764278132455733032155+.1930374640894758606940586*1j,
-.8681095503628830078317207-.3224204925163257604931634*1j,
-.8681095503628830078317207+.3224204925163257604931634*1j,
-.8281885016242836608829018-.4529385697815916950149364*1j,
-.8281885016242836608829018+.4529385697815916950149364*1j,
-.7726285030739558780127746-.5852778162086640620016316*1j,
-.7726285030739558780127746+.5852778162086640620016316*1j,
-.6987821445005273020051878-.7204696509726630531663123*1j,
-.6987821445005273020051878+.7204696509726630531663123*1j,
-.6020482668090644386627299-.8602708961893664447167418*1j,
-.6020482668090644386627299+.8602708961893664447167418*1j,
-.4734268069916151511140032-1.008234300314801077034158*1j,
-.4734268069916151511140032+1.008234300314801077034158*1j,
-.2897592029880489845789953-1.174183010600059128532230*1j,
-.2897592029880489845789953+1.174183010600059128532230*1j]
elif N == 19:
p = [-.9078934217899404528985092,
-.9021937639390660668922536-.1219568381872026517578164*1j,
-.9021937639390660668922536+.1219568381872026517578164*1j,
-.8849290585034385274001112-.2442590757549818229026280*1j,
-.8849290585034385274001112+.2442590757549818229026280*1j,
-.8555768765618421591093993-.3672925896399872304734923*1j,
-.8555768765618421591093993+.3672925896399872304734923*1j,
-.8131725551578197705476160-.4915365035562459055630005*1j,
-.8131725551578197705476160+.4915365035562459055630005*1j,
-.7561260971541629355231897-.6176483917970178919174173*1j,
-.7561260971541629355231897+.6176483917970178919174173*1j,
-.6818424412912442033411634-.7466272357947761283262338*1j,
-.6818424412912442033411634+.7466272357947761283262338*1j,
-.5858613321217832644813602-.8801817131014566284786759*1j,
-.5858613321217832644813602+.8801817131014566284786759*1j,
-.4595043449730988600785456-1.021768776912671221830298*1j,
-.4595043449730988600785456+1.021768776912671221830298*1j,
-.2804866851439370027628724-1.180931628453291873626003*1j,
-.2804866851439370027628724+1.180931628453291873626003*1j]
elif N == 20:
p = [-.9062570115576771146523497-57961780277849516990208850.0e-27*1j,
-.9062570115576771146523497+57961780277849516990208850.0e-27*1j,
-.8959150941925768608568248-.1740317175918705058595844*1j,
-.8959150941925768608568248+.1740317175918705058595844*1j,
-.8749560316673332850673214-.2905559296567908031706902*1j,
-.8749560316673332850673214+.2905559296567908031706902*1j,
-.8427907479956670633544106-.4078917326291934082132821*1j,
-.8427907479956670633544106+.4078917326291934082132821*1j,
-.7984251191290606875799876-.5264942388817132427317659*1j,
-.7984251191290606875799876+.5264942388817132427317659*1j,
-.7402780309646768991232610-.6469975237605228320268752*1j,
-.7402780309646768991232610+.6469975237605228320268752*1j,
-.6658120544829934193890626-.7703721701100763015154510*1j,
-.6658120544829934193890626+.7703721701100763015154510*1j,
-.5707026806915714094398061-.8982829066468255593407161*1j,
-.5707026806915714094398061+.8982829066468255593407161*1j,
-.4465700698205149555701841-1.034097702560842962315411*1j,
-.4465700698205149555701841+1.034097702560842962315411*1j,
-.2719299580251652601727704-1.187099379810885886139638*1j,
-.2719299580251652601727704+1.187099379810885886139638*1j]
elif N == 21:
p = [-.9072262653142957028884077,
-.9025428073192696303995083-.1105252572789856480992275*1j,
-.9025428073192696303995083+.1105252572789856480992275*1j,
-.8883808106664449854431605-.2213069215084350419975358*1j,
-.8883808106664449854431605+.2213069215084350419975358*1j,
-.8643915813643204553970169-.3326258512522187083009453*1j,
-.8643915813643204553970169+.3326258512522187083009453*1j,
-.8299435470674444100273463-.4448177739407956609694059*1j,
-.8299435470674444100273463+.4448177739407956609694059*1j,
-.7840287980408341576100581-.5583186348022854707564856*1j,
-.7840287980408341576100581+.5583186348022854707564856*1j,
-.7250839687106612822281339-.6737426063024382240549898*1j,
-.7250839687106612822281339+.6737426063024382240549898*1j,
-.6506315378609463397807996-.7920349342629491368548074*1j,
-.6506315378609463397807996+.7920349342629491368548074*1j,
-.5564766488918562465935297-.9148198405846724121600860*1j,
-.5564766488918562465935297+.9148198405846724121600860*1j,
-.4345168906815271799687308-1.045382255856986531461592*1j,
-.4345168906815271799687308+1.045382255856986531461592*1j,
-.2640041595834031147954813-1.192762031948052470183960*1j,
-.2640041595834031147954813+1.192762031948052470183960*1j]
elif N == 22:
p = [-.9058702269930872551848625-52774908289999045189007100.0e-27*1j,
-.9058702269930872551848625+52774908289999045189007100.0e-27*1j,
-.8972983138153530955952835-.1584351912289865608659759*1j,
-.8972983138153530955952835+.1584351912289865608659759*1j,
-.8799661455640176154025352-.2644363039201535049656450*1j,
-.8799661455640176154025352+.2644363039201535049656450*1j,
-.8534754036851687233084587-.3710389319482319823405321*1j,
-.8534754036851687233084587+.3710389319482319823405321*1j,
-.8171682088462720394344996-.4785619492202780899653575*1j,
-.8171682088462720394344996+.4785619492202780899653575*1j,
-.7700332930556816872932937-.5874255426351153211965601*1j,
-.7700332930556816872932937+.5874255426351153211965601*1j,
-.7105305456418785989070935-.6982266265924524000098548*1j,
-.7105305456418785989070935+.6982266265924524000098548*1j,
-.6362427683267827226840153-.8118875040246347267248508*1j,
-.6362427683267827226840153+.8118875040246347267248508*1j,
-.5430983056306302779658129-.9299947824439872998916657*1j,
-.5430983056306302779658129+.9299947824439872998916657*1j,
-.4232528745642628461715044-1.055755605227545931204656*1j,
-.4232528745642628461715044+1.055755605227545931204656*1j,
-.2566376987939318038016012-1.197982433555213008346532*1j,
-.2566376987939318038016012+1.197982433555213008346532*1j]
elif N == 23:
p = [-.9066732476324988168207439,
-.9027564979912504609412993-.1010534335314045013252480*1j,
-.9027564979912504609412993+.1010534335314045013252480*1j,
-.8909283242471251458653994-.2023024699381223418195228*1j,
-.8909283242471251458653994+.2023024699381223418195228*1j,
-.8709469395587416239596874-.3039581993950041588888925*1j,
-.8709469395587416239596874+.3039581993950041588888925*1j,
-.8423805948021127057054288-.4062657948237602726779246*1j,
-.8423805948021127057054288+.4062657948237602726779246*1j,
-.8045561642053176205623187-.5095305912227258268309528*1j,
-.8045561642053176205623187+.5095305912227258268309528*1j,
-.7564660146829880581478138-.6141594859476032127216463*1j,
-.7564660146829880581478138+.6141594859476032127216463*1j,
-.6965966033912705387505040-.7207341374753046970247055*1j,
-.6965966033912705387505040+.7207341374753046970247055*1j,
-.6225903228771341778273152-.8301558302812980678845563*1j,
-.6225903228771341778273152+.8301558302812980678845563*1j,
-.5304922463810191698502226-.9439760364018300083750242*1j,
-.5304922463810191698502226+.9439760364018300083750242*1j,
-.4126986617510148836149955-1.065328794475513585531053*1j,
-.4126986617510148836149955+1.065328794475513585531053*1j,
-.2497697202208956030229911-1.202813187870697831365338*1j,
-.2497697202208956030229911+1.202813187870697831365338*1j]
elif N == 24:
p = [-.9055312363372773709269407-48440066540478700874836350.0e-27*1j,
-.9055312363372773709269407+48440066540478700874836350.0e-27*1j,
-.8983105104397872954053307-.1454056133873610120105857*1j,
-.8983105104397872954053307+.1454056133873610120105857*1j,
-.8837358034555706623131950-.2426335234401383076544239*1j,
-.8837358034555706623131950+.2426335234401383076544239*1j,
-.8615278304016353651120610-.3403202112618624773397257*1j,
-.8615278304016353651120610+.3403202112618624773397257*1j,
-.8312326466813240652679563-.4386985933597305434577492*1j,
-.8312326466813240652679563+.4386985933597305434577492*1j,
-.7921695462343492518845446-.5380628490968016700338001*1j,
-.7921695462343492518845446+.5380628490968016700338001*1j,
-.7433392285088529449175873-.6388084216222567930378296*1j,
-.7433392285088529449175873+.6388084216222567930378296*1j,
-.6832565803536521302816011-.7415032695091650806797753*1j,
-.6832565803536521302816011+.7415032695091650806797753*1j,
-.6096221567378335562589532-.8470292433077202380020454*1j,
-.6096221567378335562589532+.8470292433077202380020454*1j,
-.5185914574820317343536707-.9569048385259054576937721*1j,
-.5185914574820317343536707+.9569048385259054576937721*1j,
-.4027853855197518014786978-1.074195196518674765143729*1j,
-.4027853855197518014786978+1.074195196518674765143729*1j,
-.2433481337524869675825448-1.207298683731972524975429*1j,
-.2433481337524869675825448+1.207298683731972524975429*1j]
elif N == 25:
p = [-.9062073871811708652496104,
-.9028833390228020537142561-93077131185102967450643820.0e-27*1j,
-.9028833390228020537142561+93077131185102967450643820.0e-27*1j,
-.8928551459883548836774529-.1863068969804300712287138*1j,
-.8928551459883548836774529+.1863068969804300712287138*1j,
-.8759497989677857803656239-.2798521321771408719327250*1j,
-.8759497989677857803656239+.2798521321771408719327250*1j,
-.8518616886554019782346493-.3738977875907595009446142*1j,
-.8518616886554019782346493+.3738977875907595009446142*1j,
-.8201226043936880253962552-.4686668574656966589020580*1j,
-.8201226043936880253962552+.4686668574656966589020580*1j,
-.7800496278186497225905443-.5644441210349710332887354*1j,
-.7800496278186497225905443+.5644441210349710332887354*1j,
-.7306549271849967721596735-.6616149647357748681460822*1j,
-.7306549271849967721596735+.6616149647357748681460822*1j,
-.6704827128029559528610523-.7607348858167839877987008*1j,
-.6704827128029559528610523+.7607348858167839877987008*1j,
-.5972898661335557242320528-.8626676330388028512598538*1j,
-.5972898661335557242320528+.8626676330388028512598538*1j,
-.5073362861078468845461362-.9689006305344868494672405*1j,
-.5073362861078468845461362+.9689006305344868494672405*1j,
-.3934529878191079606023847-1.082433927173831581956863*1j,
-.3934529878191079606023847+1.082433927173831581956863*1j,
-.2373280669322028974199184-1.211476658382565356579418*1j,
-.2373280669322028974199184+1.211476658382565356579418*1j]
else:
raise ValueError("Bessel Filter not supported for order %d" % N)
return z, p, k
filter_dict = {'butter': [buttap,buttord],
'butterworth' : [buttap,buttord],
'cauer' : [ellipap,ellipord],
'elliptic' : [ellipap,ellipord],
'ellip' : [ellipap,ellipord],
'bessel' : [besselap],
'cheby1' : [cheb1ap, cheb1ord],
'chebyshev1' : [cheb1ap, cheb1ord],
'chebyshevi' : [cheb1ap, cheb1ord],
'cheby2' : [cheb2ap, cheb2ord],
'chebyshev2' : [cheb2ap, cheb2ord],
'chebyshevii' : [cheb2ap, cheb2ord]
}
band_dict = {'band':'bandpass',
'bandpass':'bandpass',
'pass' : 'bandpass',
'bp':'bandpass',
'bs':'bandstop',
'bandstop':'bandstop',
'bands' : 'bandstop',
'stop' : 'bandstop',
'l' : 'lowpass',
'low': 'lowpass',
'lowpass' : 'lowpass',
'high' : 'highpass',
'highpass' : 'highpass',
'h' : 'highpass'
}
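# Illustrative lookups (not in the original source): the aliases above all
# normalize to one of the four canonical band types, e.g.
#
#     band_dict['bp'] == 'bandpass'
#     band_dict['stop'] == 'bandstop'
#     band_dict['l'] == 'lowpass'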
warnings.simplefilter("always", BadCoefficients)
| gpl-3.0 |
arkmaxim/grpc | src/python/grpcio/grpc/_credential_composition.py | 16 | 2109 | # Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from grpc._cython import cygrpc
def _call(call_credentialses):
call_credentials_iterator = iter(call_credentialses)
composition = next(call_credentials_iterator)
for additional_call_credentials in call_credentials_iterator:
composition = cygrpc.call_credentials_composite(
composition, additional_call_credentials)
return composition
def call(call_credentialses):
return _call(call_credentialses)
def channel(channel_credentials, call_credentialses):
return cygrpc.channel_credentials_composite(
channel_credentials, _call(call_credentialses))
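# Illustrative usage sketch (added for clarity; the credential objects are
# assumptions, not defined in this module): composing several call
# credentials onto an existing channel credentials object.
#
#     composite = channel(ssl_channel_credentials,
#                         [call_credentials_a, call_credentials_b])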
| bsd-3-clause |
sadanandb/pmt | src/mako/lexer.py | 21 | 16144 | # lexer.py
# Copyright (C) 2006, 2007, 2008, 2009, 2010 Michael Bayer mike_mp@zzzcomputing.com
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""provides the Lexer class for parsing template strings into parse trees."""
import re, codecs
from mako import parsetree, exceptions, util
from mako.pygen import adjust_whitespace
_regexp_cache = {}
class Lexer(object):
def __init__(self, text, filename=None,
disable_unicode=False,
input_encoding=None, preprocessor=None):
self.text = text
self.filename = filename
self.template = parsetree.TemplateNode(self.filename)
self.matched_lineno = 1
self.matched_charpos = 0
self.lineno = 1
self.match_position = 0
self.tag = []
self.control_line = []
self.disable_unicode = disable_unicode
self.encoding = input_encoding
if util.py3k and disable_unicode:
raise exceptions.UnsupportedError(
"Mako for Python 3 does not "
"support disabling Unicode")
if preprocessor is None:
self.preprocessor = []
elif not hasattr(preprocessor, '__iter__'):
self.preprocessor = [preprocessor]
else:
self.preprocessor = preprocessor
@property
def exception_kwargs(self):
return {'source':self.text,
'lineno':self.matched_lineno,
'pos':self.matched_charpos,
'filename':self.filename}
def match(self, regexp, flags=None):
"""compile the given regexp, cache the reg, and call match_reg()."""
try:
reg = _regexp_cache[(regexp, flags)]
except KeyError:
if flags:
reg = re.compile(regexp, flags)
else:
reg = re.compile(regexp)
_regexp_cache[(regexp, flags)] = reg
return self.match_reg(reg)
def match_reg(self, reg):
"""match the given regular expression object to the current text position.
if a match occurs, update the current text and line position.
"""
mp = self.match_position
match = reg.match(self.text, self.match_position)
if match:
(start, end) = match.span()
if end == start:
self.match_position = end + 1
else:
self.match_position = end
self.matched_lineno = self.lineno
lines = re.findall(r"\n", self.text[mp:self.match_position])
cp = mp - 1
while (cp >= 0 and cp < self.textlength and self.text[cp] != '\n'):
cp -= 1
self.matched_charpos = mp - cp
self.lineno += len(lines)
#print "MATCHED:", match.group(0), "LINE START:",
# self.matched_lineno, "LINE END:", self.lineno
#print "MATCH:", regexp, "\n", self.text[mp : mp + 15], (match and "TRUE" or "FALSE")
return match
def parse_until_text(self, *text):
startpos = self.match_position
while True:
match = self.match(r'#.*\n')
if match:
continue
match = self.match(r'(\"\"\"|\'\'\'|\"|\')')
if match:
m = self.match(r'.*?%s' % match.group(1), re.S)
if not m:
raise exceptions.SyntaxException(
"Unmatched '%s'" %
match.group(1),
**self.exception_kwargs)
else:
match = self.match(r'(%s)' % r'|'.join(text))
if match:
return \
self.text[startpos:self.match_position-len(match.group(1))],\
match.group(1)
else:
match = self.match(r".*?(?=\"|\'|#|%s)" % r'|'.join(text), re.S)
if not match:
raise exceptions.SyntaxException(
"Expected: %s" %
','.join(text),
**self.exception_kwargs)
def append_node(self, nodecls, *args, **kwargs):
kwargs.setdefault('source', self.text)
kwargs.setdefault('lineno', self.matched_lineno)
kwargs.setdefault('pos', self.matched_charpos)
kwargs['filename'] = self.filename
node = nodecls(*args, **kwargs)
if len(self.tag):
self.tag[-1].nodes.append(node)
else:
self.template.nodes.append(node)
if isinstance(node, parsetree.Tag):
if len(self.tag):
node.parent = self.tag[-1]
self.tag.append(node)
elif isinstance(node, parsetree.ControlLine):
if node.isend:
self.control_line.pop()
elif node.is_primary:
self.control_line.append(node)
elif len(self.control_line) and \
not self.control_line[-1].is_ternary(node.keyword):
raise exceptions.SyntaxException(
"Keyword '%s' not a legal ternary for keyword '%s'" %
(node.keyword, self.control_line[-1].keyword),
**self.exception_kwargs)
_coding_re = re.compile(r'#.*coding[:=]\s*([-\w.]+).*\r?\n')
def decode_raw_stream(self, text, decode_raw, known_encoding, filename):
"""given string/unicode or bytes/string, determine encoding
from magic encoding comment, return body as unicode
or raw if decode_raw=False
"""
if isinstance(text, unicode):
m = self._coding_re.match(text)
encoding = m and m.group(1) or known_encoding or 'ascii'
return encoding, text
if text.startswith(codecs.BOM_UTF8):
text = text[len(codecs.BOM_UTF8):]
parsed_encoding = 'utf-8'
m = self._coding_re.match(text.decode('utf-8', 'ignore'))
if m is not None and m.group(1) != 'utf-8':
raise exceptions.CompileException(
"Found utf-8 BOM in file, with conflicting "
"magic encoding comment of '%s'" % m.group(1),
text.decode('utf-8', 'ignore'),
0, 0, filename)
else:
m = self._coding_re.match(text.decode('utf-8', 'ignore'))
if m:
parsed_encoding = m.group(1)
else:
parsed_encoding = known_encoding or 'ascii'
if decode_raw:
try:
text = text.decode(parsed_encoding)
except UnicodeDecodeError, e:
raise exceptions.CompileException(
"Unicode decode operation of encoding '%s' failed" %
parsed_encoding,
text.decode('utf-8', 'ignore'),
0, 0, filename)
return parsed_encoding, text
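# Illustrative behavior sketch (not in the original source): a byte string
# beginning with a magic encoding comment such as
#     ## -*- coding: utf-8 -*-
# would yield ('utf-8', <decoded unicode text>) when decode_raw is True,
# while plain ASCII input without a comment falls back to known_encoding
# or 'ascii'.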
def parse(self):
self.encoding, self.text = self.decode_raw_stream(self.text,
not self.disable_unicode,
self.encoding,
self.filename,)
for preproc in self.preprocessor:
self.text = preproc(self.text)
# push the match marker past the
# encoding comment.
self.match_reg(self._coding_re)
self.textlength = len(self.text)
while (True):
if self.match_position > self.textlength:
break
if self.match_end():
break
if self.match_expression():
continue
if self.match_control_line():
continue
if self.match_comment():
continue
if self.match_tag_start():
continue
if self.match_tag_end():
continue
if self.match_python_block():
continue
if self.match_text():
continue
if self.match_position > self.textlength:
break
raise exceptions.CompileException("assertion failed")
if len(self.tag):
raise exceptions.SyntaxException("Unclosed tag: <%%%s>" %
self.tag[-1].keyword,
**self.exception_kwargs)
if len(self.control_line):
raise exceptions.SyntaxException("Unterminated control keyword: '%s'" %
self.control_line[-1].keyword,
self.text,
self.control_line[-1].lineno,
self.control_line[-1].pos, self.filename)
return self.template
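# Illustrative usage sketch (not in the original source): parse a template
# string into a parse tree; the returned object is a parsetree.TemplateNode
# whose .nodes list holds Text, Expression, Tag, etc.
#
#     tree = Lexer("hello ${name}!").parse()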
def match_tag_start(self):
match = self.match(r'''
\<% # opening tag
([\w\.\:]+) # keyword
((?:\s+\w+|\s*=\s*|".*?"|'.*?')*) # attrname, = sign, string expression
\s* # more whitespace
(/)?> # closing
''',
re.I | re.S | re.X)
if match:
keyword, attr, isend = match.group(1), match.group(2), match.group(3)
self.keyword = keyword
attributes = {}
if attr:
for att in re.findall(r"\s*(\w+)\s*=\s*(?:'([^']*)'|\"([^\"]*)\")", attr):
key, val1, val2 = att
text = val1 or val2
text = text.replace('\r\n', '\n')
attributes[key] = text
self.append_node(parsetree.Tag, keyword, attributes)
if isend:
self.tag.pop()
else:
if keyword == 'text':
match = self.match(r'(.*?)(?=\</%text>)', re.S)
if not match:
raise exceptions.SyntaxException(
"Unclosed tag: <%%%s>" %
self.tag[-1].keyword,
**self.exception_kwargs)
self.append_node(parsetree.Text, match.group(1))
return self.match_tag_end()
return True
else:
return False
def match_tag_end(self):
match = self.match(r'\</%[\t ]*(.+?)[\t ]*>')
if match:
if not len(self.tag):
raise exceptions.SyntaxException(
"Closing tag without opening tag: </%%%s>" %
match.group(1),
**self.exception_kwargs)
elif self.tag[-1].keyword != match.group(1):
raise exceptions.SyntaxException(
"Closing tag </%%%s> does not match tag: <%%%s>" %
(match.group(1), self.tag[-1].keyword),
**self.exception_kwargs)
self.tag.pop()
return True
else:
return False
def match_end(self):
match = self.match(r'\Z', re.S)
if match:
string = match.group()
if string:
return string
else:
return True
else:
return False
def match_text(self):
match = self.match(r"""
(.*?) # anything, followed by:
(
(?<=\n)(?=[ \t]*(?=%|\#\#)) # an eval or line-based
# comment preceded by a
# consumed newline and whitespace
|
(?=\${) # an expression
|
(?=\#\*) # multiline comment
|
(?=</?[%&]) # a substitution or block or call start or end
# - don't consume
|
(\\\r?\n) # an escaped newline - throw away
|
\Z # end of string
)""", re.X | re.S)
if match:
text = match.group(1)
self.append_node(parsetree.Text, text)
return True
else:
return False
def match_python_block(self):
match = self.match(r"<%(!)?")
if match:
line, pos = self.matched_lineno, self.matched_charpos
text, end = self.parse_until_text(r'%>')
# the trailing newline helps
# compiler.parse() not complain about indentation
text = adjust_whitespace(text) + "\n"
self.append_node(
parsetree.Code,
text,
match.group(1)=='!', lineno=line, pos=pos)
return True
else:
return False
def match_expression(self):
match = self.match(r"\${")
if match:
line, pos = self.matched_lineno, self.matched_charpos
text, end = self.parse_until_text(r'\|', r'}')
if end == '|':
escapes, end = self.parse_until_text(r'}')
else:
escapes = ""
text = text.replace('\r\n', '\n')
self.append_node(
parsetree.Expression,
text, escapes.strip(),
lineno=line, pos=pos)
return True
else:
return False
def match_control_line(self):
match = self.match(r"(?<=^)[\t ]*(%(?!%)|##)[\t ]*((?:(?:\\r?\n)|[^\r\n])*)(?:\r?\n|\Z)", re.M)
if match:
operator = match.group(1)
text = match.group(2)
if operator == '%':
m2 = re.match(r'(end)?(\w+)\s*(.*)', text)
if not m2:
raise exceptions.SyntaxException(
"Invalid control line: '%s'" %
text,
**self.exception_kwargs)
isend, keyword = m2.group(1, 2)
isend = (isend is not None)
if isend:
if not len(self.control_line):
raise exceptions.SyntaxException(
"No starting keyword '%s' for '%s'" %
(keyword, text),
**self.exception_kwargs)
elif self.control_line[-1].keyword != keyword:
raise exceptions.SyntaxException(
"Keyword '%s' doesn't match keyword '%s'" %
(text, self.control_line[-1].keyword),
**self.exception_kwargs)
self.append_node(parsetree.ControlLine, keyword, isend, text)
else:
self.append_node(parsetree.Comment, text)
return True
else:
return False
def match_comment(self):
"""matches the multiline version of a comment"""
match = self.match(r"<%doc>(.*?)</%doc>", re.S)
if match:
self.append_node(parsetree.Comment, match.group(1))
return True
else:
return False
| epl-1.0 |
MatthewWilkes/django | django/contrib/gis/gdal/geometries.py | 337 | 24056 | """
The OGRGeometry is a wrapper for using the OGR Geometry class
(see http://www.gdal.org/ogr/classOGRGeometry.html). OGRGeometry
may be instantiated when reading geometries from OGR Data Sources
(e.g. SHP files), or when given OGC WKT (a string).
While the 'full' API is not present yet, the API is "pythonic" unlike
the traditional and "next-generation" OGR Python bindings. One major
advantage OGR Geometries have over their GEOS counterparts is support
for spatial reference systems and their transformation.
Example:
>>> from django.contrib.gis.gdal import OGRGeometry, OGRGeomType, SpatialReference
>>> wkt1, wkt2 = 'POINT(-90 30)', 'POLYGON((0 0, 5 0, 5 5, 0 5))'
>>> pnt = OGRGeometry(wkt1)
>>> print(pnt)
POINT (-90 30)
>>> mpnt = OGRGeometry(OGRGeomType('MultiPoint'), SpatialReference('WGS84'))
>>> mpnt.add(wkt1)
>>> mpnt.add(wkt1)
>>> print(mpnt)
MULTIPOINT (-90 30,-90 30)
>>> print(mpnt.srs.name)
WGS 84
>>> print(mpnt.srs.proj)
+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs
>>> mpnt.transform_to(SpatialReference('NAD27'))
>>> print(mpnt.proj)
+proj=longlat +ellps=clrk66 +datum=NAD27 +no_defs
>>> print(mpnt)
MULTIPOINT (-89.999930378602485 29.999797886557641,-89.999930378602485 29.999797886557641)
The OGRGeomType class is to make it easy to specify an OGR geometry type:
>>> from django.contrib.gis.gdal import OGRGeomType
>>> gt1 = OGRGeomType(3) # Using an integer for the type
>>> gt2 = OGRGeomType('Polygon') # Using a string
>>> gt3 = OGRGeomType('POLYGON') # It's case-insensitive
>>> print(gt1 == 3, gt1 == 'Polygon') # Equivalence works w/non-OGRGeomType objects
True True
"""
import sys
from binascii import a2b_hex, b2a_hex
from ctypes import byref, c_char_p, c_double, c_ubyte, c_void_p, string_at
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.envelope import Envelope, OGREnvelope
from django.contrib.gis.gdal.error import (
GDALException, OGRIndexError, SRSException,
)
from django.contrib.gis.gdal.geomtype import OGRGeomType
from django.contrib.gis.gdal.prototypes import geom as capi, srs as srs_api
from django.contrib.gis.gdal.srs import CoordTransform, SpatialReference
from django.contrib.gis.geometry.regex import hex_regex, json_regex, wkt_regex
from django.utils import six
from django.utils.six.moves import range
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_G_* routines are relevant here.
class OGRGeometry(GDALBase):
"Generally encapsulates an OGR geometry."
def __init__(self, geom_input, srs=None):
"Initializes Geometry on either WKT or an OGR pointer as input."
str_instance = isinstance(geom_input, six.string_types)
# If HEX, unpack input to a binary buffer.
if str_instance and hex_regex.match(geom_input):
geom_input = six.memoryview(a2b_hex(geom_input.upper().encode()))
str_instance = False
# Constructing the geometry,
if str_instance:
wkt_m = wkt_regex.match(geom_input)
json_m = json_regex.match(geom_input)
if wkt_m:
if wkt_m.group('srid'):
# If there's EWKT, set the SRS w/value of the SRID.
srs = int(wkt_m.group('srid'))
if wkt_m.group('type').upper() == 'LINEARRING':
# OGR_G_CreateFromWkt doesn't work with LINEARRING WKT.
# See http://trac.osgeo.org/gdal/ticket/1992.
g = capi.create_geom(OGRGeomType(wkt_m.group('type')).num)
capi.import_wkt(g, byref(c_char_p(wkt_m.group('wkt').encode())))
else:
g = capi.from_wkt(byref(c_char_p(wkt_m.group('wkt').encode())), None, byref(c_void_p()))
elif json_m:
g = capi.from_json(geom_input.encode())
else:
# Seeing if the input is a valid short-hand string
# (e.g., 'Point', 'POLYGON').
OGRGeomType(geom_input)
g = capi.create_geom(OGRGeomType(geom_input).num)
elif isinstance(geom_input, six.memoryview):
# WKB was passed in
g = capi.from_wkb(bytes(geom_input), None, byref(c_void_p()), len(geom_input))
elif isinstance(geom_input, OGRGeomType):
# OGRGeomType was passed in, an empty geometry will be created.
g = capi.create_geom(geom_input.num)
elif isinstance(geom_input, self.ptr_type):
# OGR pointer (c_void_p) was the input.
g = geom_input
else:
raise GDALException('Invalid input type for OGR Geometry construction: %s' % type(geom_input))
# Now checking the Geometry pointer before finishing initialization
# by setting the pointer for the object.
if not g:
raise GDALException('Cannot create OGR Geometry from input: %s' % str(geom_input))
self.ptr = g
# Assigning the SpatialReference object to the geometry, if valid.
if srs:
self.srs = srs
# Setting the class depending upon the OGR Geometry Type
self.__class__ = GEO_CLASSES[self.geom_type.num]
def __del__(self):
"Deletes this Geometry."
if self._ptr and capi:
capi.destroy_geom(self._ptr)
# Pickle routines
def __getstate__(self):
srs = self.srs
if srs:
srs = srs.wkt
else:
srs = None
return bytes(self.wkb), srs
def __setstate__(self, state):
wkb, srs = state
ptr = capi.from_wkb(wkb, None, byref(c_void_p()), len(wkb))
if not ptr:
raise GDALException('Invalid OGRGeometry loaded from pickled state.')
self.ptr = ptr
self.srs = srs
@classmethod
def from_bbox(cls, bbox):
"Constructs a Polygon from a bounding box (4-tuple)."
x0, y0, x1, y1 = bbox
return OGRGeometry('POLYGON((%s %s, %s %s, %s %s, %s %s, %s %s))' % (
x0, y0, x0, y1, x1, y1, x1, y0, x0, y0))
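# Illustrative sketch (not in the original source): build a rectangular
# polygon from a 4-tuple extent.
#
#     poly = OGRGeometry.from_bbox((0.0, 0.0, 1.0, 1.0))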
# ### Geometry set-like operations ###
# g = g1 | g2
def __or__(self, other):
"Returns the union of the two geometries."
return self.union(other)
# g = g1 & g2
def __and__(self, other):
"Returns the intersection of this Geometry and the other."
return self.intersection(other)
# g = g1 - g2
def __sub__(self, other):
"Return the difference this Geometry and the other."
return self.difference(other)
# g = g1 ^ g2
def __xor__(self, other):
"Return the symmetric difference of this Geometry and the other."
return self.sym_difference(other)
def __eq__(self, other):
"Is this Geometry equal to the other?"
if isinstance(other, OGRGeometry):
return self.equals(other)
else:
return False
def __ne__(self, other):
"Tests for inequality."
return not (self == other)
def __str__(self):
"WKT is used for the string representation."
return self.wkt
# #### Geometry Properties ####
@property
def dimension(self):
"Returns 0 for points, 1 for lines, and 2 for surfaces."
return capi.get_dims(self.ptr)
def _get_coord_dim(self):
"Returns the coordinate dimension of the Geometry."
return capi.get_coord_dim(self.ptr)
def _set_coord_dim(self, dim):
"Sets the coordinate dimension of this Geometry."
if dim not in (2, 3):
raise ValueError('Geometry dimension must be either 2 or 3')
capi.set_coord_dim(self.ptr, dim)
coord_dim = property(_get_coord_dim, _set_coord_dim)
@property
def geom_count(self):
"The number of elements in this Geometry."
return capi.get_geom_count(self.ptr)
@property
def point_count(self):
"Returns the number of Points in this Geometry."
return capi.get_point_count(self.ptr)
@property
def num_points(self):
"Alias for `point_count` (same name method in GEOS API.)"
return self.point_count
@property
def num_coords(self):
"Alais for `point_count`."
return self.point_count
@property
def geom_type(self):
"Returns the Type for this Geometry."
return OGRGeomType(capi.get_geom_type(self.ptr))
@property
def geom_name(self):
"Returns the Name of this Geometry."
return capi.get_geom_name(self.ptr)
@property
def area(self):
"Returns the area for a LinearRing, Polygon, or MultiPolygon; 0 otherwise."
return capi.get_area(self.ptr)
@property
def envelope(self):
"Returns the envelope for this Geometry."
# TODO: Fix Envelope() for Point geometries.
return Envelope(capi.get_envelope(self.ptr, byref(OGREnvelope())))
@property
def extent(self):
"Returns the envelope as a 4-tuple, instead of as an Envelope object."
return self.envelope.tuple
# #### SpatialReference-related Properties ####
# The SRS property
def _get_srs(self):
"Returns the Spatial Reference for this Geometry."
try:
srs_ptr = capi.get_geom_srs(self.ptr)
return SpatialReference(srs_api.clone_srs(srs_ptr))
except SRSException:
return None
def _set_srs(self, srs):
"Sets the SpatialReference for this geometry."
# We do not have to clone the `SpatialReference` object pointer because
# when it is assigned to this `OGRGeometry`, its internal OGR
# reference count is incremented, and it will likewise be released
# (decremented) when this geometry's destructor is called.
if isinstance(srs, SpatialReference):
srs_ptr = srs.ptr
elif isinstance(srs, six.integer_types + six.string_types):
sr = SpatialReference(srs)
srs_ptr = sr.ptr
else:
raise TypeError('Cannot assign spatial reference with object of type: %s' % type(srs))
capi.assign_srs(self.ptr, srs_ptr)
srs = property(_get_srs, _set_srs)
# The SRID property
def _get_srid(self):
srs = self.srs
if srs:
return srs.srid
return None
def _set_srid(self, srid):
if isinstance(srid, six.integer_types):
self.srs = srid
else:
raise TypeError('SRID must be set with an integer.')
srid = property(_get_srid, _set_srid)
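# Illustrative sketch (not part of the upstream module): the `srs` setter
# accepts a SpatialReference, a WKT/PROJ.4 string, or an integer SRID, and
# `srid` is derived from whatever was assigned. Assumes GDAL ships EPSG data.
#
#   pnt = OGRGeometry('POINT(5 23)')
#   pnt.srid = 4326      # equivalent to pnt.srs = 4326
#   pnt.srid             # 4326
#   pnt.srs.name         # e.g. 'WGS 84', depending on the GDAL EPSG tables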
# #### Output Methods ####
@property
def geos(self):
"Returns a GEOSGeometry object from this OGRGeometry."
from django.contrib.gis.geos import GEOSGeometry
return GEOSGeometry(self.wkb, self.srid)
@property
def gml(self):
"Returns the GML representation of the Geometry."
return capi.to_gml(self.ptr)
@property
def hex(self):
"Returns the hexadecimal representation of the WKB (a string)."
return b2a_hex(self.wkb).upper()
@property
def json(self):
"""
Returns the GeoJSON representation of this Geometry.
"""
return capi.to_json(self.ptr)
geojson = json
@property
def kml(self):
"Returns the KML representation of the Geometry."
return capi.to_kml(self.ptr, None)
@property
def wkb_size(self):
"Returns the size of the WKB buffer."
return capi.get_wkbsize(self.ptr)
@property
def wkb(self):
"Returns the WKB representation of the Geometry."
if sys.byteorder == 'little':
byteorder = 1 # wkbNDR (from ogr_core.h)
else:
byteorder = 0 # wkbXDR
sz = self.wkb_size
# Creating the unsigned character buffer, and passing it in by reference.
buf = (c_ubyte * sz)()
capi.to_wkb(self.ptr, byteorder, byref(buf))
# Returning a buffer of the string at the pointer.
return six.memoryview(string_at(buf, sz))
@property
def wkt(self):
"Returns the WKT representation of the Geometry."
return capi.to_wkt(self.ptr, byref(c_char_p()))
@property
def ewkt(self):
"Returns the EWKT representation of the Geometry."
srs = self.srs
if srs and srs.srid:
return 'SRID=%s;%s' % (srs.srid, self.wkt)
else:
return self.wkt
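# Illustrative sketch (not part of the upstream module): one geometry,
# several serializations. Whitespace and precision depend on the GDAL
# version in use.
#
#   pnt = OGRGeometry('POINT(1 2)')
#   pnt.wkt     # 'POINT (1 2)'
#   pnt.json    # '{ "type": "Point", "coordinates": [ 1.0, 2.0 ] }'
#   pnt.hex     # uppercase hex-encoded WKB
#   pnt.srid = 4326
#   pnt.ewkt    # 'SRID=4326;POINT (1 2)'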
# #### Geometry Methods ####
def clone(self):
"Clones this OGR Geometry."
return OGRGeometry(capi.clone_geom(self.ptr), self.srs)
def close_rings(self):
"""
If there are any rings within this geometry that have not been
closed, this routine will do so by adding the starting point at the
end.
"""
# Closing the open rings.
capi.geom_close_rings(self.ptr)
def transform(self, coord_trans, clone=False):
"""
Transforms this geometry to a different spatial reference system.
May take a CoordTransform object, a SpatialReference object, string
WKT or PROJ.4, and/or an integer SRID. By default nothing is returned
and the geometry is transformed in-place. However, if the `clone`
keyword is set, then a transformed clone of this geometry will be
returned.
"""
if clone:
klone = self.clone()
klone.transform(coord_trans)
return klone
# Depending on the input type, use the appropriate OGR routine
# to perform the transformation.
if isinstance(coord_trans, CoordTransform):
capi.geom_transform(self.ptr, coord_trans.ptr)
elif isinstance(coord_trans, SpatialReference):
capi.geom_transform_to(self.ptr, coord_trans.ptr)
elif isinstance(coord_trans, six.integer_types + six.string_types):
sr = SpatialReference(coord_trans)
capi.geom_transform_to(self.ptr, sr.ptr)
else:
raise TypeError('Transform only accepts CoordTransform, '
'SpatialReference, string, and integer objects.')
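# Illustrative sketch (not part of the upstream module): in-place transform
# versus a transformed clone. Assumes GDAL knows the EPSG codes used here.
#
#   pnt = OGRGeometry('POINT(-104 38)', 4326)
#   merc = pnt.transform(3857, clone=True)   # pnt itself is untouched
#   pnt.transform(3857)                      # transforms pnt in place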
def transform_to(self, srs):
"For backwards-compatibility."
self.transform(srs)
# #### Topology Methods ####
def _topology(self, func, other):
"""A generalized function for topology operations, takes a GDAL function and
the other geometry to perform the operation on."""
if not isinstance(other, OGRGeometry):
raise TypeError('Must use another OGRGeometry object for topology operations!')
# Returning the output of the given function with the other geometry's
# pointer.
return func(self.ptr, other.ptr)
def intersects(self, other):
"Returns True if this geometry intersects with the other."
return self._topology(capi.ogr_intersects, other)
def equals(self, other):
"Returns True if this geometry is equivalent to the other."
return self._topology(capi.ogr_equals, other)
def disjoint(self, other):
"Returns True if this geometry and the other are spatially disjoint."
return self._topology(capi.ogr_disjoint, other)
def touches(self, other):
"Returns True if this geometry touches the other."
return self._topology(capi.ogr_touches, other)
def crosses(self, other):
"Returns True if this geometry crosses the other."
return self._topology(capi.ogr_crosses, other)
def within(self, other):
"Returns True if this geometry is within the other."
return self._topology(capi.ogr_within, other)
def contains(self, other):
"Returns True if this geometry contains the other."
return self._topology(capi.ogr_contains, other)
def overlaps(self, other):
"Returns True if this geometry overlaps the other."
return self._topology(capi.ogr_overlaps, other)
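# Illustrative sketch (not part of the upstream module): the predicates
# above only accept other OGRGeometry instances.
#
#   box = OGRGeometry.from_bbox((0, 0, 2, 2))
#   pnt = OGRGeometry('POINT(1 1)')
#   box.contains(pnt)           # True
#   pnt.within(box)             # True
#   box.disjoint(pnt)           # False
#   box.contains('POINT(1 1)')  # raises TypeError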
# #### Geometry-generation Methods ####
def _geomgen(self, gen_func, other=None):
"A helper routine for the OGR routines that generate geometries."
if isinstance(other, OGRGeometry):
return OGRGeometry(gen_func(self.ptr, other.ptr), self.srs)
else:
return OGRGeometry(gen_func(self.ptr), self.srs)
@property
def boundary(self):
"Returns the boundary of this geometry."
return self._geomgen(capi.get_boundary)
@property
def convex_hull(self):
"""
Returns the smallest convex Polygon that contains all the points in
this Geometry.
"""
return self._geomgen(capi.geom_convex_hull)
def difference(self, other):
"""
Returns a new geometry consisting of the region which is the difference
of this geometry and the other.
"""
return self._geomgen(capi.geom_diff, other)
def intersection(self, other):
"""
Returns a new geometry consisting of the region of intersection of this
geometry and the other.
"""
return self._geomgen(capi.geom_intersection, other)
def sym_difference(self, other):
"""
Returns a new geometry which is the symmetric difference of this
geometry and the other.
"""
return self._geomgen(capi.geom_sym_diff, other)
def union(self, other):
"""
Returns a new geometry consisting of the region which is the union of
this geometry and the other.
"""
return self._geomgen(capi.geom_union, other)
# The subclasses for OGR Geometry.
class Point(OGRGeometry):
@property
def x(self):
"Returns the X coordinate for this Point."
return capi.getx(self.ptr, 0)
@property
def y(self):
"Returns the Y coordinate for this Point."
return capi.gety(self.ptr, 0)
@property
def z(self):
"Returns the Z coordinate for this Point."
if self.coord_dim == 3:
return capi.getz(self.ptr, 0)
@property
def tuple(self):
"Returns the tuple of this point."
if self.coord_dim == 2:
return (self.x, self.y)
elif self.coord_dim == 3:
return (self.x, self.y, self.z)
coords = tuple
class LineString(OGRGeometry):
def __getitem__(self, index):
"Returns the Point at the given index."
if index >= 0 and index < self.point_count:
x, y, z = c_double(), c_double(), c_double()
capi.get_point(self.ptr, index, byref(x), byref(y), byref(z))
dim = self.coord_dim
if dim == 1:
return (x.value,)
elif dim == 2:
return (x.value, y.value)
elif dim == 3:
return (x.value, y.value, z.value)
else:
raise OGRIndexError('index out of range: %s' % str(index))
def __iter__(self):
"Iterates over each point in the LineString."
for i in range(self.point_count):
yield self[i]
def __len__(self):
"The length returns the number of points in the LineString."
return self.point_count
@property
def tuple(self):
"Returns the tuple representation of this LineString."
return tuple(self[i] for i in range(len(self)))
coords = tuple
def _listarr(self, func):
"""
Internal routine that returns a sequence (list) corresponding with
the given function.
"""
return [func(self.ptr, i) for i in range(len(self))]
@property
def x(self):
"Returns the X coordinates in a list."
return self._listarr(capi.getx)
@property
def y(self):
"Returns the Y coordinates in a list."
return self._listarr(capi.gety)
@property
def z(self):
"Returns the Z coordinates in a list."
if self.coord_dim == 3:
return self._listarr(capi.getz)
# LinearRings are used in Polygons.
class LinearRing(LineString):
pass
class Polygon(OGRGeometry):
def __len__(self):
"The number of interior rings in this Polygon."
return self.geom_count
def __iter__(self):
"Iterates through each ring in the Polygon."
for i in range(self.geom_count):
yield self[i]
def __getitem__(self, index):
"Gets the ring at the specified index."
if index < 0 or index >= self.geom_count:
raise OGRIndexError('index out of range: %s' % index)
else:
return OGRGeometry(capi.clone_geom(capi.get_geom_ref(self.ptr, index)), self.srs)
# Polygon Properties
@property
def shell(self):
"Returns the shell of this Polygon."
return self[0] # First ring is the shell
exterior_ring = shell
@property
def tuple(self):
"Returns a tuple of LinearRing coordinate tuples."
return tuple(self[i].tuple for i in range(self.geom_count))
coords = tuple
@property
def point_count(self):
"The number of Points in this Polygon."
# Summing up the number of points in each ring of the Polygon.
return sum(self[i].point_count for i in range(self.geom_count))
@property
def centroid(self):
"Returns the centroid (a Point) of this Polygon."
# The centroid is a Point, create a geometry for this.
p = OGRGeometry(OGRGeomType('Point'))
capi.get_centroid(self.ptr, p.ptr)
return p
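# Illustrative sketch (not part of the upstream module): ring access and
# the centroid of a Polygon built from a bounding box.
#
#   poly = OGRGeometry.from_bbox((0, 0, 1, 1))
#   len(poly)            # 1 (only the exterior ring)
#   poly.shell.tuple     # the ring's coordinate tuples
#   poly.centroid.tuple  # (0.5, 0.5)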
# Geometry Collection base class.
class GeometryCollection(OGRGeometry):
"The Geometry Collection class."
def __getitem__(self, index):
"Gets the Geometry at the specified index."
if index < 0 or index >= self.geom_count:
raise OGRIndexError('index out of range: %s' % index)
else:
return OGRGeometry(capi.clone_geom(capi.get_geom_ref(self.ptr, index)), self.srs)
def __iter__(self):
"Iterates over each Geometry."
for i in range(self.geom_count):
yield self[i]
def __len__(self):
"The number of geometries in this Geometry Collection."
return self.geom_count
def add(self, geom):
"Add the geometry to this Geometry Collection."
if isinstance(geom, OGRGeometry):
if isinstance(geom, self.__class__):
for g in geom:
capi.add_geom(self.ptr, g.ptr)
else:
capi.add_geom(self.ptr, geom.ptr)
elif isinstance(geom, six.string_types):
tmp = OGRGeometry(geom)
capi.add_geom(self.ptr, tmp.ptr)
else:
raise GDALException('Must add an OGRGeometry.')
@property
def point_count(self):
"The number of Points in this Geometry Collection."
# Summing up the number of points in each geometry in this collection
return sum(self[i].point_count for i in range(self.geom_count))
@property
def tuple(self):
"Returns a tuple representation of this Geometry Collection."
return tuple(self[i].tuple for i in range(self.geom_count))
coords = tuple
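# Illustrative sketch (not part of the upstream module): `add` accepts
# either an OGRGeometry or a WKT string.
#
#   mp = OGRGeometry('MULTIPOINT(0 0)')
#   mp.add('POINT(1 1)')
#   len(mp)          # 2
#   mp.point_count   # 2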
# Multiple Geometry types.
class MultiPoint(GeometryCollection):
pass
class MultiLineString(GeometryCollection):
pass
class MultiPolygon(GeometryCollection):
pass
# Class mapping dictionary (using the OGRwkbGeometryType as the key)
GEO_CLASSES = {1: Point,
2: LineString,
3: Polygon,
4: MultiPoint,
5: MultiLineString,
6: MultiPolygon,
7: GeometryCollection,
101: LinearRing,
1 + OGRGeomType.wkb25bit: Point,
2 + OGRGeomType.wkb25bit: LineString,
3 + OGRGeomType.wkb25bit: Polygon,
4 + OGRGeomType.wkb25bit: MultiPoint,
5 + OGRGeomType.wkb25bit: MultiLineString,
6 + OGRGeomType.wkb25bit: MultiPolygon,
7 + OGRGeomType.wkb25bit: GeometryCollection,
}
| bsd-3-clause |
sebrandon1/nova | nova/cmd/manage.py | 1 | 64987 | # Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Interactive shell based on Django:
#
# Copyright (c) 2005, the Lawrence Journal-World
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Django nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
CLI interface for nova management.
"""
from __future__ import print_function
import functools
import os
import sys
import decorator
import netaddr
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_utils import importutils
from oslo_utils import uuidutils
import prettytable
import six
import six.moves.urllib.parse as urlparse
from nova.api.ec2 import ec2utils
from nova import availability_zones
from nova.cmd import common as cmd_common
import nova.conf
from nova import config
from nova import context
from nova import db
from nova.db import migration
from nova import exception
from nova.i18n import _
from nova import objects
from nova.objects import aggregate as aggregate_obj
from nova.objects import flavor as flavor_obj
from nova.objects import instance as instance_obj
from nova.objects import instance_group as instance_group_obj
from nova.objects import keypair as keypair_obj
from nova.objects import request_spec
from nova import quota
from nova import rpc
from nova import utils
from nova import version
CONF = nova.conf.CONF
QUOTAS = quota.QUOTAS
_EXTRA_DEFAULT_LOG_LEVELS = ['oslo_db=INFO']
# Decorators for actions
args = cmd_common.args
def param2id(object_id):
"""Helper function to convert various volume id types to internal id.
args: [object_id], e.g. 'vol-0000000a' or 'volume-0000000a' or '10'
"""
if '-' in object_id:
return ec2utils.ec2_vol_id_to_uuid(object_id)
else:
return object_id
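# Illustrative sketch (not part of this file): ids without a dash pass
# through unchanged, while EC2-style ids are resolved through the database
# (so a configured nova DB is assumed).
#
#   param2id('10')            # -> '10'
#   param2id('vol-0000000a')  # -> the volume's UUID via ec2utils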
class VpnCommands(object):
"""Class for managing VPNs."""
description = ('DEPRECATED: VPN commands are deprecated since '
'nova-network is deprecated in favor of Neutron. The '
'VPN commands will be removed in the Nova 15.0.0 '
'Ocata release.')
@args('--project', dest='project_id', metavar='<Project name>',
help='Project name')
@args('--ip', metavar='<IP Address>', help='IP Address')
@args('--port', metavar='<Port>', help='Port')
def change(self, project_id, ip, port):
"""Change the IP and port for a VPN.
This will update all networks associated with a project
not sure if that's the desired behavior or not, patches accepted.
"""
# TODO(tr3buchet): perhaps this shouldn't update all networks
# associated with a project in the future
admin_context = context.get_admin_context()
networks = db.project_get_networks(admin_context, project_id)
for network in networks:
db.network_update(admin_context,
network['id'],
{'vpn_public_address': ip,
'vpn_public_port': int(port)})
class ShellCommands(object):
def bpython(self):
"""Runs a bpython shell.
Falls back to Ipython/python shell if unavailable
"""
self.run('bpython')
def ipython(self):
"""Runs an Ipython shell.
Falls back to Python shell if unavailable
"""
self.run('ipython')
def python(self):
"""Runs a python shell.
Falls back to Python shell if unavailable
"""
self.run('python')
@args('--shell', metavar='<bpython|ipython|python >',
help='Python shell')
def run(self, shell=None):
"""Runs a Python interactive interpreter."""
if not shell:
shell = 'bpython'
if shell == 'bpython':
try:
import bpython
bpython.embed()
except ImportError:
shell = 'ipython'
if shell == 'ipython':
try:
from IPython import embed
embed()
except ImportError:
try:
# Ipython < 0.11
# Explicitly pass an empty list as arguments, because
# otherwise IPython would use sys.argv from this script.
import IPython
shell = IPython.Shell.IPShell(argv=[])
shell.mainloop()
except ImportError:
# no IPython module
shell = 'python'
if shell == 'python':
import code
try:
# Try activating rlcompleter, because it's handy.
import readline
except ImportError:
pass
else:
# We don't have to wrap the following import in a 'try',
# because we already know 'readline' was imported successfully.
readline.parse_and_bind("tab:complete")
code.interact()
@args('--path', metavar='<path>', help='Script path')
def script(self, path):
"""Runs the script from the specified path with flags set properly.
arguments: path
"""
exec(compile(open(path).read(), path, 'exec'), locals(), globals())
def _db_error(caught_exception):
print(caught_exception)
print(_("The above error may show that the database has not "
"been created.\nPlease create a database using "
"'nova-manage db sync' before running this command."))
sys.exit(1)
class ProjectCommands(object):
"""Class for managing projects."""
@args('--project', dest='project_id', metavar='<Project name>',
help='Project name')
@args('--user', dest='user_id', metavar='<User name>',
help='User name')
@args('--key', metavar='<key>', help='Key')
@args('--value', metavar='<value>', help='Value')
def quota(self, project_id, user_id=None, key=None, value=None):
"""Create, update or display quotas for project/user
If no quota key is provided, the quota will be displayed.
If a valid quota key is provided and it does not exist,
it will be created. Otherwise, it will be updated.
"""
ctxt = context.get_admin_context()
if user_id:
quota = QUOTAS.get_user_quotas(ctxt, project_id, user_id)
else:
user_id = None
quota = QUOTAS.get_project_quotas(ctxt, project_id)
# if key is None, that means we need to show the quotas instead
# of updating them
if key:
settable_quotas = QUOTAS.get_settable_quotas(ctxt,
project_id,
user_id=user_id)
if key in quota:
minimum = settable_quotas[key]['minimum']
maximum = settable_quotas[key]['maximum']
if value.lower() == 'unlimited':
value = -1
if int(value) < -1:
print(_('Quota limit must be -1 or greater.'))
return(2)
if ((int(value) < minimum) and
(maximum != -1 or (maximum == -1 and int(value) != -1))):
print(_('Quota limit must be greater than %s.') % minimum)
return(2)
if maximum != -1 and int(value) > maximum:
print(_('Quota limit must be less than %s.') % maximum)
return(2)
try:
db.quota_create(ctxt, project_id, key, value,
user_id=user_id)
except exception.QuotaExists:
db.quota_update(ctxt, project_id, key, value,
user_id=user_id)
else:
print(_('%(key)s is not a valid quota key. Valid options are: '
'%(options)s.') % {'key': key,
'options': ', '.join(quota)})
return(2)
print_format = "%-36s %-10s %-10s %-10s"
print(print_format % (
_('Quota'),
_('Limit'),
_('In Use'),
_('Reserved')))
# Retrieve the quota after update
if user_id:
quota = QUOTAS.get_user_quotas(ctxt, project_id, user_id)
else:
quota = QUOTAS.get_project_quotas(ctxt, project_id)
for key, value in six.iteritems(quota):
if value['limit'] is None or value['limit'] < 0:
value['limit'] = 'unlimited'
print(print_format % (key, value['limit'], value['in_use'],
value['reserved']))
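# Illustrative CLI sketch (category/command names follow the CATEGORIES
# mapping at the bottom of this file; the project and key are examples):
#
#   nova-manage project quota --project demo                          # show
#   nova-manage project quota --project demo --key instances --value 20
#   nova-manage project quota --project demo --key instances --value unlimited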
@args('--project', dest='project_id', metavar='<Project Id>',
help='Project Id', required=True)
@args('--user', dest='user_id', metavar='<User Id>',
help='User Id')
@args('--key', metavar='<key>', help='Key')
def quota_usage_refresh(self, project_id, user_id=None, key=None):
"""Refresh the quotas for project/user
If no quota key is provided, all the quota usages will be refreshed.
If a valid quota key is provided and it does not exist,
it will be created. Otherwise, it will be refreshed.
"""
ctxt = context.get_admin_context()
keys = None
if key:
keys = [key]
try:
QUOTAS.usage_refresh(ctxt, project_id, user_id, keys)
except exception.QuotaUsageRefreshNotAllowed as e:
print(e.format_message())
return 2
@args('--project', dest='project_id', metavar='<Project name>',
help='Project name')
def scrub(self, project_id):
"""DEPRECATED: Deletes network data associated with project.
This command is only for nova-network deployments and nova-network is
deprecated in favor of Neutron. This command will be removed in the
Nova 15.0.0 Ocata release.
"""
admin_context = context.get_admin_context()
networks = db.project_get_networks(admin_context, project_id)
for network in networks:
db.network_disassociate(admin_context, network['id'])
groups = db.security_group_get_by_project(admin_context, project_id)
for group in groups:
db.security_group_destroy(admin_context, group['id'])
AccountCommands = ProjectCommands
class FixedIpCommands(object):
"""Class for managing fixed IP."""
description = ('DEPRECATED: Fixed IP commands are deprecated since '
'nova-network is deprecated in favor of Neutron. The '
'fixed IP commands will be removed in the Nova 15.0.0 '
'Ocata release.')
@args('--host', metavar='<host>', help='Host')
def list(self, host=None):
"""Lists all fixed IPs (optionally by host)."""
ctxt = context.get_admin_context()
try:
if host is None:
fixed_ips = db.fixed_ip_get_all(ctxt)
else:
fixed_ips = db.fixed_ip_get_by_host(ctxt, host)
except exception.NotFound as ex:
print(_("error: %s") % ex)
return(2)
instances = db.instance_get_all(context.get_admin_context())
instances_by_uuid = {}
for instance in instances:
instances_by_uuid[instance['uuid']] = instance
print("%-18s\t%-15s\t%-15s\t%s" % (_('network'),
_('IP address'),
_('hostname'),
_('host')))
all_networks = {}
try:
# use network_get_all to retrieve all existing networks
# this is to ensure that IPs associated with deleted networks
# will not throw exceptions.
for network in db.network_get_all(context.get_admin_context()):
all_networks[network.id] = network
except exception.NoNetworksFound:
# do not have any networks, so even if there are IPs, these
# IPs should have been deleted ones, so return.
print(_('No fixed IP found.'))
return
has_ip = False
for fixed_ip in fixed_ips:
hostname = None
host = None
network = all_networks.get(fixed_ip['network_id'])
if network:
has_ip = True
if fixed_ip.get('instance_uuid'):
instance = instances_by_uuid.get(fixed_ip['instance_uuid'])
if instance:
hostname = instance['hostname']
host = instance['host']
else:
print(_('WARNING: fixed IP %s allocated to missing'
' instance') % str(fixed_ip['address']))
print("%-18s\t%-15s\t%-15s\t%s" % (
network['cidr'],
fixed_ip['address'],
hostname, host))
if not has_ip:
print(_('No fixed IP found.'))
@args('--address', metavar='<ip address>', help='IP address')
def reserve(self, address):
"""Mark fixed IP as reserved
arguments: address
"""
return self._set_reserved(address, True)
@args('--address', metavar='<ip address>', help='IP address')
def unreserve(self, address):
"""Mark fixed IP as free to use
arguments: address
"""
return self._set_reserved(address, False)
def _set_reserved(self, address, reserved):
ctxt = context.get_admin_context()
try:
fixed_ip = db.fixed_ip_get_by_address(ctxt, address)
if fixed_ip is None:
raise exception.NotFound('Could not find address')
db.fixed_ip_update(ctxt, fixed_ip['address'],
{'reserved': reserved})
except exception.NotFound as ex:
print(_("error: %s") % ex)
return(2)
class FloatingIpCommands(object):
"""Class for managing floating IP."""
description = ('DEPRECATED: Floating IP commands are deprecated since '
'nova-network is deprecated in favor of Neutron. The '
'floating IP commands will be removed in the Nova 15.0.0 '
'Ocata release.')
@staticmethod
def address_to_hosts(addresses):
"""Iterate over hosts within an address range.
If an explicit range specifier is missing, the parameter is
interpreted as a specific individual address.
"""
try:
return [netaddr.IPAddress(addresses)]
except ValueError:
net = netaddr.IPNetwork(addresses)
if net.size < 4:
reason = _("/%s should be specified as single address(es) "
"not in cidr format") % net.prefixlen
raise exception.InvalidInput(reason=reason)
elif net.size >= 1000000:
# NOTE(dripton): If we generate a million IPs and put them in
# the database, the system will slow to a crawl and/or run
# out of memory and crash. This is clearly a misconfiguration.
reason = _("Too many IP addresses will be generated. Please "
"increase /%s to reduce the number generated."
) % net.prefixlen
raise exception.InvalidInput(reason=reason)
else:
return net.iter_hosts()
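# Illustrative sketch (not part of this file): a bare address yields a
# single-element list, while a CIDR expands to its usable hosts.
#
#   FloatingIpCommands.address_to_hosts('192.168.0.1')
#   # -> [IPAddress('192.168.0.1')]
#   list(FloatingIpCommands.address_to_hosts('192.168.0.0/30'))
#   # -> [IPAddress('192.168.0.1'), IPAddress('192.168.0.2')]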
@args('--ip_range', metavar='<range>', help='IP range')
@args('--pool', metavar='<pool>', help='Optional pool')
@args('--interface', metavar='<interface>', help='Optional interface')
def create(self, ip_range, pool=None, interface=None):
"""Creates floating IPs for zone by range."""
admin_context = context.get_admin_context()
if not pool:
pool = CONF.default_floating_pool
if not interface:
interface = CONF.public_interface
ips = [{'address': str(address), 'pool': pool, 'interface': interface}
for address in self.address_to_hosts(ip_range)]
try:
db.floating_ip_bulk_create(admin_context, ips, want_result=False)
except exception.FloatingIpExists as exc:
# NOTE(simplylizz): Maybe logging would be better here
# instead of printing, but logging isn't used here and I
# don't know why.
print('error: %s' % exc)
return(1)
@args('--ip_range', metavar='<range>', help='IP range')
def delete(self, ip_range):
"""Deletes floating IPs by range."""
admin_context = context.get_admin_context()
ips = ({'address': str(address)}
for address in self.address_to_hosts(ip_range))
db.floating_ip_bulk_destroy(admin_context, ips)
@args('--host', metavar='<host>', help='Host')
def list(self, host=None):
"""Lists all floating IPs (optionally by host).
Note: if host is given, only active floating IPs are returned
"""
ctxt = context.get_admin_context()
try:
if host is None:
floating_ips = db.floating_ip_get_all(ctxt)
else:
floating_ips = db.floating_ip_get_all_by_host(ctxt, host)
except exception.NoFloatingIpsDefined:
print(_("No floating IP addresses have been defined."))
return
for floating_ip in floating_ips:
instance_uuid = None
if floating_ip['fixed_ip_id']:
fixed_ip = db.fixed_ip_get(ctxt, floating_ip['fixed_ip_id'])
instance_uuid = fixed_ip['instance_uuid']
print("%s\t%s\t%s\t%s\t%s" % (floating_ip['project_id'],
floating_ip['address'],
instance_uuid,
floating_ip['pool'],
floating_ip['interface']))
@decorator.decorator
def validate_network_plugin(f, *args, **kwargs):
"""Decorator to validate the network plugin."""
if utils.is_neutron():
print(_("ERROR: Network commands are not supported when using the "
"Neutron API. Use python-neutronclient instead."))
return(2)
return f(*args, **kwargs)
class NetworkCommands(object):
"""Class for managing networks."""
description = ('DEPRECATED: Network commands are deprecated since '
'nova-network is deprecated in favor of Neutron. The '
'network commands will be removed in the Nova 15.0.0 Ocata '
'release.')
@validate_network_plugin
@args('--label', metavar='<label>', help='Label for network (ex: public)')
@args('--fixed_range_v4', dest='cidr', metavar='<x.x.x.x/yy>',
help='IPv4 subnet (ex: 10.0.0.0/8)')
@args('--num_networks', metavar='<number>',
help='Number of networks to create')
@args('--network_size', metavar='<number>',
help='Number of IPs per network')
@args('--vlan', metavar='<vlan id>', help='vlan id')
@args('--vlan_start', dest='vlan_start', metavar='<vlan start id>',
help='vlan start id')
@args('--vpn', dest='vpn_start', help='vpn start')
@args('--fixed_range_v6', dest='cidr_v6',
help='IPv6 subnet (ex: fe80::/64)')
@args('--gateway', help='gateway')
@args('--gateway_v6', help='ipv6 gateway')
@args('--bridge', metavar='<bridge>',
help='VIFs on this network are connected to this bridge')
@args('--bridge_interface', metavar='<bridge interface>',
help='the bridge is connected to this interface')
@args('--multi_host', metavar="<'T'|'F'>",
help='Multi host')
@args('--dns1', metavar="<DNS Address>", help='First DNS')
@args('--dns2', metavar="<DNS Address>", help='Second DNS')
@args('--uuid', metavar="<network uuid>", help='Network UUID')
@args('--fixed_cidr', metavar='<x.x.x.x/yy>',
help='IPv4 subnet for fixed IPs (ex: 10.20.0.0/16)')
@args('--project_id', metavar="<project id>",
help='Project id')
@args('--priority', metavar="<number>", help='Network interface priority')
def create(self, label=None, cidr=None, num_networks=None,
network_size=None, multi_host=None, vlan=None,
vlan_start=None, vpn_start=None, cidr_v6=None, gateway=None,
gateway_v6=None, bridge=None, bridge_interface=None,
dns1=None, dns2=None, project_id=None, priority=None,
uuid=None, fixed_cidr=None):
"""Creates fixed IPs for host by range."""
# NOTE(gmann): These checks are moved here as API layer does all these
# validation through JSON schema.
if not label:
raise exception.NetworkNotCreated(req="label")
if len(label) > 255:
raise exception.LabelTooLong()
if not (cidr or cidr_v6):
raise exception.NetworkNotCreated(req="cidr or cidr_v6")
kwargs = {k: v for k, v in six.iteritems(locals())
if v and k != "self"}
if multi_host is not None:
kwargs['multi_host'] = multi_host == 'T'
net_manager = importutils.import_object(CONF.network_manager)
net_manager.create_networks(context.get_admin_context(), **kwargs)
@validate_network_plugin
def list(self):
"""List all created networks."""
_fmt = "%-5s\t%-18s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s"
print(_fmt % (_('id'),
_('IPv4'),
_('IPv6'),
_('start address'),
_('DNS1'),
_('DNS2'),
_('VlanID'),
_('project'),
_("uuid")))
try:
# Since network_get_all can throw exception.NoNetworksFound
# for this command to show a nice result, this exception
# should be caught and handled as such.
networks = db.network_get_all(context.get_admin_context())
except exception.NoNetworksFound:
print(_('No networks found'))
else:
for network in networks:
print(_fmt % (network.id,
network.cidr,
network.cidr_v6,
network.dhcp_start,
network.dns1,
network.dns2,
network.vlan,
network.project_id,
network.uuid))
@validate_network_plugin
@args('--fixed_range', metavar='<x.x.x.x/yy>', help='Network to delete')
@args('--uuid', metavar='<uuid>', help='UUID of network to delete')
def delete(self, fixed_range=None, uuid=None):
"""Deletes a network."""
if fixed_range is None and uuid is None:
raise Exception(_("Please specify either fixed_range or uuid"))
net_manager = importutils.import_object(CONF.network_manager)
if "NeutronManager" in CONF.network_manager:
if uuid is None:
raise Exception(_("UUID is required to delete "
"Neutron Networks"))
if fixed_range:
raise Exception(_("Deleting by fixed_range is not supported "
"with the NeutronManager"))
# delete the network
net_manager.delete_network(context.get_admin_context(),
fixed_range, uuid)
@validate_network_plugin
@args('--fixed_range', metavar='<x.x.x.x/yy>', help='Network to modify')
@args('--project', metavar='<project name>',
help='Project name to associate')
@args('--host', metavar='<host>', help='Host to associate')
@args('--disassociate-project', action="store_true", dest='dis_project',
default=False, help='Disassociate Network from Project')
@args('--disassociate-host', action="store_true", dest='dis_host',
default=False, help='Disassociate Host from Project')
def modify(self, fixed_range, project=None, host=None,
dis_project=None, dis_host=None):
"""Associate/Disassociate Network with Project and/or Host
arguments: network project host
leave any field blank to ignore it
"""
admin_context = context.get_admin_context()
network = db.network_get_by_cidr(admin_context, fixed_range)
net = {}
# User can choose the following actions each for project and host.
# 1) Associate (set not None value given by project/host parameter)
# 2) Disassociate (set None by disassociate parameter)
# 3) Keep unchanged (project/host key is not added to 'net')
if dis_project:
net['project_id'] = None
if dis_host:
net['host'] = None
# The --disassociate-X options are boolean, but if the user
# mistakenly provides a value, it will be used as a positional argument
# and be erroneously interpreted as some other parameter (e.g.
# a project instead of host value). The safest thing to do is error-out
# with a message indicating that there is probably a problem with
# how the disassociate modifications are being used.
if dis_project or dis_host:
if project or host:
error_msg = "ERROR: Unexpected arguments provided. Please " \
"use separate commands."
print(error_msg)
return(1)
db.network_update(admin_context, network['id'], net)
return
if project:
net['project_id'] = project
if host:
net['host'] = host
db.network_update(admin_context, network['id'], net)
class HostCommands(object):
"""List hosts."""
def list(self, zone=None):
"""Show a list of all physical hosts. Filter by zone.
args: [zone]
"""
print("%-25s\t%-15s" % (_('host'),
_('zone')))
ctxt = context.get_admin_context()
services = db.service_get_all(ctxt)
services = availability_zones.set_availability_zones(ctxt, services)
if zone:
services = [s for s in services if s['availability_zone'] == zone]
hosts = []
for srv in services:
if not [h for h in hosts if h['host'] == srv['host']]:
hosts.append(srv)
for h in hosts:
print("%-25s\t%-15s" % (h['host'], h['availability_zone']))
class DbCommands(object):
"""Class for managing the main database."""
online_migrations = (
# Added in Mitaka
db.aggregate_uuids_online_data_migration,
# Added in Newton
flavor_obj.migrate_flavors,
# Added in Newton
flavor_obj.migrate_flavor_reset_autoincrement,
# Added in Newton
instance_obj.migrate_instance_keypairs,
# Added in Newton
request_spec.migrate_instances_add_request_spec,
# Added in Newton
keypair_obj.migrate_keypairs_to_api_db,
# Added in Newton
aggregate_obj.migrate_aggregates,
# Added in Newton
aggregate_obj.migrate_aggregate_reset_autoincrement,
# Added in Newton
instance_group_obj.migrate_instance_groups_to_api_db,
)
def __init__(self):
pass
@args('--version', metavar='<version>', help='Database version')
@args('--local_cell', action='store_true',
help='Only sync db in the local cell: do not attempt to fan-out'
' to all cells')
def sync(self, version=None, local_cell=False):
"""Sync the database up to the most recent version."""
if not local_cell:
ctxt = context.RequestContext()
# NOTE(mdoff): Multiple cells not yet implemented. Currently
# fanout only looks for cell0.
try:
cell_mapping = objects.CellMapping.get_by_uuid(ctxt,
objects.CellMapping.CELL0_UUID)
with context.target_cell(ctxt, cell_mapping):
migration.db_sync(version, context=ctxt)
except exception.CellMappingNotFound:
print(_('WARNING: cell0 mapping not found - not'
' syncing cell0.'))
except Exception:
print(_('ERROR: could not access cell mapping database - has'
' api db been created?'))
return migration.db_sync(version)
def version(self):
"""Print the current database version."""
print(migration.db_version())
@args('--max_rows', metavar='<number>', default=1000,
help='Maximum number of deleted rows to archive')
@args('--verbose', action='store_true', dest='verbose', default=False,
help='Print how many rows were archived per table.')
@args('--until-complete', action='store_true', dest='until_complete',
default=False,
help=('Run continuously until all deleted rows are archived. Use '
'max_rows as a batch size for each iteration.'))
def archive_deleted_rows(self, max_rows, verbose=False,
until_complete=False):
"""Move deleted rows from production tables to shadow tables.
Returns 0 if nothing was archived, 1 if some number of rows were
archived, 2 if max_rows is invalid. If automating, this should be
run continuously while the result is 1, stopping at 0.
"""
max_rows = int(max_rows)
if max_rows < 0:
print(_("Must supply a positive value for max_rows"))
return(2)
if max_rows > db.MAX_INT:
print(_('max rows must be <= %(max_value)d') %
{'max_value': db.MAX_INT})
return(2)
table_to_rows_archived = {}
if until_complete and verbose:
sys.stdout.write(_('Archiving') + '..') # noqa
while True:
try:
run = db.archive_deleted_rows(max_rows)
except KeyboardInterrupt:
run = {}
if until_complete and verbose:
print('.' + _('stopped')) # noqa
break
for k, v in run.items():
table_to_rows_archived.setdefault(k, 0)
table_to_rows_archived[k] += v
if not until_complete:
break
elif not run:
if verbose:
print('.' + _('complete')) # noqa
break
if verbose:
sys.stdout.write('.')
if verbose:
if table_to_rows_archived:
utils.print_dict(table_to_rows_archived, _('Table'),
dict_value=_('Number of Rows Archived'))
else:
print(_('Nothing was archived.'))
# NOTE(danms): Return nonzero if we archived something
return int(bool(table_to_rows_archived))
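# Illustrative CLI sketch (flag names as declared above). When scripting,
# rerun while the exit code is 1 and stop at 0, or let --until-complete
# batch through everything in one invocation:
#
#   nova-manage db archive_deleted_rows --max_rows 1000 --verbose
#   nova-manage db archive_deleted_rows --max_rows 1000 --until-complete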
@args('--delete', action='store_true', dest='delete',
help='If specified, automatically delete any records found where '
'instance_uuid is NULL.')
def null_instance_uuid_scan(self, delete=False):
"""Lists and optionally deletes database records where
instance_uuid is NULL.
"""
hits = migration.db_null_instance_uuid_scan(delete)
records_found = False
for table_name, records in six.iteritems(hits):
# Don't print anything for 0 hits
if records:
records_found = True
if delete:
print(_("Deleted %(records)d records "
"from table '%(table_name)s'.") %
{'records': records, 'table_name': table_name})
else:
print(_("There are %(records)d records in the "
"'%(table_name)s' table where the uuid or "
"instance_uuid column is NULL. Run this "
"command again with the --delete option after you "
"have backed up any necessary data.") %
{'records': records, 'table_name': table_name})
# check to see if we didn't find anything
if not records_found:
print(_('There were no records found where '
'instance_uuid was NULL.'))
def _run_migration(self, ctxt, max_count):
ran = 0
migrations = {}
for migration_meth in self.online_migrations:
count = max_count - ran
try:
found, done = migration_meth(ctxt, count)
except Exception:
print(_("Error attempting to run %(method)s") % dict(
method=migration_meth))
found = done = 0
name = migration_meth.__name__
if found:
print(_('%(total)i rows matched query %(meth)s, %(done)i '
'migrated') % {'total': found,
'meth': name,
'done': done})
migrations.setdefault(name, (0, 0))
migrations[name] = (migrations[name][0] + found,
migrations[name][1] + done)
if max_count is not None:
ran += done
if ran >= max_count:
break
return migrations
@args('--max-count', metavar='<number>', dest='max_count',
help='Maximum number of objects to consider')
def online_data_migrations(self, max_count=None):
ctxt = context.get_admin_context()
if max_count is not None:
try:
max_count = int(max_count)
except ValueError:
max_count = -1
unlimited = False
if max_count < 1:
print(_('Must supply a positive value for max-count'))
return 127
else:
unlimited = True
max_count = 50
print(_('Running batches of %i until complete') % max_count)
ran = None
migration_info = {}
while ran is None or ran != 0:
migrations = self._run_migration(ctxt, max_count)
migration_info.update(migrations)
ran = sum([done for found, done in migrations.values()])
if not unlimited:
break
t = prettytable.PrettyTable([_('Migration'),
_('Total Needed'),
_('Completed')])
for name in sorted(migration_info.keys()):
info = migration_info[name]
t.add_row([name, info[0], info[1]])
print(t)
return ran and 1 or 0
class ApiDbCommands(object):
"""Class for managing the api database."""
def __init__(self):
pass
@args('--version', metavar='<version>', help='Database version')
def sync(self, version=None):
"""Sync the database up to the most recent version."""
return migration.db_sync(version, database='api')
def version(self):
"""Print the current database version."""
print(migration.db_version(database='api'))
class AgentBuildCommands(object):
"""Class for managing agent builds."""
@args('--os', metavar='<os>', help='os')
@args('--architecture', dest='architecture',
metavar='<architecture>', help='architecture')
@args('--version', metavar='<version>', help='version')
@args('--url', metavar='<url>', help='url')
@args('--md5hash', metavar='<md5hash>', help='md5hash')
@args('--hypervisor', metavar='<hypervisor>',
help='hypervisor (default: xen)')
def create(self, os, architecture, version, url, md5hash,
hypervisor='xen'):
"""Creates a new agent build."""
ctxt = context.get_admin_context()
db.agent_build_create(ctxt, {'hypervisor': hypervisor,
'os': os,
'architecture': architecture,
'version': version,
'url': url,
'md5hash': md5hash})
@args('--os', metavar='<os>', help='os')
@args('--architecture', dest='architecture',
metavar='<architecture>', help='architecture')
@args('--hypervisor', metavar='<hypervisor>',
help='hypervisor (default: xen)')
def delete(self, os, architecture, hypervisor='xen'):
"""Deletes an existing agent build."""
ctxt = context.get_admin_context()
agent_build_ref = db.agent_build_get_by_triple(ctxt,
hypervisor, os, architecture)
db.agent_build_destroy(ctxt, agent_build_ref['id'])
@args('--hypervisor', metavar='<hypervisor>',
help='hypervisor (default: None)')
def list(self, hypervisor=None):
"""Lists all agent builds.
arguments: <none>
"""
fmt = "%-10s %-8s %12s %s"
ctxt = context.get_admin_context()
by_hypervisor = {}
for agent_build in db.agent_build_get_all(ctxt):
buildlist = by_hypervisor.get(agent_build.hypervisor)
if not buildlist:
buildlist = by_hypervisor[agent_build.hypervisor] = []
buildlist.append(agent_build)
for key, buildlist in six.iteritems(by_hypervisor):
if hypervisor and key != hypervisor:
continue
print(_('Hypervisor: %s') % key)
print(fmt % ('-' * 10, '-' * 8, '-' * 12, '-' * 32))
for agent_build in buildlist:
print(fmt % (agent_build.os, agent_build.architecture,
agent_build.version, agent_build.md5hash))
print(' %s' % agent_build.url)
print()
@args('--os', metavar='<os>', help='os')
@args('--architecture', dest='architecture',
metavar='<architecture>', help='architecture')
@args('--version', metavar='<version>', help='version')
@args('--url', metavar='<url>', help='url')
@args('--md5hash', metavar='<md5hash>', help='md5hash')
@args('--hypervisor', metavar='<hypervisor>',
help='hypervisor (default: xen)')
def modify(self, os, architecture, version, url, md5hash,
hypervisor='xen'):
"""Update an existing agent build."""
ctxt = context.get_admin_context()
agent_build_ref = db.agent_build_get_by_triple(ctxt,
hypervisor, os, architecture)
db.agent_build_update(ctxt, agent_build_ref['id'],
{'version': version,
'url': url,
'md5hash': md5hash})
class GetLogCommands(object):
"""Get logging information."""
def errors(self):
"""Get all of the errors from the log files."""
error_found = 0
if CONF.log_dir:
logs = [x for x in os.listdir(CONF.log_dir) if x.endswith('.log')]
for file in logs:
log_file = os.path.join(CONF.log_dir, file)
lines = [line.strip() for line in open(log_file, "r")]
lines.reverse()
print_name = 0
for index, line in enumerate(lines):
if line.find(" ERROR ") > 0:
error_found += 1
if print_name == 0:
print(log_file + ":-")
print_name = 1
linenum = len(lines) - index
print((_('Line %(linenum)d : %(line)s') %
{'linenum': linenum, 'line': line}))
if error_found == 0:
print(_('No errors in logfiles!'))
@args('--num_entries', metavar='<number of entries>',
help='number of entries (default: 10)')
def syslog(self, num_entries=10):
"""Get <num_entries> of the nova syslog events."""
entries = int(num_entries)
count = 0
log_file = ''
if os.path.exists('/var/log/syslog'):
log_file = '/var/log/syslog'
elif os.path.exists('/var/log/messages'):
log_file = '/var/log/messages'
else:
print(_('Unable to find system log file!'))
return(1)
lines = [line.strip() for line in open(log_file, "r")]
lines.reverse()
print(_('Last %s nova syslog entries:-') % (entries))
for line in lines:
if line.find("nova") > 0:
count += 1
print("%s" % (line))
if count == entries:
break
if count == 0:
print(_('No nova entries in syslog!'))
class CellCommands(object):
"""Commands for managing cells."""
def _create_transport_hosts(self, username, password,
broker_hosts=None, hostname=None, port=None):
"""Returns a list of oslo.messaging.TransportHost objects."""
transport_hosts = []
# Either broker-hosts or hostname should be set
if broker_hosts:
hosts = broker_hosts.split(',')
for host in hosts:
host = host.strip()
broker_hostname, broker_port = utils.parse_server_string(host)
if not broker_port:
msg = _('Invalid broker_hosts value: %s. It should be'
' in hostname:port format') % host
raise ValueError(msg)
try:
broker_port = int(broker_port)
except ValueError:
msg = _('Invalid port value: %s. It should be '
'an integer') % broker_port
raise ValueError(msg)
transport_hosts.append(
messaging.TransportHost(
hostname=broker_hostname,
port=broker_port,
username=username,
password=password))
else:
try:
port = int(port)
except ValueError:
msg = _("Invalid port value: %s. Should be an integer") % port
raise ValueError(msg)
transport_hosts.append(
messaging.TransportHost(
hostname=hostname,
port=port,
username=username,
password=password))
return transport_hosts
@args('--name', metavar='<name>', help='Name for the new cell')
@args('--cell_type', metavar='<parent|api|child|compute>',
help='Whether the cell is parent/api or child/compute')
@args('--username', metavar='<username>',
help='Username for the message broker in this cell')
@args('--password', metavar='<password>',
help='Password for the message broker in this cell')
@args('--broker_hosts', metavar='<broker_hosts>',
help='Comma separated list of message brokers in this cell. '
'Each Broker is specified as hostname:port with both '
'mandatory. This option overrides the --hostname '
'and --port options (if provided). ')
@args('--hostname', metavar='<hostname>',
help='Address of the message broker in this cell')
@args('--port', metavar='<number>',
help='Port number of the message broker in this cell')
@args('--virtual_host', metavar='<virtual_host>',
help='The virtual host of the message broker in this cell')
@args('--woffset', metavar='<float>')
@args('--wscale', metavar='<float>')
def create(self, name, cell_type='child', username=None, broker_hosts=None,
password=None, hostname=None, port=None, virtual_host=None,
woffset=None, wscale=None):
if cell_type not in ['parent', 'child', 'api', 'compute']:
print("Error: cell type must be 'parent'/'api' or "
"'child'/'compute'")
return(2)
# Set up the transport URL
transport_hosts = self._create_transport_hosts(
username, password,
broker_hosts, hostname,
port)
transport_url = rpc.get_transport_url()
transport_url.hosts.extend(transport_hosts)
transport_url.virtual_host = virtual_host
is_parent = False
if cell_type in ['api', 'parent']:
is_parent = True
values = {'name': name,
'is_parent': is_parent,
'transport_url': urlparse.unquote(str(transport_url)),
'weight_offset': float(woffset),
'weight_scale': float(wscale)}
ctxt = context.get_admin_context()
db.cell_create(ctxt, values)
@args('--cell_name', metavar='<cell_name>',
help='Name of the cell to delete')
def delete(self, cell_name):
ctxt = context.get_admin_context()
db.cell_delete(ctxt, cell_name)
def list(self):
ctxt = context.get_admin_context()
cells = db.cell_get_all(ctxt)
fmt = "%3s %-10s %-6s %-10s %-15s %-5s %-10s"
print(fmt % ('Id', 'Name', 'Type', 'Username', 'Hostname',
'Port', 'VHost'))
print(fmt % ('-' * 3, '-' * 10, '-' * 6, '-' * 10, '-' * 15,
'-' * 5, '-' * 10))
for cell in cells:
url = rpc.get_transport_url(cell.transport_url)
host = url.hosts[0] if url.hosts else messaging.TransportHost()
print(fmt % (cell.id, cell.name,
'parent' if cell.is_parent else 'child',
host.username, host.hostname,
host.port, url.virtual_host))
print(fmt % ('-' * 3, '-' * 10, '-' * 6, '-' * 10, '-' * 15,
'-' * 5, '-' * 10))
class CellV2Commands(object):
"""Commands for managing cells v2."""
# TODO(melwitt): Remove this when the oslo.messaging function
# for assembling a transport url from ConfigOpts is available
@args('--transport-url', metavar='<transport url>', required=True,
dest='transport_url',
help='The transport url for the cell message queue')
def simple_cell_setup(self, transport_url):
"""Simple cellsv2 setup.
This simplified command is for use by existing non-cells users to
configure the default environment. If you are using CellsV1, this
will not work for you. Returns 0 if setup is completed (or has
already been done), 1 if no hosts are reporting (and this cannot
be mapped) and 2 if run in a CellsV1 environment.
"""
if CONF.cells.enable:
print('CellsV1 users cannot use this simplified setup command')
return 2
ctxt = context.RequestContext()
try:
cell0_mapping = self.map_cell0()
except db_exc.DBDuplicateEntry:
print('Already setup, nothing to do.')
return 0
# Run migrations so cell0 is usable
with context.target_cell(ctxt, cell0_mapping):
migration.db_sync(None, context=ctxt)
cell_uuid = self._map_cell_and_hosts(transport_url)
if cell_uuid is None:
# There are no compute hosts which means no cell_mapping was
# created. This should also mean that there are no instances.
return 1
self.map_instances(cell_uuid)
return 0
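# Illustrative CLI sketch (the transport URL is a placeholder, not a real
# broker):
#
#   nova-manage cell_v2 simple_cell_setup \
#       --transport-url rabbit://guest:guest@rabbitmq.example.com:5672/
#
# Exit codes: 0 done (or already set up), 1 no hosts reporting, 2 CellsV1.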
@args('--database_connection',
metavar='<database_connection>',
help='The database connection url for cell0. '
'This is optional. If not provided, a standard database '
'connection will be used based on the API database connection '
'from the Nova configuration.'
)
def map_cell0(self, database_connection=None):
"""Create a cell mapping for cell0.
cell0 is used for instances that have not been scheduled to any cell.
This generally applies to instances that have encountered an error
before they have been scheduled.
This command creates a cell mapping for this special cell which
requires a database to store the instance data.
"""
def cell0_default_connection():
# If no database connection is provided one is generated
# based on the API database connection url.
# The cell0 database will use the same database scheme and
# netloc as the API database, with a related path.
scheme, netloc, path, query, fragment = \
urlparse.urlsplit(CONF.api_database.connection)
root, ext = os.path.splitext(path)
path = root + "_cell0" + ext
return urlparse.urlunsplit((scheme, netloc, path, query,
fragment))
dbc = database_connection or cell0_default_connection()
ctxt = context.RequestContext()
# A transport url of 'none://' is provided for cell0. RPC should not
# be used to access cell0 objects. Cells transport switching will
# ignore any 'none' transport type.
cell_mapping = objects.CellMapping(
ctxt, uuid=objects.CellMapping.CELL0_UUID, name="cell0",
transport_url="none:///",
database_connection=dbc)
cell_mapping.create()
return cell_mapping
def _get_and_map_instances(self, ctxt, cell_mapping, limit, marker):
filters = {}
instances = objects.InstanceList.get_by_filters(
ctxt.elevated(read_deleted='yes'), filters,
sort_key='created_at', sort_dir='asc', limit=limit,
marker=marker)
for instance in instances:
try:
mapping = objects.InstanceMapping(ctxt)
mapping.instance_uuid = instance.uuid
mapping.cell_mapping = cell_mapping
mapping.project_id = instance.project_id
mapping.create()
except db_exc.DBDuplicateEntry:
continue
if len(instances) == 0 or len(instances) < limit:
# We've hit the end of the instances table
marker = None
else:
marker = instances[-1].uuid
return marker
@args('--cell_uuid', metavar='<cell_uuid>', required=True,
help='Unmigrated instances will be mapped to the cell with the '
'uuid provided.')
@args('--max-count', metavar='<max_count>',
help='Maximum number of instances to map')
def map_instances(self, cell_uuid, max_count=None):
"""Map instances into the provided cell.
This assumes that Nova on this host is still configured to use the nova
database, not just the nova-api database. Instances in the nova database
will be queried from oldest to newest and mapped to the provided cell.
A max-count can be set on the number of instances to map in a single
run. Repeated runs of the command will start from where the last run
finished so it is not necessary to increase max-count to finish. An
exit code of 0 indicates that all instances have been mapped.
"""
if max_count is not None:
try:
max_count = int(max_count)
except ValueError:
max_count = -1
map_all = False
if max_count < 1:
print(_('Must supply a positive value for max-count'))
return 127
else:
map_all = True
max_count = 50
ctxt = context.RequestContext()
marker_project_id = 'INSTANCE_MIGRATION_MARKER'
# Validate the cell exists, this will raise if not
cell_mapping = objects.CellMapping.get_by_uuid(ctxt, cell_uuid)
# Check for a marker from a previous run
marker_mapping = objects.InstanceMappingList.get_by_project_id(ctxt,
marker_project_id)
if len(marker_mapping) == 0:
marker = None
else:
# There should be only one here
marker = marker_mapping[0].instance_uuid.replace(' ', '-')
marker_mapping[0].destroy()
next_marker = True
while next_marker is not None:
next_marker = self._get_and_map_instances(ctxt, cell_mapping,
max_count, marker)
marker = next_marker
if not map_all:
break
if next_marker:
# Don't judge me. There's already an InstanceMapping with this UUID
# so the marker needs to be non destructively modified.
next_marker = next_marker.replace('-', ' ')
objects.InstanceMapping(ctxt, instance_uuid=next_marker,
project_id=marker_project_id).create()
return 1
return 0
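# Illustrative CLI sketch: bounded runs resume from the stored marker;
# repeat until the exit code is 0 (everything mapped). <cell-uuid> is a
# placeholder.
#
#   nova-manage cell_v2 map_instances --cell_uuid <cell-uuid> --max-count 100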
def _map_cell_and_hosts(self, transport_url, name=None, verbose=False):
ctxt = context.RequestContext()
cell_mapping_uuid = cell_mapping = None
# First, try to detect if a CellMapping has already been created
compute_nodes = objects.ComputeNodeList.get_all(ctxt)
if not compute_nodes:
print(_('No hosts found to map to cell, exiting.'))
return None
missing_nodes = set()
for compute_node in compute_nodes:
try:
host_mapping = objects.HostMapping.get_by_host(
ctxt, compute_node.host)
except exception.HostMappingNotFound:
missing_nodes.add(compute_node.host)
else:
if verbose:
print(_(
'Host %(host)s is already mapped to cell %(uuid)s'
) % {'host': host_mapping.host,
'uuid': host_mapping.cell_mapping.uuid})
# Re-using the existing UUID in case there is already a mapping
# NOTE(sbauza): There could be possibly multiple CellMappings
# if the operator provides another configuration file and moves
# the hosts to another cell v2, but that's not really something
# we should support.
cell_mapping_uuid = host_mapping.cell_mapping.uuid
if not missing_nodes:
print(_('All hosts are already mapped to cell(s), exiting.'))
return cell_mapping_uuid
# Create the cell mapping in the API database
if cell_mapping_uuid is not None:
cell_mapping = objects.CellMapping.get_by_uuid(
ctxt, cell_mapping_uuid)
if cell_mapping is None:
cell_mapping_uuid = uuidutils.generate_uuid()
cell_mapping = objects.CellMapping(
ctxt, uuid=cell_mapping_uuid, name=name,
transport_url=transport_url,
database_connection=CONF.database.connection)
cell_mapping.create()
# Pull the hosts from the cell database and create the host mappings
for compute_host in missing_nodes:
host_mapping = objects.HostMapping(
ctxt, host=compute_host, cell_mapping=cell_mapping)
host_mapping.create()
if verbose:
print(cell_mapping_uuid)
return cell_mapping_uuid
@args('--transport-url', metavar='<transport url>', dest='transport_url',
help='The transport url for the cell message queue')
@args('--name', metavar='<name>', help='The name of the cell')
@args('--verbose', action='store_true',
help='Return and output the uuid of the created cell')
def map_cell_and_hosts(self, transport_url=None, name=None, verbose=False):
"""EXPERIMENTAL. Create a cell mapping and host mappings for a cell.
Users not dividing their cloud into multiple cells will be a single
cell v2 deployment and should specify:
nova-manage cell_v2 map_cell_and_hosts --config-file <nova.conf>
Users running multiple cells can add a cell v2 by specifying:
nova-manage cell_v2 map_cell_and_hosts --config-file <cell nova.conf>
"""
transport_url = CONF.transport_url or transport_url
if not transport_url:
print(_('Must specify --transport-url if [DEFAULT]/transport_url '
'is not set in the configuration file.'))
return 1
self._map_cell_and_hosts(transport_url, name, verbose)
# online_data_migrations established a pattern of 0 meaning everything
# is done, 1 means run again to do more work. This command doesn't do
# partial work so 0 is appropriate.
return 0
@args('--uuid', metavar='<uuid>', dest='uuid', required=True,
help=_('The instance UUID to verify'))
@args('--quiet', action='store_true', dest='quiet',
help=_('Do not print anything'))
def verify_instance(self, uuid, quiet=False):
"""Verify instance mapping to a cell.
This command is useful to determine if the cellsv2 environment is
properly setup, specifically in terms of the cell, host, and instance
mapping records required.
This prints one of three strings (and exits with a code) indicating
whether the instance is successfully mapped to a cell (0), is unmapped
due to an incomplete upgrade (1), or unmapped due to a normal, transient
state (2).
"""
if not uuid:
print(_('Must specify --uuid'))
return 16
def say(string):
if not quiet:
print(string)
ctxt = context.RequestContext()
try:
mapping = objects.InstanceMapping.get_by_instance_uuid(
ctxt, uuid)
except exception.InstanceMappingNotFound:
say('Instance %s is not mapped to a cell '
'(upgrade is incomplete)' % uuid)
return 1
if mapping.cell_mapping is None:
say('Instance %s is not mapped to a cell' % uuid)
return 2
else:
say('Instance %s is in cell: %s (%s)' % (
uuid,
mapping.cell_mapping.name,
mapping.cell_mapping.uuid))
return 0
@args('--cell_uuid', metavar='<cell_uuid>', dest='cell_uuid',
help='If provided only this cell will be searched for new hosts to '
'map.')
def discover_hosts(self, cell_uuid=None):
"""Searches cells, or a single cell, and maps found hosts.
When a new host is added to a deployment it will add a service entry
to the db it's configured to use. This command will check the db for
each cell, or a single one if passed in, and map any hosts which are
not currently mapped. If a host is already mapped nothing will be done.
"""
ctxt = context.RequestContext()
# TODO(alaski): If this is not run on a host configured to use the API
# database most of the lookups below will fail and may not provide a
# great error message. Add a check which will raise a useful error
# message about running this from an API host.
if cell_uuid:
cell_mappings = [objects.CellMapping.get_by_uuid(ctxt, cell_uuid)]
else:
cell_mappings = objects.CellMappingList.get_all(ctxt)
for cell_mapping in cell_mappings:
# TODO(alaski): Factor this into helper method on CellMapping
if cell_mapping.uuid == cell_mapping.CELL0_UUID:
continue
with context.target_cell(ctxt, cell_mapping):
compute_nodes = objects.ComputeNodeList.get_all(ctxt)
for compute in compute_nodes:
try:
objects.HostMapping.get_by_host(ctxt, compute.host)
except exception.HostMappingNotFound:
host_mapping = objects.HostMapping(
ctxt, host=compute.host,
cell_mapping=cell_mapping)
host_mapping.create()
CATEGORIES = {
'account': AccountCommands,
'agent': AgentBuildCommands,
'api_db': ApiDbCommands,
'cell': CellCommands,
'cell_v2': CellV2Commands,
'db': DbCommands,
'fixed': FixedIpCommands,
'floating': FloatingIpCommands,
'host': HostCommands,
'logs': GetLogCommands,
'network': NetworkCommands,
'project': ProjectCommands,
'shell': ShellCommands,
'vpn': VpnCommands,
}
add_command_parsers = functools.partial(cmd_common.add_command_parsers,
categories=CATEGORIES)
category_opt = cfg.SubCommandOpt('category',
title='Command categories',
help='Available categories',
handler=add_command_parsers)
def main():
"""Parse options and call the appropriate class/method."""
CONF.register_cli_opt(category_opt)
config.parse_args(sys.argv)
logging.set_defaults(
default_log_levels=logging.get_default_log_levels() +
_EXTRA_DEFAULT_LOG_LEVELS)
logging.setup(CONF, "nova")
objects.register_all()
if CONF.category.name == "version":
print(version.version_string_with_package())
return(0)
if CONF.category.name == "bash-completion":
cmd_common.print_bash_completion(CATEGORIES)
return(0)
try:
fn, fn_args, fn_kwargs = cmd_common.get_action_fn()
ret = fn(*fn_args, **fn_kwargs)
rpc.cleanup()
return(ret)
except Exception as ex:
print(_("error: %s") % ex)
return(1)
| apache-2.0 |
chubbymaggie/barf-project | barf/analysis/__init__.py | 98 | 1345 | # Copyright (c) 2014, Fundacion Dr. Manuel Sadosky
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| bsd-2-clause |
Dinjesk/android_kernel_oneplus_msm8996 | scripts/build-all.py | 162 | 14627 | #! /usr/bin/env python2
# Copyright (c) 2009-2015, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
from collections import namedtuple
import errno
import glob
from optparse import OptionParser
import os
import re
import shutil
import subprocess
import sys
import threading
import Queue
version = 'build-all.py, version 1.99'
build_dir = '../all-kernels'
make_command = ["vmlinux", "modules", "dtbs"]
all_options = {}
compile64 = os.environ.get('CROSS_COMPILE64')
def error(msg):
sys.stderr.write("error: %s\n" % msg)
def fail(msg):
"""Fail with a user-printed message"""
error(msg)
sys.exit(1)
if not os.environ.get('CROSS_COMPILE'):
fail("CROSS_COMPILE must be set in the environment")
def check_kernel():
"""Ensure that PWD is a kernel directory"""
if (not os.path.isfile('MAINTAINERS') or
not os.path.isfile('arch/arm/mach-msm/Kconfig')):
fail("This doesn't seem to be an MSM kernel dir")
def check_build():
"""Ensure that the build directory is present."""
if not os.path.isdir(build_dir):
try:
os.makedirs(build_dir)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
failed_targets = []
BuildResult = namedtuple('BuildResult', ['status', 'messages'])
class BuildSequence(namedtuple('BuildSequence', ['log_name', 'short_name', 'steps'])):
def set_width(self, width):
self.width = width
def __enter__(self):
self.log = open(self.log_name, 'w')
def __exit__(self, type, value, traceback):
self.log.close()
def run(self):
self.status = None
messages = ["Building: " + self.short_name]
def printer(line):
text = "[%-*s] %s" % (self.width, self.short_name, line)
messages.append(text)
self.log.write(text)
self.log.write('\n')
for step in self.steps:
st = step.run(printer)
if st:
self.status = BuildResult(self.short_name, messages)
break
if not self.status:
self.status = BuildResult(None, messages)
class BuildTracker:
"""Manages all of the steps necessary to perform a build. The
build consists of one or more sequences of steps. The different
sequences can be processed independently, while the steps within a
sequence must be done in order."""
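# Illustrative sketch (hypothetical names) of how this class is driven;
# the real call site is build_many() further below:
#   tracker = BuildTracker(parallel_builds=2)
#   tracker.add_sequence('log-msm.log', 'msm_defconfig',
#                        [MkdirStep('out'), ExecStep(['make', 'vmlinux'])])
#   tracker.run()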
def __init__(self, parallel_builds):
self.sequence = []
self.lock = threading.Lock()
self.parallel_builds = parallel_builds
def add_sequence(self, log_name, short_name, steps):
self.sequence.append(BuildSequence(log_name, short_name, steps))
def longest_name(self):
longest = 0
for seq in self.sequence:
longest = max(longest, len(seq.short_name))
return longest
def __repr__(self):
return "BuildTracker(%s)" % self.sequence
def run_child(self, seq):
seq.set_width(self.longest)
tok = self.build_tokens.get()
with self.lock:
print "Building:", seq.short_name
with seq:
seq.run()
self.results.put(seq.status)
self.build_tokens.put(tok)
def run(self):
self.longest = self.longest_name()
self.results = Queue.Queue()
children = []
errors = []
self.build_tokens = Queue.Queue()
nthreads = self.parallel_builds
print "Building with", nthreads, "threads"
for i in range(nthreads):
self.build_tokens.put(True)
for seq in self.sequence:
child = threading.Thread(target=self.run_child, args=[seq])
children.append(child)
child.start()
for child in children:
stats = self.results.get()
if all_options.verbose:
with self.lock:
for line in stats.messages:
print line
sys.stdout.flush()
if stats.status:
errors.append(stats.status)
for child in children:
child.join()
if errors:
fail("\n ".join(["Failed targets:"] + errors))
class PrintStep:
"""A step that just prints a message"""
def __init__(self, message):
self.message = message
def run(self, outp):
outp(self.message)
class MkdirStep:
"""A step that makes a directory"""
def __init__(self, direc):
self.direc = direc
def run(self, outp):
outp("mkdir %s" % self.direc)
os.mkdir(self.direc)
class RmtreeStep:
def __init__(self, direc):
self.direc = direc
def run(self, outp):
outp("rmtree %s" % self.direc)
shutil.rmtree(self.direc, ignore_errors=True)
class CopyfileStep:
def __init__(self, src, dest):
self.src = src
self.dest = dest
def run(self, outp):
outp("cp %s %s" % (self.src, self.dest))
shutil.copyfile(self.src, self.dest)
class ExecStep:
def __init__(self, cmd, **kwargs):
self.cmd = cmd
self.kwargs = kwargs
def run(self, outp):
outp("exec: %s" % (" ".join(self.cmd),))
with open('/dev/null', 'r') as devnull:
proc = subprocess.Popen(self.cmd, stdin=devnull,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
**self.kwargs)
stdout = proc.stdout
while True:
line = stdout.readline()
if not line:
break
line = line.rstrip('\n')
outp(line)
result = proc.wait()
if result != 0:
return ('error', result)
else:
return None
class Builder():
def __init__(self, name, defconfig):
self.name = name
self.defconfig = defconfig
self.confname = self.defconfig.split('/')[-1]
# Determine if this is a 64-bit target based on the location
# of the defconfig.
self.make_env = os.environ.copy()
if "/arm64/" in defconfig:
if compile64:
self.make_env['CROSS_COMPILE'] = compile64
else:
fail("Attempting to build 64-bit, without setting CROSS_COMPILE64")
self.make_env['ARCH'] = 'arm64'
else:
self.make_env['ARCH'] = 'arm'
self.make_env['KCONFIG_NOTIMESTAMP'] = 'true'
self.log_name = "%s/log-%s.log" % (build_dir, self.name)
def build(self):
steps = []
dest_dir = os.path.join(build_dir, self.name)
log_name = "%s/log-%s.log" % (build_dir, self.name)
steps.append(PrintStep('Building %s in %s log %s' %
(self.name, dest_dir, log_name)))
if not os.path.isdir(dest_dir):
steps.append(MkdirStep(dest_dir))
defconfig = self.defconfig
dotconfig = '%s/.config' % dest_dir
savedefconfig = '%s/defconfig' % dest_dir
staging_dir = 'install_staging'
modi_dir = '%s' % staging_dir
hdri_dir = '%s/usr' % staging_dir
steps.append(RmtreeStep(os.path.join(dest_dir, staging_dir)))
steps.append(ExecStep(['make', 'O=%s' % dest_dir,
self.confname], env=self.make_env))
if not all_options.updateconfigs:
# Build targets can be dependent upon the completion of
# previous build targets, so build them one at a time.
cmd_line = ['make',
'INSTALL_HDR_PATH=%s' % hdri_dir,
'INSTALL_MOD_PATH=%s' % modi_dir,
'O=%s' % dest_dir]
build_targets = []
for c in make_command:
if re.match(r'^-{1,2}\w', c):
cmd_line.append(c)
else:
build_targets.append(c)
for t in build_targets:
steps.append(ExecStep(cmd_line + [t], env=self.make_env))
# Copy the defconfig back.
if all_options.configs or all_options.updateconfigs:
steps.append(ExecStep(['make', 'O=%s' % dest_dir,
'savedefconfig'], env=self.make_env))
steps.append(CopyfileStep(savedefconfig, defconfig))
return steps
def update_config(file, str):
print 'Updating %s with \'%s\'\n' % (file, str)
with open(file, 'a') as defconfig:
defconfig.write(str + '\n')
def scan_configs():
"""Get the full list of defconfigs appropriate for this tree."""
names = []
arch_pats = (
r'[fm]sm[0-9]*_defconfig',
r'apq*_defconfig',
r'qsd*_defconfig',
r'mdm*_defconfig',
r'mpq*_defconfig',
)
arch64_pats = (
r'msm*_defconfig',
)
for p in arch_pats:
for n in glob.glob('arch/arm/configs/' + p):
name = os.path.basename(n)[:-10]
names.append(Builder(name, n))
if 'CROSS_COMPILE64' in os.environ:
for p in arch64_pats:
for n in glob.glob('arch/arm64/configs/' + p):
name = os.path.basename(n)[:-10] + "-64"
names.append(Builder(name, n))
return names
def build_many(targets):
print "Building %d target(s)" % len(targets)
# To try and make up for the link phase being serial, try to do
# two full builds in parallel. Don't do too many because lots of
# parallel builds tends to use up available memory rather quickly.
parallel = 2
if all_options.jobs and all_options.jobs > 1:
j = max(all_options.jobs / parallel, 2)
make_command.append("-j" + str(j))
tracker = BuildTracker(parallel)
for target in targets:
if all_options.updateconfigs:
update_config(target.defconfig, all_options.updateconfigs)
steps = target.build()
tracker.add_sequence(target.log_name, target.name, steps)
tracker.run()
def main():
global make_command
check_kernel()
check_build()
configs = scan_configs()
usage = ("""
%prog [options] all -- Build all targets
%prog [options] target target ... -- List specific targets
%prog [options] perf -- Build all perf targets
%prog [options] noperf -- Build all non-perf targets""")
parser = OptionParser(usage=usage, version=version)
parser.add_option('--configs', action='store_true',
dest='configs',
help="Copy configs back into tree")
parser.add_option('--list', action='store_true',
dest='list',
help='List available targets')
parser.add_option('-v', '--verbose', action='store_true',
dest='verbose',
help='Output to stdout in addition to log file')
parser.add_option('--oldconfig', action='store_true',
dest='oldconfig',
help='Only process "make oldconfig"')
parser.add_option('--updateconfigs',
dest='updateconfigs',
help="Update defconfigs with provided option setting, "
"e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
parser.add_option('-j', '--jobs', type='int', dest="jobs",
help="Number of simultaneous jobs")
parser.add_option('-l', '--load-average', type='int',
dest='load_average',
help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
parser.add_option('-k', '--keep-going', action='store_true',
dest='keep_going', default=False,
help="Keep building other targets if a target fails")
parser.add_option('-m', '--make-target', action='append',
help='Build the indicated make target (default: %s)' %
' '.join(make_command))
(options, args) = parser.parse_args()
global all_options
all_options = options
if options.list:
print "Available targets:"
for target in configs:
print " %s" % target.name
sys.exit(0)
if options.oldconfig:
make_command = ["oldconfig"]
elif options.make_target:
make_command = options.make_target
if args == ['all']:
build_many(configs)
elif args == ['perf']:
targets = []
for t in configs:
if "perf" in t.name:
targets.append(t)
build_many(targets)
elif args == ['noperf']:
targets = []
for t in configs:
if "perf" not in t.name:
targets.append(t)
build_many(targets)
elif len(args) > 0:
all_configs = {}
for t in configs:
all_configs[t.name] = t
targets = []
for t in args:
if t not in all_configs:
parser.error("Target '%s' not one of %s" % (t, all_configs.keys()))
targets.append(all_configs[t])
build_many(targets)
else:
parser.error("Must specify a target to build, or 'all'")
if __name__ == "__main__":
main()
| gpl-2.0 |
dwadler/QGIS | python/plugins/processing/algs/help/__init__.py | 20 | 2091 | # -*- coding: utf-8 -*-
"""
***************************************************************************
__init__.py
---------------------
Date : January 2016
Copyright : (C) 2016 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'January 2016'
__copyright__ = '(C) 2016, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import codecs
import yaml
from qgis.core import QgsSettings, Qgis
from qgis.PyQt.QtCore import QLocale
def loadShortHelp():
h = {}
path = os.path.dirname(__file__)
for f in os.listdir(path):
if f.endswith("yaml"):
filename = os.path.join(path, f)
with codecs.open(filename, encoding='utf-8') as stream:
h.update(yaml.load(stream))
version = ".".join(Qgis.QGIS_VERSION.split(".")[0:2])
overrideLocale = QgsSettings().value('locale/overrideFlag', False, bool)
if not overrideLocale:
locale = QLocale.system().name()[:2]
else:
locale = QgsSettings().value('locale/userLocale', '')
locale = locale.split("_")[0]
def replace(s):
if s is not None:
return s.replace("{qgisdocs}", "https://docs.qgis.org/%s/%s/docs" % (version, locale))
else:
return None
h = {k: replace(v) for k, v in list(h.items())}
return h
shortHelp = loadShortHelp()
| gpl-2.0 |
Smiljanic/Esspresso-Code | testsuite/python/ewald_gpu.py | 6 | 2129 | #
# Copyright (C) 2013,2014 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Tests particle property setters/getters
import espressomd
import unittest as ut
import numpy as np
from espressomd.electrostatics import EwaldGpu
class ewald_GPU_test(ut.TestCase):
es=espressomd.System()
test_params={}
test_params["bjerrum_length"]=2
test_params["num_kx"]=2
test_params["num_ky"]=2
test_params["num_kz"]=2
test_params["K_max"]=10
test_params["time_calc_steps"]=100
test_params["rcut"]=0.9
test_params["accuracy"]=1e-1
test_params["precision"]=1e-2
test_params["alpha"]=3.5
def runTest(self):
ewald=EwaldGpu(bjerrum_length=self.test_params["bjerrum_length"], num_kx=self.test_params["num_kx"], num_ky=self.test_params["num_ky"], num_kz=self.test_params["num_kz"], rcut=self.test_params["rcut"], accuracy=self.test_params["accuracy"], precision=self.test_params["precision"], alpha=self.test_params["alpha"], time_calc_steps=self.test_params["time_calc_steps"], K_max=self.test_params["K_max"])
self.es.actors.add(ewald)
set_params=ewald._getParamsFromEsCore()
SAME=True
for i in self.test_params.keys():
if set_params[i] != self.test_params[i]:
print "Parameter mismatch: ", i, set_params[i], self.test_params[i]
SAME=False
break
self.assertTrue(SAME)
if __name__ == "__main__":
print("Features: ",espressomd.features())
ut.main()
| gpl-3.0 |
jdubs/cloud-custodian | tests/test_actions.py | 5 | 2069 | # Copyright 2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from botocore.exceptions import ClientError
from c7n.actions import Action, ActionRegistry
from .common import BaseTest
class ActionTest(BaseTest):
def test_process_unimplemented(self):
self.assertRaises(NotImplementedError, Action().process, None)
def test_run_api(self):
resp = {
'Error': {
'Code': 'DryRunOperation',
'Message': 'would have succeeded',
},
'ResponseMetadata': {
'HTTPStatusCode': 412
}
}
func = lambda: (_ for _ in ()).throw(ClientError(resp, 'test'))
# Hard to test for something because it just logs a message, but make
# sure that the ClientError gets caught and not re-raised
Action()._run_api(func)
def test_run_api_error(self):
resp = {
'Error': {
'Code': 'Foo',
'Message': 'Bar',
}
}
func = lambda: (_ for _ in ()).throw(ClientError(resp, 'test2'))
self.assertRaises(ClientError, Action()._run_api, func)
class ActionRegistryTest(BaseTest):
def test_error_bad_action_type(self):
self.assertRaises(ValueError, ActionRegistry('test.actions').factory, {}, None)
def test_error_unregistered_action_type(self):
self.assertRaises(ValueError, ActionRegistry('test.actions').factory, 'foo', None)
| apache-2.0 |
TheMOOCAgency/edx-platform | lms/djangoapps/ccx/tests/test_models.py | 10 | 7596 | """
tests for the models
"""
import ddt
import json
from datetime import datetime, timedelta
from nose.plugins.attrib import attr
from pytz import utc
from student.roles import CourseCcxCoachRole
from student.tests.factories import (
AdminFactory,
)
from xmodule.modulestore.tests.django_utils import (
ModuleStoreTestCase,
TEST_DATA_SPLIT_MODULESTORE
)
from xmodule.modulestore.tests.factories import (
CourseFactory,
check_mongo_calls
)
from .factories import (
CcxFactory,
)
from ..overrides import override_field_for_ccx
@ddt.ddt
@attr(shard=1)
class TestCCX(ModuleStoreTestCase):
"""Unit tests for the CustomCourseForEdX model
"""
MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
def setUp(self):
"""common setup for all tests"""
super(TestCCX, self).setUp()
self.course = CourseFactory.create()
self.coach = AdminFactory.create()
role = CourseCcxCoachRole(self.course.id)
role.add_users(self.coach)
self.ccx = CcxFactory(course_id=self.course.id, coach=self.coach)
def set_ccx_override(self, field, value):
"""Create a field override for the test CCX on <field> with <value>"""
override_field_for_ccx(self.ccx, self.course, field, value)
def test_ccx_course_is_correct_course(self):
"""verify that the course property of a ccx returns the right course"""
expected = self.course
actual = self.ccx.course
self.assertEqual(expected, actual)
def test_ccx_course_caching(self):
"""verify that caching the propery works to limit queries"""
with check_mongo_calls(3):
# these statements are used entirely to demonstrate the
# instance-level caching of these values on CCX objects. The
# check_mongo_calls context is the point here.
self.ccx.course # pylint: disable=pointless-statement
with check_mongo_calls(0):
self.ccx.course # pylint: disable=pointless-statement
def test_ccx_start_is_correct(self):
"""verify that the start datetime for a ccx is correctly retrieved
Note that after setting the start field override microseconds are
truncated, so we can't do a direct comparison between before and after.
For this reason we test the difference between and make sure it is less
than one second.
"""
expected = datetime.now(utc)
self.set_ccx_override('start', expected)
actual = self.ccx.start # pylint: disable=no-member
diff = expected - actual
self.assertLess(abs(diff.total_seconds()), 1)
def test_ccx_start_caching(self):
"""verify that caching the start property works to limit queries"""
now = datetime.now(utc)
self.set_ccx_override('start', now)
with check_mongo_calls(3):
# these statements are used entirely to demonstrate the
# instance-level caching of these values on CCX objects. The
# check_mongo_calls context is the point here.
self.ccx.start # pylint: disable=pointless-statement, no-member
with check_mongo_calls(0):
self.ccx.start # pylint: disable=pointless-statement, no-member
def test_ccx_due_without_override(self):
"""verify that due returns None when the field has not been set"""
actual = self.ccx.due # pylint: disable=no-member
self.assertIsNone(actual)
def test_ccx_due_is_correct(self):
"""verify that the due datetime for a ccx is correctly retrieved"""
expected = datetime.now(utc)
self.set_ccx_override('due', expected)
actual = self.ccx.due # pylint: disable=no-member
diff = expected - actual
self.assertLess(abs(diff.total_seconds()), 1)
def test_ccx_due_caching(self):
"""verify that caching the due property works to limit queries"""
expected = datetime.now(utc)
self.set_ccx_override('due', expected)
with check_mongo_calls(3):
# these statements are used entirely to demonstrate the
# instance-level caching of these values on CCX objects. The
# check_mongo_calls context is the point here.
self.ccx.due # pylint: disable=pointless-statement, no-member
with check_mongo_calls(0):
self.ccx.due # pylint: disable=pointless-statement, no-member
def test_ccx_has_started(self):
"""verify that a ccx marked as starting yesterday has started"""
now = datetime.now(utc)
delta = timedelta(1)
then = now - delta
self.set_ccx_override('start', then)
self.assertTrue(self.ccx.has_started()) # pylint: disable=no-member
def test_ccx_has_not_started(self):
"""verify that a ccx marked as starting tomorrow has not started"""
now = datetime.now(utc)
delta = timedelta(1)
then = now + delta
self.set_ccx_override('start', then)
self.assertFalse(self.ccx.has_started()) # pylint: disable=no-member
def test_ccx_has_ended(self):
"""verify that a ccx that has a due date in the past has ended"""
now = datetime.now(utc)
delta = timedelta(1)
then = now - delta
self.set_ccx_override('due', then)
self.assertTrue(self.ccx.has_ended()) # pylint: disable=no-member
def test_ccx_has_not_ended(self):
"""verify that a ccx that has a due date in the future has not eneded
"""
now = datetime.now(utc)
delta = timedelta(1)
then = now + delta
self.set_ccx_override('due', then)
self.assertFalse(self.ccx.has_ended()) # pylint: disable=no-member
def test_ccx_without_due_date_has_not_ended(self):
"""verify that a ccx without a due date has not ended"""
self.assertFalse(self.ccx.has_ended()) # pylint: disable=no-member
def test_ccx_max_student_enrollment_correct(self):
"""
Verify the override value for max_student_enrollments_allowed
"""
expected = 200
self.set_ccx_override('max_student_enrollments_allowed', expected)
actual = self.ccx.max_student_enrollments_allowed # pylint: disable=no-member
self.assertEqual(expected, actual)
def test_structure_json_default_empty(self):
"""
By default structure_json does not contain anything
"""
self.assertEqual(self.ccx.structure_json, None) # pylint: disable=no-member
self.assertEqual(self.ccx.structure, None) # pylint: disable=no-member
def test_structure_json(self):
"""
Test a json stored in the structure_json
"""
dummy_struct = [
"block-v1:Organization+CN101+CR-FALL15+type@chapter+block@Unit_4",
"block-v1:Organization+CN101+CR-FALL15+type@chapter+block@Unit_5",
"block-v1:Organization+CN101+CR-FALL15+type@chapter+block@Unit_11"
]
json_struct = json.dumps(dummy_struct)
ccx = CcxFactory(
course_id=self.course.id,
coach=self.coach,
structure_json=json_struct
)
self.assertEqual(ccx.structure_json, json_struct) # pylint: disable=no-member
self.assertEqual(ccx.structure, dummy_struct) # pylint: disable=no-member
def test_locator_property(self):
"""
Verify that the locator helper property returns a correct CCXLocator
"""
locator = self.ccx.locator # pylint: disable=no-member
self.assertEqual(self.ccx.id, long(locator.ccx))
| agpl-3.0 |
Edraak/edx-platform | lms/djangoapps/oauth2_handler/handlers.py | 31 | 8226 | """ Handlers for OpenID Connect provider. """
from django.conf import settings
from django.core.cache import cache
from courseware.access import has_access
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from openedx.core.djangoapps.user_api.models import UserPreference
from student.models import anonymous_id_for_user
from student.models import UserProfile
from lang_pref import LANGUAGE_KEY
from student.roles import GlobalStaff, CourseStaffRole, CourseInstructorRole
class OpenIDHandler(object):
""" Basic OpenID Connect scope handler. """
def scope_openid(self, _data):
""" Only override the sub (subject) claim. """
return ['sub']
def claim_sub(self, data):
"""
Return the value of the sub (subject) claim. The value should be
unique for each user.
"""
# Use the anonymous ID without any course as unique identifier.
# Note that this ID is derived using the value of the `SECRET_KEY`
# setting, this means that users will have different sub
# values for different deployments.
value = anonymous_id_for_user(data['user'], None)
return value
class PermissionsHandler(object):
""" Permissions scope handler """
def scope_permissions(self, _data):
return ['administrator']
def claim_administrator(self, data):
"""
Return boolean indicating user's administrator status.
For our purposes an administrator is any user with is_staff set to True.
"""
return data['user'].is_staff
class ProfileHandler(object):
""" Basic OpenID Connect `profile` scope handler with `locale` claim. """
def scope_profile(self, _data):
""" Add specialized claims. """
return ['name', 'locale']
def claim_name(self, data):
""" User displayable full name. """
user = data['user']
profile = UserProfile.objects.get(user=user)
return profile.name
def claim_locale(self, data):
"""
Return the locale for the user based on their preferences.
Falls back to the platform default (settings.LANGUAGE_CODE) if the user
has not set a language preference.
"""
# Calling UserPreference directly because it is not clear which user made the request.
language = UserPreference.get_value(data['user'], LANGUAGE_KEY)
# If the user has no language specified, return the default one.
if not language:
language = settings.LANGUAGE_CODE
return language
class CourseAccessHandler(object):
"""
Defines two new scopes: `course_instructor` and `course_staff`. Each one is
valid only if the user is instructor or staff of at least one course.
Each new scope has a corresponding claim: `instructor_courses` and
`staff_courses` that lists the course_ids for which the user has instructor
or staff privileges.
The claims support claim request values: if there is no claim request, the
value of the claim is the list all the courses for which the user has the
corresponding privileges. If a claim request is used, then the value of the
claim the list of courses from the requested values that have the
corresponding privileges.
For example, if the user is staff of course_a and course_b but not
course_c, the claim corresponding to the scope request:
scope = openid course_staff
has the value:
{staff_courses: [course_a, course_b] }
For the claim request:
claims = {userinfo: {staff_courses: {values=[course_b, course_d]}}}
the corresponding claim will have the value:
{staff_courses: [course_b] }.
This is useful to quickly determine if a user has the right privileges for a
given course.
For a description of the function naming and arguments, see:
`oauth2_provider/oidc/handlers.py`
"""
COURSE_CACHE_TIMEOUT = getattr(settings, 'OIDC_COURSE_HANDLER_CACHE_TIMEOUT', 60) # In seconds.
def __init__(self, *_args, **_kwargs):
self._course_cache = {}
def scope_course_instructor(self, data):
"""
Scope `course_instructor` valid only if the user is an instructor
of at least one course.
"""
# TODO: unfortunately there is not a faster and still correct way to
# check if a user is instructor of at least one course other than
# checking the access type against all known courses.
course_ids = self.find_courses(data['user'], CourseInstructorRole.ROLE)
return ['instructor_courses'] if course_ids else None
def scope_course_staff(self, data):
"""
Scope `course_staff` valid only if the user is a staff member of at
least one course.
"""
# TODO: see :method:CourseAccessHandler.scope_course_instructor
course_ids = self.find_courses(data['user'], CourseStaffRole.ROLE)
return ['staff_courses'] if course_ids else None
def claim_instructor_courses(self, data):
"""
Claim `instructor_courses` with list of course_ids for which the
user has instructor privileges.
"""
return self.find_courses(data['user'], CourseInstructorRole.ROLE, data.get('values'))
def claim_staff_courses(self, data):
"""
Claim `staff_courses` with list of course_ids for which the user
has staff privileges.
"""
return self.find_courses(data['user'], CourseStaffRole.ROLE, data.get('values'))
def find_courses(self, user, access_type, values=None):
"""
Find all courses for which the user has the specified access type. If
`values` is specified, check only the courses from `values`.
"""
# Check the instance cache and update if not present. The instance
# cache is useful since there are multiple scope and claims calls in the
# same request.
key = (user.id, access_type)
if key in self._course_cache:
course_ids = self._course_cache[key]
else:
course_ids = self._get_courses_with_access_type(user, access_type)
self._course_cache[key] = course_ids
# If values was specified, filter out other courses.
if values is not None:
course_ids = list(set(course_ids) & set(values))
return course_ids
# pylint: disable=missing-docstring
def _get_courses_with_access_type(self, user, access_type):
# Check the application cache and update if not present. The application
# cache is useful since there are calls to different endpoints in close
# succession, for example the id_token and user_info endpoints.
key = '-'.join([str(self.__class__), str(user.id), access_type])
course_ids = cache.get(key)
if not course_ids:
course_keys = CourseOverview.get_all_course_keys()
# Global staff have access to all courses. Filter courses for non-global staff.
if not GlobalStaff().has_user(user):
course_keys = [course_key for course_key in course_keys if has_access(user, access_type, course_key)]
course_ids = [unicode(course_key) for course_key in course_keys]
cache.set(key, course_ids, self.COURSE_CACHE_TIMEOUT)
return course_ids
class IDTokenHandler(OpenIDHandler, ProfileHandler, CourseAccessHandler, PermissionsHandler):
""" Configure the ID Token handler for the LMS. """
def claim_instructor_courses(self, data):
# Don't return list of courses unless they are requested as essential.
if data.get('essential'):
return super(IDTokenHandler, self).claim_instructor_courses(data)
else:
return None
def claim_staff_courses(self, data):
# Don't return list of courses unless they are requested as essential.
if data.get('essential'):
return super(IDTokenHandler, self).claim_staff_courses(data)
else:
return None
class UserInfoHandler(OpenIDHandler, ProfileHandler, CourseAccessHandler, PermissionsHandler):
""" Configure the UserInfo handler for the LMS. """
pass
| agpl-3.0 |
bukzor/sympy | doc/ext/docscrape_sphinx.py | 52 | 7983 | import re
import inspect
import textwrap
import pydoc
import sphinx
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
# NOTE: signature output is intentionally suppressed by the early return;
# the branch below is unreachable and kept only for reference.
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
## Lines that are commented out are used to make the
## autosummary:: table. Since SymPy does not use the
## autosummary:: functionality, it is easiest to just comment it
## out.
#autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
#if not self._obj or hasattr(self._obj, param):
# autosum += [" %s%s" % (prefix, param)]
#else:
others.append((param, param_type, desc))
#if autosum:
# out += ['.. autosummary::', ' :toctree:', '']
# out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.items():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Other Parameters',
'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for s in self._other_keys:
out += self._str_section(s)
out += self._str_member_list('Attributes')
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
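# Minimal usage sketch (assumes `target` carries a numpydoc-style docstring):
#   doc = get_doc_object(target, config={'use_plots': False})
#   rst = str(doc)  # reST ready for Sphinx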
| bsd-3-clause |
Mellthas/quodlibet | quodlibet/tests/test_util_picklehelper.py | 2 | 2029 | # Copyright 2016 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from tests import TestCase
from io import BytesIO
from quodlibet.util.picklehelper import pickle_load, pickle_loads, \
pickle_dumps, pickle_dump, PicklingError, UnpicklingError
class A(dict):
pass
class B(dict):
pass
class Tpickle_load(TestCase):
def test_pickle_load(self):
data = {b"foo": u"bar", u"quux": b"baz"}
for protocol in [0, 1, 2]:
assert pickle_loads(pickle_dumps(data, protocol)) == data
assert pickle_load(BytesIO(pickle_dumps(data, protocol))) == data
def test_invalid(self):
with self.assertRaises(UnpicklingError):
pickle_loads(b"")
with self.assertRaises(UnpicklingError):
pickle_load(BytesIO(b""))
def test_switch_class(self):
def lookup_func(base, module, name):
if name == "A":
return B
return base(module, name)
value = pickle_loads(pickle_dumps(A()), lookup_func)
assert isinstance(value, B)
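# The lookup_func hook above remaps class A to B during unpickling; any
# name it does not handle falls back to the default base(module, name).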
def test_pickle_dumps(self):
v = [u"foo", b"bar", 42]
for protocol in [0, 1, 2]:
assert pickle_loads(pickle_dumps(v, protocol)) == v
def test_pickle_dumps_fail(self):
class A(object):
def __getstate__(self):
raise Exception
with self.assertRaises(PicklingError):
pickle_dumps(A())
def test_pickle_dump(self):
f = BytesIO()
pickle_dump(42, f)
assert pickle_loads(f.getvalue()) == 42
def test_protocols(self):
pickle_dumps(42, 0)
pickle_dumps(42, 1)
pickle_dumps(42, 2)
with self.assertRaises(ValueError):
pickle_dumps(42, -1)
with self.assertRaises(ValueError):
pickle_dumps(42, 3)
| gpl-2.0 |
Jionglun/622Q1 | wsgi.py | 1 | 39841 | #@+leo-ver=5-thin
#@+node:2014fall.20141212095015.1775: * @file wsgi.py
# coding=utf-8
# The encoding declaration above only takes effect on the first or second line of the file
################# (1) Module import section
# Import the cherrypy module; to use cherrypy on the OpenShift platform it must be installed via setup.py
#@@language python
#@@tabwidth -4
#@+<<declarations>>
#@+node:2014fall.20141212095015.1776: ** <<declarations>> (wsgi)
import cherrypy
# Import Python's built-in os module; since os is built in, it needs no setup.py installation
import os
# Import the random module
import random
# Import the gear module
import gear
################# (2) Global variable section
# Determine the directory containing this program file (on Windows the path ends with a backslash)
_curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
# Set the data storage directories for cloud and local execution
if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
# the program is running in the cloud
download_root_dir = os.environ['OPENSHIFT_DATA_DIR']
data_dir = os.environ['OPENSHIFT_DATA_DIR']
else:
# the program is running locally
download_root_dir = _curdir + "/local_data/"
data_dir = _curdir + "/local_data/"
'''The following is local code demonstrating input() and a for loop; to run it on OpenShift it must use the CherryPy web framework and be converted to HTML output
# Data obtained with input() is of type string
toprint = input("What content should be printed?")
# To use the string from input() as an integer, convert it with int()
repeat_no = int(input("How many times should it repeat?"))
for i in range(repeat_no):
print(toprint)
'''
#@-<<declarations>>
#@+others
#@+node:2014fall.20141212095015.1777: ** class Hello
################# (3) Class definition section
# From here on the program uses the CherryPy web framework architecture
# The design of the Hello class follows; using object means Hello inherits all of object's features, including methods and attributes
class Hello(object):
# Startup configuration for the Hello class
_cp_config = {
'tools.encode.encoding': 'utf-8',
'tools.sessions.on' : True,
'tools.sessions.storage_type' : 'file',
#'tools.sessions.locking' : 'explicit',
# session 以檔案儲存, 而且位於 data_dir 下的 tmp 目錄
'tools.sessions.storage_path' : data_dir+'/tmp',
# session 有效時間設為 60 分鐘
'tools.sessions.timeout' : 60
}
#@+others
#@+node:2014fall.20141212095015.2004: *3* __init__
def __init__(self):
# create the required directories when the instance is initialized
if not os.path.isdir(data_dir+'/tmp'):
os.mkdir(data_dir+'/tmp')
if not os.path.isdir(data_dir+"/downloads"):
os.mkdir(data_dir+"/downloads")
if not os.path.isdir(data_dir+"/images"):
os.mkdir(data_dir+"/images")
#@+node:2014fall.20141212095015.1778: *3* index_orig
# cherrypy.expose (prefixed with @) is a decorator indicating that the following member method can be invoked directly via a URL
@cherrypy.expose
# index is the default member method of a CherryPy class; when the URL names no method, the system runs index first
# methods taking self are member methods; Python passes object state between member methods through this self
def index_orig(self, toprint="Hello World!"):
return toprint
#@+node:2014fall.20141212095015.1779: *3* hello
@cherrypy.expose
def hello(self, toprint="Hello World!"):
return toprint
#@+node:2014fall.20141215194146.1791: *3* index
@cherrypy.expose
def index(self):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
</head>
<body>
<h1>Final Exam Problem 1</h1>
<h2>40223237</h2>
<h2>黃炯綸</h2>
<h3>2015/6/22</h3>
<a href="drawspur">drawgear2</a> (draw two gears)<br />
</body>
</html>
'''
return outstring
#@+node:2015.20150330144929.1713: *3* twoDgear
@cherrypy.expose
# N is the number of teeth, M the module, P the pressure angle
def twoDgear(self, N=20, M=5, P=15):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- load brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- start brython() -->
<body onload="brython()">
<form method=POST action=do2Dgear>
Teeth:<input type=text name=N><br />
Module:<input type=text name=M><br />
Pressure angle:<input type=text name=P><br />
<input type=submit value=send>
</form>
</body>
</html>
'''
return outstring
#@+node:2015.20150331094055.1733: *3* threeDgear
@cherrypy.expose
# N is the number of teeth, M the module, P the pressure angle
def threeDgear(self, N=20, M=5, P=15):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- load brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- start brython() -->
<body onload="brython()">
<form method=POST action=do3Dgear>
Teeth:<input type=text name=N><br />
Module:<input type=text name=M><br />
Pressure angle:<input type=text name=P><br />
<input type=submit value=send>
</form>
</body>
</html>
'''
return outstring
#@+node:2015.20150330144929.1762: *3* do2Dgear
@cherrypy.expose
# N is the number of teeth, M the module, P the pressure angle
def do2Dgear(self, N=20, M=5, P=15):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- load brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- start brython() -->
<body onload="brython()">
<!-- the canvas drawing program follows -->
<script type="text/python">
# import document from browser
from browser import document
import math
# the drawing canvas is the canvas element named plotarea
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
# draw a straight line in red
ctx.beginPath()
ctx.lineWidth = 3
'''
outstring += '''
ctx.moveTo('''+str(N)+","+str(M)+")"
outstring += '''
ctx.lineTo(0, 500)
ctx.strokeStyle = "red"
ctx.stroke()
# draw another straight line in blue
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(500, 0)
ctx.strokeStyle = "blue"
ctx.stroke()
# draw another straight line in green
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(500, 500)
ctx.strokeStyle = "green"
ctx.stroke()
# draw a circle in black
ctx.beginPath()
ctx.lineWidth = 3
ctx.strokeStyle = "black"
ctx.arc(250,250,50,0,2*math.pi)
ctx.stroke()
</script>
<canvas id="plotarea" width="800" height="600"></canvas>
</body>
</html>
'''
return outstring
#@+node:2015.20150331094055.1735: *3* do3Dgear
@cherrypy.expose
# N is the number of teeth, M the module, P the pressure angle
def do3Dgear(self, N=20, M=5, P=15):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- load brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script type="text/javascript" src="/static/weblink/pfcUtils.js"></script>
<script type="text/javascript" src="/static/weblink/wl_header.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- start brython() -->
<body onload="brython()">
<!-- the canvas drawing program follows -->
<script type="text/python">
# import document and window from browser
from browser import document, window
# import JSConstructor from javascript
from javascript import JSConstructor
import math
cango = JSConstructor(window.Cango2D)
# pfcIsWindows() and pfcGetProESession() are provided by the weblink scripts loaded above
if not window.pfcIsWindows():
window.netscape.security.PrivilegeManager.enablePrivilege("UniversalXPConnect")
session = window.pfcGetProESession()
# set config option
session.SetConfigOption("comp_placement_assumptions","no")
# create the placement matrix used to position components
identityMatrix = window.pfcCreate("pfcMatrix3D")
for x in range(4):
for y in range(4):
if x == y:
identityMatrix.Set(x, y, 1.0)
else:
identityMatrix.Set(x, y, 0.0)
transf = window.pfcCreate("pfcTransform3D").Create(identityMatrix)
# get the current working directory
currentDir = session.getCurrentDirectory()
# use the currently opened file as the model
model = session.CurrentModel
# check that a model exists and that it is an assembly
if model is None or model.Type != window.pfcCreate("pfcModelType").MDL_ASSEMBLY:
raise ValueError("Current model is not an assembly.")
assembly = model
# ----------------------------------------------- link0 -------------------------------------------------------------
# File path; placing the part files under the working directory is recommended
descr = window.pfcCreate("pfcModelDescriptor").CreateFromFileName("v:/home/fourbar/link0.prt")
# if link0.prt is already in the session, use it directly
componentModel = session.GetModelFromDescr(descr)
# if link0.prt is not in the session, load it into the session from the working directory
componentModel = session.RetrieveModel(descr)
# once link0.prt is in the session, place it into the assembly
if componentModel is not None:
# note that this asmcomp is the object on which the constraints are set
# asmcomp is a feature object; the part is placed into the assembly using the transf coordinate transform
asmcomp = assembly.AssembleComponent(componentModel, transf)
# create the constraints variable
constrs = window.pfcCreate("pfcComponentConstraints")
# the three datum planes of the assembly; note the default names are ASM_FRONT etc., not ASM_D_FRONT as in Pro/E WF
asmDatums = ["ASM_FRONT", "ASM_TOP", "ASM_RIGHT"]
# the three datum planes of the part, named as in Pro/E WF
compDatums = ["FRONT", "TOP", "RIGHT"]
# create the ids variable; intseq is a sequence of integers whose elements are accessed by integer index starting at 0
ids = window.pfcCreate("intseq")
# create the path variable
path = window.pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids)
# variables used for interactive selection
MpfcSelect = window.pfcCreate("MpfcSelect")
# loop to constrain the three datum planes of the assembly and of the part
for i in range(3):
# assembly reference plane
asmItem = assembly.GetItemByName(window.pfcCreate("pfcModelItemType").ITEM_SURFACE, asmDatums[i])
# if there is no matching assembly reference plane, raise the interactive selection flag
if asmItem is None:
interactFlag = True
continue
# part reference plane
compItem = componentModel.GetItemByName(window.pfcCreate("pfcModelItemType").ITEM_SURFACE, compDatums[i])
# if there is no matching part reference plane, raise the interactive selection flag
if compItem is None:
interactFlag = True
continue
asmSel = MpfcSelect.CreateModelItemSelection(asmItem, path)
compSel = MpfcSelect.CreateModelItemSelection(compItem, None)
constr = window.pfcCreate("pfcComponentConstraint").Create(window.pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN)
constr.AssemblyReference = asmSel
constr.ComponentReference = compSel
constr.Attributes = window.pfcCreate("pfcConstraintAttributes").Create(False, False)
# append the selection data to the constraints variable
constrs.Append(constr)
# apply the assembly constraints
asmcomp.SetConstraints(constrs, None)
# -------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------- link1 -------------------------------------------------------------
descr = window.pfcCreate("pfcModelDescriptor").CreateFromFileName("v:/home/fourbar/link1.prt")
componentModel = session.GetModelFromDescr(descr)
componentModel = session.RetrieveModel(descr)
if componentModel is not None:
asmcomp = assembly.AssembleComponent(componentModel, transf)
components = assembly.ListFeaturesByType(True, window.pfcCreate("pfcFeatureType").FEATTYPE_COMPONENT)
featID = components.Item(0).Id
ids.Append(featID)
subPath = window.pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids)
subassembly = subPath.Leaf
asmDatums = ["A_1", "TOP", "ASM_TOP"]
compDatums = ["A_1", "TOP", "TOP"]
relation = [window.pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN, window.pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_MATE]
relationItem = [window.pfcCreate("pfcModelItemType").ITEM_AXIS, window.pfcCreate("pfcModelItemType").ITEM_SURFACE]
constrs = window.pfcCreate("pfcComponentConstraints")
for i in range(2):
asmItem = subassembly.GetItemByName(relationItem[i], asmDatums[i])
if asmItem is None:
interactFlag = True
continue
compItem = componentModel.GetItemByName(relationItem[i], compDatums[i])
if compItem is None:
interactFlag = True
continue
MpfcSelect = window.pfcCreate("MpfcSelect")
asmSel = MpfcSelect.CreateModelItemSelection(asmItem, subPath)
compSel = MpfcSelect.CreateModelItemSelection(compItem, None)
constr = window.pfcCreate("pfcComponentConstraint").Create(relation[i])
constr.AssemblyReference = asmSel
constr.ComponentReference = compSel
constr.Attributes = window.pfcCreate("pfcConstraintAttributes").Create(True, False)
constrs.Append(constr)
asmcomp.SetConstraints(constrs, None)
# -------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------- link2 -------------------------------------------------------------
descr = window.pfcCreate("pfcModelDescriptor").CreateFromFileName("v:/home/fourbar/link2.prt")
componentModel = session.GetModelFromDescr(descr)
componentModel = session.RetrieveModel(descr)
if componentModel is not None:
asmcomp = assembly.AssembleComponent(componentModel, transf)
ids = window.pfcCreate("intseq")
ids.Append(featID + 1)
subPath = window.pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids)
subassembly = subPath.Leaf
asmDatums = ["A_2", "TOP", "ASM_TOP"]
compDatums = ["A_1", "TOP", "TOP"]
relation = [window.pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN, window.pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_MATE]
relationItem = [window.pfcCreate("pfcModelItemType").ITEM_AXIS, window.pfcCreate("pfcModelItemType").ITEM_SURFACE]
constrs = window.pfcCreate("pfcComponentConstraints")
for i in range(2):
asmItem = subassembly.GetItemByName(relationItem[i], asmDatums[i])
if asmItem is None:
interactFlag = True
continue
compItem = componentModel.GetItemByName(relationItem[i], compDatums[i])
if compItem is None:
interactFlag = True
continue
MpfcSelect = window.pfcCreate("MpfcSelect")
asmSel = MpfcSelect.CreateModelItemSelection(asmItem, subPath)
compSel = MpfcSelect.CreateModelItemSelection(compItem, None)
constr = window.pfcCreate("pfcComponentConstraint").Create(relation[i])
constr.AssemblyReference = asmSel
constr.ComponentReference = compSel
constr.Attributes = window.pfcCreate("pfcConstraintAttributes").Create(True, False)
constrs.Append(constr)
asmcomp.SetConstraints(constrs, None)
# -------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------- link3 -------------------------------------------------------------
descr = window.pfcCreate("pfcModelDescriptor").CreateFromFileName("v:/home/fourbar/link3.prt")
componentModel = session.GetModelFromDescr(descr)
componentModel = session.RetrieveModel(descr)
if componentModel is not None:
asmcomp = assembly.AssembleComponent(componentModel, transf)
relation = [window.pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN, window.pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_MATE]
relationItem = [window.pfcCreate("pfcModelItemType").ITEM_AXIS, window.pfcCreate("pfcModelItemType").ITEM_SURFACE]
constrs = window.pfcCreate("pfcComponentConstraints")
ids = window.pfcCreate("intseq")
ids.Append(featID + 2)
subPath = window.pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids)
subassembly = subPath.Leaf
asmDatums = ["A_2"]
compDatums = ["A_1"]
for i in range(1):
asmItem = subassembly.GetItemByName(relationItem[i], asmDatums[i])
if asmItem is None:
interactFlag = True
continue
compItem = componentModel.GetItemByName(relationItem[i], compDatums[i])
if compItem is None:
interactFlag = True
continue
MpfcSelect = window.pfcCreate("MpfcSelect")
asmSel = MpfcSelect.CreateModelItemSelection(asmItem, subPath)
compSel = MpfcSelect.CreateModelItemSelection(compItem, None)
constr = window.pfcCreate("pfcComponentConstraint").Create(relation[i])
constr.AssemblyReference = asmSel
constr.ComponentReference = compSel
constr.Attributes = window.pfcCreate("pfcConstraintAttributes").Create(True, False)
constrs.Append(constr)
asmcomp.SetConstraints(constrs, None)
ids = window.pfcCreate("intseq")
ids.Append(featID)
subPath = window.pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids)
subassembly = subPath.Leaf;
var asmDatums = new Array ("A_2", "TOP");
var compDatums = new Array ("A_2", "BOTTON");
for (var i = 0; i < 2; i++)
{
var asmItem = subassembly.GetItemByName (relationItem[i], asmDatums [i]);
if (asmItem == void null)
{
interactFlag = true;
continue;
}
var compItem = componentModel.GetItemByName (relationItem[i], compDatums [i]);
if (compItem == void null)
{
interactFlag = true;
continue;
}
var MpfcSelect = pfcCreate ("MpfcSelect");
var asmSel = MpfcSelect.CreateModelItemSelection (asmItem, subPath);
var compSel = MpfcSelect.CreateModelItemSelection (compItem, void null);
var constr = pfcCreate ("pfcComponentConstraint").Create (relation[i]);
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
constr.Attributes = pfcCreate ("pfcConstraintAttributes").Create (true, true);
constrs.Append (constr);
}
asmcomp.SetConstraints (constrs, void null);
/**-------------------------------------------------------------------------------------------------------------------**/
var session = pfcGetProESession ();
var solid = session.CurrentModel;
properties = solid.GetMassProperty(void null);
var COG = properties.GravityCenter;
document.write("MassProperty:<br />");
document.write("Mass:"+(properties.Mass.toFixed(2))+" pound<br />");
document.write("Average Density:"+(properties.Density.toFixed(2))+" pound/inch^3<br />");
document.write("Surface area:"+(properties.SurfaceArea.toFixed(2))+" inch^2<br />");
document.write("Volume:"+(properties.Volume.toFixed(2))+" inch^3<br />");
document.write("COG_X:"+COG.Item(0).toFixed(2)+"<br />");
document.write("COG_Y:"+COG.Item(1).toFixed(2)+"<br />");
document.write("COG_Z:"+COG.Item(2).toFixed(2)+"<br />");
try
{
document.write("Current Directory:<br />"+currentDir);
}
catch (err)
{
alert ("Exception occurred: "+pfcGetExceptionType (err));
}
assembly.Regenerate (void null);
session.GetModelWindow (assembly).Repaint();
</script>
<canvas id="plotarea" width="800" height="600"></canvas>
</body>
</html>
'''
return outstring
#@+node:2015.20150330144929.1765: *3* mygeartest
@cherrypy.expose
# N is the number of teeth, M the module, P the pressure angle
def mygeartest(self, N=20, M=5, P=15):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- load brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- start brython() -->
<body onload="brython()">
<!-- canvas drawing program below -->
<script type="text/python">
# import document from the browser
from browser import document
from math import *
# prepare to draw on the canvas with id="plotarea"
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
def create_line(x1, y1, x2, y2, width=3, fill="red"):
ctx.beginPath()
ctx.lineWidth = width
ctx.moveTo(x1, y1)
ctx.lineTo(x2, y2)
ctx.strokeStyle = fill
ctx.stroke()
# after importing the math functions, pi is available as the circle constant
# deg is the degree-to-radian conversion factor
deg = pi/180.
#
# below: the spur gear drawing routine and the main canvas drawing (originally written for a tkinter canvas)
#
# define a function that draws a spur gear
# midx is the x coordinate of the gear centre
# midy is the y coordinate of the gear centre
# rp is the pitch-circle radius, n the number of teeth
def 齒輪(midx, midy, rp, n, 顏色):
# make the angle conversion factor a global variable
global deg
# the gear involute is drawn as 15 straight segments
imax = 15
# draw a line on the given canvas from the gear centre to the top of the pitch circle on the y axis
create_line(midx, midy, midx, midy-rp)
# draw the rp circle (circle-drawing helper not yet defined)
#create_oval(midx-rp, midy-rp, midx+rp, midy+rp, width=2)
# a is the module (the metric tooth size): the pitch diameter divided by the tooth count
# the module also equals the addendum height
a=2*rp/n
# d is the dedendum, 1.157 or 1.25 times the module; 1.25 is used here
d=2.5*rp/n
# ra is the outside (addendum-circle) radius of the gear
ra=rp+a
print("ra:", ra)
# draw the ra circle (circle-drawing helper not yet defined)
#create_oval(midx-ra, midy-ra, midx+ra, midy+ra, width=1)
# rb is the base-circle radius of the gear
# the base circle is the reference circle from which the involute flank is generated
rb=rp*cos(20*deg)
print("rp:", rp)
print("rb:", rb)
# draw the rb (base) circle (circle-drawing helper not yet defined)
#create_oval(midx-rb, midy-rb, midx+rb, midy+rb, width=1)
# rd is the root-circle radius
rd=rp-d
# (case where rd is larger than rb)
print("rd:", rd)
# draw the rd (root) circle (circle-drawing helper not yet defined)
#create_oval(midx-rd, midy-rd, midx+rd, midy+rd, width=1)
# dr is the radial increment per step when the span from base circle to addendum circle is split into imax segments
# the involute is drawn by splitting the arc into imax segments
dr=(ra-rb)/imax
# tan(20*deg)-20*deg is the involute function inv(20 deg)
sigma=pi/(2*n)+tan(20*deg)-20*deg
for j in range(n):
ang=-2.*j*pi/n+sigma
ang2=2.*j*pi/n+sigma
lxd=midx+rd*sin(ang2-2.*pi/n)
lyd=midy-rd*cos(ang2-2.*pi/n)
#for(i=0;i<=imax;i++):
for i in range(imax+1):
r=rb+i*dr
theta=sqrt((r*r)/(rb*rb)-1.)
alpha=theta-atan(theta)
xpt=r*sin(alpha-ang)
ypt=r*cos(alpha-ang)
xd=rd*sin(-ang)
yd=rd*cos(-ang)
# at i=0, start the polyline from the point on the root circle
if(i==0):
last_x = midx+xd
last_y = midy-yd
# starting from the left root circle: except for the first point (xd,yd) on the root circle, the (xpt,ypt) are the sampled involute points
create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=顏色)
# the last point lies on the addendum circle
if(i==imax):
lfx=midx+xpt
lfy=midy-ypt
last_x = midx+xpt
last_y = midy-ypt
# the line from last end of dedendum point to the recent
# end of dedendum point
# lxd is the x coordinate of the left point on the root circle, lyd its y coordinate
# the next line approximates the root-circle arc with a straight segment
create_line((lxd),(lyd),(midx+xd),(midy-yd),fill=顏色)
#for(i=0;i<=imax;i++):
for i in range(imax+1):
r=rb+i*dr
theta=sqrt((r*r)/(rb*rb)-1.)
alpha=theta-atan(theta)
xpt=r*sin(ang2-alpha)
ypt=r*cos(ang2-alpha)
xd=rd*sin(ang2)
yd=rd*cos(ang2)
# at i=0, start the polyline from the point on the root circle
if(i==0):
last_x = midx+xd
last_y = midy-yd
# starting from the right root circle: except for the first point (xd,yd) on the root circle, the (xpt,ypt) are the sampled involute points
create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=顏色)
# the last point lies on the addendum circle
if(i==imax):
rfx=midx+xpt
rfy=midy-ypt
last_x = midx+xpt
last_y = midy-ypt
# lfx is the x coordinate of the left point on the addendum circle, lfy its y coordinate
# the next line approximates the addendum-circle arc with a straight segment
create_line(lfx,lfy,rfx,rfy,fill=顏色)
齒輪(400,400,300,41,"blue")
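# Worked example (added for illustration; the values follow from the call above):
# with rp = 300 and n = 41 the module is a = 2*300/41 ≈ 14.63, so
# ra ≈ 314.63, rb = 300*cos(20°) ≈ 281.91 and rd = 300 - 2.5*300/41 ≈ 281.71,
# matching the ra/rp/rb/rd values printed by the function.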
</script>
<canvas id="plotarea" width="800" height="800"></canvas>
</body>
</html>
'''
return outstring
#@+node:2015.20150621222226.1: *3* drawspur
@cherrypy.expose
# N1 is the upper gear tooth count, N2 the lower gear tooth count, M the module, P the pressure angle
def drawspur(self,N1=15,N2=24, M=4, P=20,midx=400):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
</head>
<body>
<form method=POST action=mygeartest2>
Upper gear tooth count:<input type=text name=N1 value='''+str(N1)+'''><br />
Lower gear tooth count:<input type=text name=N2 value='''+str(N2)+'''><br />
<input type=submit value="Draw the spur gear profiles">
</form>
<h3>Tooth counts must be integers between 15 and 80</h3>
<br /><a href="index">Back</a><br />
<!-- load brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script>
window.onload=function(){
brython();
}
</script>
</body>
</html>
'''
return outstring
#@+node:amd.20150415215023.1: *3* mygeartest2
@cherrypy.expose
# N1 and N2 are the tooth counts, M the module, P the pressure angle
def mygeartest2(self, N1=15, N2=24, M=4, P=15):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<br /><a href="drawspur">返回</a><br />
<!-- load brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- start brython() -->
<body onload="brython()">
<!-- canvas drawing program below -->
<script type="text/python">
# import document from the browser
from browser import document
from math import *
# note: this imports the spur.py module located under the Lib/site-packages directory
import spur
# prepare to draw on the canvas with id="plotarea"
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
# the drawing below uses spur.py; the collaborative-design computation that follows must perform the design calculation and drawing according to the user's requirements
# the work of building part-drawing modules similar to spur.py is assigned to the other team members
# midx, midy are the gear centre coordinates, rp the pitch radius, n the tooth count, pa the pressure angle, color the line colour
# Gear(midx, midy, rp, n=20, pa=20, color="black"):
# the module sets the tooth size; meshing gears must share the same module and pressure angle
# the pressure angle pa is in degrees
pa = 10
# m is the module
m = 10
# tooth count of gear 1
n_g1 = '''+str(N1)+'''
# tooth count of gear 2
n_g2 = '''+str(N2)+'''
# compute the pitch radii of the two gears
rp_g1 = m*n_g1/2
rp_g2 = m*n_g2/2
# rotate gear 1 about its own centre (the code below applies a rotation of pi)
# use ctx.save() and ctx.restore() so each gear is rotated and drawn in its own local coordinates
ctx.save()
# translate to the centre of the first gear (rotation pivot)
ctx.translate(400,400)
# rotate to engage
ctx.rotate(pi)
# put it back
ctx.translate(-400,-400)
spur.Spur(ctx).Gear(400,400,rp_g1,n_g1, pa, "blue")
ctx.restore()
# offset gear 2 by half a tooth pitch (pi/n_g2) so that its teeth mesh with gear 1
ctx.save()
# translate to the origin of second gear
ctx.translate(400,400+rp_g1+rp_g2)
# rotate to engage
ctx.rotate(-pi/n_g2)
# put it back
ctx.translate(-400,-(400+rp_g1+rp_g2))
spur.Spur(ctx).Gear(400,400+rp_g1+rp_g2,rp_g2,n_g2, pa, "black")
ctx.restore()
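# Worked check (illustrative, using the defaults N1=15, N2=24, m=10):
# rp_g1 = 10*15/2 = 75 and rp_g2 = 10*24/2 = 120, so the second gear centre
# sits at y = 400 + 75 + 120 = 595; the engagement offset pi/n_g2 = pi/24 rad
# is 7.5 degrees, i.e. half of the 360/24 = 15 degree tooth pitch.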
# following the meshing-angle computation for the spur gears above, the rotation of each subsequent driven gear follows in the same way, completing the six-gear meshing drawing
</script>
<canvas id="plotarea" width="1500" height="1500"></canvas>
</body>
</html>
'''
return outstring
#@+node:2015.20150331094055.1737: *3* my3Dgeartest
@cherrypy.expose
# N is the number of teeth, M the module, P the pressure angle
def my3Dgeartest(self, N=20, M=5, P=15):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- load brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- start brython() -->
<body onload="brython()">
<!-- canvas drawing program below -->
<script type="text/python">
# import document from the browser
from browser import document
from math import *
# prepare to draw on the canvas with id="plotarea"
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
def create_line(x1, y1, x2, y2, width=3, fill="red"):
ctx.beginPath()
ctx.lineWidth = width
ctx.moveTo(x1, y1)
ctx.lineTo(x2, y2)
ctx.strokeStyle = fill
ctx.stroke()
# after importing the math functions, pi is available as the circle constant
# deg is the degree-to-radian conversion factor
deg = pi/180.
#
# below: the spur gear drawing routine and the main canvas drawing (originally written for a tkinter canvas)
#
# define a function that draws a spur gear
# midx is the x coordinate of the gear centre
# midy is the y coordinate of the gear centre
# rp is the pitch-circle radius, n the number of teeth
def gear(midx, midy, rp, n, 顏色):
# make the angle conversion factor a global variable
global deg
# the gear involute is drawn as 15 straight segments
imax = 15
# draw a line on the given canvas from the gear centre to the top of the pitch circle on the y axis
create_line(midx, midy, midx, midy-rp)
# draw the rp circle (circle-drawing helper not yet defined)
#create_oval(midx-rp, midy-rp, midx+rp, midy+rp, width=2)
# a is the module (the metric tooth size): the pitch diameter divided by the tooth count
# the module also equals the addendum height
a=2*rp/n
# d is the dedendum, 1.157 or 1.25 times the module; 1.25 is used here
d=2.5*rp/n
# ra is the outside (addendum-circle) radius of the gear
ra=rp+a
print("ra:", ra)
# draw the ra circle (circle-drawing helper not yet defined)
#create_oval(midx-ra, midy-ra, midx+ra, midy+ra, width=1)
# rb is the base-circle radius of the gear
# the base circle is the reference circle from which the involute flank is generated
rb=rp*cos(20*deg)
print("rp:", rp)
print("rb:", rb)
# draw the rb (base) circle (circle-drawing helper not yet defined)
#create_oval(midx-rb, midy-rb, midx+rb, midy+rb, width=1)
# rd is the root-circle radius
rd=rp-d
# (case where rd is larger than rb)
print("rd:", rd)
# draw the rd (root) circle (circle-drawing helper not yet defined)
#create_oval(midx-rd, midy-rd, midx+rd, midy+rd, width=1)
# dr is the radial increment per step when the span from base circle to addendum circle is split into imax segments
# the involute is drawn by splitting the arc into imax segments
dr=(ra-rb)/imax
# tan(20*deg)-20*deg is the involute function inv(20 deg)
sigma=pi/(2*n)+tan(20*deg)-20*deg
for j in range(n):
ang=-2.*j*pi/n+sigma
ang2=2.*j*pi/n+sigma
lxd=midx+rd*sin(ang2-2.*pi/n)
lyd=midy-rd*cos(ang2-2.*pi/n)
#for(i=0;i<=imax;i++):
for i in range(imax+1):
r=rb+i*dr
theta=sqrt((r*r)/(rb*rb)-1.)
alpha=theta-atan(theta)
xpt=r*sin(alpha-ang)
ypt=r*cos(alpha-ang)
xd=rd*sin(-ang)
yd=rd*cos(-ang)
# at i=0, start the polyline from the point on the root circle
if(i==0):
last_x = midx+xd
last_y = midy-yd
# starting from the left root circle: except for the first point (xd,yd) on the root circle, the (xpt,ypt) are the sampled involute points
create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=顏色)
# the last point lies on the addendum circle
if(i==imax):
lfx=midx+xpt
lfy=midy-ypt
last_x = midx+xpt
last_y = midy-ypt
# the line from last end of dedendum point to the recent
# end of dedendum point
# lxd is the x coordinate of the left point on the root circle, lyd its y coordinate
# the next line approximates the root-circle arc with a straight segment
create_line((lxd),(lyd),(midx+xd),(midy-yd),fill=顏色)
#for(i=0;i<=imax;i++):
for i in range(imax+1):
r=rb+i*dr
theta=sqrt((r*r)/(rb*rb)-1.)
alpha=theta-atan(theta)
xpt=r*sin(ang2-alpha)
ypt=r*cos(ang2-alpha)
xd=rd*sin(ang2)
yd=rd*cos(ang2)
# at i=0, start the polyline from the point on the root circle
if(i==0):
last_x = midx+xd
last_y = midy-yd
# starting from the right root circle: except for the first point (xd,yd) on the root circle, the (xpt,ypt) are the sampled involute points
create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=顏色)
# the last point lies on the addendum circle
if(i==imax):
rfx=midx+xpt
rfy=midy-ypt
last_x = midx+xpt
last_y = midy-ypt
# lfx is the x coordinate of the left point on the addendum circle, lfy its y coordinate
# the next line approximates the addendum-circle arc with a straight segment
create_line(lfx,lfy,rfx,rfy,fill=顏色)
gear(400,400,300,41,"blue")
</script>
<canvas id="plotarea" width="800" height="800"></canvas>
</body>
</html>
'''
return outstring
#@+node:2014fall.20141215194146.1793: *3* doCheck
@cherrypy.expose
def doCheck(self, guess=None):
# if the user invokes doCheck directly, redirect back to the root handler
if guess is None:
raise cherrypy.HTTPRedirect("/")
# fetch the stored answer from the session; handle the case where doCheck is invoked directly and no session value is available
try:
theanswer = int(cherrypy.session.get('answer'))
except:
raise cherrypy.HTTPRedirect("/")
# the guess value received from the form is a string
try:
theguess = int(guess)
except:
return "error " + self.guessform()
# increment the attempt counter on every doCheck call
cherrypy.session['count'] += 1
# compare the answer against the guess
if theanswer < theguess:
return "big " + self.guessform()
elif theanswer > theguess:
return "small " + self.guessform()
else:
# correct guess: read the accumulated guess count from the session
thecount = cherrypy.session.get('count')
return "exact: <a href=''>再猜</a>"
#@+node:2014fall.20141215194146.1789: *3* guessform
def guessform(self):
# render the HTML form for the user's input
outstring = str(cherrypy.session.get('answer')) + "/" + str(cherrypy.session.get('count')) + '''<form method=POST action=doCheck>
Enter your integer guess:<input type=text name=guess><br />
<input type=submit value=send>
</form>'''
return outstring
#@-others
#@-others
################# (4) program startup section
# set up static directories/files relative to the program's directory
application_conf = {'/static':{
'tools.staticdir.on': True,
# a "static" directory must be created manually under the run directory
'tools.staticdir.dir': _curdir+"/static"},
'/downloads':{
'tools.staticdir.on': True,
'tools.staticdir.dir': data_dir+"/downloads"},
'/images':{
'tools.staticdir.on': True,
'tools.staticdir.dir': data_dir+"/images"}
}
root = Hello()
root.gear = gear.Gear()
cherrypy.server.socket_port = 8087
cherrypy.server.socket_host = '127.0.0.1'
if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
# running on OpenShift
application = cherrypy.Application(root, config=application_conf)
else:
# running locally
cherrypy.quickstart(root, config=application_conf)
#@-leo
| gpl-3.0 |
eyalfa/spark | python/pyspark/streaming/util.py | 48 | 5588 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from datetime import datetime
import traceback
import sys
from py4j.java_gateway import is_instance_of
from pyspark import SparkContext, RDD
class TransformFunction(object):
"""
This class wraps a function RDD[X] -> RDD[Y] that was passed to
DStream.transform(), allowing it to be called from Java via Py4J's
callback server.
Java calls this function with a sequence of JavaRDDs and this function
returns a single JavaRDD pointer back to Java.
"""
_emptyRDD = None
def __init__(self, ctx, func, *deserializers):
self.ctx = ctx
self.func = func
self.deserializers = deserializers
self.rdd_wrap_func = lambda jrdd, ctx, ser: RDD(jrdd, ctx, ser)
self.failure = None
def rdd_wrapper(self, func):
self.rdd_wrap_func = func
return self
def call(self, milliseconds, jrdds):
# Clear the failure
self.failure = None
try:
if self.ctx is None:
self.ctx = SparkContext._active_spark_context
if not self.ctx or not self.ctx._jsc:
# stopped
return
# extend deserializers with the first one
sers = self.deserializers
if len(sers) < len(jrdds):
sers += (sers[0],) * (len(jrdds) - len(sers))
rdds = [self.rdd_wrap_func(jrdd, self.ctx, ser) if jrdd else None
for jrdd, ser in zip(jrdds, sers)]
t = datetime.fromtimestamp(milliseconds / 1000.0)
r = self.func(t, *rdds)
if r:
# Here, we work around to ensure `_jrdd` is `JavaRDD` by wrapping it by `map`.
# org.apache.spark.streaming.api.python.PythonTransformFunction requires to return
# `JavaRDD`; however, this could be `JavaPairRDD` by some APIs, for example, `zip`.
# See SPARK-17756.
if is_instance_of(self.ctx._gateway, r._jrdd, "org.apache.spark.api.java.JavaRDD"):
return r._jrdd
else:
return r.map(lambda x: x)._jrdd
except:
self.failure = traceback.format_exc()
def getLastFailure(self):
return self.failure
def __repr__(self):
return "TransformFunction(%s)" % self.func
class Java:
implements = ['org.apache.spark.streaming.api.python.PythonTransformFunction']
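# Illustrative sketch (not part of the original module): Java drives the wrapper
# roughly as follows, where `sc`, `ser` and `jrdd_in` are assumed placeholders.
#   tfunc = TransformFunction(sc, lambda t, rdd: rdd.filter(lambda x: x > 0), ser)
#   jrdd_out = tfunc.call(milliseconds, [jrdd_in])  # returns a JavaRDD pointer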
class TransformFunctionSerializer(object):
"""
This class implements a serializer for PythonTransformFunction Java
objects.
This is necessary because the Java PythonTransformFunction objects are
actually Py4J references to Python objects and thus are not directly
serializable. When Java needs to serialize a PythonTransformFunction,
it uses this class to invoke Python, which returns the serialized function
as a byte array.
"""
def __init__(self, ctx, serializer, gateway=None):
self.ctx = ctx
self.serializer = serializer
self.gateway = gateway or self.ctx._gateway
self.gateway.jvm.PythonDStream.registerSerializer(self)
self.failure = None
def dumps(self, id):
# Clear the failure
self.failure = None
try:
func = self.gateway.gateway_property.pool[id]
return bytearray(self.serializer.dumps((
func.func, func.rdd_wrap_func, func.deserializers)))
except:
self.failure = traceback.format_exc()
def loads(self, data):
# Clear the failure
self.failure = None
try:
f, wrap_func, deserializers = self.serializer.loads(bytes(data))
return TransformFunction(self.ctx, f, *deserializers).rdd_wrapper(wrap_func)
except:
self.failure = traceback.format_exc()
def getLastFailure(self):
return self.failure
def __repr__(self):
return "TransformFunctionSerializer(%s)" % self.serializer
class Java:
implements = ['org.apache.spark.streaming.api.python.PythonTransformFunctionSerializer']
def rddToFileName(prefix, suffix, timestamp):
"""
Return string prefix-time(.suffix)
>>> rddToFileName("spark", None, 12345678910)
'spark-12345678910'
>>> rddToFileName("spark", "tmp", 12345678910)
'spark-12345678910.tmp'
"""
if isinstance(timestamp, datetime):
seconds = time.mktime(timestamp.timetuple())
timestamp = int(seconds * 1000) + timestamp.microsecond // 1000
if suffix is None:
return prefix + "-" + str(timestamp)
else:
return prefix + "-" + str(timestamp) + "." + suffix
if __name__ == "__main__":
import doctest
(failure_count, test_count) = doctest.testmod()
if failure_count:
sys.exit(-1)
| apache-2.0 |
KaranToor/MA450 | google-cloud-sdk/lib/surface/compute/target_ssl_proxies/list.py | 3 | 3224 | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for listing target SSL proxies."""
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.calliope import base
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
class List(base.ListCommand):
"""List target SSL proxies."""
@staticmethod
def Args(parser):
parser.add_argument(
'names',
metavar='NAME',
nargs='*',
default=[],
completion_resource='compute.instances',
help=('If provided, show details for the specified names and/or URIs '
'of resources.'))
regexp = parser.add_argument(
'--regexp', '-r',
help='A regular expression to filter the names of the results on.')
regexp.detailed_help = """\
A regular expression to filter the names of the results on. Any names
that do not match the entire regular expression will be filtered out.
"""
def Format(self, args):
return """
table(
name,
proxyHeader,
service.basename(),
sslCertificates.map().basename().list():label=SSL_CERTIFICATES
)
"""
def Run(self, args):
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
client = holder.client.apitools_client
messages = client.MESSAGES_MODULE
project = properties.VALUES.core.project.Get(required=True)
# TODO(b/33298284): remove names and regexp arguments.
filter_uris = []
filter_names = []
if args.names:
log.warn('Name argument for filtering list results is deprecated. '
'Please use --filter flag.')
if args.regexp:
log.warn('--regexp flag for filtering list results is deprecated. '
'Please use --filter flag.')
for name in args.names:
try:
ref = holder.resources.Parse(
name, collection='compute.targetSslProxies')
filter_uris.append(ref.SelfLink())
except resources.UserError:
filter_names.append(name)
request = messages.ComputeTargetSslProxiesListRequest(
project=project,
filter='name eq {0}'.format(args.regexp) if args.regexp else None
)
results = list_pager.YieldFromList(
client.targetSslProxies, request, field='items',
limit=args.limit, batch_size=None)
for item in results:
if not args.names:
yield item
elif item.selfLink in filter_uris or item.name in filter_names:
yield item
List.detailed_help = base_classes.GetGlobalListerHelp('target SSL proxies')
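# Illustrative invocation (assumed CLI shape, based on the flags defined above):
#   $ gcloud compute target-ssl-proxies list --regexp "my-proxy-.*"
# which Run() translates into the API filter string 'name eq my-proxy-.*'.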
| apache-2.0 |
Oslandia/vizitown_plugin | twisted/internet/pollreactor.py | 47 | 6019 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
A poll() based implementation of the twisted main loop.
To install the event loop (and you should do this before any connections,
listeners or connectors are added)::
from twisted.internet import pollreactor
pollreactor.install()
"""
from __future__ import division, absolute_import
# System imports
import errno
from select import error as SelectError, poll
from select import POLLIN, POLLOUT, POLLHUP, POLLERR, POLLNVAL
from zope.interface import implementer
# Twisted imports
from twisted.python import log
from twisted.internet import posixbase
from twisted.internet.interfaces import IReactorFDSet
@implementer(IReactorFDSet)
class PollReactor(posixbase.PosixReactorBase, posixbase._PollLikeMixin):
"""
A reactor that uses poll(2).
@ivar _poller: A L{poll} which will be used to check for I/O
readiness.
@ivar _selectables: A dictionary mapping integer file descriptors to
instances of L{FileDescriptor} which have been registered with the
reactor. All L{FileDescriptors} which are currently receiving read or
write readiness notifications will be present as values in this
dictionary.
@ivar _reads: A dictionary mapping integer file descriptors to arbitrary
values (this is essentially a set). Keys in this dictionary will be
registered with C{_poller} for read readiness notifications which will
be dispatched to the corresponding L{FileDescriptor} instances in
C{_selectables}.
@ivar _writes: A dictionary mapping integer file descriptors to arbitrary
values (this is essentially a set). Keys in this dictionary will be
registered with C{_poller} for write readiness notifications which will
be dispatched to the corresponding L{FileDescriptor} instances in
C{_selectables}.
"""
_POLL_DISCONNECTED = (POLLHUP | POLLERR | POLLNVAL)
_POLL_IN = POLLIN
_POLL_OUT = POLLOUT
def __init__(self):
"""
Initialize polling object, file descriptor tracking dictionaries, and
the base class.
"""
self._poller = poll()
self._selectables = {}
self._reads = {}
self._writes = {}
posixbase.PosixReactorBase.__init__(self)
def _updateRegistration(self, fd):
"""Register/unregister an fd with the poller."""
try:
self._poller.unregister(fd)
except KeyError:
pass
mask = 0
if fd in self._reads:
mask = mask | POLLIN
if fd in self._writes:
mask = mask | POLLOUT
if mask != 0:
self._poller.register(fd, mask)
else:
if fd in self._selectables:
del self._selectables[fd]
def _dictRemove(self, selectable, mdict):
try:
# the easy way
fd = selectable.fileno()
# make sure the fd is actually real. In some situations we can get
# -1 here.
mdict[fd]
except:
# the hard way: necessary because fileno() may disappear at any
# moment, thanks to python's underlying sockets impl
for fd, fdes in self._selectables.items():
if selectable is fdes:
break
else:
# Hmm, maybe not the right course of action? This method can't
# fail, because it happens inside error detection...
return
if fd in mdict:
del mdict[fd]
self._updateRegistration(fd)
def addReader(self, reader):
"""Add a FileDescriptor for notification of data available to read.
"""
fd = reader.fileno()
if fd not in self._reads:
self._selectables[fd] = reader
self._reads[fd] = 1
self._updateRegistration(fd)
def addWriter(self, writer):
"""Add a FileDescriptor for notification of data available to write.
"""
fd = writer.fileno()
if fd not in self._writes:
self._selectables[fd] = writer
self._writes[fd] = 1
self._updateRegistration(fd)
def removeReader(self, reader):
"""Remove a Selectable for notification of data available to read.
"""
return self._dictRemove(reader, self._reads)
def removeWriter(self, writer):
"""Remove a Selectable for notification of data available to write.
"""
return self._dictRemove(writer, self._writes)
def removeAll(self):
"""
Remove all selectables, and return a list of them.
"""
return self._removeAll(
[self._selectables[fd] for fd in self._reads],
[self._selectables[fd] for fd in self._writes])
def doPoll(self, timeout):
"""Poll the poller for new events."""
if timeout is not None:
timeout = int(timeout * 1000) # convert seconds to milliseconds
try:
l = self._poller.poll(timeout)
except SelectError as e:
if e.args[0] == errno.EINTR:
return
else:
raise
_drdw = self._doReadOrWrite
for fd, event in l:
try:
selectable = self._selectables[fd]
except KeyError:
# Handles the infrequent case where one selectable's
# handler disconnects another.
continue
log.callWithLogger(selectable, _drdw, selectable, fd, event)
doIteration = doPoll
def getReaders(self):
return [self._selectables[fd] for fd in self._reads]
def getWriters(self):
return [self._selectables[fd] for fd in self._writes]
def install():
"""Install the poll() reactor."""
p = PollReactor()
from twisted.internet.main import installReactor
installReactor(p)
__all__ = ["PollReactor", "install"]
| gpl-2.0 |
AaronPlave/newspun | newspun/lib/algorithms/time.py | 1 | 4411 | import datetime
import sentiment_analysis
import GunningFog
# sources come in a list, type is a single variable, topic is the tags, category is business, etc.
#def time(sources, topic, types, category, information):
# 'start' time to base bin separation on
# now = datetime.datetime.now()
# bins = {}
# count = 0
# set up the action necessary depending on the type
# if types == proximity:
# data = proximity(article.text, word1, word2)
# elif types == freq:
# incomplete, ask Tyler
# data =
# elif types == sentiment:
#incomplete, ask Tyler
# data = analyze_get_score(article.text)
# elif types == readability:
# data = count(article.text)
# else:
# return "Error: invalid analysis type"
# for article in information:
# time = (article.year*8760)+(article.month*720)+(article.day*24)+(article.hour)
# nowtime = (now.year*8760)+(now.month*720)+(now.day*24)+(now.hour)
# dif = nowtime - time
# if dif < 12:
# bins[1] = data
# elif dif < 24:
# bins[2] = data
# else:
# 'information' is selected sources for a specific source, call more than once if the user
# specifies more than one source
def sentiment_time(information):
now = datetime.datetime.now()
bins = [[],[],[],[],[],[]]
for article in information:
# convert time into total number of hours (months approximated as 30 days)
time = (article.year*8760)+(article.month*720)+(article.day*24)+(article.hour)
nowtime = (now.year*8760)+(now.month*720)+(now.day*24)+(now.hour)
# compute age of article in hours
dif = nowtime - time
# separate into bins by half days
if dif < 12:
bins[0].append(article["sentiment"])
elif dif < 24:
bins[1].append(article["sentiment"])
elif dif < 36:
bins[2].append(article["sentiment"])
elif dif < 48:
bins[3].append(article["sentiment"])
elif dif < 60:
bins[4].append(article["sentiment"])
else:
bins[5].append(article["sentiment"])
datapoints = []
# make array of average sentiment scores per bin
for group in bins:
total = 0
count = 0
for num in group:
total += num
count += 1
# guard against empty bins to avoid ZeroDivisionError
datapoints.append(total/count if count else 0)
return datapoints
# 'information' is selected sources for a specific source, call more than once if the user
# specifies more than one source
def readability_time(information):
now = datetime.datetime.now()
bins = [[],[],[],[],[],[]]
for article in information:
# convert time into total number of hours (months approximated as 30 days)
time = (article.year*8760)+(article.month*720)+(article.day*24)+(article.hour)
nowtime = (now.year*8760)+(now.month*720)+(now.day*24)+(now.hour)
# compute age of article in hours
dif = nowtime - time
# separate into bins by half days
if dif < 12:
bins[0].append(article["readability_score"])
elif dif < 24:
bins[1].append(article["readability_score"])
elif dif < 36:
bins[2].append(article["readability_score"])
elif dif < 48:
bins[3].append(article["readability_score"])
elif dif < 60:
bins[4].append(article["readability_score"])
else:
bins[5].append(article["readability_score"])
datapoints = []
# make array of average readability scores per bin
for group in bins:
total = 0
count = 0
for num in group:
total += num
count += 1
# guard against empty bins to avoid ZeroDivisionError
datapoints.append(total/count if count else 0)
return datapoints
# 'information' is selected sources for a specific source, call more than once if the user
# specifies more than one source
def proximity_time(information, input1, input2):
now = datetime.datetime.now()
bins = [[],[],[],[],[],[]]
for article in information:
# convert time into total number of hours (months approximated as 30 days)
time = (article.year*8760)+(article.month*720)+(article.day*24)+(article.hour)
nowtime = (now.year*8760)+(now.month*720)+(now.day*24)+(now.hour)
# compute age of article in hours
dif = nowtime - time
# separate into bins by half days
if dif < 12:
bins[0].append(proximity(article["text"], input1, input2))
elif dif < 24:
bins[1].append(proximity(article["text"], input1, input2))
elif dif < 36:
bins[2].append(proximity(article["text"], input1, input2))
elif dif < 48:
bins[3].append(proximity(article["text"], input1, input2))
elif dif < 60:
bins[4].append(proximity(article["text"], input1, input2))
else:
bins[5].append(proximity(article["text"], input1, input2))
datapoints = []
# make array of average proximity scores per bin
for group in bins:
total = 0
count = 0
for num in group:
total += num
count += 1
# guard against empty bins to avoid ZeroDivisionError
datapoints.append(total/count if count else 0)
return datapoints
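# Usage sketch (illustrative; assumes `articles` is a list of records shaped as
# the functions above expect, with year/month/day/hour fields and score keys):
#   sentiment_points = sentiment_time(articles)
#   proximity_points = proximity_time(articles, "economy", "growth")
# each returning six per-half-day averages, newest bin first.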
| mit |
Sarah-Alsinan/muypicky | lib/python3.6/site-packages/django/utils/six.py | 223 | 30628 | """Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2015 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.10.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY34 = sys.version_info[0:2] >= (3, 4)
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result) # Invokes __set__.
try:
# This is a bit ugly, but it avoids running this again by
# removing this descriptor.
delattr(obj.__class__, self.name)
except AttributeError:
pass
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
def __getattr__(self, attr):
_module = self._resolve()
value = getattr(_module, attr)
setattr(self, attr, value)
return value
class _LazyModule(types.ModuleType):
def __init__(self, name):
super(_LazyModule, self).__init__(name)
self.__doc__ = self.__class__.__doc__
def __dir__(self):
attrs = ["__doc__", "__name__"]
attrs += [attr.name for attr in self._moved_attributes]
return attrs
# Subclasses should override this
_moved_attributes = []
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _SixMetaPathImporter(object):
"""
A meta path importer to import six.moves and its submodules.
This class implements a PEP302 finder and loader. It should be compatible
with Python 2.5 and all existing versions of Python3
"""
def __init__(self, six_module_name):
self.name = six_module_name
self.known_modules = {}
def _add_module(self, mod, *fullnames):
for fullname in fullnames:
self.known_modules[self.name + "." + fullname] = mod
def _get_module(self, fullname):
return self.known_modules[self.name + "." + fullname]
def find_module(self, fullname, path=None):
if fullname in self.known_modules:
return self
return None
def __get_module(self, fullname):
try:
return self.known_modules[fullname]
except KeyError:
raise ImportError("This loader does not know module " + fullname)
def load_module(self, fullname):
try:
# in case of a reload
return sys.modules[fullname]
except KeyError:
pass
mod = self.__get_module(fullname)
if isinstance(mod, MovedModule):
mod = mod._resolve()
else:
mod.__loader__ = self
sys.modules[fullname] = mod
return mod
def is_package(self, fullname):
"""
Return true, if the named module is a package.
We need this method to get correct spec objects with
Python 3.4 (see PEP451)
"""
return hasattr(self.__get_module(fullname), "__path__")
def get_code(self, fullname):
"""Return None
Required, if is_package is implemented"""
self.__get_module(fullname) # eventually raises ImportError
return None
get_source = get_code # same as get_code
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("intern", "__builtin__", "sys"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
]
# Add windows specific modules.
if sys.platform == "win32":
_moved_attributes += [
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
__path__ = [] # mark as package
parse = _importer._get_module("moves.urllib_parse")
error = _importer._get_module("moves.urllib_error")
request = _importer._get_module("moves.urllib_request")
response = _importer._get_module("moves.urllib_response")
robotparser = _importer._get_module("moves.urllib_robotparser")
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
def create_unbound_method(func, cls):
return func
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
def create_unbound_method(func, cls):
return types.MethodType(func, None, cls)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
viewkeys = operator.methodcaller("keys")
viewvalues = operator.methodcaller("values")
viewitems = operator.methodcaller("items")
else:
def iterkeys(d, **kw):
return d.iterkeys(**kw)
def itervalues(d, **kw):
return d.itervalues(**kw)
def iteritems(d, **kw):
return d.iteritems(**kw)
def iterlists(d, **kw):
return d.iterlists(**kw)
viewkeys = operator.methodcaller("viewkeys")
viewvalues = operator.methodcaller("viewvalues")
viewitems = operator.methodcaller("viewitems")
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
import struct
int2byte = struct.Struct(">B").pack
del struct
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
_assertCountEqual = "assertCountEqual"
if sys.version_info[1] <= 1:
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
else:
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
iterbytes = functools.partial(itertools.imap, ord)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
def assertCountEqual(self, *args, **kwargs):
return getattr(self, _assertCountEqual)(*args, **kwargs)
def assertRaisesRegex(self, *args, **kwargs):
return getattr(self, _assertRaisesRegex)(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
return getattr(self, _assertRegex)(*args, **kwargs)
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
if sys.version_info[:2] == (3, 2):
exec_("""def raise_from(value, from_value):
if from_value is None:
raise value
raise value from from_value
""")
elif sys.version_info[:2] > (3, 2):
exec_("""def raise_from(value, from_value):
raise value from from_value
""")
else:
def raise_from(value, from_value):
raise value
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
if sys.version_info[:2] < (3, 3):
_print = print_
def print_(*args, **kwargs):
fp = kwargs.get("file", sys.stdout)
flush = kwargs.pop("flush", False)
_print(*args, **kwargs)
if flush and fp is not None:
fp.flush()
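# Illustrative sketch (not part of this module): print_() accepts the
# ``flush`` keyword even where the underlying print() does not (pre-3.3).
#
#     print_('progress:', 42, sep=' ', flush=True)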
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
def wrapper(f):
f = functools.wraps(wrapped, assigned, updated)(f)
f.__wrapped__ = wrapped
return f
return wrapper
else:
wraps = functools.wraps
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
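# Illustrative sketch (not part of this module): with_metaclass() goes in the
# base-class list, so one class statement works on Python 2 and 3.
#
#     class Meta(type):
#         pass
#     class MyClass(with_metaclass(Meta, object)):
#         pass
#     assert type(MyClass) is Meta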
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
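# Illustrative sketch (not part of this module, reusing Meta from the sketch
# above): the decorator form rebuilds the class in place and also handles
# classes that declare __slots__.
#
#     @add_metaclass(Meta)
#     class Decorated(object):
#         __slots__ = ('x',)
#     assert type(Decorated) is Meta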
def python_2_unicode_compatible(klass):
"""
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
if PY2:
if '__str__' not in klass.__dict__:
raise ValueError("@python_2_unicode_compatible cannot be applied "
"to %s because it doesn't define __str__()." %
klass.__name__)
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass
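# Illustrative sketch (not part of this module): the class defines a single
# __str__ returning text; under Python 2 the decorator turns it into
# __unicode__ and makes __str__ return UTF-8 encoded bytes.
#
#     @python_2_unicode_compatible
#     class Greeting(object):
#         def __str__(self):
#             return u'h\xe9llo'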
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
# inserted an importer with a different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
### Additional customizations for Django ###
if PY3:
memoryview = memoryview
buffer_types = (bytes, bytearray, memoryview)
else:
# memoryview and buffer are not strictly equivalent, but should be fine for
# django core usage (mainly BinaryField). However, Jython doesn't support
# buffer (see http://bugs.jython.org/issue1521), so we have to be careful.
if sys.platform.startswith('java'):
memoryview = memoryview
else:
memoryview = buffer
buffer_types = (bytearray, memoryview)
| mit |
kmacinnis/sympy | sympy/matrices/expressions/transpose.py | 7 | 1942 | from __future__ import print_function, division
from sympy import Basic, Q
from sympy.functions import adjoint, conjugate
from sympy.matrices.expressions.matexpr import MatrixExpr
from sympy.matrices import MatrixBase
class Transpose(MatrixExpr):
"""
The transpose of a matrix expression.
This is a symbolic object that simply stores its argument without
evaluating it. To actually compute the transpose, use the ``transpose()``
function, or the ``.T`` attribute of matrices.
Examples
========
>>> from sympy.matrices import MatrixSymbol, Transpose
>>> from sympy.functions import transpose
>>> A = MatrixSymbol('A', 3, 5)
>>> B = MatrixSymbol('B', 5, 3)
>>> Transpose(A)
A'
>>> A.T == transpose(A) == Transpose(A)
True
>>> Transpose(A*B)
(A*B)'
>>> transpose(A*B)
B'*A'
"""
is_Transpose = True
def doit(self, **hints):
arg = self.arg
if hints.get('deep', True) and isinstance(arg, Basic):
arg = arg.doit(**hints)
try:
result = arg._eval_transpose()
return result if result is not None else Transpose(arg)
except AttributeError:
return Transpose(arg)
@property
def arg(self):
return self.args[0]
@property
def shape(self):
return self.arg.shape[::-1]
def _entry(self, i, j):
return self.arg._entry(j, i)
def _eval_adjoint(self):
return conjugate(self.arg)
def _eval_conjugate(self):
return adjoint(self.arg)
def _eval_transpose(self):
return self.arg
def _eval_trace(self):
from .trace import Trace
return Trace(self.arg) # Trace(X.T) => Trace(X)
def _eval_determinant(self):
from sympy.matrices.expressions.determinant import det
return det(self.arg)
def transpose(expr):
""" Matrix transpose """
return Transpose(expr).doit()
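# Illustrative sketch (not part of the module): doit() delegates to the
# argument's _eval_transpose(), so explicit matrices evaluate while purely
# symbolic ones stay wrapped.
#
#     >>> from sympy import Matrix, MatrixSymbol
#     >>> transpose(MatrixSymbol('X', 2, 3)).shape
#     (3, 2)
#     >>> transpose(Matrix([[1, 2], [3, 4]])) == Matrix([[1, 3], [2, 4]])
#     True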
| bsd-3-clause |
noironetworks/heat | heat/tests/test_urlfetch.py | 1 | 4338 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
import requests
from requests import exceptions
import six
from heat.common import urlfetch
from heat.tests import common
class Response(object):
def __init__(self, buf=''):
self.buf = buf
def iter_content(self, chunk_size=1):
while self.buf:
yield self.buf[:chunk_size]
self.buf = self.buf[chunk_size:]
def raise_for_status(self):
pass
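# Illustrative sketch (not part of the tests): Response stands in for a
# requests response just enough for urlfetch, which streams the body through
# iter_content() and calls raise_for_status().
#
#     r = Response(b'abcdef')
#     assert b''.join(r.iter_content(chunk_size=4)) == b'abcdef'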
class UrlFetchTest(common.HeatTestCase):
def test_file_scheme_default_behaviour(self):
self.assertRaises(urlfetch.URLFetchError,
urlfetch.get, 'file:///etc/profile')
def test_file_scheme_supported(self):
data = '{ "foo": "bar" }'
url = 'file:///etc/profile'
mock_open = self.patchobject(six.moves.urllib.request, 'urlopen')
mock_open.return_value = six.moves.cStringIO(data)
self.assertEqual(data, urlfetch.get(url, allowed_schemes=['file']))
mock_open.assert_called_once_with(url)
def test_file_scheme_failure(self):
url = 'file:///etc/profile'
mock_open = self.patchobject(six.moves.urllib.request, 'urlopen')
mock_open.side_effect = six.moves.urllib.error.URLError('oops')
self.assertRaises(urlfetch.URLFetchError,
urlfetch.get, url, allowed_schemes=['file'])
mock_open.assert_called_once_with(url)
def test_http_scheme(self):
url = 'http://example.com/template'
data = b'{ "foo": "bar" }'
response = Response(data)
mock_get = self.patchobject(requests, 'get')
mock_get.return_value = response
self.assertEqual(data, urlfetch.get(url))
mock_get.assert_called_once_with(url, stream=True)
def test_https_scheme(self):
url = 'https://example.com/template'
data = b'{ "foo": "bar" }'
response = Response(data)
mock_get = self.patchobject(requests, 'get')
mock_get.return_value = response
self.assertEqual(data, urlfetch.get(url))
mock_get.assert_called_once_with(url, stream=True)
def test_http_error(self):
url = 'http://example.com/template'
mock_get = self.patchobject(requests, 'get')
mock_get.side_effect = exceptions.HTTPError()
self.assertRaises(urlfetch.URLFetchError, urlfetch.get, url)
mock_get.assert_called_once_with(url, stream=True)
def test_non_exist_url(self):
url = 'http://non-exist.com/template'
mock_get = self.patchobject(requests, 'get')
mock_get.side_effect = exceptions.Timeout()
self.assertRaises(urlfetch.URLFetchError, urlfetch.get, url)
mock_get.assert_called_once_with(url, stream=True)
def test_garbage(self):
self.assertRaises(urlfetch.URLFetchError, urlfetch.get, 'wibble')
def test_max_fetch_size_okay(self):
url = 'http://example.com/template'
data = b'{ "foo": "bar" }'
response = Response(data)
cfg.CONF.set_override('max_template_size', 500)
mock_get = self.patchobject(requests, 'get')
mock_get.return_value = response
urlfetch.get(url)
mock_get.assert_called_once_with(url, stream=True)
def test_max_fetch_size_error(self):
url = 'http://example.com/template'
data = b'{ "foo": "bar" }'
response = Response(data)
cfg.CONF.set_override('max_template_size', 5)
mock_get = self.patchobject(requests, 'get')
mock_get.return_value = response
exception = self.assertRaises(urlfetch.URLFetchError,
urlfetch.get, url)
self.assertIn("Template exceeds", six.text_type(exception))
mock_get.assert_called_once_with(url, stream=True)
| apache-2.0 |
dreamapplehappy/myblog | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/style.py | 95 | 3672 | # -*- coding: utf-8 -*-
"""
pygments.style
~~~~~~~~~~~~~~
Basic style object.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.token import Token, STANDARD_TYPES
class StyleMeta(type):
def __new__(mcs, name, bases, dct):
obj = type.__new__(mcs, name, bases, dct)
for token in STANDARD_TYPES:
if token not in obj.styles:
obj.styles[token] = ''
def colorformat(text):
if text[0:1] == '#':
col = text[1:]
if len(col) == 6:
return col
elif len(col) == 3:
return col[0]*2 + col[1]*2 + col[2]*2
elif text == '':
return ''
assert False, "wrong color format %r" % text
_styles = obj._styles = {}
for ttype in obj.styles:
for token in ttype.split():
if token in _styles:
continue
ndef = _styles.get(token.parent, None)
styledefs = obj.styles.get(token, '').split()
if not ndef or token is None:
ndef = ['', 0, 0, 0, '', '', 0, 0, 0]
elif 'noinherit' in styledefs and token is not Token:
ndef = _styles[Token][:]
else:
ndef = ndef[:]
_styles[token] = ndef
for styledef in obj.styles.get(token, '').split():
if styledef == 'noinherit':
pass
elif styledef == 'bold':
ndef[1] = 1
elif styledef == 'nobold':
ndef[1] = 0
elif styledef == 'italic':
ndef[2] = 1
elif styledef == 'noitalic':
ndef[2] = 0
elif styledef == 'underline':
ndef[3] = 1
elif styledef == 'nounderline':
ndef[3] = 0
elif styledef[:3] == 'bg:':
ndef[4] = colorformat(styledef[3:])
elif styledef[:7] == 'border:':
ndef[5] = colorformat(styledef[7:])
elif styledef == 'roman':
ndef[6] = 1
elif styledef == 'sans':
ndef[7] = 1
elif styledef == 'mono':
ndef[8] = 1
else:
ndef[0] = colorformat(styledef)
return obj
def style_for_token(cls, token):
t = cls._styles[token]
return {
'color': t[0] or None,
'bold': bool(t[1]),
'italic': bool(t[2]),
'underline': bool(t[3]),
'bgcolor': t[4] or None,
'border': t[5] or None,
'roman': bool(t[6]) or None,
'sans': bool(t[7]) or None,
'mono': bool(t[8]) or None,
}
def list_styles(cls):
return list(cls)
def styles_token(cls, ttype):
return ttype in cls._styles
def __iter__(cls):
for token in cls._styles:
yield token, cls.style_for_token(token)
def __len__(cls):
return len(cls._styles)
class Style(object, metaclass=StyleMeta):
background_color = '#ffffff'
#: highlight background color
highlight_color = '#ffffcc'
#: Style definitions for individual token types.
styles = {}
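# Illustrative sketch (not part of pygments): a concrete style only fills in
# the ``styles`` shorthand mapping; StyleMeta expands it at class creation.
#
#     from pygments.token import Comment, Keyword
#     class MyStyle(Style):
#         styles = {
#             Comment: 'italic #888',
#             Keyword: 'bold #005',
#         }
#     # MyStyle.style_for_token(Keyword) -> {'color': '000055', 'bold': True, ...}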
| mit |
siutanwong/scikit-learn | sklearn/ensemble/tests/test_base.py | 284 | 1328 | """
Testing for the base module (sklearn.ensemble.base).
"""
# Authors: Gilles Louppe
# License: BSD 3 clause
from numpy.testing import assert_equal
from nose.tools import assert_true
from sklearn.utils.testing import assert_raise_message
from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.linear_model import Perceptron
def test_base():
# Check BaseEnsemble methods.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=3)
iris = load_iris()
ensemble.fit(iris.data, iris.target)
ensemble.estimators_ = [] # empty the list and create estimators manually
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator(append=False)
assert_equal(3, len(ensemble))
assert_equal(3, len(ensemble.estimators_))
assert_true(isinstance(ensemble[0], Perceptron))
def test_base_zero_n_estimators():
# Check that instantiating a BaseEnsemble with n_estimators<=0 raises
# a ValueError.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=0)
iris = load_iris()
assert_raise_message(ValueError,
"n_estimators must be greater than zero, got 0.",
ensemble.fit, iris.data, iris.target)
| bsd-3-clause |
Francis-Liu/animated-broccoli | nova/api/openstack/compute/image_metadata.py | 23 | 6249 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute.schemas import image_metadata
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import exception
from nova.i18n import _
import nova.image
ALIAS = 'image-metadata'
class ImageMetadataController(wsgi.Controller):
"""The image metadata API controller for the OpenStack API."""
def __init__(self):
self.image_api = nova.image.API()
def _get_image(self, context, image_id):
try:
return self.image_api.get(context, image_id)
except exception.ImageNotAuthorized as e:
raise exc.HTTPForbidden(explanation=e.format_message())
except exception.ImageNotFound:
msg = _("Image not found.")
raise exc.HTTPNotFound(explanation=msg)
@extensions.expected_errors((403, 404))
def index(self, req, image_id):
"""Returns the list of metadata for a given instance."""
context = req.environ['nova.context']
metadata = self._get_image(context, image_id)['properties']
return dict(metadata=metadata)
@extensions.expected_errors((403, 404))
def show(self, req, image_id, id):
context = req.environ['nova.context']
metadata = self._get_image(context, image_id)['properties']
if id in metadata:
return {'meta': {id: metadata[id]}}
else:
raise exc.HTTPNotFound()
@extensions.expected_errors((400, 403, 404, 413))
@validation.schema(image_metadata.create)
def create(self, req, image_id, body):
context = req.environ['nova.context']
image = self._get_image(context, image_id)
for key, value in six.iteritems(body['metadata']):
image['properties'][key] = value
common.check_img_metadata_properties_quota(context,
image['properties'])
try:
image = self.image_api.update(context, image_id, image, data=None,
purge_props=True)
except exception.ImageNotAuthorized as e:
raise exc.HTTPForbidden(explanation=e.format_message())
return dict(metadata=image['properties'])
@extensions.expected_errors((400, 403, 404, 413))
@validation.schema(image_metadata.update)
def update(self, req, image_id, id, body):
context = req.environ['nova.context']
meta = body['meta']
if id not in meta:
expl = _('Request body and URI mismatch')
raise exc.HTTPBadRequest(explanation=expl)
image = self._get_image(context, image_id)
image['properties'][id] = meta[id]
common.check_img_metadata_properties_quota(context,
image['properties'])
try:
self.image_api.update(context, image_id, image, data=None,
purge_props=True)
except exception.ImageNotAuthorized as e:
raise exc.HTTPForbidden(explanation=e.format_message())
return dict(meta=meta)
@extensions.expected_errors((400, 403, 404, 413))
@validation.schema(image_metadata.update_all)
def update_all(self, req, image_id, body):
context = req.environ['nova.context']
image = self._get_image(context, image_id)
metadata = body['metadata']
common.check_img_metadata_properties_quota(context, metadata)
image['properties'] = metadata
try:
self.image_api.update(context, image_id, image, data=None,
purge_props=True)
except exception.ImageNotAuthorized as e:
raise exc.HTTPForbidden(explanation=e.format_message())
return dict(metadata=metadata)
@extensions.expected_errors((403, 404))
@wsgi.response(204)
def delete(self, req, image_id, id):
context = req.environ['nova.context']
image = self._get_image(context, image_id)
if id not in image['properties']:
msg = _("Invalid metadata key")
raise exc.HTTPNotFound(explanation=msg)
image['properties'].pop(id)
try:
self.image_api.update(context, image_id, image, data=None,
purge_props=True)
except exception.ImageNotAuthorized as e:
raise exc.HTTPForbidden(explanation=e.format_message())
class ImageMetadata(extensions.V21APIExtensionBase):
"""Image Metadata API."""
name = "ImageMetadata"
alias = ALIAS
version = 1
def get_resources(self):
parent = {'member_name': 'image',
'collection_name': 'images'}
resources = [extensions.ResourceExtension('metadata',
ImageMetadataController(),
member_name='image_meta',
parent=parent,
custom_routes_fn=
self.image_metadata_map
)]
return resources
def get_controller_extensions(self):
return []
def image_metadata_map(self, mapper, wsgi_resource):
mapper.connect("metadata",
"/{project_id}/images/{image_id}/metadata",
controller=wsgi_resource,
action='update_all', conditions={"method": ['PUT']})
| apache-2.0 |
jachitech/AndroidPrebuiltPackages | packages/libxml2-2.9.4/python/tests/indexes.py | 35 | 2968 | #!/usr/bin/python -u
# -*- coding: ISO-8859-1 -*-
import sys
import libxml2
# Memory debug specific
libxml2.debugMemory(1)
ctxt = None
class callback:
def __init__(self, startd, starte, ende, delta, endd):
self.startd = startd
self.starte = starte
self.ende = ende
self.endd = endd
self.delta = delta
self.count = 0
def startDocument(self):
global ctxt
if ctxt.byteConsumed() != self.startd:
print("document start at wrong index: %d expecting %d\n" % (
ctxt.byteConsumed(), self.startd))
sys.exit(1)
def endDocument(self):
global ctxt
expect = self.ende + self.delta * (self.count - 1) + self.endd
if ctxt.byteConsumed() != expect:
print("document end at wrong index: %d expecting %d\n" % (
ctxt.byteConsumed(), expect))
sys.exit(1)
def startElement(self, tag, attrs):
global ctxt
if tag == "bar1":
expect = self.starte + self.delta * self.count
if ctxt.byteConsumed() != expect:
print("element start at wrong index: %d expecting %d\n" % (
ctxt.byteConsumed(), expect))
sys.exit(1)
def endElement(self, tag):
global ctxt
if tag == "bar1":
expect = self.ende + self.delta * self.count
if ctxt.byteConsumed() != expect:
print("element end at wrong index: %d expecting %d\n" % (
ctxt.byteConsumed(), expect))
sys.exit(1)
self.count = self.count + 1
def characters(self, data):
pass
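# Illustrative note (not part of the test): ctxt.byteConsumed() reports how
# many bytes the push parser has consumed so far; startd/starte/ende anchor
# the first document/element events, and every later <bar1> element is
# expected exactly ``delta`` bytes (one chunk) after the previous one.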
#
# First run a pure UTF-8 test
#
handler = callback(0, 13, 27, 198, 183)
ctxt = libxml2.createPushParser(handler, "<foo>\n", 6, "test.xml")
chunk = """ <bar1>chars1</bar1>
<bar2>chars2</bar2>
<bar3>chars3</bar3>
<bar4>chars4</bar4>
<bar5>chars5</bar5>
<bar6>&lt;s6</bar6>
<bar7>chars7</bar7>
<bar8>&#38;8</bar8>
<bar9>chars9</bar9>
"""
i = 0
while i < 10000:
ctxt.parseChunk(chunk, len(chunk), 0)
i = i + 1
chunk = "</foo>"
ctxt.parseChunk(chunk, len(chunk), 1)
ctxt=None
#
# Then run a test relying on ISO-Latin-1
#
handler = callback(43, 57, 71, 198, 183)
chunk="""<?xml version="1.0" encoding="ISO-8859-1"?>
<foo>
"""
ctxt = libxml2.createPushParser(handler, chunk, len(chunk), "test.xml")
chunk = """ <bar1>chars1</bar1>
<bar2>chars2</bar2>
<bar3>chars3</bar3>
<bar4>chàrs4</bar4>
<bar5>chars5</bar5>
<bar6>&lt;s6</bar6>
<bar7>chars7</bar7>
<bar8>&#38;8</bar8>
<bar9>très 9</bar9>
"""
i = 0
while i < 10000:
ctxt.parseChunk(chunk, len(chunk), 0)
i = i + 1
chunk = "</foo>"
ctxt.parseChunk(chunk, len(chunk), 1)
ctxt=None
# Memory debug specific
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
print("OK")
else:
print("Memory leak %d bytes" % (libxml2.debugMemory(1)))
libxml2.dumpMemory()
| apache-2.0 |
jscn/django | tests/mail/test_sendtestemail.py | 327 | 3088 | from __future__ import unicode_literals
from django.core import mail
from django.core.management import call_command
from django.test import SimpleTestCase, override_settings
@override_settings(
ADMINS=(('Admin', 'admin@example.com'), ('Admin and Manager', 'admin_and_manager@example.com')),
MANAGERS=(('Manager', 'manager@example.com'), ('Admin and Manager', 'admin_and_manager@example.com')),
)
class SendTestEmailManagementCommand(SimpleTestCase):
"""
Test the sending of a test email using the `sendtestemail` command.
"""
def test_single_receiver(self):
"""
The mail is sent with the correct subject and recipient.
"""
recipient = 'joe@example.com'
call_command('sendtestemail', recipient)
self.assertEqual(len(mail.outbox), 1)
mail_message = mail.outbox[0]
self.assertEqual(mail_message.subject[0:15], 'Test email from')
self.assertEqual(mail_message.recipients(), [recipient])
def test_multiple_receivers(self):
"""
The mail may be sent with multiple recipients.
"""
recipients = ['joe@example.com', 'jane@example.com']
call_command('sendtestemail', recipients[0], recipients[1])
self.assertEqual(len(mail.outbox), 1)
mail_message = mail.outbox[0]
self.assertEqual(mail_message.subject[0:15], 'Test email from')
self.assertEqual(sorted(mail_message.recipients()), [
'jane@example.com',
'joe@example.com',
])
def test_manager_receivers(self):
"""
The mail should be sent to the email addresses specified in
settings.MANAGERS.
"""
call_command('sendtestemail', '--managers')
self.assertEqual(len(mail.outbox), 1)
mail_message = mail.outbox[0]
self.assertEqual(sorted(mail_message.recipients()), [
'admin_and_manager@example.com',
'manager@example.com',
])
def test_admin_receivers(self):
"""
The mail should be sent to the email addresses specified in
settings.ADMINS.
"""
call_command('sendtestemail', '--admins')
self.assertEqual(len(mail.outbox), 1)
mail_message = mail.outbox[0]
self.assertEqual(sorted(mail_message.recipients()), [
'admin@example.com',
'admin_and_manager@example.com',
])
def test_manager_and_admin_receivers(self):
"""
The mail should be sent to the email addresses specified in both
settings.MANAGERS and settings.ADMINS.
"""
call_command('sendtestemail', '--managers', '--admins')
self.assertEqual(len(mail.outbox), 2)
manager_mail = mail.outbox[0]
self.assertEqual(sorted(manager_mail.recipients()), [
'admin_and_manager@example.com',
'manager@example.com',
])
admin_mail = mail.outbox[1]
self.assertEqual(sorted(admin_mail.recipients()), [
'admin@example.com',
'admin_and_manager@example.com',
])
| bsd-3-clause |
dylanGeng/BuildingMachineLearningSystemsWithPython | ch11/demo_mds.py | 25 | 3724 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import os
import numpy as np
from matplotlib import pylab
from mpl_toolkits.mplot3d import Axes3D
from sklearn import linear_model, manifold, decomposition, datasets
logistic = linear_model.LogisticRegression()
from utils import CHART_DIR
np.random.seed(3)
# all examples will have three classes in this file
colors = ['r', 'g', 'b']
markers = ['o', 6, '*']
def plot_demo_1():
X = np.c_[np.ones(5), 2 * np.ones(5), 10 * np.ones(5)].T
y = np.array([0, 1, 2])
fig = pylab.figure(figsize=(10, 4))
ax = fig.add_subplot(121, projection='3d')
ax.set_axis_bgcolor('white')
mds = manifold.MDS(n_components=3)
Xtrans = mds.fit_transform(X)
for cl, color, marker in zip(np.unique(y), colors, markers):
ax.scatter(
Xtrans[y == cl][:, 0], Xtrans[y == cl][:, 1], Xtrans[y == cl][:, 2], c=color, marker=marker, edgecolor='black')
pylab.title("MDS on example data set in 3 dimensions")
ax.view_init(10, -15)
mds = manifold.MDS(n_components=2)
Xtrans = mds.fit_transform(X)
ax = fig.add_subplot(122)
for cl, color, marker in zip(np.unique(y), colors, markers):
ax.scatter(
Xtrans[y == cl][:, 0], Xtrans[y == cl][:, 1], c=color, marker=marker, edgecolor='black')
pylab.title("MDS on example data set in 2 dimensions")
filename = "mds_demo_1.png"
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
def plot_iris_mds():
iris = datasets.load_iris()
X = iris.data
y = iris.target
# MDS
fig = pylab.figure(figsize=(10, 4))
ax = fig.add_subplot(121, projection='3d')
ax.set_axis_bgcolor('white')
mds = manifold.MDS(n_components=3)
Xtrans = mds.fit_transform(X)
for cl, color, marker in zip(np.unique(y), colors, markers):
ax.scatter(
Xtrans[y == cl][:, 0], Xtrans[y == cl][:, 1], Xtrans[y == cl][:, 2], c=color, marker=marker, edgecolor='black')
pylab.title("MDS on Iris data set in 3 dimensions")
ax.view_init(10, -15)
mds = manifold.MDS(n_components=2)
Xtrans = mds.fit_transform(X)
ax = fig.add_subplot(122)
for cl, color, marker in zip(np.unique(y), colors, markers):
ax.scatter(
Xtrans[y == cl][:, 0], Xtrans[y == cl][:, 1], c=color, marker=marker, edgecolor='black')
pylab.title("MDS on Iris data set in 2 dimensions")
filename = "mds_demo_iris.png"
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
# PCA
fig = pylab.figure(figsize=(10, 4))
ax = fig.add_subplot(121, projection='3d')
ax.set_axis_bgcolor('white')
pca = decomposition.PCA(n_components=3)
Xtrans = pca.fit(X).transform(X)
for cl, color, marker in zip(np.unique(y), colors, markers):
ax.scatter(
Xtrans[y == cl][:, 0], Xtrans[y == cl][:, 1], Xtrans[y == cl][:, 2], c=color, marker=marker, edgecolor='black')
pylab.title("PCA on Iris data set in 3 dimensions")
ax.view_init(50, -35)
pca = decomposition.PCA(n_components=2)
Xtrans = pca.fit_transform(X)
ax = fig.add_subplot(122)
for cl, color, marker in zip(np.unique(y), colors, markers):
ax.scatter(
Xtrans[y == cl][:, 0], Xtrans[y == cl][:, 1], c=color, marker=marker, edgecolor='black')
pylab.title("PCA on Iris data set in 2 dimensions")
filename = "pca_demo_iris.png"
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
if __name__ == '__main__':
plot_demo_1()
plot_iris_mds()
| mit |
aabbox/kbengine | kbe/src/lib/python/Lib/test/test_pathlib.py | 60 | 74519 | import collections
import io
import os
import errno
import pathlib
import pickle
import shutil
import socket
import stat
import sys
import tempfile
import unittest
from contextlib import contextmanager
from test import support
TESTFN = support.TESTFN
try:
import grp, pwd
except ImportError:
grp = pwd = None
class _BaseFlavourTest(object):
def _check_parse_parts(self, arg, expected):
f = self.flavour.parse_parts
sep = self.flavour.sep
altsep = self.flavour.altsep
actual = f([x.replace('/', sep) for x in arg])
self.assertEqual(actual, expected)
if altsep:
actual = f([x.replace('/', altsep) for x in arg])
self.assertEqual(actual, expected)
def test_parse_parts_common(self):
check = self._check_parse_parts
sep = self.flavour.sep
# Unanchored parts
check([], ('', '', []))
check(['a'], ('', '', ['a']))
check(['a/'], ('', '', ['a']))
check(['a', 'b'], ('', '', ['a', 'b']))
# Expansion
check(['a/b'], ('', '', ['a', 'b']))
check(['a/b/'], ('', '', ['a', 'b']))
check(['a', 'b/c', 'd'], ('', '', ['a', 'b', 'c', 'd']))
# Collapsing and stripping excess slashes
check(['a', 'b//c', 'd'], ('', '', ['a', 'b', 'c', 'd']))
check(['a', 'b/c/', 'd'], ('', '', ['a', 'b', 'c', 'd']))
# Eliminating standalone dots
check(['.'], ('', '', []))
check(['.', '.', 'b'], ('', '', ['b']))
check(['a', '.', 'b'], ('', '', ['a', 'b']))
check(['a', '.', '.'], ('', '', ['a']))
# The first part is anchored
check(['/a/b'], ('', sep, [sep, 'a', 'b']))
check(['/a', 'b'], ('', sep, [sep, 'a', 'b']))
check(['/a/', 'b'], ('', sep, [sep, 'a', 'b']))
# Ignoring parts before an anchored part
check(['a', '/b', 'c'], ('', sep, [sep, 'b', 'c']))
check(['a', '/b', '/c'], ('', sep, [sep, 'c']))
class PosixFlavourTest(_BaseFlavourTest, unittest.TestCase):
flavour = pathlib._posix_flavour
def test_parse_parts(self):
check = self._check_parse_parts
# Collapsing of excess leading slashes, except for the double-slash
# special case.
check(['//a', 'b'], ('', '//', ['//', 'a', 'b']))
check(['///a', 'b'], ('', '/', ['/', 'a', 'b']))
check(['////a', 'b'], ('', '/', ['/', 'a', 'b']))
# Paths which look like NT paths aren't treated specially
check(['c:a'], ('', '', ['c:a']))
check(['c:\\a'], ('', '', ['c:\\a']))
check(['\\a'], ('', '', ['\\a']))
def test_splitroot(self):
f = self.flavour.splitroot
self.assertEqual(f(''), ('', '', ''))
self.assertEqual(f('a'), ('', '', 'a'))
self.assertEqual(f('a/b'), ('', '', 'a/b'))
self.assertEqual(f('a/b/'), ('', '', 'a/b/'))
self.assertEqual(f('/a'), ('', '/', 'a'))
self.assertEqual(f('/a/b'), ('', '/', 'a/b'))
self.assertEqual(f('/a/b/'), ('', '/', 'a/b/'))
# The root is collapsed when there are redundant slashes
# except when there are exactly two leading slashes, which
# is a special case in POSIX.
self.assertEqual(f('//a'), ('', '//', 'a'))
self.assertEqual(f('///a'), ('', '/', 'a'))
self.assertEqual(f('///a/b'), ('', '/', 'a/b'))
# Paths which look like NT paths aren't treated specially
self.assertEqual(f('c:/a/b'), ('', '', 'c:/a/b'))
self.assertEqual(f('\\/a/b'), ('', '', '\\/a/b'))
self.assertEqual(f('\\a\\b'), ('', '', '\\a\\b'))
class NTFlavourTest(_BaseFlavourTest, unittest.TestCase):
flavour = pathlib._windows_flavour
def test_parse_parts(self):
check = self._check_parse_parts
# First part is anchored
check(['c:'], ('c:', '', ['c:']))
check(['c:\\'], ('c:', '\\', ['c:\\']))
check(['\\'], ('', '\\', ['\\']))
check(['c:a'], ('c:', '', ['c:', 'a']))
check(['c:\\a'], ('c:', '\\', ['c:\\', 'a']))
check(['\\a'], ('', '\\', ['\\', 'a']))
# UNC paths
check(['\\\\a\\b'], ('\\\\a\\b', '\\', ['\\\\a\\b\\']))
check(['\\\\a\\b\\'], ('\\\\a\\b', '\\', ['\\\\a\\b\\']))
check(['\\\\a\\b\\c'], ('\\\\a\\b', '\\', ['\\\\a\\b\\', 'c']))
# Second part is anchored, so that the first part is ignored
check(['a', 'Z:b', 'c'], ('Z:', '', ['Z:', 'b', 'c']))
check(['a', 'Z:\\b', 'c'], ('Z:', '\\', ['Z:\\', 'b', 'c']))
check(['a', '\\b', 'c'], ('', '\\', ['\\', 'b', 'c']))
# UNC paths
check(['a', '\\\\b\\c', 'd'], ('\\\\b\\c', '\\', ['\\\\b\\c\\', 'd']))
# Collapsing and stripping excess slashes
check(['a', 'Z:\\\\b\\\\c\\', 'd\\'], ('Z:', '\\', ['Z:\\', 'b', 'c', 'd']))
# UNC paths
check(['a', '\\\\b\\c\\\\', 'd'], ('\\\\b\\c', '\\', ['\\\\b\\c\\', 'd']))
# Extended paths
check(['\\\\?\\c:\\'], ('\\\\?\\c:', '\\', ['\\\\?\\c:\\']))
check(['\\\\?\\c:\\a'], ('\\\\?\\c:', '\\', ['\\\\?\\c:\\', 'a']))
# Extended UNC paths (format is "\\?\UNC\server\share")
check(['\\\\?\\UNC\\b\\c'], ('\\\\?\\UNC\\b\\c', '\\', ['\\\\?\\UNC\\b\\c\\']))
check(['\\\\?\\UNC\\b\\c\\d'], ('\\\\?\\UNC\\b\\c', '\\', ['\\\\?\\UNC\\b\\c\\', 'd']))
def test_splitroot(self):
f = self.flavour.splitroot
self.assertEqual(f(''), ('', '', ''))
self.assertEqual(f('a'), ('', '', 'a'))
self.assertEqual(f('a\\b'), ('', '', 'a\\b'))
self.assertEqual(f('\\a'), ('', '\\', 'a'))
self.assertEqual(f('\\a\\b'), ('', '\\', 'a\\b'))
self.assertEqual(f('c:a\\b'), ('c:', '', 'a\\b'))
self.assertEqual(f('c:\\a\\b'), ('c:', '\\', 'a\\b'))
# Redundant slashes in the root are collapsed
self.assertEqual(f('\\\\a'), ('', '\\', 'a'))
self.assertEqual(f('\\\\\\a/b'), ('', '\\', 'a/b'))
self.assertEqual(f('c:\\\\a'), ('c:', '\\', 'a'))
self.assertEqual(f('c:\\\\\\a/b'), ('c:', '\\', 'a/b'))
# Valid UNC paths
self.assertEqual(f('\\\\a\\b'), ('\\\\a\\b', '\\', ''))
self.assertEqual(f('\\\\a\\b\\'), ('\\\\a\\b', '\\', ''))
self.assertEqual(f('\\\\a\\b\\c\\d'), ('\\\\a\\b', '\\', 'c\\d'))
# These are non-UNC paths (according to ntpath.py and test_ntpath)
# However, command.com says such paths are invalid, so it's
# difficult to know what the right semantics are
self.assertEqual(f('\\\\\\a\\b'), ('', '\\', 'a\\b'))
self.assertEqual(f('\\\\a'), ('', '\\', 'a'))
#
# Tests for the pure classes
#
class _BasePurePathTest(object):
# keys are canonical paths, values are list of tuples of arguments
# supposed to produce equal paths
equivalences = {
'a/b': [
('a', 'b'), ('a/', 'b'), ('a', 'b/'), ('a/', 'b/'),
('a/b/',), ('a//b',), ('a//b//',),
# empty components get removed
('', 'a', 'b'), ('a', '', 'b'), ('a', 'b', ''),
],
'/b/c/d': [
('a', '/b/c', 'd'), ('a', '///b//c', 'd/'),
('/a', '/b/c', 'd'),
# empty components get removed
('/', 'b', '', 'c/d'), ('/', '', 'b/c/d'), ('', '/b/c/d'),
],
}
def setUp(self):
p = self.cls('a')
self.flavour = p._flavour
self.sep = self.flavour.sep
self.altsep = self.flavour.altsep
def test_constructor_common(self):
P = self.cls
p = P('a')
self.assertIsInstance(p, P)
P('a', 'b', 'c')
P('/a', 'b', 'c')
P('a/b/c')
P('/a/b/c')
self.assertEqual(P(P('a')), P('a'))
self.assertEqual(P(P('a'), 'b'), P('a/b'))
self.assertEqual(P(P('a'), P('b')), P('a/b'))
def _check_str_subclass(self, *args):
# Issue #21127: it should be possible to construct a PurePath object
# from an str subclass instance, and it then gets converted to
# a pure str object.
class StrSubclass(str):
pass
P = self.cls
p = P(*(StrSubclass(x) for x in args))
self.assertEqual(p, P(*args))
for part in p.parts:
self.assertIs(type(part), str)
def test_str_subclass_common(self):
self._check_str_subclass('')
self._check_str_subclass('.')
self._check_str_subclass('a')
self._check_str_subclass('a/b.txt')
self._check_str_subclass('/a/b.txt')
def test_join_common(self):
P = self.cls
p = P('a/b')
pp = p.joinpath('c')
self.assertEqual(pp, P('a/b/c'))
self.assertIs(type(pp), type(p))
pp = p.joinpath('c', 'd')
self.assertEqual(pp, P('a/b/c/d'))
pp = p.joinpath(P('c'))
self.assertEqual(pp, P('a/b/c'))
pp = p.joinpath('/c')
self.assertEqual(pp, P('/c'))
def test_div_common(self):
# Basically the same as joinpath()
P = self.cls
p = P('a/b')
pp = p / 'c'
self.assertEqual(pp, P('a/b/c'))
self.assertIs(type(pp), type(p))
pp = p / 'c/d'
self.assertEqual(pp, P('a/b/c/d'))
pp = p / 'c' / 'd'
self.assertEqual(pp, P('a/b/c/d'))
pp = 'c' / p / 'd'
self.assertEqual(pp, P('c/a/b/d'))
pp = p / P('c')
self.assertEqual(pp, P('a/b/c'))
pp = p / '/c'
self.assertEqual(pp, P('/c'))
def _check_str(self, expected, args):
p = self.cls(*args)
self.assertEqual(str(p), expected.replace('/', self.sep))
def test_str_common(self):
# Canonicalized paths roundtrip
for pathstr in ('a', 'a/b', 'a/b/c', '/', '/a/b', '/a/b/c'):
self._check_str(pathstr, (pathstr,))
# Special case for the empty path
self._check_str('.', ('',))
# Other tests for str() are in test_equivalences()
def test_as_posix_common(self):
P = self.cls
for pathstr in ('a', 'a/b', 'a/b/c', '/', '/a/b', '/a/b/c'):
self.assertEqual(P(pathstr).as_posix(), pathstr)
# Other tests for as_posix() are in test_equivalences()
def test_as_bytes_common(self):
sep = os.fsencode(self.sep)
P = self.cls
self.assertEqual(bytes(P('a/b')), b'a' + sep + b'b')
def test_as_uri_common(self):
P = self.cls
with self.assertRaises(ValueError):
P('a').as_uri()
with self.assertRaises(ValueError):
P().as_uri()
def test_repr_common(self):
for pathstr in ('a', 'a/b', 'a/b/c', '/', '/a/b', '/a/b/c'):
p = self.cls(pathstr)
clsname = p.__class__.__name__
r = repr(p)
# The repr() is in the form ClassName("forward-slashes path")
self.assertTrue(r.startswith(clsname + '('), r)
self.assertTrue(r.endswith(')'), r)
inner = r[len(clsname) + 1 : -1]
self.assertEqual(eval(inner), p.as_posix())
# The repr() roundtrips
q = eval(r, pathlib.__dict__)
self.assertIs(q.__class__, p.__class__)
self.assertEqual(q, p)
self.assertEqual(repr(q), r)
def test_eq_common(self):
P = self.cls
self.assertEqual(P('a/b'), P('a/b'))
self.assertEqual(P('a/b'), P('a', 'b'))
self.assertNotEqual(P('a/b'), P('a'))
self.assertNotEqual(P('a/b'), P('/a/b'))
self.assertNotEqual(P('a/b'), P())
self.assertNotEqual(P('/a/b'), P('/'))
self.assertNotEqual(P(), P('/'))
self.assertNotEqual(P(), "")
self.assertNotEqual(P(), {})
self.assertNotEqual(P(), int)
def test_match_common(self):
P = self.cls
self.assertRaises(ValueError, P('a').match, '')
self.assertRaises(ValueError, P('a').match, '.')
# Simple relative pattern
self.assertTrue(P('b.py').match('b.py'))
self.assertTrue(P('a/b.py').match('b.py'))
self.assertTrue(P('/a/b.py').match('b.py'))
self.assertFalse(P('a.py').match('b.py'))
self.assertFalse(P('b/py').match('b.py'))
self.assertFalse(P('/a.py').match('b.py'))
self.assertFalse(P('b.py/c').match('b.py'))
# Wildcard relative pattern
self.assertTrue(P('b.py').match('*.py'))
self.assertTrue(P('a/b.py').match('*.py'))
self.assertTrue(P('/a/b.py').match('*.py'))
self.assertFalse(P('b.pyc').match('*.py'))
self.assertFalse(P('b./py').match('*.py'))
self.assertFalse(P('b.py/c').match('*.py'))
# Multi-part relative pattern
self.assertTrue(P('ab/c.py').match('a*/*.py'))
self.assertTrue(P('/d/ab/c.py').match('a*/*.py'))
self.assertFalse(P('a.py').match('a*/*.py'))
self.assertFalse(P('/dab/c.py').match('a*/*.py'))
self.assertFalse(P('ab/c.py/d').match('a*/*.py'))
# Absolute pattern
self.assertTrue(P('/b.py').match('/*.py'))
self.assertFalse(P('b.py').match('/*.py'))
self.assertFalse(P('a/b.py').match('/*.py'))
self.assertFalse(P('/a/b.py').match('/*.py'))
# Multi-part absolute pattern
self.assertTrue(P('/a/b.py').match('/a/*.py'))
self.assertFalse(P('/ab.py').match('/a/*.py'))
self.assertFalse(P('/a/b/c.py').match('/a/*.py'))
def test_ordering_common(self):
# Ordering is tuple-alike
def assertLess(a, b):
self.assertLess(a, b)
self.assertGreater(b, a)
P = self.cls
a = P('a')
b = P('a/b')
c = P('abc')
d = P('b')
assertLess(a, b)
assertLess(a, c)
assertLess(a, d)
assertLess(b, c)
assertLess(c, d)
P = self.cls
a = P('/a')
b = P('/a/b')
c = P('/abc')
d = P('/b')
assertLess(a, b)
assertLess(a, c)
assertLess(a, d)
assertLess(b, c)
assertLess(c, d)
with self.assertRaises(TypeError):
P() < {}
def test_parts_common(self):
# `parts` returns a tuple
sep = self.sep
P = self.cls
p = P('a/b')
parts = p.parts
self.assertEqual(parts, ('a', 'b'))
# The object gets reused
self.assertIs(parts, p.parts)
# When the path is absolute, the anchor is a separate part
p = P('/a/b')
parts = p.parts
self.assertEqual(parts, (sep, 'a', 'b'))
def test_equivalences(self):
for k, tuples in self.equivalences.items():
canon = k.replace('/', self.sep)
posix = k.replace(self.sep, '/')
if canon != posix:
tuples = tuples + [
tuple(part.replace('/', self.sep) for part in t)
for t in tuples
]
tuples.append((posix, ))
pcanon = self.cls(canon)
for t in tuples:
p = self.cls(*t)
self.assertEqual(p, pcanon, "failed with args {}".format(t))
self.assertEqual(hash(p), hash(pcanon))
self.assertEqual(str(p), canon)
self.assertEqual(p.as_posix(), posix)
def test_parent_common(self):
# Relative
P = self.cls
p = P('a/b/c')
self.assertEqual(p.parent, P('a/b'))
self.assertEqual(p.parent.parent, P('a'))
self.assertEqual(p.parent.parent.parent, P())
self.assertEqual(p.parent.parent.parent.parent, P())
# Anchored
p = P('/a/b/c')
self.assertEqual(p.parent, P('/a/b'))
self.assertEqual(p.parent.parent, P('/a'))
self.assertEqual(p.parent.parent.parent, P('/'))
self.assertEqual(p.parent.parent.parent.parent, P('/'))
def test_parents_common(self):
# Relative
P = self.cls
p = P('a/b/c')
par = p.parents
self.assertEqual(len(par), 3)
self.assertEqual(par[0], P('a/b'))
self.assertEqual(par[1], P('a'))
self.assertEqual(par[2], P('.'))
self.assertEqual(list(par), [P('a/b'), P('a'), P('.')])
with self.assertRaises(IndexError):
par[-1]
with self.assertRaises(IndexError):
par[3]
with self.assertRaises(TypeError):
par[0] = p
# Anchored
p = P('/a/b/c')
par = p.parents
self.assertEqual(len(par), 3)
self.assertEqual(par[0], P('/a/b'))
self.assertEqual(par[1], P('/a'))
self.assertEqual(par[2], P('/'))
self.assertEqual(list(par), [P('/a/b'), P('/a'), P('/')])
with self.assertRaises(IndexError):
par[3]
def test_drive_common(self):
P = self.cls
self.assertEqual(P('a/b').drive, '')
self.assertEqual(P('/a/b').drive, '')
self.assertEqual(P('').drive, '')
def test_root_common(self):
P = self.cls
sep = self.sep
self.assertEqual(P('').root, '')
self.assertEqual(P('a/b').root, '')
self.assertEqual(P('/').root, sep)
self.assertEqual(P('/a/b').root, sep)
def test_anchor_common(self):
P = self.cls
sep = self.sep
self.assertEqual(P('').anchor, '')
self.assertEqual(P('a/b').anchor, '')
self.assertEqual(P('/').anchor, sep)
self.assertEqual(P('/a/b').anchor, sep)
def test_name_common(self):
P = self.cls
self.assertEqual(P('').name, '')
self.assertEqual(P('.').name, '')
self.assertEqual(P('/').name, '')
self.assertEqual(P('a/b').name, 'b')
self.assertEqual(P('/a/b').name, 'b')
self.assertEqual(P('/a/b/.').name, 'b')
self.assertEqual(P('a/b.py').name, 'b.py')
self.assertEqual(P('/a/b.py').name, 'b.py')
def test_suffix_common(self):
P = self.cls
self.assertEqual(P('').suffix, '')
self.assertEqual(P('.').suffix, '')
self.assertEqual(P('..').suffix, '')
self.assertEqual(P('/').suffix, '')
self.assertEqual(P('a/b').suffix, '')
self.assertEqual(P('/a/b').suffix, '')
self.assertEqual(P('/a/b/.').suffix, '')
self.assertEqual(P('a/b.py').suffix, '.py')
self.assertEqual(P('/a/b.py').suffix, '.py')
self.assertEqual(P('a/.hgrc').suffix, '')
self.assertEqual(P('/a/.hgrc').suffix, '')
self.assertEqual(P('a/.hg.rc').suffix, '.rc')
self.assertEqual(P('/a/.hg.rc').suffix, '.rc')
self.assertEqual(P('a/b.tar.gz').suffix, '.gz')
self.assertEqual(P('/a/b.tar.gz').suffix, '.gz')
self.assertEqual(P('a/Some name. Ending with a dot.').suffix, '')
self.assertEqual(P('/a/Some name. Ending with a dot.').suffix, '')
def test_suffixes_common(self):
P = self.cls
self.assertEqual(P('').suffixes, [])
self.assertEqual(P('.').suffixes, [])
self.assertEqual(P('/').suffixes, [])
self.assertEqual(P('a/b').suffixes, [])
self.assertEqual(P('/a/b').suffixes, [])
self.assertEqual(P('/a/b/.').suffixes, [])
self.assertEqual(P('a/b.py').suffixes, ['.py'])
self.assertEqual(P('/a/b.py').suffixes, ['.py'])
self.assertEqual(P('a/.hgrc').suffixes, [])
self.assertEqual(P('/a/.hgrc').suffixes, [])
self.assertEqual(P('a/.hg.rc').suffixes, ['.rc'])
self.assertEqual(P('/a/.hg.rc').suffixes, ['.rc'])
self.assertEqual(P('a/b.tar.gz').suffixes, ['.tar', '.gz'])
self.assertEqual(P('/a/b.tar.gz').suffixes, ['.tar', '.gz'])
self.assertEqual(P('a/Some name. Ending with a dot.').suffixes, [])
self.assertEqual(P('/a/Some name. Ending with a dot.').suffixes, [])
def test_stem_common(self):
P = self.cls
self.assertEqual(P('').stem, '')
self.assertEqual(P('.').stem, '')
self.assertEqual(P('..').stem, '..')
self.assertEqual(P('/').stem, '')
self.assertEqual(P('a/b').stem, 'b')
self.assertEqual(P('a/b.py').stem, 'b')
self.assertEqual(P('a/.hgrc').stem, '.hgrc')
self.assertEqual(P('a/.hg.rc').stem, '.hg')
self.assertEqual(P('a/b.tar.gz').stem, 'b.tar')
self.assertEqual(P('a/Some name. Ending with a dot.').stem,
'Some name. Ending with a dot.')
def test_with_name_common(self):
P = self.cls
self.assertEqual(P('a/b').with_name('d.xml'), P('a/d.xml'))
self.assertEqual(P('/a/b').with_name('d.xml'), P('/a/d.xml'))
self.assertEqual(P('a/b.py').with_name('d.xml'), P('a/d.xml'))
self.assertEqual(P('/a/b.py').with_name('d.xml'), P('/a/d.xml'))
self.assertEqual(P('a/Dot ending.').with_name('d.xml'), P('a/d.xml'))
self.assertEqual(P('/a/Dot ending.').with_name('d.xml'), P('/a/d.xml'))
self.assertRaises(ValueError, P('').with_name, 'd.xml')
self.assertRaises(ValueError, P('.').with_name, 'd.xml')
self.assertRaises(ValueError, P('/').with_name, 'd.xml')
self.assertRaises(ValueError, P('a/b').with_name, '')
self.assertRaises(ValueError, P('a/b').with_name, '/c')
self.assertRaises(ValueError, P('a/b').with_name, 'c/')
self.assertRaises(ValueError, P('a/b').with_name, 'c/d')
def test_with_suffix_common(self):
P = self.cls
self.assertEqual(P('a/b').with_suffix('.gz'), P('a/b.gz'))
self.assertEqual(P('/a/b').with_suffix('.gz'), P('/a/b.gz'))
self.assertEqual(P('a/b.py').with_suffix('.gz'), P('a/b.gz'))
self.assertEqual(P('/a/b.py').with_suffix('.gz'), P('/a/b.gz'))
# Stripping suffix
self.assertEqual(P('a/b.py').with_suffix(''), P('a/b'))
self.assertEqual(P('/a/b').with_suffix(''), P('/a/b'))
# Path doesn't have a "filename" component
self.assertRaises(ValueError, P('').with_suffix, '.gz')
self.assertRaises(ValueError, P('.').with_suffix, '.gz')
self.assertRaises(ValueError, P('/').with_suffix, '.gz')
# Invalid suffix
self.assertRaises(ValueError, P('a/b').with_suffix, 'gz')
self.assertRaises(ValueError, P('a/b').with_suffix, '/')
self.assertRaises(ValueError, P('a/b').with_suffix, '.')
self.assertRaises(ValueError, P('a/b').with_suffix, '/.gz')
self.assertRaises(ValueError, P('a/b').with_suffix, 'c/d')
self.assertRaises(ValueError, P('a/b').with_suffix, '.c/.d')
self.assertRaises(ValueError, P('a/b').with_suffix, './.d')
self.assertRaises(ValueError, P('a/b').with_suffix, '.d/.')
def test_relative_to_common(self):
P = self.cls
p = P('a/b')
self.assertRaises(TypeError, p.relative_to)
self.assertRaises(TypeError, p.relative_to, b'a')
self.assertEqual(p.relative_to(P()), P('a/b'))
self.assertEqual(p.relative_to(''), P('a/b'))
self.assertEqual(p.relative_to(P('a')), P('b'))
self.assertEqual(p.relative_to('a'), P('b'))
self.assertEqual(p.relative_to('a/'), P('b'))
self.assertEqual(p.relative_to(P('a/b')), P())
self.assertEqual(p.relative_to('a/b'), P())
# With several args
self.assertEqual(p.relative_to('a', 'b'), P())
# Unrelated paths
self.assertRaises(ValueError, p.relative_to, P('c'))
self.assertRaises(ValueError, p.relative_to, P('a/b/c'))
self.assertRaises(ValueError, p.relative_to, P('a/c'))
self.assertRaises(ValueError, p.relative_to, P('/a'))
p = P('/a/b')
self.assertEqual(p.relative_to(P('/')), P('a/b'))
self.assertEqual(p.relative_to('/'), P('a/b'))
self.assertEqual(p.relative_to(P('/a')), P('b'))
self.assertEqual(p.relative_to('/a'), P('b'))
self.assertEqual(p.relative_to('/a/'), P('b'))
self.assertEqual(p.relative_to(P('/a/b')), P())
self.assertEqual(p.relative_to('/a/b'), P())
# Unrelated paths
self.assertRaises(ValueError, p.relative_to, P('/c'))
self.assertRaises(ValueError, p.relative_to, P('/a/b/c'))
self.assertRaises(ValueError, p.relative_to, P('/a/c'))
self.assertRaises(ValueError, p.relative_to, P())
self.assertRaises(ValueError, p.relative_to, '')
self.assertRaises(ValueError, p.relative_to, P('a'))
def test_pickling_common(self):
P = self.cls
p = P('/a/b')
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
dumped = pickle.dumps(p, proto)
pp = pickle.loads(dumped)
self.assertIs(pp.__class__, p.__class__)
self.assertEqual(pp, p)
self.assertEqual(hash(pp), hash(p))
self.assertEqual(str(pp), str(p))
class PurePosixPathTest(_BasePurePathTest, unittest.TestCase):
cls = pathlib.PurePosixPath
def test_root(self):
P = self.cls
self.assertEqual(P('/a/b').root, '/')
self.assertEqual(P('///a/b').root, '/')
# POSIX special case for two leading slashes
self.assertEqual(P('//a/b').root, '//')
def test_eq(self):
P = self.cls
self.assertNotEqual(P('a/b'), P('A/b'))
self.assertEqual(P('/a'), P('///a'))
self.assertNotEqual(P('/a'), P('//a'))
def test_as_uri(self):
P = self.cls
self.assertEqual(P('/').as_uri(), 'file:///')
self.assertEqual(P('/a/b.c').as_uri(), 'file:///a/b.c')
self.assertEqual(P('/a/b%#c').as_uri(), 'file:///a/b%25%23c')
def test_as_uri_non_ascii(self):
from urllib.parse import quote_from_bytes
P = self.cls
try:
os.fsencode('\xe9')
except UnicodeEncodeError:
self.skipTest("\\xe9 cannot be encoded to the filesystem encoding")
self.assertEqual(P('/a/b\xe9').as_uri(),
'file:///a/b' + quote_from_bytes(os.fsencode('\xe9')))
def test_match(self):
P = self.cls
self.assertFalse(P('A.py').match('a.PY'))
def test_is_absolute(self):
P = self.cls
self.assertFalse(P().is_absolute())
self.assertFalse(P('a').is_absolute())
self.assertFalse(P('a/b/').is_absolute())
self.assertTrue(P('/').is_absolute())
self.assertTrue(P('/a').is_absolute())
self.assertTrue(P('/a/b/').is_absolute())
self.assertTrue(P('//a').is_absolute())
self.assertTrue(P('//a/b').is_absolute())
def test_is_reserved(self):
P = self.cls
self.assertIs(False, P('').is_reserved())
self.assertIs(False, P('/').is_reserved())
self.assertIs(False, P('/foo/bar').is_reserved())
self.assertIs(False, P('/dev/con/PRN/NUL').is_reserved())
def test_join(self):
P = self.cls
p = P('//a')
pp = p.joinpath('b')
self.assertEqual(pp, P('//a/b'))
pp = P('/a').joinpath('//c')
self.assertEqual(pp, P('//c'))
pp = P('//a').joinpath('/c')
self.assertEqual(pp, P('/c'))
def test_div(self):
# Basically the same as joinpath()
P = self.cls
p = P('//a')
pp = p / 'b'
self.assertEqual(pp, P('//a/b'))
pp = P('/a') / '//c'
self.assertEqual(pp, P('//c'))
pp = P('//a') / '/c'
self.assertEqual(pp, P('/c'))
class PureWindowsPathTest(_BasePurePathTest, unittest.TestCase):
cls = pathlib.PureWindowsPath
equivalences = _BasePurePathTest.equivalences.copy()
equivalences.update({
'c:a': [ ('c:', 'a'), ('c:', 'a/'), ('/', 'c:', 'a') ],
'c:/a': [
('c:/', 'a'), ('c:', '/', 'a'), ('c:', '/a'),
('/z', 'c:/', 'a'), ('//x/y', 'c:/', 'a'),
],
'//a/b/': [ ('//a/b',) ],
'//a/b/c': [
('//a/b', 'c'), ('//a/b/', 'c'),
],
})
def test_str(self):
p = self.cls('a/b/c')
self.assertEqual(str(p), 'a\\b\\c')
p = self.cls('c:/a/b/c')
self.assertEqual(str(p), 'c:\\a\\b\\c')
p = self.cls('//a/b')
self.assertEqual(str(p), '\\\\a\\b\\')
p = self.cls('//a/b/c')
self.assertEqual(str(p), '\\\\a\\b\\c')
p = self.cls('//a/b/c/d')
self.assertEqual(str(p), '\\\\a\\b\\c\\d')
def test_str_subclass(self):
self._check_str_subclass('c:')
self._check_str_subclass('c:a')
self._check_str_subclass('c:a\\b.txt')
self._check_str_subclass('c:\\')
self._check_str_subclass('c:\\a')
self._check_str_subclass('c:\\a\\b.txt')
self._check_str_subclass('\\\\some\\share')
self._check_str_subclass('\\\\some\\share\\a')
self._check_str_subclass('\\\\some\\share\\a\\b.txt')
def test_eq(self):
P = self.cls
self.assertEqual(P('c:a/b'), P('c:a/b'))
self.assertEqual(P('c:a/b'), P('c:', 'a', 'b'))
self.assertNotEqual(P('c:a/b'), P('d:a/b'))
self.assertNotEqual(P('c:a/b'), P('c:/a/b'))
self.assertNotEqual(P('/a/b'), P('c:/a/b'))
# Case-insensitivity
self.assertEqual(P('a/B'), P('A/b'))
self.assertEqual(P('C:a/B'), P('c:A/b'))
self.assertEqual(P('//Some/SHARE/a/B'), P('//somE/share/A/b'))
def test_as_uri(self):
from urllib.parse import quote_from_bytes
P = self.cls
with self.assertRaises(ValueError):
P('/a/b').as_uri()
with self.assertRaises(ValueError):
P('c:a/b').as_uri()
self.assertEqual(P('c:/').as_uri(), 'file:///c:/')
self.assertEqual(P('c:/a/b.c').as_uri(), 'file:///c:/a/b.c')
self.assertEqual(P('c:/a/b%#c').as_uri(), 'file:///c:/a/b%25%23c')
self.assertEqual(P('c:/a/b\xe9').as_uri(), 'file:///c:/a/b%C3%A9')
self.assertEqual(P('//some/share/').as_uri(), 'file://some/share/')
self.assertEqual(P('//some/share/a/b.c').as_uri(),
'file://some/share/a/b.c')
self.assertEqual(P('//some/share/a/b%#c\xe9').as_uri(),
'file://some/share/a/b%25%23c%C3%A9')
def test_match_common(self):
P = self.cls
# Absolute patterns
self.assertTrue(P('c:/b.py').match('/*.py'))
self.assertTrue(P('c:/b.py').match('c:*.py'))
self.assertTrue(P('c:/b.py').match('c:/*.py'))
self.assertFalse(P('d:/b.py').match('c:/*.py')) # wrong drive
self.assertFalse(P('b.py').match('/*.py'))
self.assertFalse(P('b.py').match('c:*.py'))
self.assertFalse(P('b.py').match('c:/*.py'))
self.assertFalse(P('c:b.py').match('/*.py'))
self.assertFalse(P('c:b.py').match('c:/*.py'))
self.assertFalse(P('/b.py').match('c:*.py'))
self.assertFalse(P('/b.py').match('c:/*.py'))
# UNC patterns
self.assertTrue(P('//some/share/a.py').match('/*.py'))
self.assertTrue(P('//some/share/a.py').match('//some/share/*.py'))
self.assertFalse(P('//other/share/a.py').match('//some/share/*.py'))
self.assertFalse(P('//some/share/a/b.py').match('//some/share/*.py'))
# Case-insensitivity
self.assertTrue(P('B.py').match('b.PY'))
self.assertTrue(P('c:/a/B.Py').match('C:/A/*.pY'))
self.assertTrue(P('//Some/Share/B.Py').match('//somE/sharE/*.pY'))
def test_ordering_common(self):
# Case-insensitivity
def assertOrderedEqual(a, b):
self.assertLessEqual(a, b)
self.assertGreaterEqual(b, a)
P = self.cls
p = P('c:A/b')
q = P('C:a/B')
assertOrderedEqual(p, q)
self.assertFalse(p < q)
self.assertFalse(p > q)
p = P('//some/Share/A/b')
q = P('//Some/SHARE/a/B')
assertOrderedEqual(p, q)
self.assertFalse(p < q)
self.assertFalse(p > q)
def test_parts(self):
P = self.cls
p = P('c:a/b')
parts = p.parts
self.assertEqual(parts, ('c:', 'a', 'b'))
p = P('c:/a/b')
parts = p.parts
self.assertEqual(parts, ('c:\\', 'a', 'b'))
p = P('//a/b/c/d')
parts = p.parts
self.assertEqual(parts, ('\\\\a\\b\\', 'c', 'd'))
def test_parent(self):
# Anchored
P = self.cls
p = P('z:a/b/c')
self.assertEqual(p.parent, P('z:a/b'))
self.assertEqual(p.parent.parent, P('z:a'))
self.assertEqual(p.parent.parent.parent, P('z:'))
self.assertEqual(p.parent.parent.parent.parent, P('z:'))
p = P('z:/a/b/c')
self.assertEqual(p.parent, P('z:/a/b'))
self.assertEqual(p.parent.parent, P('z:/a'))
self.assertEqual(p.parent.parent.parent, P('z:/'))
self.assertEqual(p.parent.parent.parent.parent, P('z:/'))
p = P('//a/b/c/d')
self.assertEqual(p.parent, P('//a/b/c'))
self.assertEqual(p.parent.parent, P('//a/b'))
self.assertEqual(p.parent.parent.parent, P('//a/b'))
def test_parents(self):
# Anchored
P = self.cls
p = P('z:a/b/')
par = p.parents
self.assertEqual(len(par), 2)
self.assertEqual(par[0], P('z:a'))
self.assertEqual(par[1], P('z:'))
self.assertEqual(list(par), [P('z:a'), P('z:')])
with self.assertRaises(IndexError):
par[2]
p = P('z:/a/b/')
par = p.parents
self.assertEqual(len(par), 2)
self.assertEqual(par[0], P('z:/a'))
self.assertEqual(par[1], P('z:/'))
self.assertEqual(list(par), [P('z:/a'), P('z:/')])
with self.assertRaises(IndexError):
par[2]
p = P('//a/b/c/d')
par = p.parents
self.assertEqual(len(par), 2)
self.assertEqual(par[0], P('//a/b/c'))
self.assertEqual(par[1], P('//a/b'))
self.assertEqual(list(par), [P('//a/b/c'), P('//a/b')])
with self.assertRaises(IndexError):
par[2]
def test_drive(self):
P = self.cls
self.assertEqual(P('c:').drive, 'c:')
self.assertEqual(P('c:a/b').drive, 'c:')
self.assertEqual(P('c:/').drive, 'c:')
self.assertEqual(P('c:/a/b/').drive, 'c:')
self.assertEqual(P('//a/b').drive, '\\\\a\\b')
self.assertEqual(P('//a/b/').drive, '\\\\a\\b')
self.assertEqual(P('//a/b/c/d').drive, '\\\\a\\b')
def test_root(self):
P = self.cls
self.assertEqual(P('c:').root, '')
self.assertEqual(P('c:a/b').root, '')
self.assertEqual(P('c:/').root, '\\')
self.assertEqual(P('c:/a/b/').root, '\\')
self.assertEqual(P('//a/b').root, '\\')
self.assertEqual(P('//a/b/').root, '\\')
self.assertEqual(P('//a/b/c/d').root, '\\')
def test_anchor(self):
P = self.cls
self.assertEqual(P('c:').anchor, 'c:')
self.assertEqual(P('c:a/b').anchor, 'c:')
self.assertEqual(P('c:/').anchor, 'c:\\')
self.assertEqual(P('c:/a/b/').anchor, 'c:\\')
self.assertEqual(P('//a/b').anchor, '\\\\a\\b\\')
self.assertEqual(P('//a/b/').anchor, '\\\\a\\b\\')
self.assertEqual(P('//a/b/c/d').anchor, '\\\\a\\b\\')
def test_name(self):
P = self.cls
self.assertEqual(P('c:').name, '')
self.assertEqual(P('c:/').name, '')
self.assertEqual(P('c:a/b').name, 'b')
self.assertEqual(P('c:/a/b').name, 'b')
self.assertEqual(P('c:a/b.py').name, 'b.py')
self.assertEqual(P('c:/a/b.py').name, 'b.py')
self.assertEqual(P('//My.py/Share.php').name, '')
self.assertEqual(P('//My.py/Share.php/a/b').name, 'b')
def test_suffix(self):
P = self.cls
self.assertEqual(P('c:').suffix, '')
self.assertEqual(P('c:/').suffix, '')
self.assertEqual(P('c:a/b').suffix, '')
self.assertEqual(P('c:/a/b').suffix, '')
self.assertEqual(P('c:a/b.py').suffix, '.py')
self.assertEqual(P('c:/a/b.py').suffix, '.py')
self.assertEqual(P('c:a/.hgrc').suffix, '')
self.assertEqual(P('c:/a/.hgrc').suffix, '')
self.assertEqual(P('c:a/.hg.rc').suffix, '.rc')
self.assertEqual(P('c:/a/.hg.rc').suffix, '.rc')
self.assertEqual(P('c:a/b.tar.gz').suffix, '.gz')
self.assertEqual(P('c:/a/b.tar.gz').suffix, '.gz')
self.assertEqual(P('c:a/Some name. Ending with a dot.').suffix, '')
self.assertEqual(P('c:/a/Some name. Ending with a dot.').suffix, '')
self.assertEqual(P('//My.py/Share.php').suffix, '')
self.assertEqual(P('//My.py/Share.php/a/b').suffix, '')
def test_suffixes(self):
P = self.cls
self.assertEqual(P('c:').suffixes, [])
self.assertEqual(P('c:/').suffixes, [])
self.assertEqual(P('c:a/b').suffixes, [])
self.assertEqual(P('c:/a/b').suffixes, [])
self.assertEqual(P('c:a/b.py').suffixes, ['.py'])
self.assertEqual(P('c:/a/b.py').suffixes, ['.py'])
self.assertEqual(P('c:a/.hgrc').suffixes, [])
self.assertEqual(P('c:/a/.hgrc').suffixes, [])
self.assertEqual(P('c:a/.hg.rc').suffixes, ['.rc'])
self.assertEqual(P('c:/a/.hg.rc').suffixes, ['.rc'])
self.assertEqual(P('c:a/b.tar.gz').suffixes, ['.tar', '.gz'])
self.assertEqual(P('c:/a/b.tar.gz').suffixes, ['.tar', '.gz'])
self.assertEqual(P('//My.py/Share.php').suffixes, [])
self.assertEqual(P('//My.py/Share.php/a/b').suffixes, [])
self.assertEqual(P('c:a/Some name. Ending with a dot.').suffixes, [])
self.assertEqual(P('c:/a/Some name. Ending with a dot.').suffixes, [])
def test_stem(self):
P = self.cls
self.assertEqual(P('c:').stem, '')
self.assertEqual(P('c:.').stem, '')
self.assertEqual(P('c:..').stem, '..')
self.assertEqual(P('c:/').stem, '')
self.assertEqual(P('c:a/b').stem, 'b')
self.assertEqual(P('c:a/b.py').stem, 'b')
self.assertEqual(P('c:a/.hgrc').stem, '.hgrc')
self.assertEqual(P('c:a/.hg.rc').stem, '.hg')
self.assertEqual(P('c:a/b.tar.gz').stem, 'b.tar')
self.assertEqual(P('c:a/Some name. Ending with a dot.').stem,
'Some name. Ending with a dot.')
def test_with_name(self):
P = self.cls
self.assertEqual(P('c:a/b').with_name('d.xml'), P('c:a/d.xml'))
self.assertEqual(P('c:/a/b').with_name('d.xml'), P('c:/a/d.xml'))
self.assertEqual(P('c:a/Dot ending.').with_name('d.xml'), P('c:a/d.xml'))
self.assertEqual(P('c:/a/Dot ending.').with_name('d.xml'), P('c:/a/d.xml'))
self.assertRaises(ValueError, P('c:').with_name, 'd.xml')
self.assertRaises(ValueError, P('c:/').with_name, 'd.xml')
self.assertRaises(ValueError, P('//My/Share').with_name, 'd.xml')
self.assertRaises(ValueError, P('c:a/b').with_name, 'd:')
self.assertRaises(ValueError, P('c:a/b').with_name, 'd:e')
self.assertRaises(ValueError, P('c:a/b').with_name, 'd:/e')
self.assertRaises(ValueError, P('c:a/b').with_name, '//My/Share')
def test_with_suffix(self):
P = self.cls
self.assertEqual(P('c:a/b').with_suffix('.gz'), P('c:a/b.gz'))
self.assertEqual(P('c:/a/b').with_suffix('.gz'), P('c:/a/b.gz'))
self.assertEqual(P('c:a/b.py').with_suffix('.gz'), P('c:a/b.gz'))
self.assertEqual(P('c:/a/b.py').with_suffix('.gz'), P('c:/a/b.gz'))
# Path doesn't have a "filename" component
self.assertRaises(ValueError, P('').with_suffix, '.gz')
self.assertRaises(ValueError, P('.').with_suffix, '.gz')
self.assertRaises(ValueError, P('/').with_suffix, '.gz')
self.assertRaises(ValueError, P('//My/Share').with_suffix, '.gz')
# Invalid suffix
self.assertRaises(ValueError, P('c:a/b').with_suffix, 'gz')
self.assertRaises(ValueError, P('c:a/b').with_suffix, '/')
self.assertRaises(ValueError, P('c:a/b').with_suffix, '\\')
self.assertRaises(ValueError, P('c:a/b').with_suffix, 'c:')
self.assertRaises(ValueError, P('c:a/b').with_suffix, '/.gz')
self.assertRaises(ValueError, P('c:a/b').with_suffix, '\\.gz')
self.assertRaises(ValueError, P('c:a/b').with_suffix, 'c:.gz')
self.assertRaises(ValueError, P('c:a/b').with_suffix, 'c/d')
self.assertRaises(ValueError, P('c:a/b').with_suffix, 'c\\d')
self.assertRaises(ValueError, P('c:a/b').with_suffix, '.c/d')
self.assertRaises(ValueError, P('c:a/b').with_suffix, '.c\\d')
def test_relative_to(self):
P = self.cls
p = P('C:Foo/Bar')
self.assertEqual(p.relative_to(P('c:')), P('Foo/Bar'))
self.assertEqual(p.relative_to('c:'), P('Foo/Bar'))
self.assertEqual(p.relative_to(P('c:foO')), P('Bar'))
self.assertEqual(p.relative_to('c:foO'), P('Bar'))
self.assertEqual(p.relative_to('c:foO/'), P('Bar'))
self.assertEqual(p.relative_to(P('c:foO/baR')), P())
self.assertEqual(p.relative_to('c:foO/baR'), P())
# Unrelated paths
self.assertRaises(ValueError, p.relative_to, P())
self.assertRaises(ValueError, p.relative_to, '')
self.assertRaises(ValueError, p.relative_to, P('d:'))
self.assertRaises(ValueError, p.relative_to, P('/'))
self.assertRaises(ValueError, p.relative_to, P('Foo'))
self.assertRaises(ValueError, p.relative_to, P('/Foo'))
self.assertRaises(ValueError, p.relative_to, P('C:/Foo'))
self.assertRaises(ValueError, p.relative_to, P('C:Foo/Bar/Baz'))
self.assertRaises(ValueError, p.relative_to, P('C:Foo/Baz'))
p = P('C:/Foo/Bar')
self.assertEqual(p.relative_to(P('c:')), P('/Foo/Bar'))
self.assertEqual(p.relative_to('c:'), P('/Foo/Bar'))
self.assertEqual(str(p.relative_to(P('c:'))), '\\Foo\\Bar')
self.assertEqual(str(p.relative_to('c:')), '\\Foo\\Bar')
self.assertEqual(p.relative_to(P('c:/')), P('Foo/Bar'))
self.assertEqual(p.relative_to('c:/'), P('Foo/Bar'))
self.assertEqual(p.relative_to(P('c:/foO')), P('Bar'))
self.assertEqual(p.relative_to('c:/foO'), P('Bar'))
self.assertEqual(p.relative_to('c:/foO/'), P('Bar'))
self.assertEqual(p.relative_to(P('c:/foO/baR')), P())
self.assertEqual(p.relative_to('c:/foO/baR'), P())
# Unrelated paths
self.assertRaises(ValueError, p.relative_to, P('C:/Baz'))
self.assertRaises(ValueError, p.relative_to, P('C:/Foo/Bar/Baz'))
self.assertRaises(ValueError, p.relative_to, P('C:/Foo/Baz'))
self.assertRaises(ValueError, p.relative_to, P('C:Foo'))
self.assertRaises(ValueError, p.relative_to, P('d:'))
self.assertRaises(ValueError, p.relative_to, P('d:/'))
self.assertRaises(ValueError, p.relative_to, P('/'))
self.assertRaises(ValueError, p.relative_to, P('/Foo'))
self.assertRaises(ValueError, p.relative_to, P('//C/Foo'))
# UNC paths
p = P('//Server/Share/Foo/Bar')
self.assertEqual(p.relative_to(P('//sErver/sHare')), P('Foo/Bar'))
self.assertEqual(p.relative_to('//sErver/sHare'), P('Foo/Bar'))
self.assertEqual(p.relative_to('//sErver/sHare/'), P('Foo/Bar'))
self.assertEqual(p.relative_to(P('//sErver/sHare/Foo')), P('Bar'))
self.assertEqual(p.relative_to('//sErver/sHare/Foo'), P('Bar'))
self.assertEqual(p.relative_to('//sErver/sHare/Foo/'), P('Bar'))
self.assertEqual(p.relative_to(P('//sErver/sHare/Foo/Bar')), P())
self.assertEqual(p.relative_to('//sErver/sHare/Foo/Bar'), P())
# Unrelated paths
self.assertRaises(ValueError, p.relative_to, P('/Server/Share/Foo'))
self.assertRaises(ValueError, p.relative_to, P('c:/Server/Share/Foo'))
self.assertRaises(ValueError, p.relative_to, P('//z/Share/Foo'))
self.assertRaises(ValueError, p.relative_to, P('//Server/z/Foo'))
def test_is_absolute(self):
P = self.cls
# Under NT, only paths with both a drive and a root are absolute
self.assertFalse(P().is_absolute())
self.assertFalse(P('a').is_absolute())
self.assertFalse(P('a/b/').is_absolute())
self.assertFalse(P('/').is_absolute())
self.assertFalse(P('/a').is_absolute())
self.assertFalse(P('/a/b/').is_absolute())
self.assertFalse(P('c:').is_absolute())
self.assertFalse(P('c:a').is_absolute())
self.assertFalse(P('c:a/b/').is_absolute())
self.assertTrue(P('c:/').is_absolute())
self.assertTrue(P('c:/a').is_absolute())
self.assertTrue(P('c:/a/b/').is_absolute())
# UNC paths are absolute by definition
self.assertTrue(P('//a/b').is_absolute())
self.assertTrue(P('//a/b/').is_absolute())
self.assertTrue(P('//a/b/c').is_absolute())
self.assertTrue(P('//a/b/c/d').is_absolute())
def test_join(self):
P = self.cls
p = P('C:/a/b')
pp = p.joinpath('x/y')
self.assertEqual(pp, P('C:/a/b/x/y'))
pp = p.joinpath('/x/y')
self.assertEqual(pp, P('C:/x/y'))
# Joining with a different drive => the first path is ignored, even
# if the second path is relative.
pp = p.joinpath('D:x/y')
self.assertEqual(pp, P('D:x/y'))
pp = p.joinpath('D:/x/y')
self.assertEqual(pp, P('D:/x/y'))
pp = p.joinpath('//host/share/x/y')
self.assertEqual(pp, P('//host/share/x/y'))
# Joining with the same drive => the first path is appended to if
# the second path is relative.
pp = p.joinpath('c:x/y')
self.assertEqual(pp, P('C:/a/b/x/y'))
pp = p.joinpath('c:/x/y')
self.assertEqual(pp, P('C:/x/y'))
def test_div(self):
# Basically the same as joinpath()
P = self.cls
p = P('C:/a/b')
self.assertEqual(p / 'x/y', P('C:/a/b/x/y'))
self.assertEqual(p / 'x' / 'y', P('C:/a/b/x/y'))
self.assertEqual(p / '/x/y', P('C:/x/y'))
self.assertEqual(p / '/x' / 'y', P('C:/x/y'))
# Joining with a different drive => the first path is ignored, even
# if the second path is relative.
self.assertEqual(p / 'D:x/y', P('D:x/y'))
self.assertEqual(p / 'D:' / 'x/y', P('D:x/y'))
self.assertEqual(p / 'D:/x/y', P('D:/x/y'))
self.assertEqual(p / 'D:' / '/x/y', P('D:/x/y'))
self.assertEqual(p / '//host/share/x/y', P('//host/share/x/y'))
# Joining with the same drive => the first path is appended to if
# the second path is relative.
self.assertEqual(p / 'c:x/y', P('C:/a/b/x/y'))
self.assertEqual(p / 'c:/x/y', P('C:/x/y'))
def test_is_reserved(self):
P = self.cls
self.assertIs(False, P('').is_reserved())
self.assertIs(False, P('/').is_reserved())
self.assertIs(False, P('/foo/bar').is_reserved())
self.assertIs(True, P('con').is_reserved())
self.assertIs(True, P('NUL').is_reserved())
self.assertIs(True, P('NUL.txt').is_reserved())
self.assertIs(True, P('com1').is_reserved())
self.assertIs(True, P('com9.bar').is_reserved())
self.assertIs(False, P('bar.com9').is_reserved())
self.assertIs(True, P('lpt1').is_reserved())
self.assertIs(True, P('lpt9.bar').is_reserved())
self.assertIs(False, P('bar.lpt9').is_reserved())
# Only the last component matters
self.assertIs(False, P('c:/NUL/con/baz').is_reserved())
# UNC paths are never reserved
self.assertIs(False, P('//my/share/nul/con/aux').is_reserved())
class PurePathTest(_BasePurePathTest, unittest.TestCase):
cls = pathlib.PurePath
def test_concrete_class(self):
p = self.cls('a')
self.assertIs(type(p),
pathlib.PureWindowsPath if os.name == 'nt' else pathlib.PurePosixPath)
def test_different_flavours_unequal(self):
p = pathlib.PurePosixPath('a')
q = pathlib.PureWindowsPath('a')
self.assertNotEqual(p, q)
def test_different_flavours_unordered(self):
p = pathlib.PurePosixPath('a')
q = pathlib.PureWindowsPath('a')
with self.assertRaises(TypeError):
p < q
with self.assertRaises(TypeError):
p <= q
with self.assertRaises(TypeError):
p > q
with self.assertRaises(TypeError):
p >= q
#
# Tests for the concrete classes
#
# Make sure any symbolic links in the base test path are resolved
BASE = os.path.realpath(TESTFN)
join = lambda *x: os.path.join(BASE, *x)
rel_join = lambda *x: os.path.join(TESTFN, *x)
def symlink_skip_reason():
if not pathlib.supports_symlinks:
return "no system support for symlinks"
try:
os.symlink(__file__, BASE)
except OSError as e:
return str(e)
else:
support.unlink(BASE)
return None
symlink_skip_reason = symlink_skip_reason()
only_nt = unittest.skipIf(os.name != 'nt',
'test requires a Windows-compatible system')
only_posix = unittest.skipIf(os.name == 'nt',
'test requires a POSIX-compatible system')
with_symlinks = unittest.skipIf(symlink_skip_reason, symlink_skip_reason)
@only_posix
class PosixPathAsPureTest(PurePosixPathTest):
cls = pathlib.PosixPath
@only_nt
class WindowsPathAsPureTest(PureWindowsPathTest):
cls = pathlib.WindowsPath
class _BasePathTest(object):
"""Tests for the FS-accessing functionalities of the Path classes."""
# (BASE)
# |
# |-- dirA/
# |-- linkC -> "../dirB"
# |-- dirB/
# | |-- fileB
# |-- linkD -> "../dirB"
# |-- dirC/
# | |-- fileC
# | |-- fileD
# |-- fileA
# |-- linkA -> "fileA"
# |-- linkB -> "dirB"
#
def setUp(self):
os.mkdir(BASE)
self.addCleanup(support.rmtree, BASE)
os.mkdir(join('dirA'))
os.mkdir(join('dirB'))
os.mkdir(join('dirC'))
os.mkdir(join('dirC', 'dirD'))
with open(join('fileA'), 'wb') as f:
f.write(b"this is file A\n")
with open(join('dirB', 'fileB'), 'wb') as f:
f.write(b"this is file B\n")
with open(join('dirC', 'fileC'), 'wb') as f:
f.write(b"this is file C\n")
with open(join('dirC', 'dirD', 'fileD'), 'wb') as f:
f.write(b"this is file D\n")
if not symlink_skip_reason:
# Relative symlinks
os.symlink('fileA', join('linkA'))
os.symlink('non-existing', join('brokenLink'))
self.dirlink('dirB', join('linkB'))
self.dirlink(os.path.join('..', 'dirB'), join('dirA', 'linkC'))
# This one goes upwards but doesn't create a loop
self.dirlink(os.path.join('..', 'dirB'), join('dirB', 'linkD'))
if os.name == 'nt':
# Workaround for http://bugs.python.org/issue13772
def dirlink(self, src, dest):
os.symlink(src, dest, target_is_directory=True)
else:
def dirlink(self, src, dest):
os.symlink(src, dest)
def assertSame(self, path_a, path_b):
self.assertTrue(os.path.samefile(str(path_a), str(path_b)),
"%r and %r don't point to the same file" %
(path_a, path_b))
def assertFileNotFound(self, func, *args, **kwargs):
with self.assertRaises(FileNotFoundError) as cm:
func(*args, **kwargs)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def _test_cwd(self, p):
q = self.cls(os.getcwd())
self.assertEqual(p, q)
self.assertEqual(str(p), str(q))
self.assertIs(type(p), type(q))
self.assertTrue(p.is_absolute())
def test_cwd(self):
p = self.cls.cwd()
self._test_cwd(p)
def test_empty_path(self):
# The empty path points to '.'
p = self.cls('')
self.assertEqual(p.stat(), os.stat('.'))
def test_exists(self):
P = self.cls
p = P(BASE)
self.assertIs(True, p.exists())
self.assertIs(True, (p / 'dirA').exists())
self.assertIs(True, (p / 'fileA').exists())
if not symlink_skip_reason:
self.assertIs(True, (p / 'linkA').exists())
self.assertIs(True, (p / 'linkB').exists())
self.assertIs(False, (p / 'foo').exists())
self.assertIs(False, P('/xyzzy').exists())
def test_open_common(self):
p = self.cls(BASE)
with (p / 'fileA').open('r') as f:
self.assertIsInstance(f, io.TextIOBase)
self.assertEqual(f.read(), "this is file A\n")
with (p / 'fileA').open('rb') as f:
self.assertIsInstance(f, io.BufferedIOBase)
self.assertEqual(f.read().strip(), b"this is file A")
with (p / 'fileA').open('rb', buffering=0) as f:
self.assertIsInstance(f, io.RawIOBase)
self.assertEqual(f.read().strip(), b"this is file A")
def test_iterdir(self):
P = self.cls
p = P(BASE)
it = p.iterdir()
paths = set(it)
expected = ['dirA', 'dirB', 'dirC', 'fileA']
if not symlink_skip_reason:
expected += ['linkA', 'linkB', 'brokenLink']
self.assertEqual(paths, { P(BASE, q) for q in expected })
@with_symlinks
def test_iterdir_symlink(self):
# __iter__ on a symlink to a directory
P = self.cls
p = P(BASE, 'linkB')
paths = set(p.iterdir())
expected = { P(BASE, 'linkB', q) for q in ['fileB', 'linkD'] }
self.assertEqual(paths, expected)
def test_iterdir_nodir(self):
# __iter__ on something that is not a directory
p = self.cls(BASE, 'fileA')
with self.assertRaises(OSError) as cm:
next(p.iterdir())
# ENOENT or EINVAL under Windows, ENOTDIR otherwise
# (see issue #12802)
self.assertIn(cm.exception.errno, (errno.ENOTDIR,
errno.ENOENT, errno.EINVAL))
def test_glob_common(self):
def _check(glob, expected):
self.assertEqual(set(glob), { P(BASE, q) for q in expected })
P = self.cls
p = P(BASE)
it = p.glob("fileA")
self.assertIsInstance(it, collections.Iterator)
_check(it, ["fileA"])
_check(p.glob("fileB"), [])
_check(p.glob("dir*/file*"), ["dirB/fileB", "dirC/fileC"])
if symlink_skip_reason:
_check(p.glob("*A"), ['dirA', 'fileA'])
else:
_check(p.glob("*A"), ['dirA', 'fileA', 'linkA'])
if symlink_skip_reason:
_check(p.glob("*B/*"), ['dirB/fileB'])
else:
_check(p.glob("*B/*"), ['dirB/fileB', 'dirB/linkD',
'linkB/fileB', 'linkB/linkD'])
if symlink_skip_reason:
_check(p.glob("*/fileB"), ['dirB/fileB'])
else:
_check(p.glob("*/fileB"), ['dirB/fileB', 'linkB/fileB'])
def test_rglob_common(self):
def _check(glob, expected):
self.assertEqual(set(glob), { P(BASE, q) for q in expected })
P = self.cls
p = P(BASE)
it = p.rglob("fileA")
self.assertIsInstance(it, collections.Iterator)
# XXX cannot test because of symlink loops in the test setup
#_check(it, ["fileA"])
#_check(p.rglob("fileB"), ["dirB/fileB"])
#_check(p.rglob("*/fileA"), [""])
#_check(p.rglob("*/fileB"), ["dirB/fileB"])
#_check(p.rglob("file*"), ["fileA", "dirB/fileB"])
# No symlink loops here
p = P(BASE, "dirC")
_check(p.rglob("file*"), ["dirC/fileC", "dirC/dirD/fileD"])
_check(p.rglob("*/*"), ["dirC/dirD/fileD"])
def test_glob_dotdot(self):
# ".." is not special in globs
P = self.cls
p = P(BASE)
self.assertEqual(set(p.glob("..")), { P(BASE, "..") })
self.assertEqual(set(p.glob("dirA/../file*")), { P(BASE, "dirA/../fileA") })
self.assertEqual(set(p.glob("../xyzzy")), set())
def _check_resolve_relative(self, p, expected):
q = p.resolve()
self.assertEqual(q, expected)
def _check_resolve_absolute(self, p, expected):
q = p.resolve()
self.assertEqual(q, expected)
@with_symlinks
def test_resolve_common(self):
P = self.cls
p = P(BASE, 'foo')
with self.assertRaises(OSError) as cm:
p.resolve()
self.assertEqual(cm.exception.errno, errno.ENOENT)
# These are all relative symlinks
p = P(BASE, 'dirB', 'fileB')
self._check_resolve_relative(p, p)
p = P(BASE, 'linkA')
self._check_resolve_relative(p, P(BASE, 'fileA'))
p = P(BASE, 'dirA', 'linkC', 'fileB')
self._check_resolve_relative(p, P(BASE, 'dirB', 'fileB'))
p = P(BASE, 'dirB', 'linkD', 'fileB')
self._check_resolve_relative(p, P(BASE, 'dirB', 'fileB'))
# Now create absolute symlinks
d = tempfile.mkdtemp(suffix='-dirD')
self.addCleanup(support.rmtree, d)
os.symlink(os.path.join(d), join('dirA', 'linkX'))
os.symlink(join('dirB'), os.path.join(d, 'linkY'))
p = P(BASE, 'dirA', 'linkX', 'linkY', 'fileB')
self._check_resolve_absolute(p, P(BASE, 'dirB', 'fileB'))
@with_symlinks
def test_resolve_dot(self):
# See https://bitbucket.org/pitrou/pathlib/issue/9/pathresolve-fails-on-complex-symlinks
p = self.cls(BASE)
self.dirlink('.', join('0'))
self.dirlink(os.path.join('0', '0'), join('1'))
self.dirlink(os.path.join('1', '1'), join('2'))
q = p / '2'
self.assertEqual(q.resolve(), p)
def test_with(self):
p = self.cls(BASE)
it = p.iterdir()
it2 = p.iterdir()
next(it2)
with p:
pass
# I/O operation on closed path
self.assertRaises(ValueError, next, it)
self.assertRaises(ValueError, next, it2)
self.assertRaises(ValueError, p.open)
self.assertRaises(ValueError, p.resolve)
self.assertRaises(ValueError, p.absolute)
self.assertRaises(ValueError, p.__enter__)
def test_chmod(self):
p = self.cls(BASE) / 'fileA'
mode = p.stat().st_mode
# Clear writable bit
new_mode = mode & ~0o222
p.chmod(new_mode)
self.assertEqual(p.stat().st_mode, new_mode)
# Set writable bit
new_mode = mode | 0o222
p.chmod(new_mode)
self.assertEqual(p.stat().st_mode, new_mode)
# XXX also need a test for lchmod
def test_stat(self):
p = self.cls(BASE) / 'fileA'
st = p.stat()
self.assertEqual(p.stat(), st)
# Change file mode by flipping write bit
p.chmod(st.st_mode ^ 0o222)
self.addCleanup(p.chmod, st.st_mode)
self.assertNotEqual(p.stat(), st)
@with_symlinks
def test_lstat(self):
p = self.cls(BASE) / 'linkA'
st = p.stat()
self.assertNotEqual(st, p.lstat())
def test_lstat_nosymlink(self):
p = self.cls(BASE) / 'fileA'
st = p.stat()
self.assertEqual(st, p.lstat())
@unittest.skipUnless(pwd, "the pwd module is needed for this test")
def test_owner(self):
p = self.cls(BASE) / 'fileA'
uid = p.stat().st_uid
try:
name = pwd.getpwuid(uid).pw_name
except KeyError:
self.skipTest(
"user %d doesn't have an entry in the system database" % uid)
self.assertEqual(name, p.owner())
@unittest.skipUnless(grp, "the grp module is needed for this test")
def test_group(self):
p = self.cls(BASE) / 'fileA'
gid = p.stat().st_gid
try:
name = grp.getgrgid(gid).gr_name
except KeyError:
self.skipTest(
"group %d doesn't have an entry in the system database" % gid)
self.assertEqual(name, p.group())
def test_unlink(self):
p = self.cls(BASE) / 'fileA'
p.unlink()
self.assertFileNotFound(p.stat)
self.assertFileNotFound(p.unlink)
def test_rmdir(self):
p = self.cls(BASE) / 'dirA'
for q in p.iterdir():
q.unlink()
p.rmdir()
self.assertFileNotFound(p.stat)
self.assertFileNotFound(p.unlink)
def test_rename(self):
P = self.cls(BASE)
p = P / 'fileA'
size = p.stat().st_size
# Renaming to another path
q = P / 'dirA' / 'fileAA'
p.rename(q)
self.assertEqual(q.stat().st_size, size)
self.assertFileNotFound(p.stat)
# Renaming to a str of a relative path
r = rel_join('fileAAA')
q.rename(r)
self.assertEqual(os.stat(r).st_size, size)
self.assertFileNotFound(q.stat)
def test_replace(self):
P = self.cls(BASE)
p = P / 'fileA'
size = p.stat().st_size
# Replacing a non-existing path
q = P / 'dirA' / 'fileAA'
p.replace(q)
self.assertEqual(q.stat().st_size, size)
self.assertFileNotFound(p.stat)
# Replacing another (existing) path
r = rel_join('dirB', 'fileB')
q.replace(r)
self.assertEqual(os.stat(r).st_size, size)
self.assertFileNotFound(q.stat)
def test_touch_common(self):
P = self.cls(BASE)
p = P / 'newfileA'
self.assertFalse(p.exists())
p.touch()
self.assertTrue(p.exists())
st = p.stat()
old_mtime = st.st_mtime
old_mtime_ns = st.st_mtime_ns
# Rewind the mtime sufficiently far in the past to work around
# filesystem-specific timestamp granularity.
os.utime(str(p), (old_mtime - 10, old_mtime - 10))
# The file mtime should be refreshed by calling touch() again
p.touch()
st = p.stat()
self.assertGreaterEqual(st.st_mtime_ns, old_mtime_ns)
self.assertGreaterEqual(st.st_mtime, old_mtime)
# Now with exist_ok=False
p = P / 'newfileB'
self.assertFalse(p.exists())
p.touch(mode=0o700, exist_ok=False)
self.assertTrue(p.exists())
self.assertRaises(OSError, p.touch, exist_ok=False)
def test_touch_nochange(self):
P = self.cls(BASE)
p = P / 'fileA'
p.touch()
with p.open('rb') as f:
self.assertEqual(f.read().strip(), b"this is file A")
def test_mkdir(self):
P = self.cls(BASE)
p = P / 'newdirA'
self.assertFalse(p.exists())
p.mkdir()
self.assertTrue(p.exists())
self.assertTrue(p.is_dir())
with self.assertRaises(OSError) as cm:
p.mkdir()
self.assertEqual(cm.exception.errno, errno.EEXIST)
def test_mkdir_parents(self):
# Creating a chain of directories
p = self.cls(BASE, 'newdirB', 'newdirC')
self.assertFalse(p.exists())
with self.assertRaises(OSError) as cm:
p.mkdir()
self.assertEqual(cm.exception.errno, errno.ENOENT)
p.mkdir(parents=True)
self.assertTrue(p.exists())
self.assertTrue(p.is_dir())
with self.assertRaises(OSError) as cm:
p.mkdir(parents=True)
self.assertEqual(cm.exception.errno, errno.EEXIST)
# test `mode` arg
mode = stat.S_IMODE(p.stat().st_mode) # default mode
p = self.cls(BASE, 'newdirD', 'newdirE')
p.mkdir(0o555, parents=True)
self.assertTrue(p.exists())
self.assertTrue(p.is_dir())
if os.name != 'nt':
# the directory's permissions follow the mode argument
self.assertEqual(stat.S_IMODE(p.stat().st_mode), 0o7555 & mode)
# the parent's permissions follow the default process settings
self.assertEqual(stat.S_IMODE(p.parent.stat().st_mode), mode)
@with_symlinks
def test_symlink_to(self):
P = self.cls(BASE)
target = P / 'fileA'
# Symlinking a path target
link = P / 'dirA' / 'linkAA'
link.symlink_to(target)
self.assertEqual(link.stat(), target.stat())
self.assertNotEqual(link.lstat(), target.stat())
# Symlinking a str target
link = P / 'dirA' / 'linkAAA'
link.symlink_to(str(target))
self.assertEqual(link.stat(), target.stat())
self.assertNotEqual(link.lstat(), target.stat())
self.assertFalse(link.is_dir())
# Symlinking to a directory
target = P / 'dirB'
link = P / 'dirA' / 'linkAAAA'
link.symlink_to(target, target_is_directory=True)
self.assertEqual(link.stat(), target.stat())
self.assertNotEqual(link.lstat(), target.stat())
self.assertTrue(link.is_dir())
self.assertTrue(list(link.iterdir()))
def test_is_dir(self):
P = self.cls(BASE)
self.assertTrue((P / 'dirA').is_dir())
self.assertFalse((P / 'fileA').is_dir())
self.assertFalse((P / 'non-existing').is_dir())
if not symlink_skip_reason:
self.assertFalse((P / 'linkA').is_dir())
self.assertTrue((P / 'linkB').is_dir())
self.assertFalse((P / 'brokenLink').is_dir())
def test_is_file(self):
P = self.cls(BASE)
self.assertTrue((P / 'fileA').is_file())
self.assertFalse((P / 'dirA').is_file())
self.assertFalse((P / 'non-existing').is_file())
if not symlink_skip_reason:
self.assertTrue((P / 'linkA').is_file())
self.assertFalse((P / 'linkB').is_file())
self.assertFalse((P / 'brokenLink').is_file())
def test_is_symlink(self):
P = self.cls(BASE)
self.assertFalse((P / 'fileA').is_symlink())
self.assertFalse((P / 'dirA').is_symlink())
self.assertFalse((P / 'non-existing').is_symlink())
if not symlink_skip_reason:
self.assertTrue((P / 'linkA').is_symlink())
self.assertTrue((P / 'linkB').is_symlink())
self.assertTrue((P / 'brokenLink').is_symlink())
def test_is_fifo_false(self):
P = self.cls(BASE)
self.assertFalse((P / 'fileA').is_fifo())
self.assertFalse((P / 'dirA').is_fifo())
self.assertFalse((P / 'non-existing').is_fifo())
@unittest.skipUnless(hasattr(os, "mkfifo"), "os.mkfifo() required")
def test_is_fifo_true(self):
P = self.cls(BASE, 'myfifo')
os.mkfifo(str(P))
self.assertTrue(P.is_fifo())
self.assertFalse(P.is_socket())
self.assertFalse(P.is_file())
def test_is_socket_false(self):
P = self.cls(BASE)
self.assertFalse((P / 'fileA').is_socket())
self.assertFalse((P / 'dirA').is_socket())
self.assertFalse((P / 'non-existing').is_socket())
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
def test_is_socket_true(self):
P = self.cls(BASE, 'mysock')
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.addCleanup(sock.close)
try:
sock.bind(str(P))
except OSError as e:
if "AF_UNIX path too long" in str(e):
self.skipTest("cannot bind Unix socket: " + str(e))
self.assertTrue(P.is_socket())
self.assertFalse(P.is_fifo())
self.assertFalse(P.is_file())
def test_is_block_device_false(self):
P = self.cls(BASE)
self.assertFalse((P / 'fileA').is_block_device())
self.assertFalse((P / 'dirA').is_block_device())
self.assertFalse((P / 'non-existing').is_block_device())
def test_is_char_device_false(self):
P = self.cls(BASE)
self.assertFalse((P / 'fileA').is_char_device())
self.assertFalse((P / 'dirA').is_char_device())
self.assertFalse((P / 'non-existing').is_char_device())
def test_is_char_device_true(self):
# Under Unix, /dev/null should generally be a char device
P = self.cls('/dev/null')
if not P.exists():
self.skipTest("/dev/null required")
self.assertTrue(P.is_char_device())
self.assertFalse(P.is_block_device())
self.assertFalse(P.is_file())
def test_pickling_common(self):
p = self.cls(BASE, 'fileA')
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
dumped = pickle.dumps(p, proto)
pp = pickle.loads(dumped)
self.assertEqual(pp.stat(), p.stat())
def test_parts_interning(self):
P = self.cls
p = P('/usr/bin/foo')
q = P('/usr/local/bin')
# 'usr'
self.assertIs(p.parts[1], q.parts[1])
# 'bin'
self.assertIs(p.parts[2], q.parts[3])
def _check_complex_symlinks(self, link0_target):
# Test solving a non-looping chain of symlinks (issue #19887)
P = self.cls(BASE)
self.dirlink(os.path.join('link0', 'link0'), join('link1'))
self.dirlink(os.path.join('link1', 'link1'), join('link2'))
self.dirlink(os.path.join('link2', 'link2'), join('link3'))
self.dirlink(link0_target, join('link0'))
# Resolve absolute paths
p = (P / 'link0').resolve()
self.assertEqual(p, P)
self.assertEqual(str(p), BASE)
p = (P / 'link1').resolve()
self.assertEqual(p, P)
self.assertEqual(str(p), BASE)
p = (P / 'link2').resolve()
self.assertEqual(p, P)
self.assertEqual(str(p), BASE)
p = (P / 'link3').resolve()
self.assertEqual(p, P)
self.assertEqual(str(p), BASE)
# Resolve relative paths
old_path = os.getcwd()
os.chdir(BASE)
try:
p = self.cls('link0').resolve()
self.assertEqual(p, P)
self.assertEqual(str(p), BASE)
p = self.cls('link1').resolve()
self.assertEqual(p, P)
self.assertEqual(str(p), BASE)
p = self.cls('link2').resolve()
self.assertEqual(p, P)
self.assertEqual(str(p), BASE)
p = self.cls('link3').resolve()
self.assertEqual(p, P)
self.assertEqual(str(p), BASE)
finally:
os.chdir(old_path)
@with_symlinks
def test_complex_symlinks_absolute(self):
self._check_complex_symlinks(BASE)
@with_symlinks
def test_complex_symlinks_relative(self):
self._check_complex_symlinks('.')
@with_symlinks
def test_complex_symlinks_relative_dot_dot(self):
self._check_complex_symlinks(os.path.join('dirA', '..'))
class PathTest(_BasePathTest, unittest.TestCase):
cls = pathlib.Path
def test_concrete_class(self):
p = self.cls('a')
self.assertIs(type(p),
pathlib.WindowsPath if os.name == 'nt' else pathlib.PosixPath)
def test_unsupported_flavour(self):
if os.name == 'nt':
self.assertRaises(NotImplementedError, pathlib.PosixPath)
else:
self.assertRaises(NotImplementedError, pathlib.WindowsPath)
@only_posix
class PosixPathTest(_BasePathTest, unittest.TestCase):
cls = pathlib.PosixPath
def _check_symlink_loop(self, *args):
path = self.cls(*args)
with self.assertRaises(RuntimeError):
print(path.resolve())
def test_open_mode(self):
old_mask = os.umask(0)
self.addCleanup(os.umask, old_mask)
p = self.cls(BASE)
with (p / 'new_file').open('wb'):
pass
st = os.stat(join('new_file'))
self.assertEqual(stat.S_IMODE(st.st_mode), 0o666)
os.umask(0o022)
with (p / 'other_new_file').open('wb'):
pass
st = os.stat(join('other_new_file'))
self.assertEqual(stat.S_IMODE(st.st_mode), 0o644)
def test_touch_mode(self):
old_mask = os.umask(0)
self.addCleanup(os.umask, old_mask)
p = self.cls(BASE)
(p / 'new_file').touch()
st = os.stat(join('new_file'))
self.assertEqual(stat.S_IMODE(st.st_mode), 0o666)
os.umask(0o022)
(p / 'other_new_file').touch()
st = os.stat(join('other_new_file'))
self.assertEqual(stat.S_IMODE(st.st_mode), 0o644)
(p / 'masked_new_file').touch(mode=0o750)
st = os.stat(join('masked_new_file'))
self.assertEqual(stat.S_IMODE(st.st_mode), 0o750)
@with_symlinks
def test_resolve_loop(self):
# Loop detection for broken symlinks under POSIX
P = self.cls
# Loops with relative symlinks
os.symlink('linkX/inside', join('linkX'))
self._check_symlink_loop(BASE, 'linkX')
os.symlink('linkY', join('linkY'))
self._check_symlink_loop(BASE, 'linkY')
os.symlink('linkZ/../linkZ', join('linkZ'))
self._check_symlink_loop(BASE, 'linkZ')
# Loops with absolute symlinks
os.symlink(join('linkU/inside'), join('linkU'))
self._check_symlink_loop(BASE, 'linkU')
os.symlink(join('linkV'), join('linkV'))
self._check_symlink_loop(BASE, 'linkV')
os.symlink(join('linkW/../linkW'), join('linkW'))
self._check_symlink_loop(BASE, 'linkW')
def test_glob(self):
P = self.cls
p = P(BASE)
given = set(p.glob("FILEa"))
expect = set() if not support.fs_is_case_insensitive(BASE) else given
self.assertEqual(given, expect)
self.assertEqual(set(p.glob("FILEa*")), set())
def test_rglob(self):
P = self.cls
p = P(BASE, "dirC")
given = set(p.rglob("FILEd"))
expect = set() if not support.fs_is_case_insensitive(BASE) else given
self.assertEqual(given, expect)
self.assertEqual(set(p.rglob("FILEd*")), set())
@only_nt
class WindowsPathTest(_BasePathTest, unittest.TestCase):
cls = pathlib.WindowsPath
def test_glob(self):
P = self.cls
p = P(BASE)
self.assertEqual(set(p.glob("FILEa")), { P(BASE, "fileA") })
def test_rglob(self):
P = self.cls
p = P(BASE, "dirC")
self.assertEqual(set(p.rglob("FILEd")), { P(BASE, "dirC/dirD/fileD") })
if __name__ == "__main__":
unittest.main()
| lgpl-3.0 |
Bismarrck/tensorflow | tensorflow/contrib/sparsemax/python/ops/sparsemax.py | 22 | 3656 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sparsemax op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
__all__ = ["sparsemax"]
def sparsemax(logits, name=None):
"""Computes sparsemax activations [1].
For each batch `i` and class `j` we have
$$sparsemax[i, j] = max(logits[i, j] - tau(logits[i, :]), 0)$$
[1]: https://arxiv.org/abs/1602.02068
Args:
logits: A `Tensor`. Must be one of the following types: `half`, `float32`,
`float64`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `logits`.
"""
with ops.name_scope(name, "sparsemax", [logits]) as name:
logits = ops.convert_to_tensor(logits, name="logits")
obs = array_ops.shape(logits)[0]
dims = array_ops.shape(logits)[1]
# In the paper, they call the logits z.
# The mean(logits) can be subtracted from logits to make the algorithm
# more numerically stable. The instability in this algorithm comes mostly
# from the z_cumsum. Subtracting the mean will cause z_cumsum to be close
# to zero. However, in practice the numerical instability issues are very
# minor and subtracting the mean causes extra issues with inf and nan
# input.
z = logits
# sort z
z_sorted, _ = nn.top_k(z, k=dims)
# calculate k(z)
z_cumsum = math_ops.cumsum(z_sorted, axis=1)
k = math_ops.range(
1, math_ops.cast(dims, logits.dtype) + 1, dtype=logits.dtype)
z_check = 1 + k * z_sorted > z_cumsum
# because the z_check vector is always [1,1,...1,0,0,...0] finding the
# (index + 1) of the last `1` is the same as just summing the number of 1.
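# For example, with z_sorted = [3, 2, 1]: z_cumsum = [3, 5, 6] and
# 1 + k * z_sorted = [4, 5, 4], so z_check = [True, False, False] and k_z = 1.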
k_z = math_ops.reduce_sum(math_ops.cast(z_check, dtypes.int32), axis=1)
# calculate tau(z)
# If there are inf values or all values are -inf, the k_z will be zero,
# this is mathematically invalid and will also cause the gather_nd to fail.
# Prevent this issue for now by setting k_z = 1 if k_z = 0, this is then
# fixed later (see p_safe) by returning p = nan. This results in the same
# behavior as softmax.
k_z_safe = math_ops.maximum(k_z, 1)
indices = array_ops.stack([math_ops.range(0, obs), k_z_safe - 1], axis=1)
tau_sum = array_ops.gather_nd(z_cumsum, indices)
tau_z = (tau_sum - 1) / math_ops.cast(k_z, logits.dtype)
# calculate p
p = math_ops.maximum(
math_ops.cast(0, logits.dtype), z - tau_z[:, array_ops.newaxis])
# If k_z = 0 or if z = nan, then the input is invalid
p_safe = array_ops.where(
math_ops.logical_or(
math_ops.equal(k_z, 0), math_ops.is_nan(z_cumsum[:, -1])),
array_ops.fill([obs, dims], math_ops.cast(float("nan"), logits.dtype)),
p)
return p_safe
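# A minimal usage sketch (kept as comments so it does not run at import time),
# assuming the TensorFlow 1.x graph-mode API (tf.constant / tf.Session) and the
# tf.contrib import path; with logits [[1, 2, 3]] the algorithm above gives
# k_z = 1 and tau = 2, so all mass lands on the largest logit:
#
#   import tensorflow as tf
#   from tensorflow.contrib.sparsemax import sparsemax
#
#   logits = tf.constant([[1.0, 2.0, 3.0]])
#   probs = sparsemax(logits)
#   with tf.Session() as sess:
#     print(sess.run(probs))  # [[0., 0., 1.]] -- exact zeros, unlike softmax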
| apache-2.0 |
ChronoMonochrome/android_external_chromium_org | ppapi/native_client/src/untrusted/pnacl_support_extension/pnacl_component_crx_gen.py | 48 | 13105 | #!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script lays out the PNaCl translator files for a
normal Chrome installer, for one platform. Once run num-of-arches times,
the result can then be packed into a multi-CRX zip file.
This script depends on and pulls in the translator nexes and libraries
from the toolchain directory (so that must be downloaded first) and
it depends on the pnacl_irt_shim.
"""
import json
import logging
import optparse
import os
import platform
import re
import shutil
import sys
J = os.path.join
######################################################################
# Target arch and build arch junk to convert between all the
# silly conventions between SCons, Chrome and PNaCl.
# The version of the arch used by NaCl manifest files.
# This is based on the machine "building" this extension.
# We also use this to identify the arch-specific versions of
# this extension.
def CanonicalArch(arch):
if arch in ('x86_64', 'x86-64', 'x64', 'amd64'):
return 'x86-64'
# TODO(jvoung): be more specific about the arm architecture version?
if arch in ('arm', 'armv7'):
return 'arm'
if re.match('^i.86$', arch) or arch in ('x86_32', 'x86-32', 'ia32', 'x86'):
return 'x86-32'
return None
def GetBuildArch():
arch = platform.machine()
return CanonicalArch(arch)
BUILD_ARCH = GetBuildArch()
ARCHES = ['x86-32', 'x86-64', 'arm']
def IsValidArch(arch):
return arch in ARCHES
# The version of the arch used by configure and pnacl's build.sh.
def StandardArch(arch):
return {'x86-32': 'i686',
'x86-64': 'x86_64',
'arm' : 'armv7'}[arch]
######################################################################
def GetNaClRoot():
""" Find the native_client path, relative to this script.
This script is in ppapi/... and native_client is a sibling of ppapi.
"""
script_file = os.path.abspath(__file__)
def SearchForNaCl(cur_dir):
if cur_dir.endswith('ppapi'):
parent = os.path.dirname(cur_dir)
sibling = os.path.join(parent, 'native_client')
if not os.path.isdir(sibling):
raise Exception('Could not find native_client relative to %s' %
script_file)
return sibling
# Detect when we've hit the root (linux is /, but windows is not...)
next_dir = os.path.dirname(cur_dir)
if cur_dir == next_dir:
raise Exception('Could not find native_client relative to %s' %
script_file)
return SearchForNaCl(next_dir)
return SearchForNaCl(script_file)
NACL_ROOT = GetNaClRoot()
######################################################################
# Normalize the platform name to be the way SCons finds chrome binaries.
# This is based on the platform "building" the extension.
def GetBuildPlatform():
if sys.platform == 'darwin':
platform = 'mac'
elif sys.platform.startswith('linux'):
platform = 'linux'
elif sys.platform in ('cygwin', 'win32'):
platform = 'windows'
else:
raise Exception('Unknown platform: %s' % sys.platform)
return platform
BUILD_PLATFORM = GetBuildPlatform()
def DetermineInstallerArches(target_arch):
arch = CanonicalArch(target_arch)
if not IsValidArch(arch):
raise Exception('Unknown target_arch %s' % target_arch)
# On windows, we need x86-32 and x86-64 (assuming non-windows RT).
if BUILD_PLATFORM == 'windows':
if arch.startswith('x86'):
return ['x86-32', 'x86-64']
else:
raise Exception('Unknown target_arch on windows w/ target_arch == %s' %
target_arch)
else:
return [arch]
######################################################################
class PnaclPackaging(object):
package_base = os.path.dirname(__file__)
# File paths that are set from the command line.
pnacl_template = None
tool_revisions = None
# Agreed-upon name for pnacl-specific info.
pnacl_json = 'pnacl.json'
@staticmethod
def SetPnaclInfoTemplatePath(path):
PnaclPackaging.pnacl_template = path
@staticmethod
def SetToolsRevisionPath(path):
PnaclPackaging.tool_revisions = path
@staticmethod
def PnaclToolsRevision():
with open(PnaclPackaging.tool_revisions, 'r') as f:
for line in f.read().splitlines():
if line.startswith('PNACL_VERSION'):
_, version = line.split('=')
# CWS happens to use version quads, so make it a quad too.
# However, each component of the quad is limited to 64K max.
# Try to handle a bit more.
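# For example, PNACL_VERSION=70000 yields '0.1.1.4464', since
# 70000 = 1 * 65536 + 4464 (note that `version / max_version` below is
# integer division under Python 2, which this script targets).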
max_version = 2 ** 16
version = int(version)
version_more = version / max_version
version = version % max_version
return '0.1.%d.%d' % (version_more, version)
raise Exception('Cannot find PNACL_VERSION in TOOL_REVISIONS file: %s' %
PnaclPackaging.tool_revisions)
@staticmethod
def GeneratePnaclInfo(target_dir, abi_version, arch):
# A note on versions: pnacl_version is the version of translator built
# by the NaCl repo, while abi_version is bumped when the NaCl sandbox
# actually changes.
pnacl_version = PnaclPackaging.PnaclToolsRevision()
with open(PnaclPackaging.pnacl_template, 'r') as pnacl_template_fd:
pnacl_template = json.load(pnacl_template_fd)
out_name = J(target_dir, UseWhitelistedChars(PnaclPackaging.pnacl_json,
None))
with open(out_name, 'w') as output_fd:
pnacl_template['pnacl-arch'] = arch
pnacl_template['pnacl-version'] = pnacl_version
json.dump(pnacl_template, output_fd, sort_keys=True, indent=4)
######################################################################
class PnaclDirs(object):
toolchain_dir = J(NACL_ROOT, 'toolchain')
output_dir = J(toolchain_dir, 'pnacl-package')
@staticmethod
def TranslatorRoot():
return J(PnaclDirs.toolchain_dir, 'pnacl_translator')
@staticmethod
def LibDir(target_arch):
return J(PnaclDirs.TranslatorRoot(), 'lib-%s' % target_arch)
@staticmethod
def SandboxedCompilerDir(target_arch):
return J(PnaclDirs.toolchain_dir,
'pnacl_translator', StandardArch(target_arch), 'bin')
@staticmethod
def SetOutputDir(d):
PnaclDirs.output_dir = d
@staticmethod
def OutputDir():
return PnaclDirs.output_dir
@staticmethod
def OutputAllDir(version_quad):
return J(PnaclDirs.OutputDir(), version_quad)
@staticmethod
def OutputArchBase(arch):
return '%s' % arch
@staticmethod
def OutputArchDir(arch):
# Nest this in another directory so that the layout will be the same
# as the "all"/universal version.
parent_dir = J(PnaclDirs.OutputDir(), PnaclDirs.OutputArchBase(arch))
return (parent_dir, J(parent_dir, PnaclDirs.OutputArchBase(arch)))
######################################################################
def StepBanner(short_desc, long_desc):
logging.info("**** %s\t%s", short_desc, long_desc)
def Clean():
out_dir = PnaclDirs.OutputDir()
StepBanner('CLEAN', 'Cleaning out old packaging: %s' % out_dir)
if os.path.isdir(out_dir):
shutil.rmtree(out_dir)
else:
logging.info('Clean skipped -- no previous output directory!')
######################################################################
def UseWhitelistedChars(orig_basename, arch):
""" Make the filename match the pattern expected by nacl_file_host.
Currently, this assumes there is a prefix "pnacl_public_" and
that the allowed chars are in the set [a-zA-Z0-9_].
"""
if arch:
target_basename = 'pnacl_public_%s_%s' % (arch, orig_basename)
else:
target_basename = 'pnacl_public_%s' % orig_basename
result = re.sub(r'[^a-zA-Z0-9_]', '_', target_basename)
logging.info('UseWhitelistedChars using: %s' % result)
return result
def CopyFlattenDirsAndPrefix(src_dir, arch, dest_dir):
""" Copy files from src_dir to dest_dir.
When copying, also rename the files such that they match the white-listing
pattern in chrome/browser/nacl_host/nacl_file_host.cc.
"""
for (root, dirs, files) in os.walk(src_dir, followlinks=True):
for f in files:
# Assume a flat directory.
assert (f == os.path.basename(f))
full_name = J(root, f)
target_name = UseWhitelistedChars(f, arch)
shutil.copy(full_name, J(dest_dir, target_name))
def BuildArchForInstaller(version_quad, arch, lib_overrides):
""" Build an architecture specific version for the chrome installer.
"""
target_dir = PnaclDirs.OutputDir()
StepBanner('BUILD INSTALLER',
'Packaging for arch %s in %s' % (arch, target_dir))
# Copy llc.nexe and ld.nexe, but with some renaming and directory flattening.
CopyFlattenDirsAndPrefix(PnaclDirs.SandboxedCompilerDir(arch),
arch,
target_dir)
# Copy native libraries, also with renaming and directory flattening.
CopyFlattenDirsAndPrefix(PnaclDirs.LibDir(arch), arch, target_dir)
# Also copy files from the list of overrides.
# This needs the arch tagged onto the name too, like the other files.
if arch in lib_overrides:
for override in lib_overrides[arch]:
override_base = os.path.basename(override)
target_name = UseWhitelistedChars(override_base, arch)
shutil.copy(override, J(target_dir, target_name))
def BuildInstallerStyle(version_quad, lib_overrides, arches):
""" Package the pnacl component for use within the chrome installer
infrastructure. These files need to be named in a special way
so that white-listing of files is easy.
"""
StepBanner("BUILD_ALL", "Packaging installer for version: %s" % version_quad)
for arch in arches:
BuildArchForInstaller(version_quad, arch, lib_overrides)
# Generate pnacl info manifest.
# Hack around the fact that there may be more than one arch, on Windows.
if len(arches) == 1:
arches = arches[0]
PnaclPackaging.GeneratePnaclInfo(PnaclDirs.OutputDir(), version_quad, arches)
######################################################################
def Main():
usage = 'usage: %prog [options] version_arg'
parser = optparse.OptionParser(usage)
# We may want to accept a target directory to dump it in the usual
# output directory (e.g., scons-out).
parser.add_option('-c', '--clean', dest='clean',
action='store_true', default=False,
help='Clean out destination directory first.')
parser.add_option('-d', '--dest', dest='dest',
help='The destination root for laying out the extension')
parser.add_option('-L', '--lib_override',
dest='lib_overrides', action='append', default=[],
help='Specify path to a fresher native library ' +
'that overrides the tarball library with ' +
'(arch:libfile) tuple.')
parser.add_option('-t', '--target_arch',
dest='target_arch', default=None,
help='Only generate the chrome installer version for arch')
parser.add_option('--info_template_path',
dest='info_template_path', default=None,
help='Path of the info template file')
parser.add_option('--tool_revisions_path', dest='tool_revisions_path',
default=None, help='Location of NaCl TOOL_REVISIONS file.')
parser.add_option('-v', '--verbose', dest='verbose', default=False,
action='store_true',
help='Print verbose debug messages.')
(options, args) = parser.parse_args()
if options.verbose:
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.getLogger().setLevel(logging.ERROR)
logging.info('pnacl_component_crx_gen w/ options %s and args %s\n'
% (options, args))
# Set destination directory before doing any cleaning, etc.
if options.dest:
PnaclDirs.SetOutputDir(options.dest)
if options.clean:
Clean()
if options.info_template_path:
PnaclPackaging.SetPnaclInfoTemplatePath(options.info_template_path)
if options.tool_revisions_path:
PnaclPackaging.SetToolsRevisionPath(options.tool_revisions_path)
lib_overrides = {}
for o in options.lib_overrides:
arch, override_lib = o.split(',')
arch = CanonicalArch(arch)
if not IsValidArch(arch):
raise Exception('Unknown arch for -L: %s (from %s)' % (arch, o))
if not os.path.isfile(override_lib):
raise Exception('Override native lib not a file for -L: %s (from %s)' %
(override_lib, o))
override_list = lib_overrides.get(arch, [])
override_list.append(override_lib)
lib_overrides[arch] = override_list
if len(args) != 1:
parser.print_help()
parser.error('Incorrect number of arguments')
abi_version = int(args[0])
arches = DetermineInstallerArches(options.target_arch)
BuildInstallerStyle(abi_version, lib_overrides, arches)
return 0
if __name__ == '__main__':
sys.exit(Main())
| bsd-3-clause |
batxes/4c2vhic | SHH_WT_models_highres/SHH_WT_models_highres_final_output_0.1_-0.1_5000/mtx1_models/SHH_WT_models_highres12150.py | 4 | 88213 | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((2947.21, -1078.44, 2911.71), (0.7, 0.7, 0.7), 182.271)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((2816.6, -947.04, 2865.62), (0.7, 0.7, 0.7), 258.199)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((2779.08, -620.61, 2606.35), (0.7, 0.7, 0.7), 123.897)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((2829.72, -939.091, 2332.88), (0.7, 0.7, 0.7), 146.739)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((2854.86, -1224.37, 1966.67), (0.7, 0.7, 0.7), 179.098)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((2725.3, -672.487, 1932.28), (0.7, 0.7, 0.7), 148.854)
if "particle_6 geometry" not in marker_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((2622.73, -193.504, 1831.42), (0.7, 0.7, 0.7), 196.357)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((2875.43, -582.19, 1590.26), (0.7, 0.7, 0.7), 166.873)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((3085.9, -945.313, 1274.97), (0.7, 0.7, 0.7), 95.4711)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((2920.62, -567.844, 1316.31), (0.7, 0.7, 0.7), 185.401)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((2677.5, -220.914, 1509.09), (0.7, 0.7, 0.7), 151.984)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((2341.24, 140.905, 1836.17), (0.7, 0.7, 0.7), 185.612)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((1984.94, 125.219, 1884.35), (0.7, 0.7, 0.7), 210.273)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((2279.2, -28.8063, 1810.38), (0.7, 0.7, 0.7), 106.892)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((2059.65, -274.61, 1735.26), (0.7, 0.7, 0.7), 202.025)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((1775.27, -602.884, 1859.91), (0.7, 0.7, 0.7), 192.169)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((1386.63, -919.852, 2030.19), (0.7, 0.7, 0.7), 241.11)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((1001.52, -975.906, 2289.08), (0.7, 0.7, 0.7), 128.465)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((681.387, -1153.52, 2653.1), (0.7, 0.7, 0.7), 217.38)
if "particle_19 geometry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((517.802, -1719.83, 2975.96), (0.7, 0.7, 0.7), 184.555)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((803.588, -1176.07, 2896.92), (0.7, 0.7, 0.7), 140.055)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((988.322, -1063.05, 2533.81), (0.7, 0.7, 0.7), 169.708)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((1040.81, -1158.65, 2149), (0.7, 0.7, 0.7), 184.639)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((835.835, -981.095, 2249.02), (0.7, 0.7, 0.7), 119.286)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((723.872, -976.166, 2535.5), (0.7, 0.7, 0.7), 147.754)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((615.183, -765.7, 2780.06), (0.7, 0.7, 0.7), 171.4)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((747.63, -378.202, 2581.43), (0.7, 0.7, 0.7), 156.341)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((1036.98, 140.304, 2732.7), (0.7, 0.7, 0.7), 186.501)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((1298.82, 618.495, 2880.58), (0.7, 0.7, 0.7), 308.325)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((1646.01, 670.8, 2613.34), (0.7, 0.7, 0.7), 138.617)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((1774.11, 800.71, 2362.81), (0.7, 0.7, 0.7), 130.03)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((1498.17, 703.761, 2457.23), (0.7, 0.7, 0.7), 156.552)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"]=s
s= marker_sets["particle_32 geometry"]
mark=s.place_marker((1603.42, 580.88, 2715.92), (0.7, 0.7, 0.7), 183.244)
if "particle_33 geometry" not in marker_sets:
s=new_marker_set('particle_33 geometry')
marker_sets["particle_33 geometry"]=s
s= marker_sets["particle_33 geometry"]
mark=s.place_marker((1683.29, 483.882, 2964.49), (0.7, 0.7, 0.7), 181.382)
if "particle_34 geometry" not in marker_sets:
s=new_marker_set('particle_34 geometry')
marker_sets["particle_34 geometry"]=s
s= marker_sets["particle_34 geometry"]
mark=s.place_marker((1637.1, 539.081, 3156.09), (0.7, 0.7, 0.7), 101.943)
if "particle_35 geometry" not in marker_sets:
s=new_marker_set('particle_35 geometry')
marker_sets["particle_35 geometry"]=s
s= marker_sets["particle_35 geometry"]
mark=s.place_marker((1461.75, 363.605, 3426.03), (1, 0.7, 0), 138.913)
if "particle_36 geometry" not in marker_sets:
s=new_marker_set('particle_36 geometry')
marker_sets["particle_36 geometry"]=s
s= marker_sets["particle_36 geometry"]
mark=s.place_marker((1318.25, 1251.52, 4036.34), (0.7, 0.7, 0.7), 221.737)
if "particle_37 geometry" not in marker_sets:
s=new_marker_set('particle_37 geometry')
marker_sets["particle_37 geometry"]=s
s= marker_sets["particle_37 geometry"]
mark=s.place_marker((1503.83, 2066.73, 4188.74), (0.7, 0.7, 0.7), 256.38)
if "particle_38 geometry" not in marker_sets:
s=new_marker_set('particle_38 geometry')
marker_sets["particle_38 geometry"]=s
s= marker_sets["particle_38 geometry"]
mark=s.place_marker((2020.87, 2530.85, 4279.05), (0.7, 0.7, 0.7), 221.694)
if "particle_39 geometry" not in marker_sets:
s=new_marker_set('particle_39 geometry')
marker_sets["particle_39 geometry"]=s
s= marker_sets["particle_39 geometry"]
mark=s.place_marker((2620.96, 2627.02, 3932.2), (0.7, 0.7, 0.7), 259.341)
if "particle_40 geometry" not in marker_sets:
s=new_marker_set('particle_40 geometry')
marker_sets["particle_40 geometry"]=s
s= marker_sets["particle_40 geometry"]
mark=s.place_marker((2464.92, 2279.76, 3283.02), (0.7, 0.7, 0.7), 117.89)
if "particle_41 geometry" not in marker_sets:
s=new_marker_set('particle_41 geometry')
marker_sets["particle_41 geometry"]=s
s= marker_sets["particle_41 geometry"]
mark=s.place_marker((2090.39, 1715.76, 2841.36), (0.7, 0.7, 0.7), 116.071)
if "particle_42 geometry" not in marker_sets:
s=new_marker_set('particle_42 geometry')
marker_sets["particle_42 geometry"]=s
s= marker_sets["particle_42 geometry"]
mark=s.place_marker((1973.16, 1217.56, 2823.29), (0.7, 0.7, 0.7), 268.224)
if "particle_43 geometry" not in marker_sets:
s=new_marker_set('particle_43 geometry')
marker_sets["particle_43 geometry"]=s
s= marker_sets["particle_43 geometry"]
mark=s.place_marker((2263.2, 1239.91, 2977.13), (0.7, 0.7, 0.7), 386.918)
if "particle_44 geometry" not in marker_sets:
s=new_marker_set('particle_44 geometry')
marker_sets["particle_44 geometry"]=s
s= marker_sets["particle_44 geometry"]
mark=s.place_marker((2687.91, 1661.72, 3265.63), (0.7, 0.7, 0.7), 121.316)
if "particle_45 geometry" not in marker_sets:
s=new_marker_set('particle_45 geometry')
marker_sets["particle_45 geometry"]=s
s= marker_sets["particle_45 geometry"]
mark=s.place_marker((3151.76, 1630.58, 3261.77), (0.7, 0.7, 0.7), 138.363)
if "particle_46 geometry" not in marker_sets:
s=new_marker_set('particle_46 geometry')
marker_sets["particle_46 geometry"]=s
s= marker_sets["particle_46 geometry"]
mark=s.place_marker((2781.21, 1056.76, 3227.58), (1, 0.7, 0), 175.207)
if "particle_47 geometry" not in marker_sets:
s=new_marker_set('particle_47 geometry')
marker_sets["particle_47 geometry"]=s
s= marker_sets["particle_47 geometry"]
mark=s.place_marker((3435.87, 1191.44, 3408.71), (0.7, 0.7, 0.7), 131.468)
if "particle_48 geometry" not in marker_sets:
s=new_marker_set('particle_48 geometry')
marker_sets["particle_48 geometry"]=s
s= marker_sets["particle_48 geometry"]
mark=s.place_marker((4012.1, 1324.71, 3837.26), (0.7, 0.7, 0.7), 287.894)
if "particle_49 geometry" not in marker_sets:
s=new_marker_set('particle_49 geometry')
marker_sets["particle_49 geometry"]=s
s= marker_sets["particle_49 geometry"]
mark=s.place_marker((3577.9, 1643.88, 3819.51), (0.7, 0.7, 0.7), 88.1109)
if "particle_50 geometry" not in marker_sets:
s=new_marker_set('particle_50 geometry')
marker_sets["particle_50 geometry"]=s
s= marker_sets["particle_50 geometry"]
mark=s.place_marker((3057.11, 1676.98, 3486.63), (0.7, 0.7, 0.7), 145.385)
if "particle_51 geometry" not in marker_sets:
s=new_marker_set('particle_51 geometry')
marker_sets["particle_51 geometry"]=s
s= marker_sets["particle_51 geometry"]
mark=s.place_marker((2940.78, 1782.76, 3306.68), (0.7, 0.7, 0.7), 155.452)
if "particle_52 geometry" not in marker_sets:
s=new_marker_set('particle_52 geometry')
marker_sets["particle_52 geometry"]=s
s= marker_sets["particle_52 geometry"]
mark=s.place_marker((3475.9, 2051.07, 3548.17), (0.7, 0.7, 0.7), 145.512)
if "particle_53 geometry" not in marker_sets:
s=new_marker_set('particle_53 geometry')
marker_sets["particle_53 geometry"]=s
s= marker_sets["particle_53 geometry"]
mark=s.place_marker((3902.7, 2293.01, 3621.15), (0.7, 0.7, 0.7), 99.9972)
if "particle_54 geometry" not in marker_sets:
s=new_marker_set('particle_54 geometry')
marker_sets["particle_54 geometry"]=s
s= marker_sets["particle_54 geometry"]
mark=s.place_marker((4277.68, 2495.44, 3723.88), (0.7, 0.7, 0.7), 327.529)
if "particle_55 geometry" not in marker_sets:
s=new_marker_set('particle_55 geometry')
marker_sets["particle_55 geometry"]=s
s= marker_sets["particle_55 geometry"]
mark=s.place_marker((3974.55, 2568.85, 3133.09), (0.7, 0.7, 0.7), 137.983)
if "particle_56 geometry" not in marker_sets:
s=new_marker_set('particle_56 geometry')
marker_sets["particle_56 geometry"]=s
s= marker_sets["particle_56 geometry"]
mark=s.place_marker((3479.46, 2500.04, 2973.5), (0.7, 0.7, 0.7), 83.3733)
if "particle_57 geometry" not in marker_sets:
s=new_marker_set('particle_57 geometry')
marker_sets["particle_57 geometry"]=s
s= marker_sets["particle_57 geometry"]
mark=s.place_marker((2921.99, 2323.63, 2911.29), (0.7, 0.7, 0.7), 101.562)
if "particle_58 geometry" not in marker_sets:
s=new_marker_set('particle_58 geometry')
marker_sets["particle_58 geometry"]=s
s= marker_sets["particle_58 geometry"]
mark=s.place_marker((2479.96, 2033.66, 2926.9), (0.7, 0.7, 0.7), 165.689)
if "particle_59 geometry" not in marker_sets:
s=new_marker_set('particle_59 geometry')
marker_sets["particle_59 geometry"]=s
s= marker_sets["particle_59 geometry"]
mark=s.place_marker((2504.44, 1939.43, 3227.14), (0.7, 0.7, 0.7), 136.925)
if "particle_60 geometry" not in marker_sets:
s=new_marker_set('particle_60 geometry')
marker_sets["particle_60 geometry"]=s
s= marker_sets["particle_60 geometry"]
mark=s.place_marker((2515.67, 1920.15, 3393.67), (0.7, 0.7, 0.7), 123.389)
if "particle_61 geometry" not in marker_sets:
s=new_marker_set('particle_61 geometry')
marker_sets["particle_61 geometry"]=s
s= marker_sets["particle_61 geometry"]
mark=s.place_marker((2747.04, 2302.1, 3424.84), (0.7, 0.7, 0.7), 184.47)
if "particle_62 geometry" not in marker_sets:
s=new_marker_set('particle_62 geometry')
marker_sets["particle_62 geometry"]=s
s= marker_sets["particle_62 geometry"]
mark=s.place_marker((3112.87, 2999.16, 3587.17), (0.7, 0.7, 0.7), 148.473)
if "particle_63 geometry" not in marker_sets:
s=new_marker_set('particle_63 geometry')
marker_sets["particle_63 geometry"]=s
s= marker_sets["particle_63 geometry"]
mark=s.place_marker((3553.7, 3834.12, 3873.11), (0.7, 0.7, 0.7), 241.406)
if "particle_64 geometry" not in marker_sets:
s=new_marker_set('particle_64 geometry')
marker_sets["particle_64 geometry"]=s
s= marker_sets["particle_64 geometry"]
mark=s.place_marker((3578.96, 3395.51, 3376.27), (0.7, 0.7, 0.7), 182.736)
if "particle_65 geometry" not in marker_sets:
s=new_marker_set('particle_65 geometry')
marker_sets["particle_65 geometry"]=s
s= marker_sets["particle_65 geometry"]
mark=s.place_marker((3556.91, 2975.98, 3191.04), (0.7, 0.7, 0.7), 166.62)
if "particle_66 geometry" not in marker_sets:
s=new_marker_set('particle_66 geometry')
marker_sets["particle_66 geometry"]=s
s= marker_sets["particle_66 geometry"]
mark=s.place_marker((3437.04, 2791.4, 3376.27), (0.7, 0.7, 0.7), 113.872)
if "particle_67 geometry" not in marker_sets:
s=new_marker_set('particle_67 geometry')
marker_sets["particle_67 geometry"]=s
s= marker_sets["particle_67 geometry"]
mark=s.place_marker((3272.49, 2526.36, 3344.82), (0.7, 0.7, 0.7), 110.065)
if "particle_68 geometry" not in marker_sets:
s=new_marker_set('particle_68 geometry')
marker_sets["particle_68 geometry"]=s
s= marker_sets["particle_68 geometry"]
mark=s.place_marker((3227.01, 2179.08, 3484.58), (0.7, 0.7, 0.7), 150.08)
if "particle_69 geometry" not in marker_sets:
s=new_marker_set('particle_69 geometry')
marker_sets["particle_69 geometry"]=s
s= marker_sets["particle_69 geometry"]
mark=s.place_marker((3276.08, 1780.6, 3744.83), (0.7, 0.7, 0.7), 118.525)
if "particle_70 geometry" not in marker_sets:
s=new_marker_set('particle_70 geometry')
marker_sets["particle_70 geometry"]=s
s= marker_sets["particle_70 geometry"]
mark=s.place_marker((3302.15, 1480.5, 4196.46), (0.7, 0.7, 0.7), 163.955)
if "particle_71 geometry" not in marker_sets:
s=new_marker_set('particle_71 geometry')
marker_sets["particle_71 geometry"]=s
s= marker_sets["particle_71 geometry"]
mark=s.place_marker((3469.48, 1613.25, 4505.97), (0.7, 0.7, 0.7), 170.131)
if "particle_72 geometry" not in marker_sets:
s=new_marker_set('particle_72 geometry')
marker_sets["particle_72 geometry"]=s
s= marker_sets["particle_72 geometry"]
mark=s.place_marker((3750.74, 2267.19, 4274.71), (0.7, 0.7, 0.7), 78.2127)
if "particle_73 geometry" not in marker_sets:
s=new_marker_set('particle_73 geometry')
marker_sets["particle_73 geometry"]=s
s= marker_sets["particle_73 geometry"]
mark=s.place_marker((4046.8, 2945.2, 3905.33), (0.7, 0.7, 0.7), 251.896)
if "particle_74 geometry" not in marker_sets:
s=new_marker_set('particle_74 geometry')
marker_sets["particle_74 geometry"]=s
s= marker_sets["particle_74 geometry"]
mark=s.place_marker((4276.67, 3382.66, 3434.24), (0.7, 0.7, 0.7), 167.55)
if "particle_75 geometry" not in marker_sets:
s=new_marker_set('particle_75 geometry')
marker_sets["particle_75 geometry"]=s
s= marker_sets["particle_75 geometry"]
mark=s.place_marker((4315.31, 3503.51, 3032.91), (0.7, 0.7, 0.7), 167.846)
if "particle_76 geometry" not in marker_sets:
s=new_marker_set('particle_76 geometry')
marker_sets["particle_76 geometry"]=s
s= marker_sets["particle_76 geometry"]
mark=s.place_marker((4108.45, 3820.12, 3354.73), (0.7, 0.7, 0.7), 259.68)
if "particle_77 geometry" not in marker_sets:
s=new_marker_set('particle_77 geometry')
marker_sets["particle_77 geometry"]=s
s= marker_sets["particle_77 geometry"]
mark=s.place_marker((4067.46, 3722.26, 3800.31), (0.7, 0.7, 0.7), 80.2854)
if "particle_78 geometry" not in marker_sets:
s=new_marker_set('particle_78 geometry')
marker_sets["particle_78 geometry"]=s
s= marker_sets["particle_78 geometry"]
mark=s.place_marker((4212.22, 3819.1, 3715.34), (0.7, 0.7, 0.7), 82.4427)
if "particle_79 geometry" not in marker_sets:
s=new_marker_set('particle_79 geometry')
marker_sets["particle_79 geometry"]=s
s= marker_sets["particle_79 geometry"]
mark=s.place_marker((4210.94, 4200.36, 3779.83), (0.7, 0.7, 0.7), 212.811)
if "particle_80 geometry" not in marker_sets:
s=new_marker_set('particle_80 geometry')
marker_sets["particle_80 geometry"]=s
s= marker_sets["particle_80 geometry"]
mark=s.place_marker((3543.47, 4409, 3593.52), (0.7, 0.7, 0.7), 176.391)
if "particle_81 geometry" not in marker_sets:
s=new_marker_set('particle_81 geometry')
marker_sets["particle_81 geometry"]=s
s= marker_sets["particle_81 geometry"]
mark=s.place_marker((3013.59, 3988.1, 3298.94), (0.7, 0.7, 0.7), 99.3204)
if "particle_82 geometry" not in marker_sets:
s=new_marker_set('particle_82 geometry')
marker_sets["particle_82 geometry"]=s
s= marker_sets["particle_82 geometry"]
mark=s.place_marker((2654.79, 3470.72, 3333.82), (0.7, 0.7, 0.7), 166.62)
if "particle_83 geometry" not in marker_sets:
s=new_marker_set('particle_83 geometry')
marker_sets["particle_83 geometry"]=s
s= marker_sets["particle_83 geometry"]
mark=s.place_marker((2347.65, 3366.89, 3496.77), (0.7, 0.7, 0.7), 102.831)
if "particle_84 geometry" not in marker_sets:
s=new_marker_set('particle_84 geometry')
marker_sets["particle_84 geometry"]=s
s= marker_sets["particle_84 geometry"]
mark=s.place_marker((2600.73, 4175.76, 3717.42), (0.7, 0.7, 0.7), 65.0997)
if "particle_85 geometry" not in marker_sets:
s=new_marker_set('particle_85 geometry')
marker_sets["particle_85 geometry"]=s
s= marker_sets["particle_85 geometry"]
mark=s.place_marker((3061.16, 3953.88, 3536.76), (0.7, 0.7, 0.7), 92.1294)
if "particle_86 geometry" not in marker_sets:
s=new_marker_set('particle_86 geometry')
marker_sets["particle_86 geometry"]=s
s= marker_sets["particle_86 geometry"]
mark=s.place_marker((3307.73, 3472.04, 3291.05), (0.7, 0.7, 0.7), 194.791)
if "particle_87 geometry" not in marker_sets:
s=new_marker_set('particle_87 geometry')
marker_sets["particle_87 geometry"]=s
s= marker_sets["particle_87 geometry"]
mark=s.place_marker((3528.52, 3187.22, 3030.82), (0.7, 0.7, 0.7), 120.766)
if "particle_88 geometry" not in marker_sets:
s=new_marker_set('particle_88 geometry')
marker_sets["particle_88 geometry"]=s
s= marker_sets["particle_88 geometry"]
mark=s.place_marker((3850.39, 3643.4, 2943.13), (0.7, 0.7, 0.7), 217.803)
if "particle_89 geometry" not in marker_sets:
s=new_marker_set('particle_89 geometry')
marker_sets["particle_89 geometry"]=s
s= marker_sets["particle_89 geometry"]
mark=s.place_marker((3802.82, 3596.94, 3333.79), (0.7, 0.7, 0.7), 115.775)
if "particle_90 geometry" not in marker_sets:
s=new_marker_set('particle_90 geometry')
marker_sets["particle_90 geometry"]=s
s= marker_sets["particle_90 geometry"]
mark=s.place_marker((3726.35, 3298.01, 3621.23), (0.7, 0.7, 0.7), 115.648)
if "particle_91 geometry" not in marker_sets:
s=new_marker_set('particle_91 geometry')
marker_sets["particle_91 geometry"]=s
s= marker_sets["particle_91 geometry"]
mark=s.place_marker((3486.51, 3114.93, 3480.38), (0.7, 0.7, 0.7), 83.8386)
if "particle_92 geometry" not in marker_sets:
s=new_marker_set('particle_92 geometry')
marker_sets["particle_92 geometry"]=s
s= marker_sets["particle_92 geometry"]
mark=s.place_marker((3536.44, 3138.72, 3140.01), (0.7, 0.7, 0.7), 124.32)
if "particle_93 geometry" not in marker_sets:
s=new_marker_set('particle_93 geometry')
marker_sets["particle_93 geometry"]=s
s= marker_sets["particle_93 geometry"]
mark=s.place_marker((3753.63, 3014.55, 2807.33), (0.7, 0.7, 0.7), 185.993)
if "particle_94 geometry" not in marker_sets:
s=new_marker_set('particle_94 geometry')
marker_sets["particle_94 geometry"]=s
s= marker_sets["particle_94 geometry"]
mark=s.place_marker((4354.26, 2911.27, 2759.01), (0.7, 0.7, 0.7), 238.826)
if "particle_95 geometry" not in marker_sets:
s=new_marker_set('particle_95 geometry')
marker_sets["particle_95 geometry"]=s
s= marker_sets["particle_95 geometry"]
mark=s.place_marker((4797.78, 2860.87, 3071.21), (0.7, 0.7, 0.7), 128.465)
if "particle_96 geometry" not in marker_sets:
s=new_marker_set('particle_96 geometry')
marker_sets["particle_96 geometry"]=s
s= marker_sets["particle_96 geometry"]
mark=s.place_marker((4380.25, 2824.15, 3539.47), (0.7, 0.7, 0.7), 203.209)
if "particle_97 geometry" not in marker_sets:
s=new_marker_set('particle_97 geometry')
marker_sets["particle_97 geometry"]=s
s= marker_sets["particle_97 geometry"]
mark=s.place_marker((3892.08, 2892.09, 3513.96), (0.7, 0.7, 0.7), 160.486)
if "particle_98 geometry" not in marker_sets:
s=new_marker_set('particle_98 geometry')
marker_sets["particle_98 geometry"]=s
s= marker_sets["particle_98 geometry"]
mark=s.place_marker((3987.96, 3016.98, 3211.57), (0.7, 0.7, 0.7), 149.277)
if "particle_99 geometry" not in marker_sets:
s=new_marker_set('particle_99 geometry')
marker_sets["particle_99 geometry"]=s
s= marker_sets["particle_99 geometry"]
mark=s.place_marker((4414.19, 3310.87, 3394.35), (0.7, 0.7, 0.7), 35.7435)
if "particle_100 geometry" not in marker_sets:
s=new_marker_set('particle_100 geometry')
marker_sets["particle_100 geometry"]=s
s= marker_sets["particle_100 geometry"]
mark=s.place_marker((3550.9, 2895.6, 3609.29), (0.7, 0.7, 0.7), 98.3898)
if "particle_101 geometry" not in marker_sets:
s=new_marker_set('particle_101 geometry')
marker_sets["particle_101 geometry"]=s
s= marker_sets["particle_101 geometry"]
mark=s.place_marker((2611.08, 2468.56, 3585.7), (0.7, 0.7, 0.7), 188.404)
if "particle_102 geometry" not in marker_sets:
s=new_marker_set('particle_102 geometry')
marker_sets["particle_102 geometry"]=s
s= marker_sets["particle_102 geometry"]
mark=s.place_marker((2245.78, 2415.81, 3241.91), (0.7, 0.7, 0.7), 110.318)
if "particle_103 geometry" not in marker_sets:
s=new_marker_set('particle_103 geometry')
marker_sets["particle_103 geometry"]=s
s= marker_sets["particle_103 geometry"]
mark=s.place_marker((2427.57, 2759.85, 3246.72), (0.7, 0.7, 0.7), 127.534)
if "particle_104 geometry" not in marker_sets:
s=new_marker_set('particle_104 geometry')
marker_sets["particle_104 geometry"]=s
s= marker_sets["particle_104 geometry"]
mark=s.place_marker((2736.44, 2964.84, 3310.13), (0.7, 0.7, 0.7), 91.368)
if "particle_105 geometry" not in marker_sets:
s=new_marker_set('particle_105 geometry')
marker_sets["particle_105 geometry"]=s
s= marker_sets["particle_105 geometry"]
mark=s.place_marker((3118.21, 3052.25, 3367.79), (0.7, 0.7, 0.7), 131.045)
if "particle_106 geometry" not in marker_sets:
s=new_marker_set('particle_106 geometry')
marker_sets["particle_106 geometry"]=s
s= marker_sets["particle_106 geometry"]
mark=s.place_marker((3516.75, 2924.37, 3415.88), (0.7, 0.7, 0.7), 143.608)
if "particle_107 geometry" not in marker_sets:
s=new_marker_set('particle_107 geometry')
marker_sets["particle_107 geometry"]=s
s= marker_sets["particle_107 geometry"]
mark=s.place_marker((3767.85, 2902.75, 3143.41), (0.7, 0.7, 0.7), 135.783)
if "particle_108 geometry" not in marker_sets:
s=new_marker_set('particle_108 geometry')
marker_sets["particle_108 geometry"]=s
s= marker_sets["particle_108 geometry"]
mark=s.place_marker((3953.21, 2898.68, 2880.14), (0.7, 0.7, 0.7), 92.5947)
if "particle_109 geometry" not in marker_sets:
s=new_marker_set('particle_109 geometry')
marker_sets["particle_109 geometry"]=s
s= marker_sets["particle_109 geometry"]
mark=s.place_marker((3731.75, 2965.3, 2723.25), (0.7, 0.7, 0.7), 150.123)
if "particle_110 geometry" not in marker_sets:
s=new_marker_set('particle_110 geometry')
marker_sets["particle_110 geometry"]=s
s= marker_sets["particle_110 geometry"]
mark=s.place_marker((3488.46, 3093.57, 2692.68), (0.7, 0.7, 0.7), 121.57)
if "particle_111 geometry" not in marker_sets:
s=new_marker_set('particle_111 geometry')
marker_sets["particle_111 geometry"]=s
s= marker_sets["particle_111 geometry"]
mark=s.place_marker((3562.31, 3342.05, 2496.31), (0.7, 0.7, 0.7), 104.777)
if "particle_112 geometry" not in marker_sets:
s=new_marker_set('particle_112 geometry')
marker_sets["particle_112 geometry"]=s
s= marker_sets["particle_112 geometry"]
mark=s.place_marker((3195.78, 3172.42, 2410.33), (0.7, 0.7, 0.7), 114.844)
if "particle_113 geometry" not in marker_sets:
s=new_marker_set('particle_113 geometry')
marker_sets["particle_113 geometry"]=s
s= marker_sets["particle_113 geometry"]
mark=s.place_marker((2809.76, 2973.86, 2282.91), (0.7, 0.7, 0.7), 150.588)
if "particle_114 geometry" not in marker_sets:
s=new_marker_set('particle_114 geometry')
marker_sets["particle_114 geometry"]=s
s= marker_sets["particle_114 geometry"]
mark=s.place_marker((2568.12, 2847.79, 2600.4), (0.7, 0.7, 0.7), 103.55)
if "particle_115 geometry" not in marker_sets:
s=new_marker_set('particle_115 geometry')
marker_sets["particle_115 geometry"]=s
s= marker_sets["particle_115 geometry"]
mark=s.place_marker((2298.76, 3041.57, 3057.57), (0.7, 0.7, 0.7), 215.392)
if "particle_116 geometry" not in marker_sets:
s=new_marker_set('particle_116 geometry')
marker_sets["particle_116 geometry"]=s
s= marker_sets["particle_116 geometry"]
mark=s.place_marker((1903.07, 3188.62, 3459.18), (0.7, 0.7, 0.7), 99.9126)
if "particle_117 geometry" not in marker_sets:
s=new_marker_set('particle_117 geometry')
marker_sets["particle_117 geometry"]=s
s= marker_sets["particle_117 geometry"]
mark=s.place_marker((1716.09, 3729.9, 3785.43), (0.7, 0.7, 0.7), 99.7857)
if "particle_118 geometry" not in marker_sets:
s=new_marker_set('particle_118 geometry')
marker_sets["particle_118 geometry"]=s
s= marker_sets["particle_118 geometry"]
mark=s.place_marker((1811.35, 4156, 4100.32), (0.7, 0.7, 0.7), 109.98)
if "particle_119 geometry" not in marker_sets:
s=new_marker_set('particle_119 geometry')
marker_sets["particle_119 geometry"]=s
s= marker_sets["particle_119 geometry"]
mark=s.place_marker((1965.52, 3977.64, 3650.51), (0.7, 0.7, 0.7), 102.831)
if "particle_120 geometry" not in marker_sets:
s=new_marker_set('particle_120 geometry')
marker_sets["particle_120 geometry"]=s
s= marker_sets["particle_120 geometry"]
mark=s.place_marker((2199.61, 3701.77, 3458.56), (0.7, 0.7, 0.7), 103.593)
if "particle_121 geometry" not in marker_sets:
s=new_marker_set('particle_121 geometry')
marker_sets["particle_121 geometry"]=s
s= marker_sets["particle_121 geometry"]
mark=s.place_marker((2589.58, 3457.74, 3263.48), (0.7, 0.7, 0.7), 173.472)
if "particle_122 geometry" not in marker_sets:
s=new_marker_set('particle_122 geometry')
marker_sets["particle_122 geometry"]=s
s= marker_sets["particle_122 geometry"]
mark=s.place_marker((3157.12, 3495.69, 3280.2), (0.7, 0.7, 0.7), 113.575)
if "particle_123 geometry" not in marker_sets:
s=new_marker_set('particle_123 geometry')
marker_sets["particle_123 geometry"]=s
s= marker_sets["particle_123 geometry"]
mark=s.place_marker((3530.4, 3244.27, 3082.71), (0.7, 0.7, 0.7), 128.296)
if "particle_124 geometry" not in marker_sets:
s=new_marker_set('particle_124 geometry')
marker_sets["particle_124 geometry"]=s
s= marker_sets["particle_124 geometry"]
mark=s.place_marker((3861.79, 3105.9, 2860.44), (0.7, 0.7, 0.7), 145.004)
if "particle_125 geometry" not in marker_sets:
s=new_marker_set('particle_125 geometry')
marker_sets["particle_125 geometry"]=s
s= marker_sets["particle_125 geometry"]
mark=s.place_marker((4087.87, 2829.23, 2524.85), (0.7, 0.7, 0.7), 148.261)
if "particle_126 geometry" not in marker_sets:
s=new_marker_set('particle_126 geometry')
marker_sets["particle_126 geometry"]=s
s= marker_sets["particle_126 geometry"]
mark=s.place_marker((4649.78, 2732.23, 2314.89), (0.7, 0.7, 0.7), 127.704)
if "particle_127 geometry" not in marker_sets:
s=new_marker_set('particle_127 geometry')
marker_sets["particle_127 geometry"]=s
s= marker_sets["particle_127 geometry"]
mark=s.place_marker((5211.14, 2653.11, 2344.42), (0.7, 0.7, 0.7), 129.607)
if "particle_128 geometry" not in marker_sets:
s=new_marker_set('particle_128 geometry')
marker_sets["particle_128 geometry"]=s
s= marker_sets["particle_128 geometry"]
mark=s.place_marker((4993.61, 2850.34, 2796.04), (0.7, 0.7, 0.7), 139.759)
if "particle_129 geometry" not in marker_sets:
s=new_marker_set('particle_129 geometry')
marker_sets["particle_129 geometry"]=s
s= marker_sets["particle_129 geometry"]
mark=s.place_marker((4447, 2967.87, 3264.46), (0.7, 0.7, 0.7), 118.567)
if "particle_130 geometry" not in marker_sets:
s=new_marker_set('particle_130 geometry')
marker_sets["particle_130 geometry"]=s
s= marker_sets["particle_130 geometry"]
mark=s.place_marker((4078.87, 3281.99, 3349.08), (0.7, 0.7, 0.7), 136.164)
if "particle_131 geometry" not in marker_sets:
s=new_marker_set('particle_131 geometry')
marker_sets["particle_131 geometry"]=s
s= marker_sets["particle_131 geometry"]
mark=s.place_marker((3637.51, 3427.04, 3313.23), (0.7, 0.7, 0.7), 121.655)
if "particle_132 geometry" not in marker_sets:
s=new_marker_set('particle_132 geometry')
marker_sets["particle_132 geometry"]=s
s= marker_sets["particle_132 geometry"]
mark=s.place_marker((3329.92, 3572.18, 3088.68), (0.7, 0.7, 0.7), 127.492)
if "particle_133 geometry" not in marker_sets:
s=new_marker_set('particle_133 geometry')
marker_sets["particle_133 geometry"]=s
s= marker_sets["particle_133 geometry"]
mark=s.place_marker((3275.29, 3928.2, 2888.2), (0.7, 0.7, 0.7), 138.617)
if "particle_134 geometry" not in marker_sets:
s=new_marker_set('particle_134 geometry')
marker_sets["particle_134 geometry"]=s
s= marker_sets["particle_134 geometry"]
mark=s.place_marker((2983.4, 3942.57, 2676.06), (0.7, 0.7, 0.7), 120.766)
if "particle_135 geometry" not in marker_sets:
s=new_marker_set('particle_135 geometry')
marker_sets["particle_135 geometry"]=s
s= marker_sets["particle_135 geometry"]
mark=s.place_marker((2677.5, 3855.55, 2774.8), (0.7, 0.7, 0.7), 145.893)
if "particle_136 geometry" not in marker_sets:
s=new_marker_set('particle_136 geometry')
marker_sets["particle_136 geometry"]=s
s= marker_sets["particle_136 geometry"]
mark=s.place_marker((2626.82, 3417.69, 2979.74), (0.7, 0.7, 0.7), 185.02)
if "particle_137 geometry" not in marker_sets:
s=new_marker_set('particle_137 geometry')
marker_sets["particle_137 geometry"]=s
s= marker_sets["particle_137 geometry"]
mark=s.place_marker((2461.31, 2920.84, 3032.49), (0.7, 0.7, 0.7), 221.314)
if "particle_138 geometry" not in marker_sets:
s=new_marker_set('particle_138 geometry')
marker_sets["particle_138 geometry"]=s
s= marker_sets["particle_138 geometry"]
mark=s.place_marker((2360.79, 2493.81, 2834.42), (0.7, 0.7, 0.7), 165.139)
if "particle_139 geometry" not in marker_sets:
s=new_marker_set('particle_139 geometry')
marker_sets["particle_139 geometry"]=s
s= marker_sets["particle_139 geometry"]
mark=s.place_marker((2400.78, 2457.3, 3074.37), (0.7, 0.7, 0.7), 179.437)
if "particle_140 geometry" not in marker_sets:
s=new_marker_set('particle_140 geometry')
marker_sets["particle_140 geometry"]=s
s= marker_sets["particle_140 geometry"]
mark=s.place_marker((2352.65, 2752.07, 3343.01), (0.7, 0.7, 0.7), 137.898)
if "particle_141 geometry" not in marker_sets:
s=new_marker_set('particle_141 geometry')
marker_sets["particle_141 geometry"]=s
s= marker_sets["particle_141 geometry"]
mark=s.place_marker((2492.16, 3042.48, 3386.32), (0.7, 0.7, 0.7), 124.658)
if "particle_142 geometry" not in marker_sets:
s=new_marker_set('particle_142 geometry')
marker_sets["particle_142 geometry"]=s
s= marker_sets["particle_142 geometry"]
mark=s.place_marker((2632.16, 3318.87, 3193.75), (0.7, 0.7, 0.7), 97.7553)
if "particle_143 geometry" not in marker_sets:
s=new_marker_set('particle_143 geometry')
marker_sets["particle_143 geometry"]=s
s= marker_sets["particle_143 geometry"]
mark=s.place_marker((2754.28, 3587.12, 3073.21), (0.7, 0.7, 0.7), 92.9331)
if "particle_144 geometry" not in marker_sets:
s=new_marker_set('particle_144 geometry')
marker_sets["particle_144 geometry"]=s
s= marker_sets["particle_144 geometry"]
mark=s.place_marker((2911.67, 3885.67, 3080.91), (0.7, 0.7, 0.7), 123.135)
if "particle_145 geometry" not in marker_sets:
s=new_marker_set('particle_145 geometry')
marker_sets["particle_145 geometry"]=s
s= marker_sets["particle_145 geometry"]
mark=s.place_marker((2774.66, 3514.24, 3128.44), (0.7, 0.7, 0.7), 125.716)
if "particle_146 geometry" not in marker_sets:
s=new_marker_set('particle_146 geometry')
marker_sets["particle_146 geometry"]=s
s= marker_sets["particle_146 geometry"]
mark=s.place_marker((2821.38, 3196.05, 3006.85), (0.7, 0.7, 0.7), 127.534)
if "particle_147 geometry" not in marker_sets:
s=new_marker_set('particle_147 geometry')
marker_sets["particle_147 geometry"]=s
s= marker_sets["particle_147 geometry"]
mark=s.place_marker((3103.23, 3071.82, 2936.71), (0.7, 0.7, 0.7), 94.9212)
if "particle_148 geometry" not in marker_sets:
s=new_marker_set('particle_148 geometry')
marker_sets["particle_148 geometry"]=s
s= marker_sets["particle_148 geometry"]
mark=s.place_marker((2968.71, 2657.81, 2876.62), (0.7, 0.7, 0.7), 137.644)
if "particle_149 geometry" not in marker_sets:
s=new_marker_set('particle_149 geometry')
marker_sets["particle_149 geometry"]=s
s= marker_sets["particle_149 geometry"]
mark=s.place_marker((2934.67, 2340.77, 2728.01), (0.7, 0.7, 0.7), 149.277)
if "particle_150 geometry" not in marker_sets:
s=new_marker_set('particle_150 geometry')
marker_sets["particle_150 geometry"]=s
s= marker_sets["particle_150 geometry"]
mark=s.place_marker((2871.17, 2549.63, 2459.32), (0.7, 0.7, 0.7), 103.677)
if "particle_151 geometry" not in marker_sets:
s=new_marker_set('particle_151 geometry')
marker_sets["particle_151 geometry"]=s
s= marker_sets["particle_151 geometry"]
mark=s.place_marker((3000.52, 2909.03, 2182.2), (0.7, 0.7, 0.7), 99.6588)
if "particle_152 geometry" not in marker_sets:
s=new_marker_set('particle_152 geometry')
marker_sets["particle_152 geometry"]=s
s= marker_sets["particle_152 geometry"]
mark=s.place_marker((3104.6, 3193.18, 1979.8), (0.7, 0.7, 0.7), 134.133)
if "particle_153 geometry" not in marker_sets:
s=new_marker_set('particle_153 geometry')
marker_sets["particle_153 geometry"]=s
s= marker_sets["particle_153 geometry"]
mark=s.place_marker((2860.42, 3216.56, 2239.31), (0.7, 0.7, 0.7), 173.007)
if "particle_154 geometry" not in marker_sets:
s=new_marker_set('particle_154 geometry')
marker_sets["particle_154 geometry"]=s
s= marker_sets["particle_154 geometry"]
mark=s.place_marker((2601.76, 2781.13, 2513.08), (0.7, 0.7, 0.7), 141.028)
if "particle_155 geometry" not in marker_sets:
s=new_marker_set('particle_155 geometry')
marker_sets["particle_155 geometry"]=s
s= marker_sets["particle_155 geometry"]
mark=s.place_marker((2450.96, 2436.46, 2787.05), (0.7, 0.7, 0.7), 161.121)
if "particle_156 geometry" not in marker_sets:
s=new_marker_set('particle_156 geometry')
marker_sets["particle_156 geometry"]=s
s= marker_sets["particle_156 geometry"]
mark=s.place_marker((2757.61, 2294.85, 2887.43), (0.7, 0.7, 0.7), 119.582)
if "particle_157 geometry" not in marker_sets:
s=new_marker_set('particle_157 geometry')
marker_sets["particle_157 geometry"]=s
s= marker_sets["particle_157 geometry"]
mark=s.place_marker((3078.24, 2568.46, 2878.47), (0.7, 0.7, 0.7), 137.094)
if "particle_158 geometry" not in marker_sets:
s=new_marker_set('particle_158 geometry')
marker_sets["particle_158 geometry"]=s
s= marker_sets["particle_158 geometry"]
mark=s.place_marker((3286.14, 3001.51, 3033.19), (0.7, 0.7, 0.7), 149.234)
if "particle_159 geometry" not in marker_sets:
s=new_marker_set('particle_159 geometry')
marker_sets["particle_159 geometry"]=s
s= marker_sets["particle_159 geometry"]
mark=s.place_marker((2943.17, 3088.04, 3335.42), (0.7, 0.7, 0.7), 151.011)
if "particle_160 geometry" not in marker_sets:
s=new_marker_set('particle_160 geometry')
marker_sets["particle_160 geometry"]=s
s= marker_sets["particle_160 geometry"]
mark=s.place_marker((2421.42, 2899.59, 3422.23), (0.7, 0.7, 0.7), 184.216)
if "particle_161 geometry" not in marker_sets:
s=new_marker_set('particle_161 geometry')
marker_sets["particle_161 geometry"]=s
s= marker_sets["particle_161 geometry"]
mark=s.place_marker((2191.62, 3020.36, 3098.65), (0.7, 0.7, 0.7), 170.596)
if "particle_162 geometry" not in marker_sets:
s=new_marker_set('particle_162 geometry')
marker_sets["particle_162 geometry"]=s
s= marker_sets["particle_162 geometry"]
mark=s.place_marker((2481.79, 3602.78, 3061.75), (0.7, 0.7, 0.7), 215.603)
if "particle_163 geometry" not in marker_sets:
s=new_marker_set('particle_163 geometry')
marker_sets["particle_163 geometry"]=s
s= marker_sets["particle_163 geometry"]
mark=s.place_marker((2912.55, 4402.88, 3085.1), (0.7, 0.7, 0.7), 79.0164)
if "particle_164 geometry" not in marker_sets:
s=new_marker_set('particle_164 geometry')
marker_sets["particle_164 geometry"]=s
s= marker_sets["particle_164 geometry"]
mark=s.place_marker((2987.55, 4472.38, 2760.15), (0.7, 0.7, 0.7), 77.2821)
if "particle_165 geometry" not in marker_sets:
s=new_marker_set('particle_165 geometry')
marker_sets["particle_165 geometry"]=s
s= marker_sets["particle_165 geometry"]
mark=s.place_marker((3016.02, 4194.61, 2533.31), (0.7, 0.7, 0.7), 188.658)
if "particle_166 geometry" not in marker_sets:
s=new_marker_set('particle_166 geometry')
marker_sets["particle_166 geometry"]=s
s= marker_sets["particle_166 geometry"]
mark=s.place_marker((2759.36, 4238.32, 2342.23), (0.7, 0.7, 0.7), 115.437)
if "particle_167 geometry" not in marker_sets:
s=new_marker_set('particle_167 geometry')
marker_sets["particle_167 geometry"]=s
s= marker_sets["particle_167 geometry"]
mark=s.place_marker((2646.93, 3680.14, 2546.93), (0.7, 0.7, 0.7), 88.4916)
if "particle_168 geometry" not in marker_sets:
s=new_marker_set('particle_168 geometry')
marker_sets["particle_168 geometry"]=s
s= marker_sets["particle_168 geometry"]
mark=s.place_marker((2523.19, 3100.29, 2773.35), (0.7, 0.7, 0.7), 108.88)
if "particle_169 geometry" not in marker_sets:
s=new_marker_set('particle_169 geometry')
marker_sets["particle_169 geometry"]=s
s= marker_sets["particle_169 geometry"]
mark=s.place_marker((2449.8, 2888.3, 3064.74), (0.7, 0.7, 0.7), 172.119)
if "particle_170 geometry" not in marker_sets:
s=new_marker_set('particle_170 geometry')
marker_sets["particle_170 geometry"]=s
s= marker_sets["particle_170 geometry"]
mark=s.place_marker((2541.37, 3365.89, 3092.92), (0.7, 0.7, 0.7), 139.505)
if "particle_171 geometry" not in marker_sets:
s=new_marker_set('particle_171 geometry')
marker_sets["particle_171 geometry"]=s
s= marker_sets["particle_171 geometry"]
mark=s.place_marker((2644.93, 3865.07, 3107.89), (0.7, 0.7, 0.7), 92.7639)
if "particle_172 geometry" not in marker_sets:
s=new_marker_set('particle_172 geometry')
marker_sets["particle_172 geometry"]=s
s= marker_sets["particle_172 geometry"]
mark=s.place_marker((2502.42, 3838.63, 3329.84), (0.7, 0.7, 0.7), 89.8452)
if "particle_173 geometry" not in marker_sets:
s=new_marker_set('particle_173 geometry')
marker_sets["particle_173 geometry"]=s
s= marker_sets["particle_173 geometry"]
mark=s.place_marker((2249.93, 3713.5, 3268), (0.7, 0.7, 0.7), 149.446)
if "particle_174 geometry" not in marker_sets:
s=new_marker_set('particle_174 geometry')
marker_sets["particle_174 geometry"]=s
s= marker_sets["particle_174 geometry"]
mark=s.place_marker((2035.34, 3906.15, 3096.39), (0.7, 0.7, 0.7), 126.858)
if "particle_175 geometry" not in marker_sets:
s=new_marker_set('particle_175 geometry')
marker_sets["particle_175 geometry"]=s
s= marker_sets["particle_175 geometry"]
mark=s.place_marker((2253.15, 4117.36, 3197.84), (0.7, 0.7, 0.7), 106.046)
if "particle_176 geometry" not in marker_sets:
s=new_marker_set('particle_176 geometry')
marker_sets["particle_176 geometry"]=s
s= marker_sets["particle_176 geometry"]
mark=s.place_marker((2621.9, 4072.88, 3541.69), (0.7, 0.7, 0.7), 156.298)
if "particle_177 geometry" not in marker_sets:
s=new_marker_set('particle_177 geometry')
marker_sets["particle_177 geometry"]=s
s= marker_sets["particle_177 geometry"]
mark=s.place_marker((3113.57, 3996.88, 3874.19), (0.7, 0.7, 0.7), 231.212)
if "particle_178 geometry" not in marker_sets:
s=new_marker_set('particle_178 geometry')
marker_sets["particle_178 geometry"]=s
s= marker_sets["particle_178 geometry"]
mark=s.place_marker((3126.17, 3553.33, 4181.52), (0.7, 0.7, 0.7), 88.4916)
if "particle_179 geometry" not in marker_sets:
s=new_marker_set('particle_179 geometry')
marker_sets["particle_179 geometry"]=s
s= marker_sets["particle_179 geometry"]
mark=s.place_marker((2864.32, 3148.05, 4134.94), (0.7, 0.7, 0.7), 111.334)
if "particle_180 geometry" not in marker_sets:
s=new_marker_set('particle_180 geometry')
marker_sets["particle_180 geometry"]=s
s= marker_sets["particle_180 geometry"]
mark=s.place_marker((2546.52, 2823.04, 3738.43), (0.7, 0.7, 0.7), 127.619)
if "particle_181 geometry" not in marker_sets:
s=new_marker_set('particle_181 geometry')
marker_sets["particle_181 geometry"]=s
s= marker_sets["particle_181 geometry"]
mark=s.place_marker((2428.06, 2596.21, 3362), (0.7, 0.7, 0.7), 230.746)
if "particle_182 geometry" not in marker_sets:
s=new_marker_set('particle_182 geometry')
marker_sets["particle_182 geometry"]=s
s= marker_sets["particle_182 geometry"]
mark=s.place_marker((2688.85, 2935.98, 3306.89), (0.7, 0.7, 0.7), 124.573)
if "particle_183 geometry" not in marker_sets:
s=new_marker_set('particle_183 geometry')
marker_sets["particle_183 geometry"]=s
s= marker_sets["particle_183 geometry"]
mark=s.place_marker((2985.46, 3514.61, 3407.23), (0.7, 0.7, 0.7), 124.489)
if "particle_184 geometry" not in marker_sets:
s=new_marker_set('particle_184 geometry')
marker_sets["particle_184 geometry"]=s
s= marker_sets["particle_184 geometry"]
mark=s.place_marker((2847.46, 3817.14, 3201.6), (0.7, 0.7, 0.7), 196.61)
if "particle_185 geometry" not in marker_sets:
s=new_marker_set('particle_185 geometry')
marker_sets["particle_185 geometry"]=s
s= marker_sets["particle_185 geometry"]
mark=s.place_marker((2504.68, 3737.66, 3330.19), (0.7, 0.7, 0.7), 134.049)
if "particle_186 geometry" not in marker_sets:
s=new_marker_set('particle_186 geometry')
marker_sets["particle_186 geometry"]=s
s= marker_sets["particle_186 geometry"]
mark=s.place_marker((2318.67, 3750.22, 3622.22), (0.7, 0.7, 0.7), 141.493)
if "particle_187 geometry" not in marker_sets:
s=new_marker_set('particle_187 geometry')
marker_sets["particle_187 geometry"]=s
s= marker_sets["particle_187 geometry"]
mark=s.place_marker((2206.4, 3950.07, 3988.41), (0.7, 0.7, 0.7), 172.203)
if "particle_188 geometry" not in marker_sets:
s=new_marker_set('particle_188 geometry')
marker_sets["particle_188 geometry"]=s
s= marker_sets["particle_188 geometry"]
mark=s.place_marker((2141.94, 3861.3, 3375.01), (0.7, 0.7, 0.7), 271.354)
if "particle_189 geometry" not in marker_sets:
s=new_marker_set('particle_189 geometry')
marker_sets["particle_189 geometry"]=s
s= marker_sets["particle_189 geometry"]
mark=s.place_marker((2406.37, 3768.78, 2974.79), (0.7, 0.7, 0.7), 97.0785)
if "particle_190 geometry" not in marker_sets:
s=new_marker_set('particle_190 geometry')
marker_sets["particle_190 geometry"]=s
s= marker_sets["particle_190 geometry"]
mark=s.place_marker((2760.66, 3753.72, 2812.37), (0.7, 0.7, 0.7), 151.857)
if "particle_191 geometry" not in marker_sets:
s=new_marker_set('particle_191 geometry')
marker_sets["particle_191 geometry"]=s
s= marker_sets["particle_191 geometry"]
mark=s.place_marker((3108.61, 3684.06, 2371.08), (0.7, 0.7, 0.7), 199.233)
if "particle_192 geometry" not in marker_sets:
s=new_marker_set('particle_192 geometry')
marker_sets["particle_192 geometry"]=s
s= marker_sets["particle_192 geometry"]
mark=s.place_marker((3039.11, 3154, 2115.42), (0.7, 0.7, 0.7), 118.863)
if "particle_193 geometry" not in marker_sets:
s=new_marker_set('particle_193 geometry')
marker_sets["particle_193 geometry"]=s
s= marker_sets["particle_193 geometry"]
mark=s.place_marker((3345.27, 2895.34, 1924.18), (0.7, 0.7, 0.7), 172.415)
if "particle_194 geometry" not in marker_sets:
s=new_marker_set('particle_194 geometry')
marker_sets["particle_194 geometry"]=s
s= marker_sets["particle_194 geometry"]
mark=s.place_marker((3858.86, 2950.47, 1751.09), (0.7, 0.7, 0.7), 134.26)
if "particle_195 geometry" not in marker_sets:
s=new_marker_set('particle_195 geometry')
marker_sets["particle_195 geometry"]=s
s= marker_sets["particle_195 geometry"]
mark=s.place_marker((4681.16, 3318.1, 1434.04), (0.7, 0.7, 0.7), 139.548)
if "particle_196 geometry" not in marker_sets:
s=new_marker_set('particle_196 geometry')
marker_sets["particle_196 geometry"]=s
s= marker_sets["particle_196 geometry"]
mark=s.place_marker((4846.77, 3286.82, 1909.39), (0.7, 0.7, 0.7), 196.526)
if "particle_197 geometry" not in marker_sets:
s=new_marker_set('particle_197 geometry')
marker_sets["particle_197 geometry"]=s
s= marker_sets["particle_197 geometry"]
mark=s.place_marker((4374.21, 3229.24, 2473.6), (0.7, 0.7, 0.7), 136.206)
if "particle_198 geometry" not in marker_sets:
s=new_marker_set('particle_198 geometry')
marker_sets["particle_198 geometry"]=s
s= marker_sets["particle_198 geometry"]
mark=s.place_marker((3650.57, 2791.57, 2914.34), (0.7, 0.7, 0.7), 152.322)
if "particle_199 geometry" not in marker_sets:
s=new_marker_set('particle_199 geometry')
marker_sets["particle_199 geometry"]=s
s= marker_sets["particle_199 geometry"]
mark=s.place_marker((3073.13, 2558.53, 3083.68), (0.7, 0.7, 0.7), 126.054)
if "particle_200 geometry" not in marker_sets:
s=new_marker_set('particle_200 geometry')
marker_sets["particle_200 geometry"]=s
s= marker_sets["particle_200 geometry"]
mark=s.place_marker((2953.4, 2927.1, 3236.88), (0.7, 0.7, 0.7), 164.378)
if "particle_201 geometry" not in marker_sets:
s=new_marker_set('particle_201 geometry')
marker_sets["particle_201 geometry"]=s
s= marker_sets["particle_201 geometry"]
mark=s.place_marker((2898.3, 3366.11, 3115.13), (0.7, 0.7, 0.7), 122.205)
if "particle_202 geometry" not in marker_sets:
s=new_marker_set('particle_202 geometry')
marker_sets["particle_202 geometry"]=s
s= marker_sets["particle_202 geometry"]
mark=s.place_marker((2790.65, 3715.72, 2867.37), (0.7, 0.7, 0.7), 134.979)
if "particle_203 geometry" not in marker_sets:
s=new_marker_set('particle_203 geometry')
marker_sets["particle_203 geometry"]=s
s= marker_sets["particle_203 geometry"]
mark=s.place_marker((2694.92, 3491.55, 2618.25), (0.7, 0.7, 0.7), 136.375)
if "particle_204 geometry" not in marker_sets:
s=new_marker_set('particle_204 geometry')
marker_sets["particle_204 geometry"]=s
s= marker_sets["particle_204 geometry"]
mark=s.place_marker((2974.56, 3427.7, 2512.96), (0.7, 0.7, 0.7), 151.688)
if "particle_205 geometry" not in marker_sets:
s=new_marker_set('particle_205 geometry')
marker_sets["particle_205 geometry"]=s
s= marker_sets["particle_205 geometry"]
mark=s.place_marker((3153.69, 3521.3, 2464.57), (0.7, 0.7, 0.7), 116.156)
if "particle_206 geometry" not in marker_sets:
s=new_marker_set('particle_206 geometry')
marker_sets["particle_206 geometry"]=s
s= marker_sets["particle_206 geometry"]
mark=s.place_marker((2753.38, 2907.2, 2611.3), (0.7, 0.7, 0.7), 122.839)
if "particle_207 geometry" not in marker_sets:
s=new_marker_set('particle_207 geometry')
marker_sets["particle_207 geometry"]=s
s= marker_sets["particle_207 geometry"]
mark=s.place_marker((2742.89, 2461.72, 2848.23), (0.7, 0.7, 0.7), 164.716)
if "particle_208 geometry" not in marker_sets:
s=new_marker_set('particle_208 geometry')
marker_sets["particle_208 geometry"]=s
s= marker_sets["particle_208 geometry"]
mark=s.place_marker((3477.31, 2910.72, 2990.31), (0.7, 0.7, 0.7), 303.672)
if "particle_209 geometry" not in marker_sets:
s=new_marker_set('particle_209 geometry')
marker_sets["particle_209 geometry"]=s
s= marker_sets["particle_209 geometry"]
mark=s.place_marker((4083.08, 3702.52, 2756.03), (0.7, 0.7, 0.7), 220.298)
if "particle_210 geometry" not in marker_sets:
s=new_marker_set('particle_210 geometry')
marker_sets["particle_210 geometry"]=s
s= marker_sets["particle_210 geometry"]
mark=s.place_marker((3558.02, 4073.51, 2764.53), (0.7, 0.7, 0.7), 175.883)
if "particle_211 geometry" not in marker_sets:
s=new_marker_set('particle_211 geometry')
marker_sets["particle_211 geometry"]=s
s= marker_sets["particle_211 geometry"]
mark=s.place_marker((3039.63, 4289.44, 3158.78), (0.7, 0.7, 0.7), 233.581)
if "particle_212 geometry" not in marker_sets:
s=new_marker_set('particle_212 geometry')
marker_sets["particle_212 geometry"]=s
s= marker_sets["particle_212 geometry"]
mark=s.place_marker((2766.27, 3997.77, 3820.51), (0.7, 0.7, 0.7), 231.127)
if "particle_213 geometry" not in marker_sets:
s=new_marker_set('particle_213 geometry')
marker_sets["particle_213 geometry"]=s
s= marker_sets["particle_213 geometry"]
mark=s.place_marker((2225, 4031.14, 4086.26), (0.7, 0.7, 0.7), 247.413)
if "particle_214 geometry" not in marker_sets:
s=new_marker_set('particle_214 geometry')
marker_sets["particle_214 geometry"]=s
s= marker_sets["particle_214 geometry"]
mark=s.place_marker((1658.51, 4305.59, 3956.4), (0.7, 0.7, 0.7), 200.206)
if "particle_215 geometry" not in marker_sets:
s=new_marker_set('particle_215 geometry')
marker_sets["particle_215 geometry"]=s
s= marker_sets["particle_215 geometry"]
mark=s.place_marker((1632.62, 4499.97, 3556.22), (0.7, 0.7, 0.7), 150.419)
if "particle_216 geometry" not in marker_sets:
s=new_marker_set('particle_216 geometry')
marker_sets["particle_216 geometry"]=s
s= marker_sets["particle_216 geometry"]
mark=s.place_marker((1647.57, 3894.92, 3550.95), (0.7, 0.7, 0.7), 140.14)
if "particle_217 geometry" not in marker_sets:
s=new_marker_set('particle_217 geometry')
marker_sets["particle_217 geometry"]=s
s= marker_sets["particle_217 geometry"]
mark=s.place_marker((1562.58, 3514.65, 3802.95), (0.7, 0.7, 0.7), 132.949)
if "particle_218 geometry" not in marker_sets:
s=new_marker_set('particle_218 geometry')
marker_sets["particle_218 geometry"]=s
s= marker_sets["particle_218 geometry"]
mark=s.place_marker((1520.9, 3120.08, 3862.44), (0.7, 0.7, 0.7), 141.113)
if "particle_219 geometry" not in marker_sets:
s=new_marker_set('particle_219 geometry')
marker_sets["particle_219 geometry"]=s
s= marker_sets["particle_219 geometry"]
mark=s.place_marker((1739.36, 3057.06, 4101.72), (0.7, 0.7, 0.7), 171.526)
if "particle_220 geometry" not in marker_sets:
s=new_marker_set('particle_220 geometry')
marker_sets["particle_220 geometry"]=s
s= marker_sets["particle_220 geometry"]
mark=s.place_marker((2063.85, 3546.73, 4184.47), (0.7, 0.7, 0.7), 326.937)
if "particle_221 geometry" not in marker_sets:
s=new_marker_set('particle_221 geometry')
marker_sets["particle_221 geometry"]=s
s= marker_sets["particle_221 geometry"]
mark=s.place_marker((2482.65, 3901.69, 3860.25), (0.7, 0.7, 0.7), 92.0871)
if "particle_222 geometry" not in marker_sets:
s=new_marker_set('particle_222 geometry')
marker_sets["particle_222 geometry"]=s
s= marker_sets["particle_222 geometry"]
mark=s.place_marker((2600.22, 3582.38, 3736.65), (0.7, 0.7, 0.7), 210.273)
if "particle_223 geometry" not in marker_sets:
s=new_marker_set('particle_223 geometry')
marker_sets["particle_223 geometry"]=s
s= marker_sets["particle_223 geometry"]
mark=s.place_marker((2332.67, 2918.93, 3613.39), (0.7, 0.7, 0.7), 122.628)
if "particle_224 geometry" not in marker_sets:
s=new_marker_set('particle_224 geometry')
marker_sets["particle_224 geometry"]=s
s= marker_sets["particle_224 geometry"]
mark=s.place_marker((2212.23, 2696.83, 3634.48), (0.7, 0.7, 0.7), 109.176)
if "particle_225 geometry" not in marker_sets:
s=new_marker_set('particle_225 geometry')
marker_sets["particle_225 geometry"]=s
s= marker_sets["particle_225 geometry"]
mark=s.place_marker((2334.37, 2934.24, 3532.04), (0.7, 0.7, 0.7), 142.213)
if "particle_226 geometry" not in marker_sets:
s=new_marker_set('particle_226 geometry')
marker_sets["particle_226 geometry"]=s
s= marker_sets["particle_226 geometry"]
mark=s.place_marker((2746.97, 3024.79, 3516.08), (0.7, 0.7, 0.7), 250.078)
if "particle_227 geometry" not in marker_sets:
s=new_marker_set('particle_227 geometry')
marker_sets["particle_227 geometry"]=s
s= marker_sets["particle_227 geometry"]
mark=s.place_marker((2664.8, 3106.6, 3065.85), (0.7, 0.7, 0.7), 123.558)
if "particle_228 geometry" not in marker_sets:
s=new_marker_set('particle_228 geometry')
marker_sets["particle_228 geometry"]=s
s= marker_sets["particle_228 geometry"]
mark=s.place_marker((2386.42, 2900.23, 2733.18), (0.7, 0.7, 0.7), 235.992)
if "particle_229 geometry" not in marker_sets:
s=new_marker_set('particle_229 geometry')
marker_sets["particle_229 geometry"]=s
s= marker_sets["particle_229 geometry"]
mark=s.place_marker((2195.12, 2796.86, 2298.87), (0.7, 0.7, 0.7), 172.373)
if "particle_230 geometry" not in marker_sets:
s=new_marker_set('particle_230 geometry')
marker_sets["particle_230 geometry"]=s
s= marker_sets["particle_230 geometry"]
mark=s.place_marker((2309.16, 3108.91, 1977.12), (0.7, 0.7, 0.7), 152.322)
if "particle_231 geometry" not in marker_sets:
s=new_marker_set('particle_231 geometry')
marker_sets["particle_231 geometry"]=s
s= marker_sets["particle_231 geometry"]
mark=s.place_marker((2529.54, 3274.22, 1767.55), (0.7, 0.7, 0.7), 196.653)
if "particle_232 geometry" not in marker_sets:
s=new_marker_set('particle_232 geometry')
marker_sets["particle_232 geometry"]=s
s= marker_sets["particle_232 geometry"]
mark=s.place_marker((2148.9, 3222.91, 2044.37), (0.7, 0.7, 0.7), 134.091)
if "particle_233 geometry" not in marker_sets:
s=new_marker_set('particle_233 geometry')
marker_sets["particle_233 geometry"]=s
s= marker_sets["particle_233 geometry"]
mark=s.place_marker((1819.03, 3136.45, 2073.22), (0.7, 0.7, 0.7), 180.325)
if "particle_234 geometry" not in marker_sets:
s=new_marker_set('particle_234 geometry')
marker_sets["particle_234 geometry"]=s
s= marker_sets["particle_234 geometry"]
mark=s.place_marker((2190.74, 2967.41, 2298.12), (0.7, 0.7, 0.7), 218.437)
if "particle_235 geometry" not in marker_sets:
s=new_marker_set('particle_235 geometry')
marker_sets["particle_235 geometry"]=s
s= marker_sets["particle_235 geometry"]
mark=s.place_marker((2549.46, 3176.24, 2516.46), (0.7, 0.7, 0.7), 148.008)
if "particle_236 geometry" not in marker_sets:
s=new_marker_set('particle_236 geometry')
marker_sets["particle_236 geometry"]=s
s= marker_sets["particle_236 geometry"]
mark=s.place_marker((2803.1, 3739.81, 2680.96), (0.7, 0.7, 0.7), 191.873)
if "particle_237 geometry" not in marker_sets:
s=new_marker_set('particle_237 geometry')
marker_sets["particle_237 geometry"]=s
s= marker_sets["particle_237 geometry"]
mark=s.place_marker((2799.07, 4195.02, 2995.03), (0.7, 0.7, 0.7), 138.575)
if "particle_238 geometry" not in marker_sets:
s=new_marker_set('particle_238 geometry')
marker_sets["particle_238 geometry"]=s
s= marker_sets["particle_238 geometry"]
mark=s.place_marker((2646.88, 4572.11, 2858.35), (0.7, 0.7, 0.7), 161.205)
if "particle_239 geometry" not in marker_sets:
s=new_marker_set('particle_239 geometry')
marker_sets["particle_239 geometry"]=s
s= marker_sets["particle_239 geometry"]
mark=s.place_marker((2961.1, 4258.9, 2652.81), (0.7, 0.7, 0.7), 288.021)
if "particle_240 geometry" not in marker_sets:
s=new_marker_set('particle_240 geometry')
marker_sets["particle_240 geometry"]=s
s= marker_sets["particle_240 geometry"]
mark=s.place_marker((2516.55, 3793.63, 2300.44), (0.7, 0.7, 0.7), 227.405)
if "particle_241 geometry" not in marker_sets:
s=new_marker_set('particle_241 geometry')
marker_sets["particle_241 geometry"]=s
s= marker_sets["particle_241 geometry"]
mark=s.place_marker((2366.24, 3318, 2145.38), (0.7, 0.7, 0.7), 126.519)
if "particle_242 geometry" not in marker_sets:
s=new_marker_set('particle_242 geometry')
marker_sets["particle_242 geometry"]=s
s= marker_sets["particle_242 geometry"]
mark=s.place_marker((2589.79, 3481.75, 1994.37), (0.7, 0.7, 0.7), 117.975)
if "particle_243 geometry" not in marker_sets:
s=new_marker_set('particle_243 geometry')
marker_sets["particle_243 geometry"]=s
s= marker_sets["particle_243 geometry"]
mark=s.place_marker((2628.11, 3159.6, 2220.25), (0.7, 0.7, 0.7), 200.883)
if "particle_244 geometry" not in marker_sets:
s=new_marker_set('particle_244 geometry')
marker_sets["particle_244 geometry"]=s
s= marker_sets["particle_244 geometry"]
mark=s.place_marker((2284.81, 2998.66, 2361.56), (0.7, 0.7, 0.7), 158.794)
if "particle_245 geometry" not in marker_sets:
s=new_marker_set('particle_245 geometry')
marker_sets["particle_245 geometry"]=s
s= marker_sets["particle_245 geometry"]
mark=s.place_marker((1990.94, 3002.33, 2510.44), (0.7, 0.7, 0.7), 115.86)
if "particle_246 geometry" not in marker_sets:
s=new_marker_set('particle_246 geometry')
marker_sets["particle_246 geometry"]=s
s= marker_sets["particle_246 geometry"]
mark=s.place_marker((1856.08, 2803.43, 2595.07), (0.7, 0.7, 0.7), 133.034)
if "particle_247 geometry" not in marker_sets:
s=new_marker_set('particle_247 geometry')
marker_sets["particle_247 geometry"]=s
s= marker_sets["particle_247 geometry"]
mark=s.place_marker((2051, 2523.92, 2268.19), (0.7, 0.7, 0.7), 314.627)
if "particle_248 geometry" not in marker_sets:
s=new_marker_set('particle_248 geometry')
marker_sets["particle_248 geometry"]=s
s= marker_sets["particle_248 geometry"]
mark=s.place_marker((2204.26, 2847.35, 2212.31), (0.7, 0.7, 0.7), 115.352)
if "particle_249 geometry" not in marker_sets:
s=new_marker_set('particle_249 geometry')
marker_sets["particle_249 geometry"]=s
s= marker_sets["particle_249 geometry"]
mark=s.place_marker((2147.45, 3267.58, 2272.55), (0.7, 0.7, 0.7), 180.621)
if "particle_250 geometry" not in marker_sets:
s=new_marker_set('particle_250 geometry')
marker_sets["particle_250 geometry"]=s
s= marker_sets["particle_250 geometry"]
mark=s.place_marker((1876.07, 3291.99, 2514.52), (0.7, 0.7, 0.7), 126.265)
if "particle_251 geometry" not in marker_sets:
s=new_marker_set('particle_251 geometry')
marker_sets["particle_251 geometry"]=s
s= marker_sets["particle_251 geometry"]
mark=s.place_marker((1755.12, 3075.31, 2818.71), (0.7, 0.7, 0.7), 133.541)
if "particle_252 geometry" not in marker_sets:
s=new_marker_set('particle_252 geometry')
marker_sets["particle_252 geometry"]=s
s= marker_sets["particle_252 geometry"]
mark=s.place_marker((1624.57, 3062.77, 3232.52), (0.7, 0.7, 0.7), 171.019)
if "particle_253 geometry" not in marker_sets:
s=new_marker_set('particle_253 geometry')
marker_sets["particle_253 geometry"]=s
s= marker_sets["particle_253 geometry"]
mark=s.place_marker((1550.96, 3206.91, 3602.25), (0.7, 0.7, 0.7), 115.437)
if "particle_254 geometry" not in marker_sets:
s=new_marker_set('particle_254 geometry')
marker_sets["particle_254 geometry"]=s
s= marker_sets["particle_254 geometry"]
mark=s.place_marker((1477.1, 3257.52, 3300.42), (0.7, 0.7, 0.7), 158.583)
if "particle_255 geometry" not in marker_sets:
s=new_marker_set('particle_255 geometry')
marker_sets["particle_255 geometry"]=s
s= marker_sets["particle_255 geometry"]
mark=s.place_marker((1732.42, 2952.52, 3115.43), (0.7, 0.7, 0.7), 192)
if "particle_256 geometry" not in marker_sets:
s=new_marker_set('particle_256 geometry')
marker_sets["particle_256 geometry"]=s
s= marker_sets["particle_256 geometry"]
mark=s.place_marker((1882.56, 2696.25, 2809.88), (0.7, 0.7, 0.7), 150.165)
if "particle_257 geometry" not in marker_sets:
s=new_marker_set('particle_257 geometry')
marker_sets["particle_257 geometry"]=s
s= marker_sets["particle_257 geometry"]
mark=s.place_marker((1925.11, 2521, 2918.79), (0.7, 0.7, 0.7), 157.567)
if "particle_258 geometry" not in marker_sets:
s=new_marker_set('particle_258 geometry')
marker_sets["particle_258 geometry"]=s
s= marker_sets["particle_258 geometry"]
mark=s.place_marker((2037.62, 2471.91, 2938.03), (0.7, 0.7, 0.7), 199.36)
if "particle_259 geometry" not in marker_sets:
s=new_marker_set('particle_259 geometry')
marker_sets["particle_259 geometry"]=s
s= marker_sets["particle_259 geometry"]
mark=s.place_marker((2195.83, 2890.45, 3078.67), (0.7, 0.7, 0.7), 105.369)
if "particle_260 geometry" not in marker_sets:
s=new_marker_set('particle_260 geometry')
marker_sets["particle_260 geometry"]=s
s= marker_sets["particle_260 geometry"]
mark=s.place_marker((2395.32, 3077.85, 3106.89), (0.7, 0.7, 0.7), 118.651)
if "particle_261 geometry" not in marker_sets:
s=new_marker_set('particle_261 geometry')
marker_sets["particle_261 geometry"]=s
s= marker_sets["particle_261 geometry"]
mark=s.place_marker((2322.65, 2762.36, 2801.38), (0.7, 0.7, 0.7), 219.664)
if "particle_262 geometry" not in marker_sets:
s=new_marker_set('particle_262 geometry')
marker_sets["particle_262 geometry"]=s
s= marker_sets["particle_262 geometry"]
mark=s.place_marker((2123.87, 2257.87, 2564.61), (0.7, 0.7, 0.7), 196.018)
if "particle_263 geometry" not in marker_sets:
s=new_marker_set('particle_263 geometry')
marker_sets["particle_263 geometry"]=s
s= marker_sets["particle_263 geometry"]
mark=s.place_marker((2050.87, 1800.33, 2355.96), (0.7, 0.7, 0.7), 218.141)
if "particle_264 geometry" not in marker_sets:
s=new_marker_set('particle_264 geometry')
marker_sets["particle_264 geometry"]=s
s= marker_sets["particle_264 geometry"]
mark=s.place_marker((2144.02, 1596.49, 2628.36), (0.7, 0.7, 0.7), 181.636)
if "particle_265 geometry" not in marker_sets:
s=new_marker_set('particle_265 geometry')
marker_sets["particle_265 geometry"]=s
s= marker_sets["particle_265 geometry"]
mark=s.place_marker((2237.05, 1791.45, 2829.7), (0.7, 0.7, 0.7), 195.003)
if "particle_266 geometry" not in marker_sets:
s=new_marker_set('particle_266 geometry')
marker_sets["particle_266 geometry"]=s
s= marker_sets["particle_266 geometry"]
mark=s.place_marker((2080.6, 1654.58, 2704.55), (0.7, 0.7, 0.7), 139.209)
if "particle_267 geometry" not in marker_sets:
s=new_marker_set('particle_267 geometry')
marker_sets["particle_267 geometry"]=s
s= marker_sets["particle_267 geometry"]
mark=s.place_marker((2011.76, 1642.35, 2748.98), (0.7, 0.7, 0.7), 189.885)
if "particle_268 geometry" not in marker_sets:
s=new_marker_set('particle_268 geometry')
marker_sets["particle_268 geometry"]=s
s= marker_sets["particle_268 geometry"]
mark=s.place_marker((1816.48, 1940.86, 2789), (0.7, 0.7, 0.7), 267.674)
if "particle_269 geometry" not in marker_sets:
s=new_marker_set('particle_269 geometry')
marker_sets["particle_269 geometry"]=s
s= marker_sets["particle_269 geometry"]
mark=s.place_marker((1512.68, 2378.17, 2596.75), (0.7, 0.7, 0.7), 196.568)
if "particle_270 geometry" not in marker_sets:
s=new_marker_set('particle_270 geometry')
marker_sets["particle_270 geometry"]=s
s= marker_sets["particle_270 geometry"]
mark=s.place_marker((1397.8, 2223.28, 2739.4), (0.7, 0.7, 0.7), 192.423)
if "particle_271 geometry" not in marker_sets:
s=new_marker_set('particle_271 geometry')
marker_sets["particle_271 geometry"]=s
s= marker_sets["particle_271 geometry"]
mark=s.place_marker((1373.94, 1822.01, 2841.48), (1, 0.7, 0), 202.405)
if "particle_272 geometry" not in marker_sets:
s=new_marker_set('particle_272 geometry')
marker_sets["particle_272 geometry"]=s
s= marker_sets["particle_272 geometry"]
mark=s.place_marker((1282.7, 2671.44, 2656.5), (0.7, 0.7, 0.7), 135.529)
if "particle_273 geometry" not in marker_sets:
s=new_marker_set('particle_273 geometry')
marker_sets["particle_273 geometry"]=s
s= marker_sets["particle_273 geometry"]
mark=s.place_marker((1100.45, 3661.35, 2580.76), (0.7, 0.7, 0.7), 114.21)
if "particle_274 geometry" not in marker_sets:
s=new_marker_set('particle_274 geometry')
marker_sets["particle_274 geometry"]=s
s= marker_sets["particle_274 geometry"]
mark=s.place_marker((1391.78, 3758.87, 2694.98), (0.7, 0.7, 0.7), 159.133)
if "particle_275 geometry" not in marker_sets:
s=new_marker_set('particle_275 geometry')
marker_sets["particle_275 geometry"]=s
s= marker_sets["particle_275 geometry"]
mark=s.place_marker((1730.51, 3560.48, 2580.16), (0.7, 0.7, 0.7), 144.412)
if "particle_276 geometry" not in marker_sets:
s=new_marker_set('particle_276 geometry')
marker_sets["particle_276 geometry"]=s
s= marker_sets["particle_276 geometry"]
mark=s.place_marker((1977.36, 3407.84, 2454.88), (0.7, 0.7, 0.7), 70.8525)
if "particle_277 geometry" not in marker_sets:
s=new_marker_set('particle_277 geometry')
marker_sets["particle_277 geometry"]=s
s= marker_sets["particle_277 geometry"]
mark=s.place_marker((2044.04, 2780.99, 2563.19), (0.7, 0.7, 0.7), 141.874)
if "particle_278 geometry" not in marker_sets:
s=new_marker_set('particle_278 geometry')
marker_sets["particle_278 geometry"]=s
s= marker_sets["particle_278 geometry"]
mark=s.place_marker((2027.23, 2175.85, 2667.49), (0.7, 0.7, 0.7), 217.337)
if "particle_279 geometry" not in marker_sets:
s=new_marker_set('particle_279 geometry')
marker_sets["particle_279 geometry"]=s
s= marker_sets["particle_279 geometry"]
mark=s.place_marker((1997.71, 2158.34, 2628.38), (0.7, 0.7, 0.7), 237.641)
if "particle_280 geometry" not in marker_sets:
s=new_marker_set('particle_280 geometry')
marker_sets["particle_280 geometry"]=s
s= marker_sets["particle_280 geometry"]
mark=s.place_marker((2025.9, 2540.2, 2356.75), (0.7, 0.7, 0.7), 229.393)
if "particle_281 geometry" not in marker_sets:
s=new_marker_set('particle_281 geometry')
marker_sets["particle_281 geometry"]=s
s= marker_sets["particle_281 geometry"]
mark=s.place_marker((1936.28, 2095.31, 1959.1), (0.7, 0.7, 0.7), 349.906)
if "particle_282 geometry" not in marker_sets:
s=new_marker_set('particle_282 geometry')
marker_sets["particle_282 geometry"]=s
s= marker_sets["particle_282 geometry"]
mark=s.place_marker((1657.97, 1604.7, 1804.39), (0.7, 0.7, 0.7), 162.347)
if "particle_283 geometry" not in marker_sets:
s=new_marker_set('particle_283 geometry')
marker_sets["particle_283 geometry"]=s
s= marker_sets["particle_283 geometry"]
mark=s.place_marker((1571.84, 1502.53, 1690.47), (0.7, 0.7, 0.7), 194.072)
if "particle_284 geometry" not in marker_sets:
s=new_marker_set('particle_284 geometry')
marker_sets["particle_284 geometry"]=s
s= marker_sets["particle_284 geometry"]
mark=s.place_marker((1622.86, 1570.26, 1527.77), (0.7, 0.7, 0.7), 242.21)
if "particle_285 geometry" not in marker_sets:
s=new_marker_set('particle_285 geometry')
marker_sets["particle_285 geometry"]=s
s= marker_sets["particle_285 geometry"]
mark=s.place_marker((1280.22, 1909.8, 1246.55), (0.7, 0.7, 0.7), 320.93)
if "particle_286 geometry" not in marker_sets:
s=new_marker_set('particle_286 geometry')
marker_sets["particle_286 geometry"]=s
s= marker_sets["particle_286 geometry"]
mark=s.place_marker((1004.4, 1766.48, 753.907), (0.7, 0.7, 0.7), 226.432)
if "particle_287 geometry" not in marker_sets:
s=new_marker_set('particle_287 geometry')
marker_sets["particle_287 geometry"]=s
s= marker_sets["particle_287 geometry"]
mark=s.place_marker((1241.35, 1435.52, 710.777), (0.7, 0.7, 0.7), 125.208)
if "particle_288 geometry" not in marker_sets:
s=new_marker_set('particle_288 geometry')
marker_sets["particle_288 geometry"]=s
s= marker_sets["particle_288 geometry"]
mark=s.place_marker((1670.89, 1183.78, 859.953), (0.7, 0.7, 0.7), 197.837)
if "particle_289 geometry" not in marker_sets:
s=new_marker_set('particle_289 geometry')
marker_sets["particle_289 geometry"]=s
s= marker_sets["particle_289 geometry"]
mark=s.place_marker((2113.15, 1073.23, 405.793), (0.7, 0.7, 0.7), 167.804)
if "particle_290 geometry" not in marker_sets:
s=new_marker_set('particle_290 geometry')
marker_sets["particle_290 geometry"]=s
s= marker_sets["particle_290 geometry"]
mark=s.place_marker((2431.87, 1053.6, -363.583), (0.7, 0.7, 0.7), 136.84)
if "particle_291 geometry" not in marker_sets:
s=new_marker_set('particle_291 geometry')
marker_sets["particle_291 geometry"]=s
s= marker_sets["particle_291 geometry"]
mark=s.place_marker((2380.17, 1492.5, -479.683), (0.7, 0.7, 0.7), 85.7421)
if "particle_292 geometry" not in marker_sets:
s=new_marker_set('particle_292 geometry')
marker_sets["particle_292 geometry"]=s
s= marker_sets["particle_292 geometry"]
mark=s.place_marker((1631.3, 1650.41, 693.449), (1, 0.7, 0), 256)
if "particle_293 geometry" not in marker_sets:
s=new_marker_set('particle_293 geometry')
marker_sets["particle_293 geometry"]=s
s= marker_sets["particle_293 geometry"]
mark=s.place_marker((2236.8, 852.428, 204.745), (0.7, 0.7, 0.7), 138.702)
if "particle_294 geometry" not in marker_sets:
s=new_marker_set('particle_294 geometry')
marker_sets["particle_294 geometry"]=s
s= marker_sets["particle_294 geometry"]
mark=s.place_marker((2431.11, 430.896, 98.3304), (0.7, 0.7, 0.7), 140.732)
if "particle_295 geometry" not in marker_sets:
s=new_marker_set('particle_295 geometry')
marker_sets["particle_295 geometry"]=s
s= marker_sets["particle_295 geometry"]
mark=s.place_marker((2188.38, 606.857, 69.8795), (0.7, 0.7, 0.7), 81.3006)
if "particle_296 geometry" not in marker_sets:
s=new_marker_set('particle_296 geometry')
marker_sets["particle_296 geometry"]=s
s= marker_sets["particle_296 geometry"]
mark=s.place_marker((1980.8, 726.139, -288.72), (0.7, 0.7, 0.7), 133.837)
if "particle_297 geometry" not in marker_sets:
s=new_marker_set('particle_297 geometry')
marker_sets["particle_297 geometry"]=s
s= marker_sets["particle_297 geometry"]
mark=s.place_marker((1733.7, 1043.46, 203.133), (0.7, 0.7, 0.7), 98.3475)
if "particle_298 geometry" not in marker_sets:
s=new_marker_set('particle_298 geometry')
marker_sets["particle_298 geometry"]=s
s= marker_sets["particle_298 geometry"]
mark=s.place_marker((1609.84, 1203.77, 994.701), (0.7, 0.7, 0.7), 297.623)
if "particle_299 geometry" not in marker_sets:
s=new_marker_set('particle_299 geometry')
marker_sets["particle_299 geometry"]=s
s= marker_sets["particle_299 geometry"]
mark=s.place_marker((1574.25, 1500.51, 1300.46), (0.7, 0.7, 0.7), 212.938)
if "particle_300 geometry" not in marker_sets:
s=new_marker_set('particle_300 geometry')
marker_sets["particle_300 geometry"]=s
s= marker_sets["particle_300 geometry"]
mark=s.place_marker((1408.42, 1345.9, 1295.9), (0.7, 0.7, 0.7), 154.183)
if "particle_301 geometry" not in marker_sets:
s=new_marker_set('particle_301 geometry')
marker_sets["particle_301 geometry"]=s
s= marker_sets["particle_301 geometry"]
mark=s.place_marker((1091.91, 1567.52, 1121.79), (0.7, 0.7, 0.7), 180.832)
if "particle_302 geometry" not in marker_sets:
s=new_marker_set('particle_302 geometry')
marker_sets["particle_302 geometry"]=s
s= marker_sets["particle_302 geometry"]
mark=s.place_marker((1009.65, 1913.35, 972.721), (0.7, 0.7, 0.7), 122.332)
if "particle_303 geometry" not in marker_sets:
s=new_marker_set('particle_303 geometry')
marker_sets["particle_303 geometry"]=s
s= marker_sets["particle_303 geometry"]
mark=s.place_marker((1105.92, 2267.33, 847.97), (0.7, 0.7, 0.7), 209.047)
if "particle_304 geometry" not in marker_sets:
s=new_marker_set('particle_304 geometry')
marker_sets["particle_304 geometry"]=s
s= marker_sets["particle_304 geometry"]
mark=s.place_marker((702.005, 2227.97, 860.439), (0.7, 0.7, 0.7), 126.985)
if "particle_305 geometry" not in marker_sets:
s=new_marker_set('particle_305 geometry')
marker_sets["particle_305 geometry"]=s
s= marker_sets["particle_305 geometry"]
mark=s.place_marker((381.988, 2177.19, 585.352), (0.7, 0.7, 0.7), 122.205)
if "particle_306 geometry" not in marker_sets:
s=new_marker_set('particle_306 geometry')
marker_sets["particle_306 geometry"]=s
s= marker_sets["particle_306 geometry"]
mark=s.place_marker((304.306, 1945.23, 442.051), (0.7, 0.7, 0.7), 107.95)
if "particle_307 geometry" not in marker_sets:
s=new_marker_set('particle_307 geometry')
marker_sets["particle_307 geometry"]=s
s= marker_sets["particle_307 geometry"]
mark=s.place_marker((653.544, 1766.42, 895.784), (0.7, 0.7, 0.7), 182.567)
if "particle_308 geometry" not in marker_sets:
s=new_marker_set('particle_308 geometry')
marker_sets["particle_308 geometry"]=s
s= marker_sets["particle_308 geometry"]
mark=s.place_marker((1162.09, 1647.66, 1251.28), (0.7, 0.7, 0.7), 185.274)
if "particle_309 geometry" not in marker_sets:
s=new_marker_set('particle_309 geometry')
marker_sets["particle_309 geometry"]=s
s= marker_sets["particle_309 geometry"]
mark=s.place_marker((1591.71, 1813.01, 1395.81), (0.7, 0.7, 0.7), 413.567)
if "particle_310 geometry" not in marker_sets:
s=new_marker_set('particle_310 geometry')
marker_sets["particle_310 geometry"]=s
s= marker_sets["particle_310 geometry"]
mark=s.place_marker((1648.14, 1653.19, 1563.82), (0.7, 0.7, 0.7), 240.01)
if "particle_311 geometry" not in marker_sets:
s=new_marker_set('particle_311 geometry')
marker_sets["particle_311 geometry"]=s
s= marker_sets["particle_311 geometry"]
mark=s.place_marker((1635.19, 1690.65, 1545.58), (0.7, 0.7, 0.7), 238.995)
if "particle_312 geometry" not in marker_sets:
s=new_marker_set('particle_312 geometry')
marker_sets["particle_312 geometry"]=s
s= marker_sets["particle_312 geometry"]
mark=s.place_marker((1408.96, 1570.76, 1479.03), (0.7, 0.7, 0.7), 203.674)
if "particle_313 geometry" not in marker_sets:
s=new_marker_set('particle_313 geometry')
marker_sets["particle_313 geometry"]=s
s= marker_sets["particle_313 geometry"]
mark=s.place_marker((817.217, 1648.62, 1571.57), (0.7, 0.7, 0.7), 266.744)
if "particle_314 geometry" not in marker_sets:
s=new_marker_set('particle_314 geometry')
marker_sets["particle_314 geometry"]=s
s= marker_sets["particle_314 geometry"]
mark=s.place_marker((811.381, 1206.06, 1451.62), (0.7, 0.7, 0.7), 147.585)
if "particle_315 geometry" not in marker_sets:
s=new_marker_set('particle_315 geometry')
marker_sets["particle_315 geometry"]=s
s= marker_sets["particle_315 geometry"]
mark=s.place_marker((1036.63, 1285.08, 1511.63), (0.7, 0.7, 0.7), 249.485)
if "particle_316 geometry" not in marker_sets:
s=new_marker_set('particle_316 geometry')
marker_sets["particle_316 geometry"]=s
s= marker_sets["particle_316 geometry"]
mark=s.place_marker((1079.84, 1696.13, 1622.93), (0.7, 0.7, 0.7), 119.371)
if "particle_317 geometry" not in marker_sets:
s=new_marker_set('particle_317 geometry')
marker_sets["particle_317 geometry"]=s
s= marker_sets["particle_317 geometry"]
mark=s.place_marker((1065.45, 2326.17, 1337.78), (0.7, 0.7, 0.7), 155.875)
if "particle_318 geometry" not in marker_sets:
s=new_marker_set('particle_318 geometry')
marker_sets["particle_318 geometry"]=s
s= marker_sets["particle_318 geometry"]
mark=s.place_marker((1357.11, 2741.69, 766.106), (0.7, 0.7, 0.7), 189.419)
if "particle_319 geometry" not in marker_sets:
s=new_marker_set('particle_319 geometry')
marker_sets["particle_319 geometry"]=s
s= marker_sets["particle_319 geometry"]
mark=s.place_marker((1894.17, 2672.44, 659.912), (0.7, 0.7, 0.7), 137.475)
if "particle_320 geometry" not in marker_sets:
s=new_marker_set('particle_320 geometry')
marker_sets["particle_320 geometry"]=s
s= marker_sets["particle_320 geometry"]
mark=s.place_marker((2318.82, 2529.48, 822.934), (0.7, 0.7, 0.7), 176.179)
if "particle_321 geometry" not in marker_sets:
s=new_marker_set('particle_321 geometry')
marker_sets["particle_321 geometry"]=s
s= marker_sets["particle_321 geometry"]
mark=s.place_marker((2746.31, 2419.89, 781.853), (0.7, 0.7, 0.7), 138.829)
if "particle_322 geometry" not in marker_sets:
s=new_marker_set('particle_322 geometry')
marker_sets["particle_322 geometry"]=s
s= marker_sets["particle_322 geometry"]
mark=s.place_marker((3074.7, 2273.19, 608.902), (0.7, 0.7, 0.7), 148.727)
if "particle_323 geometry" not in marker_sets:
s=new_marker_set('particle_323 geometry')
marker_sets["particle_323 geometry"]=s
s= marker_sets["particle_323 geometry"]
mark=s.place_marker((3365.27, 2155.77, 208.237), (0.7, 0.7, 0.7), 230.323)
if "particle_324 geometry" not in marker_sets:
s=new_marker_set('particle_324 geometry')
marker_sets["particle_324 geometry"]=s
s= marker_sets["particle_324 geometry"]
mark=s.place_marker((2770.62, 2275.19, 369.328), (0.7, 0.7, 0.7), 175.376)
if "particle_325 geometry" not in marker_sets:
s=new_marker_set('particle_325 geometry')
marker_sets["particle_325 geometry"]=s
s= marker_sets["particle_325 geometry"]
mark=s.place_marker((2388.35, 2310.13, 688.825), (0.7, 0.7, 0.7), 161.163)
if "particle_326 geometry" not in marker_sets:
s=new_marker_set('particle_326 geometry')
marker_sets["particle_326 geometry"]=s
s= marker_sets["particle_326 geometry"]
mark=s.place_marker((2490.94, 2767.27, 753.323), (0.7, 0.7, 0.7), 125.885)
if "particle_327 geometry" not in marker_sets:
s=new_marker_set('particle_327 geometry')
marker_sets["particle_327 geometry"]=s
s= marker_sets["particle_327 geometry"]
mark=s.place_marker((2450.66, 3233.28, 637.823), (0.7, 0.7, 0.7), 206.635)
if "particle_328 geometry" not in marker_sets:
s=new_marker_set('particle_328 geometry')
marker_sets["particle_328 geometry"]=s
s= marker_sets["particle_328 geometry"]
mark=s.place_marker((2725.02, 2977.8, 951.549), (0.7, 0.7, 0.7), 151.392)
if "particle_329 geometry" not in marker_sets:
s=new_marker_set('particle_329 geometry')
marker_sets["particle_329 geometry"]=s
s= marker_sets["particle_329 geometry"]
mark=s.place_marker((2946.98, 2676.79, 1135.94), (0.7, 0.7, 0.7), 173.388)
if "particle_330 geometry" not in marker_sets:
s=new_marker_set('particle_330 geometry')
marker_sets["particle_330 geometry"]=s
s= marker_sets["particle_330 geometry"]
mark=s.place_marker((3175.73, 2432.9, 971.927), (0.7, 0.7, 0.7), 135.825)
if "particle_331 geometry" not in marker_sets:
s=new_marker_set('particle_331 geometry')
marker_sets["particle_331 geometry"]=s
s= marker_sets["particle_331 geometry"]
mark=s.place_marker((3355.38, 2281.08, 596.964), (0.7, 0.7, 0.7), 186.839)
if "particle_332 geometry" not in marker_sets:
s=new_marker_set('particle_332 geometry')
marker_sets["particle_332 geometry"]=s
s= marker_sets["particle_332 geometry"]
mark=s.place_marker((3489.64, 2203.61, 145.067), (0.7, 0.7, 0.7), 121.189)
if "particle_333 geometry" not in marker_sets:
s=new_marker_set('particle_333 geometry')
marker_sets["particle_333 geometry"]=s
s= marker_sets["particle_333 geometry"]
mark=s.place_marker((3109.91, 2323.39, 330.013), (0.7, 0.7, 0.7), 102.916)
if "particle_334 geometry" not in marker_sets:
s=new_marker_set('particle_334 geometry')
marker_sets["particle_334 geometry"]=s
s= marker_sets["particle_334 geometry"]
mark=s.place_marker((2552.58, 2366.9, 646.549), (0.7, 0.7, 0.7), 212.769)
if "particle_335 geometry" not in marker_sets:
s=new_marker_set('particle_335 geometry')
marker_sets["particle_335 geometry"]=s
s= marker_sets["particle_335 geometry"]
mark=s.place_marker((2136.56, 2362.06, 1204.74), (0.7, 0.7, 0.7), 173.092)
if "particle_336 geometry" not in marker_sets:
s=new_marker_set('particle_336 geometry')
marker_sets["particle_336 geometry"]=s
s= marker_sets["particle_336 geometry"]
mark=s.place_marker((1692.47, 2398.15, 1424.94), (0.7, 0.7, 0.7), 264.502)
if "particle_337 geometry" not in marker_sets:
s=new_marker_set('particle_337 geometry')
marker_sets["particle_337 geometry"]=s
s= marker_sets["particle_337 geometry"]
mark=s.place_marker((1205.41, 2552.28, 1235.87), (0.7, 0.7, 0.7), 208.666)
if "particle_338 geometry" not in marker_sets:
s=new_marker_set('particle_338 geometry')
marker_sets["particle_338 geometry"]=s
s= marker_sets["particle_338 geometry"]
mark=s.place_marker((810.353, 2457.61, 972.499), (0.7, 0.7, 0.7), 186.797)
if "particle_339 geometry" not in marker_sets:
s=new_marker_set('particle_339 geometry')
marker_sets["particle_339 geometry"]=s
s= marker_sets["particle_339 geometry"]
mark=s.place_marker((546.467, 2085.21, 1165.03), (0.7, 0.7, 0.7), 255.534)
if "particle_340 geometry" not in marker_sets:
s=new_marker_set('particle_340 geometry')
marker_sets["particle_340 geometry"]=s
s= marker_sets["particle_340 geometry"]
mark=s.place_marker((488.611, 1730.44, 942.166), (0.7, 0.7, 0.7), 153.126)
if "particle_341 geometry" not in marker_sets:
s=new_marker_set('particle_341 geometry')
marker_sets["particle_341 geometry"]=s
s= marker_sets["particle_341 geometry"]
mark=s.place_marker((256.541, 1980.96, 760.908), (0.7, 0.7, 0.7), 165.816)
if "particle_342 geometry" not in marker_sets:
s=new_marker_set('particle_342 geometry')
marker_sets["particle_342 geometry"]=s
s= marker_sets["particle_342 geometry"]
mark=s.place_marker((273.781, 2118.58, 1116.93), (0.7, 0.7, 0.7), 134.429)
if "particle_343 geometry" not in marker_sets:
s=new_marker_set('particle_343 geometry')
marker_sets["particle_343 geometry"]=s
s= marker_sets["particle_343 geometry"]
mark=s.place_marker((570.777, 2316.32, 1235.29), (0.7, 0.7, 0.7), 178.971)
if "particle_344 geometry" not in marker_sets:
s=new_marker_set('particle_344 geometry')
marker_sets["particle_344 geometry"]=s
s= marker_sets["particle_344 geometry"]
mark=s.place_marker((922.954, 2491.67, 941.875), (0.7, 0.7, 0.7), 189.969)
if "particle_345 geometry" not in marker_sets:
s=new_marker_set('particle_345 geometry')
marker_sets["particle_345 geometry"]=s
s= marker_sets["particle_345 geometry"]
mark=s.place_marker((876.744, 2892.05, 489.272), (0.7, 0.7, 0.7), 121.359)
if "particle_346 geometry" not in marker_sets:
s=new_marker_set('particle_346 geometry')
marker_sets["particle_346 geometry"]=s
s= marker_sets["particle_346 geometry"]
mark=s.place_marker((1337.15, 3084.44, 318.339), (0.7, 0.7, 0.7), 187.262)
if "particle_347 geometry" not in marker_sets:
s=new_marker_set('particle_347 geometry')
marker_sets["particle_347 geometry"]=s
s= marker_sets["particle_347 geometry"]
mark=s.place_marker((1926.7, 3034.9, 553.319), (0.7, 0.7, 0.7), 164.335)
if "particle_348 geometry" not in marker_sets:
s=new_marker_set('particle_348 geometry')
marker_sets["particle_348 geometry"]=s
s= marker_sets["particle_348 geometry"]
mark=s.place_marker((2315.49, 2680.75, 469.694), (0.7, 0.7, 0.7), 138.363)
if "particle_349 geometry" not in marker_sets:
s=new_marker_set('particle_349 geometry')
marker_sets["particle_349 geometry"]=s
s= marker_sets["particle_349 geometry"]
mark=s.place_marker((2665.71, 2535.58, 348.184), (0.7, 0.7, 0.7), 138.49)
if "particle_350 geometry" not in marker_sets:
s=new_marker_set('particle_350 geometry')
marker_sets["particle_350 geometry"]=s
s= marker_sets["particle_350 geometry"]
mark=s.place_marker((2855.03, 2748.66, 548.574), (0.7, 0.7, 0.7), 116.325)
if "particle_351 geometry" not in marker_sets:
s=new_marker_set('particle_351 geometry')
marker_sets["particle_351 geometry"]=s
s= marker_sets["particle_351 geometry"]
mark=s.place_marker((2496.63, 2884.42, 768.295), (0.7, 0.7, 0.7), 106.511)
if "particle_352 geometry" not in marker_sets:
s=new_marker_set('particle_352 geometry')
marker_sets["particle_352 geometry"]=s
s= marker_sets["particle_352 geometry"]
mark=s.place_marker((1964.14, 2810.33, 869.236), (0.7, 0.7, 0.7), 151.096)
if "particle_353 geometry" not in marker_sets:
s=new_marker_set('particle_353 geometry')
marker_sets["particle_353 geometry"]=s
s= marker_sets["particle_353 geometry"]
mark=s.place_marker((1312.46, 2704.46, 778.063), (0.7, 0.7, 0.7), 240.856)
if "particle_354 geometry" not in marker_sets:
s=new_marker_set('particle_354 geometry')
marker_sets["particle_354 geometry"]=s
s= marker_sets["particle_354 geometry"]
mark=s.place_marker((858.984, 2557.85, 628.763), (0.7, 0.7, 0.7), 149.7)
if "particle_355 geometry" not in marker_sets:
s=new_marker_set('particle_355 geometry')
marker_sets["particle_355 geometry"]=s
s= marker_sets["particle_355 geometry"]
mark=s.place_marker((660.116, 2548.67, 823.377), (0.7, 0.7, 0.7), 165.943)
if "particle_356 geometry" not in marker_sets:
s=new_marker_set('particle_356 geometry')
marker_sets["particle_356 geometry"]=s
s= marker_sets["particle_356 geometry"]
mark=s.place_marker((847.396, 2226.82, 1301.67), (0.7, 0.7, 0.7), 178.971)
if "particle_357 geometry" not in marker_sets:
s=new_marker_set('particle_357 geometry')
marker_sets["particle_357 geometry"]=s
s= marker_sets["particle_357 geometry"]
mark=s.place_marker((811.16, 1943.48, 2000.5), (0.7, 0.7, 0.7), 154.945)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
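# The blocks above repeat one fixed four-line pattern per particle. A minimal
# sketch of the same pattern as a helper (illustrative only; new_marker_set
# and place_marker are assumed to behave exactly as used above):
def add_particle(index, center, color=(0.7, 0.7, 0.7), radius=100.0):
    # reuse an existing marker set or create one, then place the marker
    name = "particle_%d geometry" % index
    if name not in marker_sets:
        marker_sets[name] = new_marker_set(name)
    return marker_sets[name].place_marker(center, color, radius)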
| gpl-3.0 |
simartin/servo | tests/wpt/web-platform-tests/webdriver/tests/delete_cookie/user_prompts.py | 42 | 4008 | # META: timeout=long
import pytest
from webdriver.error import NoSuchCookieException
from tests.support.asserts import assert_dialog_handled, assert_error, assert_success
def delete_cookie(session, name):
return session.transport.send("DELETE", "/session/%s/cookie/%s" % (session.session_id, name))
@pytest.fixture
def check_user_prompt_closed_without_exception(session, create_dialog, create_cookie):
def check_user_prompt_closed_without_exception(dialog_type, retval):
create_cookie("foo", value="bar", path="/common/blank.html")
create_dialog(dialog_type, text=dialog_type)
response = delete_cookie(session, "foo")
assert_success(response)
assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
with pytest.raises(NoSuchCookieException):
assert session.cookies("foo")
return check_user_prompt_closed_without_exception
@pytest.fixture
def check_user_prompt_closed_with_exception(session, create_dialog, create_cookie):
def check_user_prompt_closed_with_exception(dialog_type, retval):
create_cookie("foo", value="bar", path="/common/blank.html")
create_dialog(dialog_type, text=dialog_type)
response = delete_cookie(session, "foo")
assert_error(response, "unexpected alert open")
assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
assert session.cookies("foo")
return check_user_prompt_closed_with_exception
@pytest.fixture
def check_user_prompt_not_closed_but_exception(session, create_dialog, create_cookie):
def check_user_prompt_not_closed_but_exception(dialog_type):
create_cookie("foo", value="bar", path="/common/blank.html")
create_dialog(dialog_type, text=dialog_type)
response = delete_cookie(session, "foo")
assert_error(response, "unexpected alert open")
assert session.alert.text == dialog_type
session.alert.dismiss()
assert session.cookies("foo")
return check_user_prompt_not_closed_but_exception
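# Summary of the three fixtures above (descriptive, not normative):
# - check_user_prompt_closed_without_exception: the prompt is auto-handled,
#   delete_cookie succeeds, and the cookie is gone afterwards.
# - check_user_prompt_closed_with_exception: the prompt is auto-handled, but
#   the command fails with "unexpected alert open" and the cookie survives.
# - check_user_prompt_not_closed_but_exception: the prompt stays open, the
#   command fails, and the test dismisses the prompt itself.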
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", True),
("prompt", ""),
])
def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
check_user_prompt_closed_without_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", True),
("prompt", ""),
])
def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
check_user_prompt_closed_with_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", False),
("prompt", None),
])
def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
check_user_prompt_closed_without_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", False),
("prompt", None),
])
def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
check_user_prompt_closed_with_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
check_user_prompt_not_closed_but_exception(dialog_type)
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", False),
("prompt", None),
])
def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
check_user_prompt_closed_with_exception(dialog_type, retval)
| mpl-2.0 |
dandygithub/kodi | addons/context.dandy.kinopoisk.sc/resources/lib/hdbaza.py | 1 | 4553 | import urllib, urllib2
import json
import re
import socket
import xbmc
import xbmcgui
import XbmcHelpers
common = XbmcHelpers
from videohosts import tools
socket.setdefaulttimeout(120)
QUALITY_TYPES = ("1.LQ", "2.HQ")
PLAYLIST_DOMAIN = "vidozzz.com"
HEADERS = {
"Host": PLAYLIST_DOMAIN,
"Referer": "http://yohoho.cc/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36"
}
HEADERS2 = {
"Origin": "https://" + PLAYLIST_DOMAIN,
"Referer": "{0}",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36"
}
VALUES = {
"snd": "{0}",
"s": "{1}",
"e": "{2}"
}
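# VALUES is filled in before each request; e.g. after picking translator "2",
# season "1", episode "3" it would hold {"snd": "2", "s": "1", "e": "3"}
# (the concrete values here are illustrative).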
def select_translator(data, url):
tr_arr = data.split("sounds: [")[-1].split("seasons")[0].replace("\n", "").replace(" ", "").split("],[")
translators = []
tr_values = []
for tr_item in tr_arr:
translators.append(tr_item.split(",")[1].replace("'", "").replace("]", ""))
tr_values.append(tr_item.split(",")[0].replace("'", "").replace("[", ""))
if len(translators) > 1:
dialog = xbmcgui.Dialog()
index_ = dialog.select("Select translator", translators)
if int(index_) < 0:
index_ = 0
else:
index_ = 0
tr_value = tr_values[index_]
VALUES["snd"] = tr_value
VALUES["s"] = "1"
VALUES["e"] = "1"
response = tools.get_response(url, HEADERS, VALUES, "GET")
return response, tr_value
def select_season(data, value):
sss = data.split("soundsList: ")[-1].split("selected_options:")[0].replace(' ', '').replace("\n", '').replace("],}", "]}").replace("},}", "}}").replace("'", '"')
seasonsjson = json.loads(sss[:len(sss)-1])
seasons = []
for season in seasonsjson[value]:
seasons.append(season)
seasons.sort()
values = seasons
if len(seasons) > 1:
dialog = xbmcgui.Dialog()
index_ = dialog.select("Select season", seasons)
if int(index_) < 0:
index_ = -1
else:
index_ = 0
if index_ < 0:
return "", ""
else:
return values[index_], str(index_+1)
def select_episode(data, url):
sindex = None
eindex = None
data_, tr_value = select_translator(data, url)
season, sindex = select_season(data_, tr_value)
if season == "":
return "", sindex, eindex
VALUES["snd"] = tr_value
VALUES["s"] = season
VALUES["e"] = "1"
try:
response = tools.get_response(url, HEADERS, VALUES, "GET")
except:
return "", sindex, eindex
sss = response.split("soundsList: ")[-1].split("selected_options:")[0].replace(' ', '').replace("\n", '').replace("],}", "]}").replace("},}", "}}").replace("'", '"')
seriesjson = json.loads(sss[:len(sss)-1])
series = []
for episode in seriesjson[tr_value][season]:
series.append(str(episode))
evalues = series
if len(series) > 1:
dialog = xbmcgui.Dialog()
index_ = dialog.select("Select episode", series)
if int(index_) < 0:
index_ = -1
else:
index_ = 0
episode = str(index_+1)
eindex = str(index_+1)
if index_ < 0:
return "", sindex, eindex
VALUES["snd"] = tr_value
VALUES["s"] = season
VALUES["e"] = episode
try:
response = tools.get_response(url, HEADERS, VALUES, "GET")
return response, sindex, eindex
except:
return "", sindex, eindex
def get_playlist(url):
manifest_links = {}
subtitles = None
season = None
episode = None
try:
response = tools.get_response(url, HEADERS, {}, "GET")
except:
return manifest_links, subtitles, season, episode
# season is "null" for movies; anything else means a TV show
tvshow = response.split("season:")[1].split(",")[0].replace(" ", "")
if (tvshow != "null"):
response, season, episode = select_episode(response, url)
if response == "":
return manifest_links, subtitles, season, episode
part = response.split("hls_master_file_path: '")[-1].split("',")[0]
url_ = "https://gethdhls.com" + part
try:
response = tools.get_response(url_, HEADERS2, {}, "GET")
except:
return manifest_links, subtitles, season, episode
urls = re.compile(r"https://.*?\.m3u8").findall(response)
# zip guards against the page exposing more streams than quality labels
for quality, url in zip(QUALITY_TYPES, urls):
manifest_links[quality] = url
return manifest_links, subtitles, season, episode
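# Minimal usage sketch (the URL is a made-up example; only get_playlist's
# return shape is taken from the code above):
# manifest_links, subtitles, season, episode = get_playlist(
#     "https://vidozzz.com/iframe/example")
# for quality in sorted(manifest_links):
#     print quality, manifest_links[quality]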
| gpl-3.0 |
marcoarruda/MissionPlanner | ExtLibs/Mavlink/mavtestgen.py | 33 | 3628 | #!/usr/bin/env python
'''
generate a MAVLink test suite
Copyright Andrew Tridgell 2011
Released under GNU GPL version 3 or later
'''
import sys, textwrap
from optparse import OptionParser
import mavparse
def gen_value(f, i, language):
'''generate a test value for the ith field of a message'''
type = f.type
# could be an array
if type.find("[") != -1:
aidx = type.find("[")
basetype = type[0:aidx]
if basetype == "array":
basetype = "int8_t"
if language == 'C':
return '(const %s *)"%s%u"' % (basetype, f.name, i)
return '"%s%u"' % (f.name, i)
if type == 'float':
return 17.0 + i*7
if type == 'char':
return chr(ord('A') + i) # was 'A' + i, which raises TypeError (str + int)
if type in ['int8_t', 'uint8_t']:
return 5 + i
if type in ['uint8_t_mavlink_version']:
return 2
if type in ['int16_t', 'uint16_t']:
return 17235 + i*52
if type in ['int32_t', 'uint32_t']:
v = 963497464 + i*52
if language == 'C':
return "%sL" % v
return v
if type in ['int64_t', 'uint64_t']:
v = 9223372036854775807 + i*63
if language == 'C':
return "%sLL" % v
return v
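# Illustrative results of gen_value (assuming a field object f with matching
# .type and .name attributes):
#   f.type == 'uint16_t', i=2, 'py' -> 17339 (17235 + 2*52)
#   f.type == 'uint32_t', i=1, 'C'  -> "963497516L"
#   f.type == 'char[10]', f.name == 'data', i=0, 'py' -> '"data0"'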
def generate_methods_python(outf, msgs):
outf.write("""
'''
MAVLink protocol test implementation (auto-generated by mavtestgen.py)
Generated from: %s
Note: this file has been auto-generated. DO NOT EDIT
'''
import mavlink
def generate_outputs(mav):
'''generate all message types as outputs'''
""")
for m in msgs:
if m.name == "HEARTBEAT": continue
outf.write("\tmav.%s_send(" % m.name.lower())
for i in range(0, len(m.fields)):
f = m.fields[i]
outf.write("%s=%s" % (f.name, gen_value(f, i, 'py')))
if i != len(m.fields)-1:
outf.write(",")
outf.write(")\n")
def generate_methods_C(outf, msgs):
outf.write("""
/*
MAVLink protocol test implementation (auto-generated by mavtestgen.py)
Generated from: %s
Note: this file has been auto-generated. DO NOT EDIT
*/
static void mavtest_generate_outputs(mavlink_channel_t chan)
{
""")
for m in msgs:
if m.name == "HEARTBEAT": continue
outf.write("\tmavlink_msg_%s_send(chan," % m.name.lower())
for i in range(0, len(m.fields)):
f = m.fields[i]
outf.write("%s" % gen_value(f, i, 'C'))
if i != len(m.fields)-1:
outf.write(",")
outf.write(");\n")
outf.write("}\n")
######################################################################
'''main program'''
parser = OptionParser("mavtestgen.py [options] <XML files>")
parser.add_option("-o", "--output", dest="output", default="mavtest", help="output file base name")
(opts, args) = parser.parse_args()
if len(args) < 1:
parser.error("You must supply at least one MAVLink XML protocol definition")
msgs = []
enums = []
for fname in args:
(m, e) = mavparse.parse_mavlink_xml(fname)
msgs.extend(m)
enums.extend(e)
if mavparse.check_duplicates(msgs):
sys.exit(1)
print("Found %u MAVLink message types" % len(msgs))
print("Generating python %s" % (opts.output+'.py'))
outf = open(opts.output + '.py', "w")
generate_methods_python(outf, msgs)
outf.close()
print("Generating C %s" % (opts.output+'.h'))
outf = open(opts.output + '.h', "w")
generate_methods_C(outf, msgs)
outf.close()
print("Generated %s OK" % opts.output)
| gpl-3.0 |
hasadna/django | tests/modeltests/update/tests.py | 118 | 4307 | from __future__ import absolute_import, unicode_literals
from django.test import TestCase
from .models import A, B, C, D, DataPoint, RelatedPoint
class SimpleTest(TestCase):
def setUp(self):
self.a1 = A.objects.create()
self.a2 = A.objects.create()
for x in range(20):
B.objects.create(a=self.a1)
D.objects.create(a=self.a1)
def test_nonempty_update(self):
"""
Test that update changes the right number of rows for a nonempty queryset
"""
num_updated = self.a1.b_set.update(y=100)
self.assertEqual(num_updated, 20)
cnt = B.objects.filter(y=100).count()
self.assertEqual(cnt, 20)
def test_empty_update(self):
"""
Test that update changes the right number of rows for an empty queryset
"""
num_updated = self.a2.b_set.update(y=100)
self.assertEqual(num_updated, 0)
cnt = B.objects.filter(y=100).count()
self.assertEqual(cnt, 0)
def test_nonempty_update_with_inheritance(self):
"""
Test that update changes the right number of rows for a nonempty queryset
when the update affects only a base table
"""
num_updated = self.a1.d_set.update(y=100)
self.assertEqual(num_updated, 20)
cnt = D.objects.filter(y=100).count()
self.assertEqual(cnt, 20)
def test_empty_update_with_inheritance(self):
"""
Test that update changes the right number of rows for an empty queryset
when the update affects only a base table
"""
num_updated = self.a2.d_set.update(y=100)
self.assertEqual(num_updated, 0)
cnt = D.objects.filter(y=100).count()
self.assertEqual(cnt, 0)
class AdvancedTests(TestCase):
def setUp(self):
self.d0 = DataPoint.objects.create(name="d0", value="apple")
self.d2 = DataPoint.objects.create(name="d2", value="banana")
self.d3 = DataPoint.objects.create(name="d3", value="banana")
self.r1 = RelatedPoint.objects.create(name="r1", data=self.d3)
def test_update(self):
"""
Objects are updated by first filtering the candidates into a queryset
and then calling the update() method. It executes immediately and
returns the number of rows matched.
"""
resp = DataPoint.objects.filter(value="apple").update(name="d1")
self.assertEqual(resp, 1)
resp = DataPoint.objects.filter(value="apple")
self.assertEqual(list(resp), [self.d0])
def test_update_multiple_objects(self):
"""
We can update multiple objects at once.
"""
resp = DataPoint.objects.filter(value="banana").update(
value="pineapple")
self.assertEqual(resp, 2)
self.assertEqual(DataPoint.objects.get(name="d2").value, 'pineapple')
def test_update_fk(self):
"""
Foreign key fields can also be updated, although you can only update
the object referred to, not anything inside the related object.
"""
resp = RelatedPoint.objects.filter(name="r1").update(data=self.d0)
self.assertEqual(resp, 1)
resp = RelatedPoint.objects.filter(data__name="d0")
self.assertEqual(list(resp), [self.r1])
def test_update_multiple_fields(self):
"""
Multiple fields can be updated at once
"""
resp = DataPoint.objects.filter(value="apple").update(
value="fruit", another_value="peach")
self.assertEqual(resp, 1)
d = DataPoint.objects.get(name="d0")
self.assertEqual(d.value, 'fruit')
self.assertEqual(d.another_value, 'peach')
def test_update_all(self):
"""
In the rare case you want to update every instance of a model, update()
is also a manager method.
"""
self.assertEqual(DataPoint.objects.update(value='thing'), 3)
resp = DataPoint.objects.values('value').distinct()
self.assertEqual(list(resp), [{'value': 'thing'}])
def test_update_slice_fail(self):
"""
We do not support update on already sliced query sets.
"""
method = DataPoint.objects.all()[:2].update
self.assertRaises(AssertionError, method,
another_value='another thing')
| bsd-3-clause |
apanju/odoo | addons/website_report/controllers/main.py | 243 | 1460 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.website.controllers.main import Website
from openerp.http import request, route
class Website(Website):
@route()
def customize_template_get(self, xml_id, full=False):
res = super(Website, self).customize_template_get(xml_id, full=full)
if full:
for r in request.session.get('report_view_ids', []):
res += super(Website, self).customize_template_get(r.get('xml_id'), full=full)
return res
| agpl-3.0 |
urandu/rethinkdb | external/v8_3.30.33.16/testing/gmock/gtest/xcode/Scripts/versiongenerate.py | 3088 | 4536 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A script to prepare version informtion for use the gtest Info.plist file.
This script extracts the version information from the configure.ac file and
uses it to generate a header file containing the same information. The
#defines in this header file will be included during the generation of
the Info.plist of the framework, giving the correct value to the version
shown in the Finder.
This script makes the following assumptions (these are faults of the script,
not problems with the Autoconf):
1. The AC_INIT macro will be contained within the first 1024 characters
of configure.ac
2. The version string will be 3 integers separated by periods and will be
surrounded by square brackets, "[" and "]" (e.g. [1.0.1]). The first
segment represents the major version, the second represents the minor
version and the third represents the fix version.
3. No ")" character exists between the opening "(" and closing ")" of
AC_INIT, including in comments and character strings.
"""
import sys
import re
# Read the command line argument (the output directory for Version.h)
if (len(sys.argv) < 3):
print "Usage: versiongenerate.py input_dir output_dir"
sys.exit(1)
else:
input_dir = sys.argv[1]
output_dir = sys.argv[2]
# Read the first 1024 characters of the configure.ac file
config_file = open("%s/configure.ac" % input_dir, 'r')
buffer_size = 1024
opening_string = config_file.read(buffer_size)
config_file.close()
# Extract the version string from the AC_INIT macro
# The following version_expression means:
# Extract three integers separated by periods and surrounded by square
# brackets (e.g. "[1.0.1]") between "AC_INIT(" and ")". Do not be greedy
# (*? is the non-greedy flag) since that would pull in everything between
# the first "(" and the last ")" in the file.
version_expression = re.compile(r"AC_INIT\(.*?\[(\d+)\.(\d+)\.(\d+)\].*?\)",
re.DOTALL)
version_values = version_expression.search(opening_string)
major_version = version_values.group(1)
minor_version = version_values.group(2)
fix_version = version_values.group(3)
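# Example (illustrative): for an opening such as
# AC_INIT([Google C++ Mocking Framework], [1.0.0], ...) the captured groups
# would be major='1', minor='0', fix='0'.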
# Write the version information to a header file to be included in the
# Info.plist file.
file_data = """//
// DO NOT MODIFY THIS FILE (but you can delete it)
//
// This file is autogenerated by the versiongenerate.py script. This script
// is executed in a "Run Script" build phase when creating gtest.framework. This
// header file is not used during compilation of C-source. Rather, it simply
// defines some version strings for substitution in the Info.plist. Because of
// this, we are not restricted to C-syntax nor are we using include guards.
//
#define GTEST_VERSIONINFO_SHORT %s.%s
#define GTEST_VERSIONINFO_LONG %s.%s.%s
""" % (major_version, minor_version, major_version, minor_version, fix_version)
version_file = open("%s/Version.h" % output_dir, 'w')
version_file.write(file_data)
version_file.close()
| agpl-3.0 |
Mfellner77/stashboard | stashboard/migrations.py | 14 | 2500 | import logging
from models import Image
from models import Status
MIGRATIONS = {}
def register(migration):
""" Register a migratin with the runner """
MIGRATIONS[migration.__name__] = migration
def find(migration_name):
""" Return the migration for a given class name
If no migration is found, returns None.
Arguments:
migration_name -- Name of a migration class
"""
if migration_name in MIGRATIONS:
return MIGRATIONS[migration_name]
else:
return None
def all():
""" Return all registred migrations """
return MIGRATIONS.values()
def clear():
""" Clear all registerd migrations """
MIGRATIONS.clear()
class Migration(object):
"""App Engine data migration
A doc string is where you describe what your migration does
No output is shown to the user, so make liberal use logging.
Before running a migration on production data, download portions of
real data into a sample application using the bulk exporter.
Register migrations with the MigrationRunner.register method
"""
@classmethod
def name(cls):
return cls.__name__
def start(self):
logging.info("Staring migration %s" % self.__class__.__name__)
self.run()
logging.info("Finished migration %s" % self.__class__.__name__)
def run(self):
"""Run the migration """
pass
class UpdateStatusMigration(Migration):
""" Migrate sample data
This migration does nothing. NOTHING!
"""
def run(self):
logging.info("Update each status")
# For each status
for status in Status.all().fetch(100):
# Set the status to default
status.default = False
# Update the status url
status.image = "icons/fugue/" + status.image + ".png"
# Save the status
status.put()
# Get the up status and make it default
default_status = Status.get_by_slug("up")
if default_status is None:
logging.error("Could not find the up status")
return
default_status.default = True
default_status.put()
logging.info("Set up status as the default")
class AddImagesMigration(Migration):
""" Add images to the database """
def run(self):
logging.info("Load the images into the database")
Image.load_defaults()
logging.info("Loading complete")
register(AddImagesMigration)
register(UpdateStatusMigration)
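# Illustrative usage from an admin handler or deferred task (the caller is
# hypothetical; find() and start() are defined above):
# migration_cls = find("AddImagesMigration")
# if migration_cls is not None:
#     migration_cls().start()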
| mit |
hdinsight/hue | apps/metastore/setup.py | 39 | 1213 | # Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
from hueversion import VERSION
setup(
name = "metastore",
version = VERSION,
author = "Hue",
url = 'http://github.com/cloudera/hue',
description = "Metastore browser",
packages = find_packages('src'),
package_dir = {'': 'src'},
install_requires = ['setuptools', 'desktop'],
entry_points = { 'desktop.sdk.application': 'metastore=metastore' },
)
| apache-2.0 |
alexlo03/ansible | lib/ansible/utils/module_docs_fragments/aws_credentials.py | 65 | 1061 | # (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
# AWS credentials documentation fragment
DOCUMENTATION = """
options:
aws_profile:
description: The AWS profile
aliases: ['boto_profile']
env:
- name: AWS_PROFILE
- name: AWS_DEFAULT_PROFILE
aws_access_key:
description: The AWS access key to use.
env:
- name: AWS_ACCESS_KEY_ID
- name: AWS_ACCESS_KEY
- name: EC2_ACCESS_KEY
aws_secret_key:
description: The AWS secret key that corresponds to the access key.
env:
- name: AWS_SECRET_ACCESS_KEY
- name: AWS_SECRET_KEY
- name: EC2_SECRET_KEY
aws_security_token:
description: The AWS security token if using temporary access and secret keys.
env:
- name: AWS_SECURITY_TOKEN
- name: AWS_SESSION_TOKEN
- name: EC2_SECURITY_TOKEN
region:
description: The region for which to create the connection.
env:
- name: AWS_REGION
- name: EC2_REGION
"""
| gpl-3.0 |
softak/webfaction_demo | apps/stores/migrations/0023_auto__add_storeimage.py | 1 | 8329 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'StoreImage'
db.create_table('stores_storeimage', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('store', self.gf('django.db.models.fields.related.ForeignKey')(related_name='images', to=orm['stores.Store'])),
('image', self.gf('django.db.models.fields.files.ImageField')(max_length=100)),
))
db.send_create_signal('stores', ['StoreImage'])
def backwards(self, orm):
# Deleting model 'StoreImage'
db.delete_table('stores_storeimage')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'stores.category': {
'Meta': {'object_name': 'Category'},
'icon': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'marker': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'stores.discount': {
'Meta': {'object_name': 'Discount'},
'for_additional_buyer': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '2'}),
'for_additional_item': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lower_bound': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '2'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'stores.item': {
'Meta': {'object_name': 'Item'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'discount': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_out_of_stock': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '2'}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'store': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['stores.Store']"})
},
'stores.itemimage': {
'Meta': {'object_name': 'ItemImage'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'item': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'images'", 'to': "orm['stores.Item']"})
},
'stores.shoppingregion': {
'Meta': {'object_name': 'ShoppingRegion'},
'center': ('django.contrib.gis.db.models.fields.PointField', [], {'spatial_index': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'zoom': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
},
'stores.store': {
'Meta': {'object_name': 'Store'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stores'", 'to': "orm['stores.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'location': ('django.contrib.gis.db.models.fields.PointField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'paypal_email': ('django.db.models.fields.EmailField', [], {'max_length': '100'}),
'paypal_is_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['stores.ShoppingRegion']", 'null': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'store'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'stores.storeimage': {
'Meta': {'object_name': 'StoreImage'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'store': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'images'", 'to': "orm['stores.Store']"})
}
}
complete_apps = ['stores']
| bsd-3-clause |
chaosblog/pyload | module/Api.py | 40 | 34010 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
@author: RaNaN
"""
from base64 import standard_b64encode
from os.path import join
from time import time
import re
from PyFile import PyFile
from utils import freeSpace, compare_time
from common.packagetools import parseNames
from network.RequestFactory import getURL
from remote import activated
if activated:
try:
from remote.thriftbackend.thriftgen.pyload.ttypes import *
from remote.thriftbackend.thriftgen.pyload.Pyload import Iface
BaseObject = TBase
except ImportError:
print "Thrift not imported"
from remote.socketbackend.ttypes import *
else:
from remote.socketbackend.ttypes import *
# contains function names mapped to their permissions
# unlisted functions are for admins only
permMap = {}
# the decorator class is evaluated only at import time and __new__ returns the
# plain function, so it adds no runtime overhead
def permission(bits):
class _Dec(object):
def __new__(cls, func, *args, **kwargs):
permMap[func.__name__] = bits
return func
return _Dec
urlmatcher = re.compile(r"((https?|ftps?|xdcc|sftp):((//)|(\\\\))+[\w\d:#@%/;$()~_?\+\-=\\\.&]*)", re.IGNORECASE)
class PERMS:
ALL = 0 # requires no permission, but login
ADD = 1 # can add packages
DELETE = 2 # can delete packages
STATUS = 4 # see and change server status
LIST = 16 # see queue and collector
MODIFY = 32 # modify some attribute of downloads
DOWNLOAD = 64 # can download from webinterface
SETTINGS = 128 # can access settings
ACCOUNTS = 256 # can access accounts
LOGS = 512 # can see server logs
class ROLE:
ADMIN = 0 # admin has all permissions implicitly
USER = 1
def has_permission(userperms, perms):
# bytewise or perms before if needed
return perms == (userperms & perms)
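# Example (illustrative): a user holding the LIST and DOWNLOAD bits
#   has_permission(PERMS.LIST | PERMS.DOWNLOAD, PERMS.LIST)             -> True
#   has_permission(PERMS.LIST | PERMS.DOWNLOAD, PERMS.LIST | PERMS.ADD) -> False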
class Api(Iface):
"""
**pyLoads API**
This is accessible either internal via core.api or via thrift backend.
see Thrift specification file remote/thriftbackend/pyload.thrift\
for information about data structures and what methods are usable with RPC.
Most methods require specific permissions, please look at the source code if you need to know.\
These can be configured via webinterface.
Admin users have all permissions and are the only ones who can access the methods with no specific permission.
"""
EXTERNAL = Iface # let the json api know which methods are external
def __init__(self, core):
self.core = core
def _convertPyFile(self, p):
f = FileData(p["id"], p["url"], p["name"], p["plugin"], p["size"],
p["format_size"], p["status"], p["statusmsg"],
p["package"], p["error"], p["order"])
return f
def _convertConfigFormat(self, c):
sections = {}
for sectionName, sub in c.iteritems():
section = ConfigSection(sectionName, sub["desc"])
items = []
for key, data in sub.iteritems():
if key in ("desc", "outline"):
continue
item = ConfigItem()
item.name = key
item.description = data["desc"]
item.value = str(data["value"]) if not isinstance(data["value"], basestring) else data["value"]
item.type = data["type"]
items.append(item)
section.items = items
sections[sectionName] = section
if "outline" in sub:
section.outline = sub["outline"]
return sections
@permission(PERMS.SETTINGS)
def getConfigValue(self, category, option, section="core"):
"""Retrieve config value.
:param category: name of category, or plugin
:param option: config option
:param section: 'plugin' or 'core'
:return: config value as string
"""
if section == "core":
value = self.core.config[category][option]
else:
value = self.core.config.getPlugin(category, option)
return str(value) if not isinstance(value, basestring) else value
@permission(PERMS.SETTINGS)
def setConfigValue(self, category, option, value, section="core"):
"""Set new config value.
:param category:
:param option:
:param value: new config value
:param section: 'plugin' or 'core
"""
self.core.hookManager.dispatchEvent("configChanged", category, option, value, section)
if section == "core":
self.core.config[category][option] = value
if option in ("limit_speed", "max_speed"): #not so nice to update the limit
self.core.requestFactory.updateBucket()
elif section == "plugin":
self.core.config.setPlugin(category, option, value)
@permission(PERMS.SETTINGS)
def getConfig(self):
"""Retrieves complete config of core.
:return: list of `ConfigSection`
"""
return self._convertConfigFormat(self.core.config.config)
def getConfigDict(self):
"""Retrieves complete config in dict format, not for RPC.
:return: dict
"""
return self.core.config.config
@permission(PERMS.SETTINGS)
def getPluginConfig(self):
"""Retrieves complete config for all plugins.
:return: list of `ConfigSection`
"""
return self._convertConfigFormat(self.core.config.plugin)
def getPluginConfigDict(self):
"""Plugin config as dict, not for RPC.
:return: dict
"""
return self.core.config.plugin
@permission(PERMS.STATUS)
def pauseServer(self):
"""Pause server: Tt wont start any new downloads, but nothing gets aborted."""
self.core.threadManager.pause = True
@permission(PERMS.STATUS)
def unpauseServer(self):
"""Unpause server: New Downloads will be started."""
self.core.threadManager.pause = False
@permission(PERMS.STATUS)
def togglePause(self):
"""Toggle pause state.
:return: new pause state
"""
self.core.threadManager.pause ^= True
return self.core.threadManager.pause
@permission(PERMS.STATUS)
def toggleReconnect(self):
"""Toggle reconnect activation.
:return: new reconnect state
"""
self.core.config["reconnect"]["activated"] ^= True
return self.core.config["reconnect"]["activated"]
@permission(PERMS.LIST)
def statusServer(self):
"""Some general information about the current status of pyLoad.
:return: `ServerStatus`
"""
serverStatus = ServerStatus(self.core.threadManager.pause, len(self.core.threadManager.processingIds()),
self.core.files.getQueueCount(), self.core.files.getFileCount(), 0,
not self.core.threadManager.pause and self.isTimeDownload(),
self.core.config['reconnect']['activated'] and self.isTimeReconnect())
for pyfile in [x.active for x in self.core.threadManager.threads if x.active and isinstance(x.active, PyFile)]:
serverStatus.speed += pyfile.getSpeed() #bytes/s
return serverStatus
@permission(PERMS.STATUS)
def freeSpace(self):
"""Available free space at download directory in bytes"""
return freeSpace(self.core.config["general"]["download_folder"])
@permission(PERMS.ALL)
def getServerVersion(self):
"""pyLoad Core version """
return self.core.version
def kill(self):
"""Clean way to quit pyLoad"""
self.core.do_kill = True
def restart(self):
"""Restart pyload core"""
self.core.do_restart = True
@permission(PERMS.LOGS)
def getLog(self, offset=0):
"""Returns most recent log entries.
:param offset: line offset
:return: List of log entries
"""
filename = join(self.core.config['log']['log_folder'], 'log.txt')
try:
fh = open(filename, "r")
lines = fh.readlines()
fh.close()
if offset >= len(lines):
return []
return lines[offset:]
except IOError:
return ['No log available']
@permission(PERMS.STATUS)
def isTimeDownload(self):
"""Checks if pyload will start new downloads according to time in config.
:return: bool
"""
start = self.core.config['downloadTime']['start'].split(":")
end = self.core.config['downloadTime']['end'].split(":")
return compare_time(start, end)
@permission(PERMS.STATUS)
def isTimeReconnect(self):
"""Checks if pyload will try to make a reconnect
:return: bool
"""
start = self.core.config['reconnect']['startTime'].split(":")
end = self.core.config['reconnect']['endTime'].split(":")
return compare_time(start, end) and self.core.config["reconnect"]["activated"]
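# Example: with startTime "22:00" and endTime "06:00" the reconnect window is
# assumed to wrap past midnight, so 23:30 falls inside it while 12:00 does not
# (assumed semantics of the imported compare_time helper).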
@permission(PERMS.LIST)
def statusDownloads(self):
""" Status off all currently running downloads.
:return: list of `DownloadStatus`
"""
data = []
for pyfile in self.core.threadManager.getActiveFiles():
if not isinstance(pyfile, PyFile):
continue
data.append(DownloadInfo(
pyfile.id, pyfile.name, pyfile.getSpeed(), pyfile.getETA(), pyfile.formatETA(),
pyfile.getBytesLeft(), pyfile.getSize(), pyfile.formatSize(), pyfile.getPercent(),
pyfile.status, pyfile.getStatusName(), pyfile.formatWait(),
pyfile.waitUntil, pyfile.packageid, pyfile.package().name, pyfile.pluginname))
return data
@permission(PERMS.ADD)
def addPackage(self, name, links, dest=Destination.Queue):
"""Adds a package, with links to desired destination.
:param name: name of the new package
:param links: list of urls
:param dest: `Destination`
:return: package id of the new package
"""
if self.core.config['general']['folder_per_package']:
folder = name
else:
folder = ""
folder = folder.replace("http://", "").replace(":", "").replace("/", "_").replace("\\", "_")
pid = self.core.files.addPackage(name, folder, dest)
self.core.files.addLinks(links, pid)
self.core.log.info(_("Added package %(name)s containing %(count)d links") % {"name": name, "count": len(links)})
self.core.files.save()
return pid
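# Hypothetical usage:
#   pid = api.addPackage("vacation_pics",
#                        ["http://example.com/a.zip", "http://example.com/b.zip"])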
@permission(PERMS.ADD)
def parseURLs(self, html=None, url=None):
"""Parses html content or any arbitaty text for links and returns result of `checkURLs`
:param html: html source
:return:
"""
urls = []
if html:
urls += [x[0] for x in urlmatcher.findall(html)]
if url:
page = getURL(url)
urls += [x[0] for x in urlmatcher.findall(page)]
# remove duplicates
return self.checkURLs(set(urls))
@permission(PERMS.ADD)
def checkURLs(self, urls):
""" Gets urls and returns pluginname mapped to list of matches urls.
:param urls:
:return: {plugin: urls}
"""
data = self.core.pluginManager.parseUrls(urls)
plugins = {}
for url, plugin in data:
if plugin in plugins:
plugins[plugin].append(url)
else:
plugins[plugin] = [url]
return plugins
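# Sketch of the returned mapping (hoster names are illustrative only):
#   {"SomeHoster": ["http://somehoster.example/f1"],
#    "BasePlugin": ["http://unmatched.example/f2"]}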
@permission(PERMS.ADD)
def checkOnlineStatus(self, urls):
""" initiates online status check
:param urls:
:return: initial set of data as `OnlineCheck` instance containing the result id
"""
data = self.core.pluginManager.parseUrls(urls)
rid = self.core.threadManager.createResultThread(data, False)
tmp = [(url, (url, OnlineStatus(url, pluginname, "unknown", 3, 0))) for url, pluginname in data]
data = parseNames(tmp)
result = {}
for k, v in data.iteritems():
for url, status in v:
status.packagename = k
result[url] = status
return OnlineCheck(rid, result)
@permission(PERMS.ADD)
def checkOnlineStatusContainer(self, urls, container, data):
""" checks online status of urls and a submited container file
:param urls: list of urls
:param container: container file name
:param data: file content
:return: online check
"""
th = open(join(self.core.config["general"]["download_folder"], "tmp_" + container), "wb")
th.write(str(data))
th.close()
return self.checkOnlineStatus(urls + [th.name])
@permission(PERMS.ADD)
def pollResults(self, rid):
""" Polls the result available for ResultID
:param rid: `ResultID`
:return: `OnlineCheck`, if rid is -1 then no more data available
"""
result = self.core.threadManager.getInfoResult(rid)
if "ALL_INFO_FETCHED" in result:
del result["ALL_INFO_FETCHED"]
return OnlineCheck(-1, result)
else:
return OnlineCheck(rid, result)
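# Minimal client-side polling sketch (assuming the OnlineCheck fields mirror
# the constructor arguments, i.e. `rid` and a result mapping):
#   check = api.checkOnlineStatus(urls)
#   while check.rid != -1:
#       check = api.pollResults(check.rid)  # rid turns -1 once all info is fetched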
@permission(PERMS.ADD)
def generatePackages(self, links):
""" Parses links, generates packages names from urls
:param links: list of urls
:return: package names mapped to urls
"""
result = parseNames((x, x) for x in links)
return result
@permission(PERMS.ADD)
def generateAndAddPackages(self, links, dest=Destination.Queue):
"""Generates and add packages
:param links: list of urls
:param dest: `Destination`
:return: list of package ids
"""
return [self.addPackage(name, urls, dest) for name, urls
in self.generatePackages(links).iteritems()]
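# Hypothetical usage: turns a flat url list into one package per generated
# name and returns the new package ids:
#   pids = api.generateAndAddPackages(["http://example.com/a.rar",
#                                      "http://example.com/b.rar"])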
@permission(PERMS.ADD)
def checkAndAddPackages(self, links, dest=Destination.Queue):
"""Checks online status, retrieves names, and will add packages.\
Because of this packages are not added immediatly, only for internal use.
:param links: list of urls
:param dest: `Destination`
:return: None
"""
data = self.core.pluginManager.parseUrls(links)
self.core.threadManager.createResultThread(data, True)
@permission(PERMS.LIST)
def getPackageData(self, pid):
"""Returns complete information about package, and included files.
:param pid: package id
:return: `PackageData` with .links attribute
"""
data = self.core.files.getPackageData(int(pid))
if not data:
raise PackageDoesNotExists(pid)
pdata = PackageData(data["id"], data["name"], data["folder"], data["site"], data["password"],
data["queue"], data["order"],
links=[self._convertPyFile(x) for x in data["links"].itervalues()])
return pdata
@permission(PERMS.LIST)
def getPackageInfo(self, pid):
"""Returns information about package, without detailed information about containing files
:param pid: package id
:return: `PackageData` with .fid attribute
"""
data = self.core.files.getPackageData(int(pid))
if not data:
raise PackageDoesNotExists(pid)
pdata = PackageData(data["id"], data["name"], data["folder"], data["site"], data["password"],
data["queue"], data["order"],
fids=[int(x) for x in data["links"]])
return pdata
@permission(PERMS.LIST)
def getFileData(self, fid):
"""Get complete information about a specific file.
:param fid: file id
:return: `FileData`
"""
info = self.core.files.getFileData(int(fid))
if not info:
raise FileDoesNotExists(fid)
fdata = self._convertPyFile(info.values()[0])
return fdata
@permission(PERMS.DELETE)
def deleteFiles(self, fids):
"""Deletes several file entries from pyload.
:param fids: list of file ids
"""
for id in fids:
self.core.files.deleteLink(int(id))
self.core.files.save()
@permission(PERMS.DELETE)
def deletePackages(self, pids):
"""Deletes packages and containing links.
:param pids: list of package ids
"""
for id in pids:
self.core.files.deletePackage(int(id))
self.core.files.save()
@permission(PERMS.LIST)
def getQueue(self):
"""Returns info about queue and packages, **not** about files, see `getQueueData` \
or `getPackageData` instead.
:return: list of `PackageInfo`
"""
return [PackageData(pack["id"], pack["name"], pack["folder"], pack["site"],
pack["password"], pack["queue"], pack["order"],
pack["linksdone"], pack["sizedone"], pack["sizetotal"],
pack["linkstotal"])
for pack in self.core.files.getInfoData(Destination.Queue).itervalues()]
@permission(PERMS.LIST)
def getQueueData(self):
"""Return complete data about everything in queue, this is very expensive use it sparely.\
See `getQueue` for alternative.
:return: list of `PackageData`
"""
return [PackageData(pack["id"], pack["name"], pack["folder"], pack["site"],
pack["password"], pack["queue"], pack["order"],
pack["linksdone"], pack["sizedone"], pack["sizetotal"],
links=[self._convertPyFile(x) for x in pack["links"].itervalues()])
for pack in self.core.files.getCompleteData(Destination.Queue).itervalues()]
@permission(PERMS.LIST)
def getCollector(self):
"""same as `getQueue` for collector.
:return: list of `PackageInfo`
"""
return [PackageData(pack["id"], pack["name"], pack["folder"], pack["site"],
pack["password"], pack["queue"], pack["order"],
pack["linksdone"], pack["sizedone"], pack["sizetotal"],
pack["linkstotal"])
for pack in self.core.files.getInfoData(Destination.Collector).itervalues()]
@permission(PERMS.LIST)
def getCollectorData(self):
"""same as `getQueueData` for collector.
:return: list of `PackageInfo`
"""
return [PackageData(pack["id"], pack["name"], pack["folder"], pack["site"],
pack["password"], pack["queue"], pack["order"],
pack["linksdone"], pack["sizedone"], pack["sizetotal"],
links=[self._convertPyFile(x) for x in pack["links"].itervalues()])
for pack in self.core.files.getCompleteData(Destination.Collector).itervalues()]
@permission(PERMS.ADD)
def addFiles(self, pid, links):
"""Adds files to specific package.
:param pid: package id
:param links: list of urls
"""
self.core.files.addLinks(links, int(pid))
self.core.log.info(_("Added %(count)d links to package #%(package)d ") % {"count": len(links), "package": pid})
self.core.files.save()
@permission(PERMS.MODIFY)
def pushToQueue(self, pid):
"""Moves package from Collector to Queue.
:param pid: package id
"""
self.core.files.setPackageLocation(pid, Destination.Queue)
@permission(PERMS.MODIFY)
def pullFromQueue(self, pid):
"""Moves package from Queue to Collector.
:param pid: package id
"""
self.core.files.setPackageLocation(pid, Destination.Collector)
@permission(PERMS.MODIFY)
def restartPackage(self, pid):
"""Restarts a package, resets every containing files.
:param pid: package id
"""
self.core.files.restartPackage(int(pid))
@permission(PERMS.MODIFY)
def restartFile(self, fid):
"""Resets file status, so it will be downloaded again.
:param fid: file id
"""
self.core.files.restartFile(int(fid))
@permission(PERMS.MODIFY)
def recheckPackage(self, pid):
"""Proofes online status of all files in a package, also a default action when package is added.
:param pid:
:return:
"""
self.core.files.reCheckPackage(int(pid))
@permission(PERMS.MODIFY)
def stopAllDownloads(self):
"""Aborts all running downloads."""
pyfiles = self.core.files.cache.values()
for pyfile in pyfiles:
pyfile.abortDownload()
@permission(PERMS.MODIFY)
def stopDownloads(self, fids):
"""Aborts specific downloads.
:param fids: list of file ids
:return:
"""
pyfiles = self.core.files.cache.values()
for pyfile in pyfiles:
if pyfile.id in fids:
pyfile.abortDownload()
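# Hypothetical usage:
#   api.stopDownloads([12, 13])  # aborts only these file ids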
@permission(PERMS.MODIFY)
def setPackageName(self, pid, name):
"""Renames a package.
:param pid: package id
:param name: new package name
"""
pack = self.core.files.getPackage(pid)
pack.name = name
pack.sync()
@permission(PERMS.MODIFY)
def movePackage(self, destination, pid):
"""Set a new package location.
:param destination: `Destination`
:param pid: package id
"""
if destination not in (0, 1): return
self.core.files.setPackageLocation(pid, destination)
@permission(PERMS.MODIFY)
def moveFiles(self, fids, pid):
"""Move multiple files to another package
:param fids: list of file ids
:param pid: destination package
:return:
"""
#TODO: implement
pass
@permission(PERMS.ADD)
def uploadContainer(self, filename, data):
"""Uploads and adds a container file to pyLoad.
:param filename: filename; the extension is important so it can be correctly decrypted
:param data: file content
"""
th = open(join(self.core.config["general"]["download_folder"], "tmp_" + filename), "wb")
th.write(str(data))
th.close()
self.addPackage(th.name, [th.name], Destination.Queue)
@permission(PERMS.MODIFY)
def orderPackage(self, pid, position):
"""Gives a package a new position.
:param pid: package id
:param position:
"""
self.core.files.reorderPackage(pid, position)
@permission(PERMS.MODIFY)
def orderFile(self, fid, position):
"""Gives a new position to a file within its package.
:param fid: file id
:param position:
"""
self.core.files.reorderFile(fid, position)
@permission(PERMS.MODIFY)
def setPackageData(self, pid, data):
"""Allows to modify several package attributes.
:param pid: package id
:param data: dict that maps attribute to desired value
"""
p = self.core.files.getPackage(pid)
if not p: raise PackageDoesNotExists(pid)
for key, value in data.iteritems():
if key == "id": continue
setattr(p, key, value)
p.sync()
self.core.files.save()
@permission(PERMS.DELETE)
def deleteFinished(self):
"""Deletes all finished files and completly finished packages.
:return: list of deleted package ids
"""
return self.core.files.deleteFinishedLinks()
@permission(PERMS.MODIFY)
def restartFailed(self):
"""Restarts all failed failes."""
self.core.files.restartFailed()
@permission(PERMS.LIST)
def getPackageOrder(self, destination):
"""Returns information about package order.
:param destination: `Destination`
:return: dict mapping order to package id
"""
packs = self.core.files.getInfoData(destination)
order = {}
for pid in packs:
pack = self.core.files.getPackageData(int(pid))
while pack["order"] in order.keys(): #just in case
pack["order"] += 1
order[pack["order"]] = pack["id"]
return order
@permission(PERMS.LIST)
def getFileOrder(self, pid):
"""Information about file order within package.
:param pid:
:return: dict mapping order to file id
"""
rawData = self.core.files.getPackageData(int(pid))
order = {}
for id, pyfile in rawData["links"].iteritems():
while pyfile["order"] in order.keys(): #just in case
pyfile["order"] += 1
order[pyfile["order"]] = pyfile["id"]
return order
@permission(PERMS.STATUS)
def isCaptchaWaiting(self):
"""Indicates wether a captcha task is available
:return: bool
"""
self.core.lastClientConnected = time()
task = self.core.captchaManager.getTask()
return task is not None
@permission(PERMS.STATUS)
def getCaptchaTask(self, exclusive=False):
"""Returns a captcha task
:param exclusive: unused
:return: `CaptchaTask`
"""
self.core.lastClientConnected = time()
task = self.core.captchaManager.getTask()
if task:
task.setWatingForUser(exclusive=exclusive)
data, type, result = task.getCaptcha()
t = CaptchaTask(int(task.id), standard_b64encode(data), type, result)
return t
else:
return CaptchaTask(-1)
@permission(PERMS.STATUS)
def getCaptchaTaskStatus(self, tid):
"""Get information about captcha task
:param tid: task id
:return: string
"""
self.core.lastClientConnected = time()
t = self.core.captchaManager.getTaskByID(tid)
return t.getStatus() if t else ""
@permission(PERMS.STATUS)
def setCaptchaResult(self, tid, result):
"""Set result for a captcha task
:param tid: task id
:param result: captcha result
"""
self.core.lastClientConnected = time()
task = self.core.captchaManager.getTaskByID(tid)
if task:
task.setResult(result)
self.core.captchaManager.removeTask(task)
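# Sketch of a full captcha round trip (field names are assumed to mirror the
# CaptchaTask constructor; `solve` is a hypothetical solver function):
#   if api.isCaptchaWaiting():
#       task = api.getCaptchaTask()
#       if task.tid != -1:
#           api.setCaptchaResult(task.tid, solve(task.data))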
@permission(PERMS.STATUS)
def getEvents(self, uuid):
"""Lists occured events, may be affected to changes in future.
:param uuid:
:return: list of `Events`
"""
events = self.core.pullManager.getEvents(uuid)
newEvents = []
def convDest(d):
return Destination.Queue if d == "queue" else Destination.Collector
for e in events:
event = EventInfo()
event.eventname = e[0]
if e[0] in ("update", "remove", "insert"):
event.id = e[3]
event.type = ElementType.Package if e[2] == "pack" else ElementType.File
event.destination = convDest(e[1])
elif e[0] == "order":
if e[1]:
event.id = e[1]
event.type = ElementType.Package if e[2] == "pack" else ElementType.File
event.destination = convDest(e[3])
elif e[0] == "reload":
event.destination = convDest(e[1])
newEvents.append(event)
return newEvents
@permission(PERMS.ACCOUNTS)
def getAccounts(self, refresh):
"""Get information about all entered accounts.
:param refresh: reload account info
:return: list of `AccountInfo`
"""
accs = self.core.accountManager.getAccountInfos(False, refresh)
accounts = []
for group in accs.values():
accounts.extend([AccountInfo(acc["validuntil"], acc["login"], acc["options"], acc["valid"],
acc["trafficleft"], acc["maxtraffic"], acc["premium"], acc["type"])
for acc in group])
return accounts
@permission(PERMS.ALL)
def getAccountTypes(self):
"""All available account types.
:return: list
"""
return self.core.accountManager.accounts.keys()
@permission(PERMS.ACCOUNTS)
def updateAccount(self, plugin, account, password=None, options=None):
"""Changes password/options for a specific account."""
self.core.accountManager.updateAccount(plugin, account, password, options or {})
@permission(PERMS.ACCOUNTS)
def removeAccount(self, plugin, account):
"""Remove account from pyload.
:param plugin: pluginname
:param account: accountname
"""
self.core.accountManager.removeAccount(plugin, account)
@permission(PERMS.ALL)
def login(self, username, password, remoteip=None):
"""Login into pyLoad, this **must** be called when using rpc before any methods can be used.
:param username:
:param password:
:param remoteip: omit this argument; it is only used internally
:return: bool indicating login was successful
"""
return bool(self.checkAuth(username, password, remoteip))
def checkAuth(self, username, password, remoteip=None):
"""Check authentication and returns details
:param username:
:param password:
:param remoteip:
:return: dict with info, empty when login is incorrect
"""
if self.core.config["remote"]["nolocalauth"] and remoteip == "127.0.0.1":
return "local"
if self.core.startedInGui and remoteip == "127.0.0.1":
return "local"
return self.core.db.checkAuth(username, password)
def isAuthorized(self, func, userdata):
"""checks if the user is authorized for specific method
:param func: function name
:param userdata: dictionary of user data
:return: boolean
"""
if userdata == "local" or userdata["role"] == ROLE.ADMIN:
return True
elif func in permMap and has_permission(userdata["permission"], permMap[func]):
return True
else:
return False
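# Worked example of the check above: a user whose permission bitmask includes
# PERMS.ADD may call addPackage (looked up via permMap), while kill() and
# restart() carry no @permission decorator and therefore stay reachable only
# for admins or "local" sessions.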
@permission(PERMS.ALL)
def getUserData(self, username, password):
"""similar to `checkAuth` but returns UserData thrift type """
user = self.checkAuth(username, password)
if user:
return UserData(user["name"], user["email"], user["role"], user["permission"], user["template"])
else:
return UserData()
def getAllUserData(self):
"""returns all known user and info"""
res = {}
for user, data in self.core.db.getAllUserData().iteritems():
res[user] = UserData(user, data["email"], data["role"], data["permission"], data["template"])
return res
@permission(PERMS.STATUS)
def getServices(self):
""" A dict of available services, these can be defined by hook plugins.
:return: dict with this style: {"plugin": {"method": "description"}}
"""
data = {}
for plugin, funcs in self.core.hookManager.methods.iteritems():
data[plugin] = funcs
return data
@permission(PERMS.STATUS)
def hasService(self, plugin, func):
"""Checks wether a service is available.
:param plugin:
:param func:
:return: bool
"""
cont = self.core.hookManager.methods
return plugin in cont and func in cont[plugin]
@permission(PERMS.STATUS)
def call(self, info):
"""Calls a service (a method in hook plugin).
:param info: `ServiceCall`
:return: result
:raises: ServiceDoesNotExists, when its not available
:raises: ServiceException, when a exception was raised
"""
plugin = info.plugin
func = info.func
args = info.arguments
parse = info.parseArguments
if not self.hasService(plugin, func):
raise ServiceDoesNotExists(plugin, func)
try:
ret = self.core.hookManager.callRPC(plugin, func, args, parse)
return str(ret)
except Exception, e:
raise ServiceException(e.message)
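# Hypothetical usage (plugin and method names are illustrative only; the
# ServiceCall fields match the attributes read above):
#   sc = ServiceCall()
#   sc.plugin, sc.func = "SomeHook", "announce"
#   sc.arguments, sc.parseArguments = ["hello"], False
#   print api.call(sc)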
@permission(PERMS.STATUS)
def getAllInfo(self):
"""Returns all information stored by hook plugins. Values are always strings
:return: {"plugin": {"name": value } }
"""
return self.core.hookManager.getAllInfo()
@permission(PERMS.STATUS)
def getInfoByPlugin(self, plugin):
"""Returns information stored by a specific plugin.
:param plugin: pluginname
:return: dict of attr names mapped to value {"name": value}
"""
return self.core.hookManager.getInfo(plugin)
def changePassword(self, user, oldpw, newpw):
""" changes password for specific user """
return self.core.db.changePassword(user, oldpw, newpw)
def setUserPermission(self, user, permission, role):
self.core.db.setPermission(user, permission)
self.core.db.setRole(user, role) | gpl-3.0 |
joerg84/arangodb | 3rdParty/V8/v5.7.0.0/tools/gyp/test/win/gyptest-link-update-manifest.py | 226 | 3008 | #!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure binary is relinked when manifest settings are changed.
"""
import TestGyp
import os
import sys
if sys.platform == 'win32':
import pywintypes
import win32api
import winerror
RT_MANIFEST = 24
class LoadLibrary(object):
"""Context manager for loading and releasing binaries in Windows.
Yields the handle of the binary loaded."""
def __init__(self, path):
self._path = path
self._handle = None
def __enter__(self):
self._handle = win32api.LoadLibrary(self._path)
return self._handle
def __exit__(self, type, value, traceback):
win32api.FreeLibrary(self._handle)
def extract_manifest(path, resource_name):
"""Reads manifest from |path| and returns it as a string.
Returns None if there is no such manifest."""
with LoadLibrary(path) as handle:
try:
return win32api.LoadResource(handle, RT_MANIFEST, resource_name)
except pywintypes.error as error:
if error.args[0] == winerror.ERROR_RESOURCE_DATA_NOT_FOUND:
return None
else:
raise
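# Hypothetical standalone usage (Windows only); resource id 1 is the
# conventional id for an executable's embedded manifest:
#   print extract_manifest(r'C:\some\binary.exe', 1)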
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'linker-flags'
gyp_template = '''
{
'targets': [
{
'target_name': 'test_update_manifest',
'type': 'executable',
'sources': ['hello.cc'],
'msvs_settings': {
'VCLinkerTool': {
'EnableUAC': 'true',
'UACExecutionLevel': '%(uac_execution_level)d',
},
'VCManifestTool': {
'EmbedManifest': 'true',
'AdditionalManifestFiles': '%(additional_manifest_files)s',
},
},
},
],
}
'''
gypfile = 'update-manifest.gyp'
def WriteAndUpdate(uac_execution_level, additional_manifest_files, do_build):
with open(os.path.join(CHDIR, gypfile), 'wb') as f:
f.write(gyp_template % {
'uac_execution_level': uac_execution_level,
'additional_manifest_files': additional_manifest_files,
})
test.run_gyp(gypfile, chdir=CHDIR)
if do_build:
test.build(gypfile, chdir=CHDIR)
exe_file = test.built_file_path('test_update_manifest.exe', chdir=CHDIR)
return extract_manifest(exe_file, 1)
manifest = WriteAndUpdate(0, '', True)
test.fail_test('asInvoker' not in manifest)
test.fail_test('35138b9a-5d96-4fbd-8e2d-a2440225f93a' in manifest)
# Make sure that updating .gyp and regenerating doesn't cause a rebuild.
WriteAndUpdate(0, '', False)
test.up_to_date(gypfile, test.ALL, chdir=CHDIR)
# But make sure that changing a manifest property does cause a relink.
manifest = WriteAndUpdate(2, '', True)
test.fail_test('requireAdministrator' not in manifest)
# Adding a manifest causes a rebuild.
manifest = WriteAndUpdate(2, 'extra.manifest', True)
test.fail_test('35138b9a-5d96-4fbd-8e2d-a2440225f93a' not in manifest)
| apache-2.0 |