| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
ljmanso/AGM2
|
AGGLPlanner/agglplannerclient.py
|
1
|
1406
|
#sudo apt-get install pypy pypy-setuptools
#git clone https://github.com/eleme/thriftpy.git
#cd thriftpy
#sudo pypy setup.py install
#from thriftpy.protocol.binary import TBinaryProtocolFactory
#from thriftpy.transport.buffered import TBufferedTransportFactory
#from thriftpy.transport.framed import TFramedTransportFactory
import thriftpy
agglplanner_thrift = thriftpy.load("/usr/local/share/agm/agglplanner.thrift", module_name="agglplanner_thrift")
from thriftpy.rpc import make_client
client = make_client(agglplanner_thrift.AGGLPlanner, '127.0.0.1', 6000)
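# The calls below exercise the planner service end to end: register the
# planning domain, read the initial world model, register the target, start
# an asynchronous planning job, then fetch the results by job identifier.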
print 'Get domain id...'
domainText = open('/home/robocomp/robocomp/components/robocomp-shelly/files/planningDomain/domain_min.aggl', 'r').read()
domainId = client.getDomainIdentifier(domainText)
print domainId
print 'Reading init world...'
initWorld = open('/home/robocomp/robocomp/components/robocomp-shelly/etc/initialModel_hybrid.xml', 'r').read()
print 'Get target id...'
targetText = open('/home/robocomp/robocomp/components/robocomp-shelly/etc/targetRestPosition.aggt', 'r').read()
print type(targetText), targetText
targetId = client.getTargetIdentifier(targetText)
print targetId
print 'Calling planner...'
jobIdentifier = client.startPlanning(domainId, initWorld, targetId, [], [])
print 'got job identifier', jobIdentifier
print 'Asking for results...'
result = client.getPlanningResults(jobIdentifier)
print result.plan
|
gpl-3.0
|
WeblateOrg/weblate
|
weblate/addons/management/commands/list_addons.py
|
1
|
3032
|
#
# Copyright © 2012 - 2021 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
from textwrap import wrap
from weblate.addons.models import ADDONS, Addon
from weblate.trans.models import Component, Project
from weblate.utils.management.base import BaseCommand
class Command(BaseCommand):
help = "List installed add-ons"
def handle(self, *args, **options):
"""List installed add-ons."""
fake_addon = Addon(component=Component(project=Project()))
for _unused, obj in sorted(ADDONS.items()):
self.stdout.write(f".. _addon-{obj.name}:")
self.stdout.write("\n")
self.stdout.write(obj.verbose)
self.stdout.write("-" * len(obj.verbose))
self.stdout.write("\n")
self.stdout.write(f":Add-on ID: ``{obj.name}``")
if obj.settings_form:
form = obj(fake_addon).get_settings_form(None)
table = [
(f"``{name}``", str(field.label), str(field.help_text))
for name, field in form.fields.items()
]
prefix = ":Configuration: "
name_width = max(len(row[0]) for row in table)
label_width = max(len(row[1]) for row in table)
help_text_width = max(len(row[2]) for row in table)
name_row = "-" * (name_width + 2)
label_row = "-" * (label_width + 2)
help_text_row = "-" * (help_text_width + 2)
for name, label, help_text in table:
if not prefix.isspace():
self.stdout.write(
f"{prefix}+{name_row}+{label_row}+{help_text_row}+"
)
prefix = " "
self.stdout.write(
f"{prefix}| {name:<{name_width}s} | {label:<{label_width}s} | {help_text:<{help_text_width}s} |"
)
self.stdout.write(
f"{prefix}+{name_row}+{label_row}+{help_text_row}+"
)
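# Illustrative shape of the emitted reStructuredText grid table
# (hypothetical field, not from a real add-on):
#   :Configuration: +-----------+-------+------------------+
#                   | ``width`` | Width | Line width limit |
#                   +-----------+-------+------------------+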
else:
self.stdout.write(":Configuration: `This add-on has no configuration.`")
self.stdout.write("\n")
self.stdout.write("\n".join(wrap(obj.description, 79)))
self.stdout.write("\n")
|
gpl-3.0
|
Antiun/purchase-workflow
|
mrp_smart_purchase/mrp_smart_purchase.py
|
27
|
4265
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi
# Copyright 2012 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv.orm import Model
class MrpProcurement(Model):
"""Mrp Procurement we override action_po assing to get the cheapest
supplier, if you want to change priority parameters just change the
_supplier_to_tuple function TODO remove hack if merge proposal accepted
look in action_po_assing for details"""
_inherit = "procurement.order"
def action_po_assign(self, cursor, uid, ids, context=None):
context = context or {}
# stack is product id : qty
# this is a hack because make_po has no function to get the supplier,
# so I pass the required data in the context.
# I know that sucks, but OpenERP won't change this function in a stable
# release; a merge proposal for trunk is running.
context['smart_mrp_stack'] = {}
for proc in self.browse(cursor, uid, ids, context):
context['smart_mrp_stack'][proc.product_id.id] = proc.product_qty
res = super(MrpProcurement, self).action_po_assign(
cursor, uid, ids, context=context)
return res
class ProductTemplate(Model):
""" We overrride the get_main_supplier function
that is used to retrieve supplier in function fields"""
_name = "product.template"
_inherit = "product.template"
def _supplier_to_tuple(self, cursor, uid, supplier_id, price, product_id):
""" Generate an tuple that can be sorted """
# This is not the most performant way, but it allows easy overriding;
# a faster solution would be to populate a mapping hash in
# _get_main_product_supplier
info_obj = self.pool.get('product.supplierinfo')
info_id = info_obj.search(
cursor, uid, [('product_id', '=', product_id),
('name', '=', supplier_id)], order='sequence')[0]
info = info_obj.browse(cursor, uid, info_id)
res_tuple = (price, info.delay, info.sequence or 10000, info.id)
return res_tuple
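# Illustrative ordering (hypothetical values): (12.5, 3, 10, 41) sorts
# before (12.5, 5, 2, 42) because equal prices fall through to the
# shorter delay, then to the supplier-info sequence, then to the id.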
def _get_main_product_supplier(self, cursor, uid, product, context=None):
"""Determines the main (best) product supplier for ``product``,
using smart_mrp_stack in the context to determine the qty; otherwise it falls back to the sequence-based default
"""
info_obj = self.pool.get('product.supplierinfo')
context = context or {}
smart_mrp_stack = context.get('smart_mrp_stack', {})
if product.id in smart_mrp_stack:
# we look for best prices based on supplier info
sellers = product.seller_ids
supplier_ids = [x.name.id for x in sellers]
qty = smart_mrp_stack.get(product.id, 1)
best_prices_persupplier = info_obj.price_get(
cursor, uid, supplier_ids,
product.id, qty, context=context)
# Assumption: when sorting, price is more important than delay
final_choice = []
for supp, price in best_prices_persupplier.items():
final_choice.append(
self._supplier_to_tuple(cursor, uid, supp, price,
product.id))
final_choice.sort()
return info_obj.browse(cursor, uid, final_choice[0][3])
else:
return super(ProductTemplate, self)._get_main_product_supplier(
cursor, uid, product, context)
return False
|
agpl-3.0
|
OscarPDR/labman_ud
|
labman_ud/entities/publications/views.py
|
1
|
23606
|
# -*- coding: utf-8 -*-
import threading
import weakref
import re
from django.template.defaultfilters import slugify  # needed by the author/editor name filters below
from django.core import serializers
from django.contrib.syndication.views import Feed
from django.core.urlresolvers import reverse
from django.core.paginator import Paginator
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.db.models import Q
from .forms import PublicationSearchForm
from .models import *
from entities.persons.models import Person
from entities.projects.models import Project, RelatedPublication
from entities.utils.models import Tag
from labman_setup.models import *
from labman_ud.util import *
from collections import OrderedDict, Counter
### publication_index
####################################################################################################
def _validate_term(token, name, numeric=False):
if not token.startswith(name):
return False
remainder = token[len(name):]
if not remainder:
return False
if numeric:
try:
int(remainder)
except:
return False
return True
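# Illustrative behaviour of the helper above:
#   _validate_term('year:2014', 'year:', numeric=True) -> True
#   _validate_term('year:abc', 'year:', numeric=True) -> False
#   _validate_term('author:', 'author:') -> False (empty remainder)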
def publication_index(request, tag_slug=None, publication_type=None, query_string=None, page=1):
tag = None
form_from_year = None
form_from_range = None
form_to_year = None
form_to_range = None
form_publication_types = None
form_tags = None
form_authors_name = []
form_editors_name = []
clean_index = False
request.session['max_publication_year'] = MAX_YEAR_LIMIT
request.session['min_publication_year'] = MIN_YEAR_LIMIT
if tag_slug:
tag = get_object_or_404(Tag, slug=tag_slug)
publication_ids = PublicationTag.objects.filter(tag=tag).values('publication_id')
publications = Publication.objects.filter(id__in=publication_ids).prefetch_related('authors')
if publication_type:
publications = Publication.objects.filter(child_type=publication_type)
if not tag_slug and not publication_type:
clean_index = True
publications = Publication.objects.all().prefetch_related('authors')
publications = publications.order_by('-year', '-title').exclude(authors=None)
if request.method == 'POST':
form_author_field_count = request.POST.get('author_field_count')
form_editor_field_count = request.POST.get('editor_field_count')
form = PublicationSearchForm(request.POST, extra_author=form_author_field_count,
extra_editor=form_editor_field_count)
if form.is_valid():
query_string = form.cleaned_data['text']
form_from_year = form.cleaned_data['from_year']
form_from_range = form.cleaned_data['from_range']
form_to_year = form.cleaned_data['to_year']
form_to_range = form.cleaned_data['to_range']
form_publication_types = form.cleaned_data['publication_types']
form_tags = form.cleaned_data['tags']
for my_tuple in reversed(form.fields.items()):
if my_tuple[0].startswith('editor_name_'):
form_editor_name = form.cleaned_data[my_tuple[0]]
if form_editor_name:
form_editors_name.append(form_editor_name)
elif my_tuple[0].startswith('author_name_'):
form_author_name = form.cleaned_data[my_tuple[0]]
if form_author_name:
form_authors_name.append(form_author_name)
elif not my_tuple[0].startswith('editor_name_'):
break
if form_from_year:
if form_from_range == '<':
publications = publications.filter(year__lt=form_from_year)
elif form_from_range == '<=':
publications = publications.filter(year__lte=form_from_year)
elif form_from_range == '>':
publications = publications.filter(year__gt=form_from_year)
elif form_from_range == '>=':
publications = publications.filter(year__gte=form_from_year)
elif form_from_range == '==':
publications = publications.filter(year=form_from_year)
if form_to_year:
if form_to_range == '<':
publications = publications.filter(year__lt=form_to_year)
elif form_to_range == '<=':
publications = publications.filter(year__lte=form_to_year)
if form_publication_types:
publications = publications.filter(child_type__in=form_publication_types)
print("LEN")
print(len(publications))
if form_tags:
publications = publications.filter(publicationtag__tag__name__in=form_tags)
found = True
if form_authors_name:
group_publication = []
for name in form_authors_name:
person_id = Person.objects.filter(slug__contains=slugify(name)).values_list('id', flat=True)
if person_id and found:
person_publications_set = set()
for _id in person_id:
person_publications = PublicationAuthor.objects.all().filter(author_id=_id).values_list('publication_id', flat=True)
if person_publications:
person_publications_set.update(person_publications)
group_publication.append(person_publications_set)
else:
found = False
if group_publication and found:
publications = publications.filter(id__in=list(set.intersection(*group_publication)))
if form_editors_name:
group_publication = []
for name in form_editors_name:
person_id = Person.objects.filter(slug__contains=slugify(name)).values_list('id', flat=True)
if person_id and found:
person_publications_set = set()
for _id in person_id:
print PublicationEditor.objects.all().values_list('editor__full_name', flat=True)
person_publications = PublicationEditor.objects.all().filter(editor_id=_id).values_list('publication_id', flat=True)
if person_publications:
person_publications_set.update(person_publications)
group_publication.append(person_publications_set)
else:
found = False
if group_publication and found:
publications = publications.filter(id__in=list(set.intersection(*group_publication)))
if not found:
publications = []
session_filter_dict = {
'query_string' : query_string,
'publications': serializers.serialize('json', publications),
'form_from_year' : form_from_year,
'form_from_range' : form_from_range,
'form_to_year' : form_to_year,
'form_to_range' : form_to_range,
'form_publication_types' : form_publication_types,
'form_tags' : form_tags,
'form_authors_name' : form_authors_name,
'form_editors_name' : form_editors_name,
'form_author_field_count' : len(form_authors_name),
'form_editor_field_count' : len(form_editors_name),
}
print('BUSQUEDA AVANZADA')
request.session['filtered'] = session_filter_dict
return HttpResponseRedirect(reverse('filtered_publication_query', kwargs={'page':'1'}))
else:
if 'filtered' in request.session.keys():
p = re.compile(ur'publications\/filtered(\/\?page=[1-9]+)?')
if re.search(p, request.path) is None:
# If the requested page is not filtered, delete the session filter info.
del request.session['filtered']
# Load the default search form.
form = PublicationSearchForm(extra_author=1, extra_editor=1)
else:
# If the requested page is filtered, load the info from the session.
author_field_count = request.session['filtered']['form_author_field_count']
editor_field_count = request.session['filtered']['form_editor_field_count']
if author_field_count == 0:
author_field_count = 1
if editor_field_count == 0:
editor_field_count = 1
form = PublicationSearchForm(extra_author=author_field_count,
extra_editor=editor_field_count)
query_string = request.session['filtered']['query_string']
publications = []
for deserialized_object in serializers.deserialize('json',
request.session['filtered']['publications']):
publications.append(deserialized_object.object)
form_from_year = request.session['filtered']['form_from_year']
form_from_range = request.session['filtered']['form_from_range']
form_to_year = request.session['filtered']['form_to_year']
form_to_range = request.session['filtered']['form_to_range']
form_publication_types = []
for utf8type in request.session['filtered']['form_publication_types']:
form_publication_types.append(utf8type.encode('utf8'))
print("PUBTYPES")
print(form_publication_types)
form_tags = request.session['filtered']['form_tags']
form_authors_name = []
for utf8type in request.session['filtered']['form_authors_name']:
form_authors_name.append(utf8type.encode('utf8'))
form_editors_name = []
for utf8type in request.session['filtered']['form_editors_name']:
form_editors_name.append(utf8type.encode('utf8'))
clean_index = False
else:
form = PublicationSearchForm(extra_author=1, extra_editor=1)
if query_string:
# Given a query_string such as: author:"Oscar Pena" my "title word"; split into ['author:"oscar pena"', 'my', '"title word"']
initial_tokens = query_string.lower().split()
tokens = []
quotes_open = False
current_token = ""
for token in initial_tokens:
if token.count('"') % 2 != 0:
if quotes_open:
# Close quotes
current_token += " " + token
tokens.append(current_token)
current_token = ""
quotes_open = False
else:
current_token += token
quotes_open = True
else:
if quotes_open:
current_token += " " + token
else:
tokens.append(token)
if current_token:
tokens.append(current_token)
# Create filters that reduce the query size
NUMERIC_FILTERS = {
'year:': []
}
FILTERS = {
'author:': [],
'tag:': [],
'title:': [],
}
special_tokens = []
new_tokens = [] # E.g. 'author:"aitor almeida"' is converted to 'aitor almeida'
for token in tokens:
validated = False
for word in FILTERS:
if _validate_term(token, word):
special_tokens.append(token)
new_token = token[len(word):]
if new_token.startswith('"') and new_token.endswith('"'):
new_token = new_token[1:-1]
FILTERS[word].append(new_token)
new_tokens.append(new_token)
validated = True
break
if not validated:
for word in NUMERIC_FILTERS:
if _validate_term(token, word):
new_token = token[len(word):]
if new_token.startswith('"') and new_token.endswith('"'):
new_token = new_token[1:-1]
new_tokens.append(new_token)
NUMERIC_FILTERS[word].append(new_token)
special_tokens.append(token)
break
search_terms = [token for token in tokens if token not in special_tokens] + new_tokens
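# Illustrative pass over tokens = ['author:"oscar pena"', 'year:2014', 'deusto']:
#   FILTERS['author:'] -> ['oscar pena'], NUMERIC_FILTERS['year:'] -> ['2014'],
#   search_terms -> ['deusto', 'oscar pena', '2014']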
# Query by author and title. Fix by Unai Z & Ruben S
author_ids = PublicationAuthor.objects.filter(author__full_name__icontains=query_string).values_list('author__id', flat=True)
sql_query = Publication.objects.filter(Q(authors__in=author_ids) | Q(title__icontains=query_string)).exclude(authors=None).all().order_by('-year')
paginator = Paginator(sql_query, 10)
if page == 'filtered':
page = 1
clean_index = False
print('FILTRO')
else:
if page is None:
page = 1
if not 'filtered' in request.session.keys():
sql_query = Publication.objects.exclude(authors=None).all().prefetch_related('authors').order_by('-year')
paginator = Paginator(sql_query, 10)
else:
print("LOCOOOO")
paginator = Paginator(publications, 10)
# Retrieves all the publication types.
publications_ids = PublicationAuthor.objects.values_list('publication',
flat=True)
publication_types_info = Publication.objects.filter(
id__in=publications_ids).order_by().values('child_type').distinct()
# Retrieves all the tag names.
tags_id_info = Publication.objects.all().values_list('tags', flat=True)
tags_info = Tag.objects.filter(id__in=tags_id_info).order_by().values_list('name', flat=True)
# Retrieves all the full names of authors.
author_info = PublicationAuthor.objects.all() \
.distinct('author__full_name').order_by() \
.values_list('author__full_name', flat=True)
# Retrieves all the full names of editors.
editor_info = PublicationEditor.objects \
.distinct('editor__full_name').order_by() \
.values_list('editor__full_name', flat=True)
publication_model_list = [
'Book',
'BookSection',
'ConferencePaper',
'Journal',
'JournalArticle',
'Magazine',
'MagazineArticle',
'Proceedings',
'Publication',
'Thesis',
]
last_entry = get_last_model_update_log_entry('publications', publication_model_list)
try:
theses = Thesis.objects.all()
except:
theses = None
# dictionary to be returned in render(request, )
return_dict = {
'clean_index': clean_index,
'form': form,
'last_entry': last_entry,
'author_info': author_info,
'editor_info': editor_info,
'publication_type': publication_type,
'publication_types_info' : publication_types_info,
'publications': paginator.page(page),
'publications_length': paginator.count,
'query_string': query_string,
'tag': tag,
'publication_tags_info' : tags_info,
'theses': theses,
'form_from_year' : form_from_year,
'form_from_range' : form_from_range,
'form_to_year' : form_to_year,
'form_to_range' : form_to_range,
'form_publication_types' : form_publication_types,
'form_tags' : form_tags,
'form_authors_name' : form_authors_name,
'form_editors_name' : form_editors_name,
'web_title': u'Publications',
}
return render(request, 'publications/index.html', return_dict)
### publication_info
####################################################################################################
def publication_info(request, publication_slug):
publication = get_object_or_404(Publication, slug=publication_slug)
return_dict = __build_publication_return_dict(publication)
return_dict['web_title'] = publication.title
print(request)
return render(request, 'publications/info.html', return_dict)
def publication_ext_info(request, publication_slug):
publication = get_object_or_404(Publication, slug=publication_slug)
return_dict = __build_publication_return_dict(publication)
return_dict['web_title'] = publication.title
return render(request, 'publications/extended_info.html', return_dict)
def publication_related_projects(request, publication_slug):
publication = get_object_or_404(Publication, slug=publication_slug)
return_dict = __build_publication_return_dict(publication)
return_dict['web_title'] = u'%s - Related projects' % publication.title
return render(request, 'publications/related_projects.html', return_dict)
def publication_related_publications(request, publication_slug):
publication = get_object_or_404(Publication, slug=publication_slug)
return_dict = __build_publication_return_dict(publication)
return_dict['web_title'] = u'%s - Related publications' % publication.title
return render(request, 'publications/related_publications.html', return_dict)
def __build_publication_return_dict(publication):
author_ids = PublicationAuthor.objects.filter(publication=publication.id).values('author_id').order_by('position')
authors = []
for _id in author_ids:
author = Person.objects.get(id=_id['author_id'])
authors.append(author)
related_projects_ids = RelatedPublication.objects.filter(publication=publication.id).values('project_id')
related_projects = Project.objects.filter(id__in=related_projects_ids)
related_publications_ids = RelatedPublication.objects.filter(project_id__in=related_projects_ids).values('publication_id')
related_publications = Publication.objects.filter(id__in=related_publications_ids).exclude(id=publication.id)
tag_ids = PublicationTag.objects.filter(publication=publication.id).values('tag_id')
tag_list = Tag.objects.filter(id__in=tag_ids).order_by('name')
try:
pdf = publication.pdf
except:
pdf = None
parent_publication = None
try:
if publication.child_type == 'ConferencePaper':
publication = ConferencePaper.objects.get(slug=publication.slug)
parent_publication = Proceedings.objects.get(id=publication.parent_proceedings.id)
if publication.child_type == 'JournalArticle':
publication = JournalArticle.objects.get(slug=publication.slug)
parent_publication = Journal.objects.get(id=publication.parent_journal.id)
if publication.child_type == 'MagazineArticle':
publication = MagazineArticle.objects.get(slug=publication.slug)
parent_publication = Magazine.objects.get(id=publication.parent_magazine.id)
if publication.child_type == 'BookSection':
publication = BookSection.objects.get(slug=publication.slug)
parent_publication = Book.objects.get(id=publication.parent_book.id)
if publication.child_type == 'Book':
publication = Book.objects.get(slug=publication.slug)
parent_publication = None
except:
pass
if publication.bibtex:
bibtex = publication.bibtex.replace(",", ",\n")
else:
bibtex = None
rankings = set()
try:
for publication_rank in PublicationRank.objects.filter(publication=publication):
rankings.add(publication_rank.ranking)
except:
pass
if parent_publication:
try:
for publication_rank in PublicationRank.objects.filter(publication=parent_publication):
rankings.add(publication_rank.ranking)
except:
pass
# dictionary to be returned in render(request, )
return {
'authors': authors,
'bibtex': bibtex,
'parent_publication': parent_publication,
'pdf': pdf,
'publication': publication,
'rankings': list(rankings),
'related_projects': related_projects,
'related_publications': related_publications,
'tag_list': tag_list,
}
####################################################################################################
# Feed: publications feeds
####################################################################################################
class LatestPublicationsFeed(Feed):
def __init__(self, *args, **kwargs):
super(LatestPublicationsFeed, self).__init__(*args, **kwargs)
self.__request = threading.local()
try:
_settings = LabmanDeployGeneralSettings.objects.get()
research_group_short_name = _settings.research_group_short_name
except:
research_group_short_name = u'Our'
title = u'%s publications' % research_group_short_name
description = u'%s publications' % research_group_short_name
def get_object(self, request):
self.__request.request = weakref.proxy(request)
return super(LatestPublicationsFeed, self).get_object(request)
def link(self, obj):
url = reverse('publication_indexs')
return self.__request.request.build_absolute_uri(url)
def items(self):
return Publication.objects.order_by('-id')[:30]
def item_title(self, item):
return item.title
def item_description(self, item):
return item.abstract
def item_link(self, item):
url = reverse('publication_info', args=[item.slug or 'no-slug-found'])
return self.__request.request.build_absolute_uri(url)
### phd_dissertations_index
####################################################################################################
def phd_dissertations_index(request):
phd_dissertations = []
theses = Thesis.objects.order_by('-viva_date', 'author__full_name')
for thesis in theses:
phd_dissertation = {}
phd_dissertation['thesis'] = thesis
co_advisors = CoAdvisor.objects.filter(thesis=thesis)
if co_advisors:
phd_dissertation['co_advisors'] = []
for co_advisor in co_advisors:
phd_dissertation['co_advisors'].append(co_advisor.co_advisor.full_name)
phd_dissertations.append(phd_dissertation)
# dictionary to be returned in render(request, )
return_dict = {
'web_title': u'PhD dissertations',
'phd_dissertations': phd_dissertations,
}
return render(request, 'publications/phd_dissertations_index.html', return_dict)
def pretty_request(request):
headers = ''
for header, value in request.META.items():
if not header.startswith('HTTP'):
continue
header = '-'.join([h.capitalize() for h in header[5:].lower().split('_')])
headers += '{}: {}\n'.format(header, value)
return (
'{method} HTTP/1.1\n'
'Content-Length: {content_length}\n'
'Content-Type: {content_type}\n'
'{headers}\n\n'
'{body}'
).format(
method=request.method,
content_length=request.META['CONTENT_LENGTH'],
content_type=request.META['CONTENT_TYPE'],
headers=headers,
body=request.body,
)
|
gpl-3.0
|
b-dollery/testing
|
lib/ansible/runner/action_plugins/group_by.py
|
134
|
4249
|
# Copyright 2012, Jeroen Hoekx <jeroen@hoekx.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import ansible
from ansible.callbacks import vv
from ansible.errors import AnsibleError as ae
from ansible.runner.return_data import ReturnData
from ansible.utils import parse_kv, check_conditional
import ansible.utils.template as template
class ActionModule(object):
''' Create inventory groups based on variables '''
### We need to be able to modify the inventory
BYPASS_HOST_LOOP = True
TRANSFERS_FILES = False
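# Typical playbook usage (an illustrative sketch, not taken from this repo):
#
#   - name: group hosts by distribution
#     group_by: key=os_{{ ansible_distribution }}
#
# Later plays can then target the dynamically created groups, e.g. 'os_Ubuntu'.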
def __init__(self, runner):
self.runner = runner
def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
# the group_by module does not need to pay attention to check mode.
# it always runs.
# module_args and complex_args have already been templated for the first host.
# Use them here only to check that a key argument is provided.
args = {}
if complex_args:
args.update(complex_args)
args.update(parse_kv(module_args))
if 'key' not in args:
raise ae("'key' is a required argument.")
vv("created 'group_by' ActionModule: key=%s"%(args['key']))
inventory = self.runner.inventory
result = {'changed': False}
### find all groups
groups = {}
for host in self.runner.host_set:
data = {}
data.update(inject)
data.update(inject['hostvars'][host])
conds = self.runner.conditional
if type(conds) != list:
conds = [ conds ]
next_host = False
for cond in conds:
if not check_conditional(cond, self.runner.basedir, data, fail_on_undefined=self.runner.error_on_undefined_vars):
next_host = True
break
if next_host:
continue
# Template original module_args and complex_args from runner for each host.
host_module_args = template.template(self.runner.basedir, self.runner.module_args, data)
host_complex_args = template.template(self.runner.basedir, self.runner.complex_args, data)
host_args = {}
if host_complex_args:
host_args.update(host_complex_args)
host_args.update(parse_kv(host_module_args))
group_name = host_args['key']
group_name = group_name.replace(' ','-')
if group_name not in groups:
groups[group_name] = []
groups[group_name].append(host)
result['groups'] = groups
### add to inventory
for group, hosts in groups.items():
inv_group = inventory.get_group(group)
if not inv_group:
inv_group = ansible.inventory.Group(name=group)
inventory.add_group(inv_group)
inventory.get_group('all').add_child_group(inv_group)
inv_group.vars = inventory.get_group_variables(group, update_cached=False, vault_password=inventory._vault_password)
for host in hosts:
if host in self.runner.inventory._vars_per_host:
del self.runner.inventory._vars_per_host[host]
inv_host = inventory.get_host(host)
if not inv_host:
inv_host = ansible.inventory.Host(name=host)
if inv_group not in inv_host.get_groups():
result['changed'] = True
inv_group.add_host(inv_host)
return ReturnData(conn=conn, comm_ok=True, result=result)
|
gpl-3.0
|
oknuutti/visnav-py
|
visnav/render/stars.py
|
1
|
34131
|
from datetime import datetime
from functools import lru_cache
import cv2
import math
import os
import sqlite3
import re
import time
import numpy as np
import quaternion
from visnav.algo import tools
from visnav.algo.image import ImageProc
from visnav.algo.model import SystemModel
from visnav.missions.didymos import DidymosSystemModel
from visnav.missions.rosetta import RosettaSystemModel
from visnav.settings import *
# https://pysynphot.readthedocs.io/en/latest/index.html#pysynphot-installation-setup
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import importlib
mod = importlib.util.find_spec('pysynphot')
if mod is not None:
root = mod.submodule_search_locations[0]
os.environ['PYSYN_CDBS'] = os.path.join(root, 'data', 'cdbs') # http://ssb.stsci.edu/cdbs/tarfiles/synphot1.tar.gz
import pysynphot as S # http://ssb.stsci.edu/cdbs/tarfiles/synphot2.tar.gz
# http://ssb.stsci.edu/cdbs/tarfiles/synphot3.tar.gz
else:
print('warning: module pysynphot not found')
class Stars:
# from VizieR catalogs:
SOURCE_HIPPARCHOS = 'H' # I/239/hip_main
SOURCE_PASTEL = 'P' # B/pastel/pastel
SOURCE_WU = 'W' # J/A+A/525/A71/table2
SOURCE_GAIA1 = 'G' # J/MNRAS/471/770/table2
STARDB_TYC = os.path.join(DATA_DIR, 'deep_space_objects_tyc.sqlite')
STARDB_HIP = os.path.join(DATA_DIR, 'deep_space_objects_hip.sqlite')
STARDB = STARDB_HIP
MAG_CUTOFF = 10
MAG_V_LAM0 = 545e-9
SUN_MAG_V = -26.74
SUN_MAG_B = 0.6222 + SUN_MAG_V
# from sc cam frame (axis: +x, up: +z) to equatorial frame (axis: +y, up: +z)
sc2ec_q = np.quaternion(1, 0, 0, 1).normalized().conj()
@staticmethod
def black_body_radiation(Teff, lam):
return Stars.black_body_radiation_fn(Teff)(lam)
@staticmethod
def black_body_radiation_fn(Teff):
def phi(lam):
# planck's law of black body radiation [W/m3/sr]
h = 6.626e-34 # planck constant (m2kg/s)
c = 3e8 # speed of light
k = 1.380649e-23 # Boltzmann constant
r = 2*h*c**2/lam**5/(np.exp(h*c/lam/k/Teff) - 1)
return r
return phi
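# Rough hand-computed sanity check: for Teff = 5778 K (solar) at
# lam = 545e-9 m this evaluates to ~2.6e13 W/m3/sr, a plausible spectral
# radiance for the solar photosphere near its visible-band peak.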
@staticmethod
def synthetic_radiation(Teff, fe_h, log_g, lam, mag_v=None):
return Stars.synthetic_radiation_fn(Teff, fe_h, log_g, mag_v=mag_v)(lam)
@staticmethod
@lru_cache(maxsize=1000)
def synthetic_radiation_fn(Teff, fe_h, log_g, mag_v=None, model='k93models',
lam_min=0, lam_max=np.inf, return_sp=False):
return Stars.uncached_synthetic_radiation_fn(Teff, fe_h, log_g, mag_v, model, lam_min, lam_max, return_sp)
@staticmethod
def uncached_synthetic_radiation_fn(Teff, fe_h, log_g, mag_v=None, model='k93models',
lam_min=0, lam_max=np.inf, return_sp=False):
sp = None
orig_log_g = log_g
if isinstance(model, tuple):
# give in meters, W/m3
sp = S.spectrum.ArraySourceSpectrum(np.array(model[0]) * 1e10,
np.array(model[1]) * 1e-4 * 1e-10 / 1e-7, 'angstrom', 'flam')
else:
first_try = True
if Teff < 3500:
print('could not init spectral model with given t_eff=%s, using t_eff=3500K instead' % Teff)
Teff = 3500
for i in range(15):
try:
sp = S.Icat(model, Teff, fe_h, log_g) # 'ck04models' or 'k93models'
break
except:
first_try = False
log_g = log_g + (0.2 if Teff > 6000 else -0.2)
assert sp is not None, 'could not init spectral model with given params: t_eff=%s, log_g=%s, fe_h=%s' % (Teff, orig_log_g, fe_h)
if not first_try:
print('could not init spectral model with given params (t_eff=%s, log_g=%s, fe_h=%s), changed log_g to %s' %
(Teff, orig_log_g, fe_h, log_g))
if mag_v is not None:
sp = sp.renorm(mag_v, 'vegamag', S.ObsBandpass('johnson,v'))
if return_sp:
return sp
# for performance reasons (caching)
from scipy.interpolate import interp1d
I = np.logical_and(sp.wave >= lam_min*1e10, sp.wave <= lam_max*1e10)
sample_fn = interp1d(sp.wave[I], sp.flux[I], kind='linear', assume_sorted=True)
def phi(lam):
r = sample_fn(lam*1e10) # wavelength in Å, result in "flam" (erg/s/cm2/Å)
return r * 1e-7 / 1e-4 / 1e-10 # result in W/m3
return phi
@staticmethod
def magnitude_to_spectral_flux_density(mag):
# spectral flux density for standard magnitude for V-band (at 545nm)
# from "Model atmospheres broad-band colors, bolometric corrections and temperature calibrations for O - M stars"
# Bessel M.S. et al, Astronomy and Astrophysics, 1998, table A2
# Also at http://web.ipac.caltech.edu/staff/fmasci/home/astro_refs/magsystems.pdf
# 363.1e-11 erg/cm2/s/Å (erg=1e-7J, 1cm2=1e-4m2, Å=1e-10m)
phi0 = 363.1e-11 * 1e-7 / 1e-4 / 1e-10 # W/m3
return np.power(10., -0.4 * mag) * phi0
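# Illustrative values: mag 0 maps to phi0, about 3.631e-2 W/m3, and every
# +5 magnitudes divides the result by 100 (so mag 10 -> ~3.631e-6 W/m3).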
@staticmethod
def tycho_to_johnson(mag_bt, mag_vt):
v = mag_vt - 0.09 * (mag_bt - mag_vt)
b = 0.85 * (mag_bt - mag_vt) + v
return b, v
@staticmethod
def effective_temp(b_v, metal=0, log_g=0):
""" magnitudes in johnson system """
# calculate star effective temperatures, from:
# - http://help.agi.com/stk/index.htm#stk/starConstruction.htm
# - Sekiguchi, M. and Fukugita, M., 2000. A Study of the B−V Color-Temperature Relation. The Astronomical Journal, 120(2), p.1072.
# - metallicity (Fe/H) and log surface gravity can be set to zero without big impact
c0 = 3.939654
c1 = -0.395361
c2 = 0.2082113
c3 = -0.0604097
f1 = 0.027153
f2 = 0.005036
g1 = 0.007367
h1 = -0.01069
return 10**(c0+c1*(b_v)+c2*(b_v)**2+c3*(b_v)**3 + f1*metal + f2*metal**2 + g1*log_g + h1*(b_v)*log_g)
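# Illustrative check: b_v = 0.65 with metal = log_g = 0 gives
# 10**3.754, roughly 5700 K, close to the Sun's effective temperature.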
@staticmethod
def flux_density(cam_q, cam, mask=None, mag_cutoff=MAG_CUTOFF, array=False, undistorted=False, order_by=None):
"""
returns star-induced flux density per pixel [W/m2] based on the star database;
cam_q is a quaternion in the ICRS coord frame, x_fov and y_fov are in degrees
"""
# calculate query conditions for star ra and dec
cam_dec, cam_ra, _ = tools.q_to_ypr(cam_q) # camera boresight in ICRS coords
d = np.linalg.norm((cam.x_fov, cam.y_fov))/2
min_dec, max_dec = math.degrees(cam_dec) - d, math.degrees(cam_dec) + d
dec_cond = '(dec BETWEEN %s AND %s)' % (min_dec, max_dec)
# goes over the pole to the other side of the sphere, easy solution => ignore limit on ra
skip_ra_cond = min_dec < -90 or max_dec > 90
if skip_ra_cond:
ra_cond = '1'
else:
min_ra, max_ra = math.degrees(cam_ra) - d, math.degrees(cam_ra) + d
if min_ra < 0:
ra_cond = '(ra < %s OR ra > %s)' % (max_ra, (min_ra + 360) % 360)
elif max_ra > 360:
ra_cond = '(ra > %s OR ra < %s)' % (min_ra, max_ra % 360)
else:
ra_cond = '(ra BETWEEN %s AND %s)' % (min_ra, max_ra)
conn = sqlite3.connect(Stars.STARDB)
cursor = conn.cursor()
# the magnitudes for tycho id xxxx-xxxxx-2 entries are bad as they are most likely taken from hip catalog that bundles all .*-(\d)
results = cursor.execute("""
SELECT x, y, z, mag_v""" + (", mag_b, t_eff, fe_h, log_g, dec, ra, id" if array else "") + """
FROM deep_sky_objects
WHERE """ + ("tycho like '%-1' AND " if Stars.STARDB == Stars.STARDB_TYC else "") +
"mag_v < " + str(mag_cutoff) + " AND " + dec_cond + " AND " + ra_cond +
((" ORDER BY %s ASC" % order_by) if order_by is not None else ''))
stars = np.array(results.fetchall())
conn.close()
flux_density = ([], None) if array else np.zeros((cam.height, cam.width), dtype=np.float32)
if len(stars) == 0:
return flux_density
stars[:, 0:3] = tools.q_times_mx(SystemModel.sc2gl_q.conj() * cam_q.conj(), stars[:, 0:3])
stars_ixy_ = cam.calc_img_R(stars[:, 0:3], undistorted=undistorted)
stars_ixy = np.round(stars_ixy_.astype(np.float)).astype(np.int)
I = np.logical_and.reduce((np.all(stars_ixy >= 0, axis=1),
stars_ixy[:, 0] <= cam.width-1,
stars_ixy[:, 1] <= cam.height-1))
if array:
cols = ('ix', 'iy', 'x', 'y', 'z', 'mag_v', 'mag_b', 't_eff', 'fe_h', 'log_g', 'dec', 'ra', 'id')
return (
np.hstack((stars_ixy_[I, :], stars[I, :])),
dict(zip(cols, range(len(cols))))
)
stars_ixy = stars_ixy[I, :]
flux_density_per_star = Stars.magnitude_to_spectral_flux_density(stars[I, 3])
for i, f in enumerate(flux_density_per_star):
flux_density[stars_ixy[i, 1], stars_ixy[i, 0]] += f
if mask is not None:
flux_density[np.logical_not(mask)] = 0
if True:
# assume every star is like our sun, convert to total flux density [W/m2]
solar_constant = 1360.8
# sun magnitude from http://mips.as.arizona.edu/~cnaw/sun.html
sun_flux_density = Stars.magnitude_to_spectral_flux_density(Stars.SUN_MAG_V)
flux_density = flux_density * (solar_constant / sun_flux_density)
return flux_density
@staticmethod
def get_property_by_id(id, field=None):
res = Stars._query_cursor.execute(f"select {field} from deep_sky_objects where id = {int(id)}").fetchone()[0]
return res
@staticmethod
def get_catalog_id(id, field=None):
try:
is_arr = False
id = int(id)
except:
is_arr = True
if Stars._query_conn is None:
Stars._query_conn = sqlite3.connect(Stars.STARDB)
Stars._query_cursor = Stars._query_conn.cursor()
field = field or ("tycho" if Stars.STARDB == Stars.STARDB_TYC else "hip")
if is_arr:
res = Stars._query_cursor.execute(
"select id, %s from deep_sky_objects where id IN (%s)" % (
field, ','.join(str(i) for i in id))).fetchall()
return {r[0]: str(r[1]) for r in res}
else:
res = Stars._query_cursor.execute(
"select %s from deep_sky_objects where id = %s" % (
field, id)).fetchone()[0]
return str(res)
_query_conn, _query_cursor = None, None
@staticmethod
def _create_stardb(fname):
conn = sqlite3.connect(fname)
cursor = conn.cursor()
cursor.execute("DROP TABLE IF EXISTS deep_sky_objects")
cursor.execute("""
CREATE TABLE deep_sky_objects (
id INTEGER PRIMARY KEY ASC NOT NULL,
hip INT,
hd INT DEFAULT NULL,
simbad CHAR(20) DEFAULT NULL,
ra REAL NOT NULL, /* src[0] */
dec REAL NOT NULL, /* src[0] */
x REAL NOT NULL,
y REAL NOT NULL,
z REAL NOT NULL,
mag_v REAL NOT NULL, /* src[1] */
mag_b REAL DEFAULT NULL, /* src[2] */
t_eff REAL DEFAULT NULL, /* src[3] */
log_g REAL DEFAULT NULL, /* src[4] */
fe_h REAL DEFAULT NULL, /* src[5] */
src CHAR(6) DEFAULT 'HHHPPP'
)""")
cursor.execute("DROP INDEX IF EXISTS ra_idx")
cursor.execute("CREATE INDEX ra_idx ON deep_sky_objects (ra)")
cursor.execute("DROP INDEX IF EXISTS dec_idx")
cursor.execute("CREATE INDEX dec_idx ON deep_sky_objects (dec)")
cursor.execute("DROP INDEX IF EXISTS mag_idx")
cursor.execute("CREATE INDEX mag_idx ON deep_sky_objects (mag_v)")
cursor.execute("DROP INDEX IF EXISTS hd")
cursor.execute("CREATE INDEX hd ON deep_sky_objects (hd)")
cursor.execute("DROP INDEX IF EXISTS simbad")
cursor.execute("CREATE INDEX simbad ON deep_sky_objects (simbad)")
cursor.execute("DROP INDEX IF EXISTS hip")
cursor.execute("CREATE UNIQUE INDEX hip ON deep_sky_objects (hip)")
conn.commit()
@staticmethod
def import_stars_hip():
# I/239/hip_main
Stars._create_stardb(Stars.STARDB_HIP)
conn = sqlite3.connect(Stars.STARDB_HIP)
cursor = conn.cursor()
from astroquery.vizier import Vizier
Vizier.ROW_LIMIT = -1
cols = ["HIP", "HD", "_RA.icrs", "_DE.icrs", "Vmag", "B-V"]
r = Vizier(catalog="I/239/hip_main", columns=cols, row_limit=-1).query_constraints()[0]
for i, row in enumerate(r):
hip, hd, ra, dec, mag_v, b_v = [row[f] for f in cols]
if np.any(list(map(np.ma.is_masked, (ra, dec, mag_v)))):
continue
hd = 'null' if np.ma.is_masked(hd) else hd
mag_b = 'null' if np.ma.is_masked(b_v) or np.isnan(b_v) else b_v + mag_v
x, y, z = tools.spherical2cartesian(math.radians(dec), math.radians(ra), 1)
cursor.execute("""
INSERT INTO deep_sky_objects (hip, hd, ra, dec, x, y, z, mag_v, mag_b)
VALUES (%s, %s, %f, %f, %f, %f, %f, %f, %s)"""
% (hip, hd, ra, dec, x, y, z, mag_v, mag_b))
if i % 100 == 0:
conn.commit()
tools.show_progress(len(r), i)
conn.commit()
conn.close()
@staticmethod
def import_stars_tyc():
assert False, 'not supported anymore'
Stars._create_stardb(Stars.STARDB_TYC, 12)
conn = sqlite3.connect(Stars.STARDB_TYC)
cursor = conn.cursor()
# Tycho-2 catalogue, from http://archive.eso.org/ASTROM/TYC-2/data/
for file in ('catalog.dat', 'suppl_1.dat'):
with open(os.path.join(DATA_DIR, file), 'r') as fh:
line = fh.readline()
while line:
c = line
line = fh.readline()
# mean position, ICRS, at epoch 2000.0
# proper motion milliarcsecond/year
# apparent magnitude
if file == 'catalog.dat':
# main catalog
epoch = 2000.0
tycho, ra, dec, pmra, pmdec, mag_bt, mag_vt = c[0:12], c[15:27], c[28:40], c[41:48], c[49:56], c[110:116], c[123:129]
mag_b, mag_v = Stars.tycho_to_johnson(float(mag_bt), float(mag_vt))
else:
# supplement-1 has the brightest stars, from hipparcos and tycho-1
epoch = 1991.25
tycho, ra, dec, pmra, pmdec, mag_bt, mag_vt, flag, hip = \
c[0:12], c[15:27], c[28:40], c[41:48], c[49:56], c[83:89], c[96:102], c[81:82], c[115:120]
if flag in ('H', 'V', 'B'):
if len(hip.strip()) > 0:
mag_b, mag_v = Stars.get_hip_mag_bv(hip)
else:
continue
else:
mag_b, mag_v = Stars.tycho_to_johnson(float(mag_bt), float(mag_vt))
tycho = tycho.replace(' ', '-')
if np.all(list(map(tools.numeric, (ra, dec)))):
ra, dec = list(map(float, (ra, dec)))
if -10 < mag_v < Stars.MAG_CUTOFF:
curr_epoch = datetime.now().year + \
(datetime.now().timestamp()
- datetime.strptime(str(datetime.now().year),'%Y').timestamp()
)/365.25/24/3600
years = curr_epoch - epoch
# TODO: (1) adjust to current epoch using proper motion and years since epoch
x, y, z = tools.spherical2cartesian(math.radians(dec), math.radians(ra), 1)
cursor.execute("INSERT INTO deep_sky_objects (tycho,ra,dec,x,y,z,mag_b,mag_v) VALUES (?,?,?,?,?,?,?,?)", (
tycho, (ra+360)%360, dec, x, y, z, mag_b, mag_v
))
conn.commit()
conn.close()
@staticmethod
def add_simbad_col():
conn = sqlite3.connect(Stars.STARDB)
cursor_r = conn.cursor()
cursor_w = conn.cursor()
# cursor_w.execute("alter table deep_sky_objects add column simbad char(20) default null")
# conn.commit()
N_tot = cursor_r.execute("SELECT max(id) FROM deep_sky_objects WHERE 1").fetchone()[0]
skip = 0
result = cursor_r.execute("select id, hip from deep_sky_objects where id >= %d" % skip)
import time
from astroquery.simbad import Simbad
Simbad.add_votable_fields('typed_id')
while 1:
rows = result.fetchmany(1000)
if rows is None or len(rows) == 0:
break
tools.show_progress(N_tot, rows[0][0]-1)
s = Simbad.query_objects(['HIP %d' % int(row[1]) for row in rows])
time.sleep(2)
values = []
if s is not None:
s.add_index('TYPED_ID')
for row in rows:
sr = get(s, ('HIP %d' % int(row[1])).encode('utf-8'))
if sr is not None:
k = sr['MAIN_ID'].decode('utf-8')
values.append("(%d, '%s', 0,0,0,0,0,0)" % (row[0], k.replace("'", "''")))
if len(values) > 0:
cursor_w.execute("""
INSERT INTO deep_sky_objects (id, simbad, ra, dec, x, y, z, mag_v) VALUES """ + ','.join(values) + """
ON CONFLICT(id) DO UPDATE SET simbad = excluded.simbad""")
conn.commit()
conn.close()
@staticmethod
def query_t_eff():
from astroquery.vizier import Vizier
v = Vizier(catalog="B/pastel/pastel", columns=["ID", "Teff", "logg", "[Fe/H]"], row_limit=-1)
v2 = Vizier(catalog="J/A+A/525/A71/table2", columns=["Name", "Teff", "log(g)", "[Fe/H]"], row_limit=-1)
v3 = Vizier(catalog="J/MNRAS/471/770/table2", columns=["HIP", "Teff", "log(g)"], row_limit=-1)
conn = sqlite3.connect(Stars.STARDB)
cursor_r = conn.cursor()
cursor_w = conn.cursor()
cond = "(t_eff is null OR log_g is null OR 1)"
N_tot = cursor_r.execute("""
SELECT max(id) FROM deep_sky_objects
WHERE %s
""" % cond).fetchone()[0]
skip = 37601
f_id, f_hip, f_hd, f_sim, f_ra, f_dec, f_t, f_g, f_m, f_src = range(10)
results = cursor_r.execute("""
SELECT id, hip, hd, simbad, ra, dec, t_eff, log_g, fe_h, src
FROM deep_sky_objects
WHERE %s AND id >= ?
ORDER BY id ASC
""" % cond, (skip,))
r = v.query_constraints()[0]
r.add_index('ID')
N = 40
while True:
rows = results.fetchmany(N)
if rows is None or len(rows) == 0:
break
tools.show_progress(N_tot, rows[0][f_id]-1)
ids = {row[f_id]: [i, row[f_src][:3] + '___'] for i, row in enumerate(rows)}
insert = {}
for i, row in enumerate(rows):
k = 'HIP %6d' % int(row[f_hip])
if get(r, k) is None and row[f_hd]:
k = 'HD %6d' % int(row[f_hd])
if get(r, k) is None and row[f_sim]:
k = row[f_sim]
if get(r, k) is None and row[f_sim]:
k = row[f_sim] + ' A'
dr = get(r, k)
if dr is not None:
t_eff, log_g, fe_h = median(dr, ('Teff', 'logg', '__Fe_H_'), null='null')
src = row[f_src][0:3] + ''.join([('_' if v == 'null' else Stars.SOURCE_PASTEL) for v in (t_eff, log_g, fe_h)])
insert[row[f_id]] = [t_eff, log_g, fe_h, src]
if '_' not in src[3:5]:
ids.pop(row[f_id])
else:
ids[row[f_id]][1] = src
if len(ids) > 0:
# try using other catalog
r = v2.query_constraints(Name='=,' + ','.join([
('HD%06d' % int(rows[i][f_hd])) for i, s in ids.values() if rows[i][f_hd] is not None
]))
time.sleep(2)
if len(r) > 0:
r = r[0]
r.add_index('Name')
for id, (i, src) in ids.copy().items():
dr = get(r, 'HD%06d' % int(rows[i][f_hd])) if rows[i][f_hd] else None
if dr is not None:
t_eff, log_g, fe_h = median(dr, ('Teff', 'log_g_', '__Fe_H_'), null='null')
src = src[0:3] + ''.join([('_' if v == 'null' else Stars.SOURCE_WU) for v in (t_eff, log_g, fe_h)])
insert[id] = [t_eff, log_g, fe_h, src]
if '_' not in src[3:5]:
ids.pop(rows[i][f_id])
else:
ids[rows[i][f_id]][1] = src
if len(ids) > 0:
# try using other catalog
r = v3.query_constraints(HIP='=,' + ','.join([str(rows[i][f_hip]) for i, s in ids.values()]))[0]
r.add_index('HIP')
for id, (i, src) in ids.copy().items():
dr = get(r, int(rows[i][f_hip]))
if dr is not None:
t_eff, log_g = median(dr, ('Teff', 'log_g_'), null='null')
src = src[0:3] + ''.join([('_' if v == 'null' else Stars.SOURCE_GAIA1) for v in (t_eff, log_g)]) + src[5]
insert[id] = [t_eff, log_g, insert[id][2] if id in insert else 'null', src]
# if '_' not in src[3:5]:
# ids.pop(rows[i][f_id])
# else:
# ids[rows[i][f_id]][1] = src
if len(insert) > 0:
values = ["(%d, %s, %s, %s, '%s', 0,0,0,0,0,0)" % (
id, t_eff, log_g, fe_h, src)
for id, (t_eff, log_g, fe_h, src) in insert.items()]
cursor_w.execute("""
INSERT INTO deep_sky_objects (id, t_eff, log_g, fe_h, src, ra, dec, x, y, z, mag_v) VALUES """ + ','.join(values) + """
ON CONFLICT(id) DO UPDATE SET
t_eff = excluded.t_eff,
log_g = excluded.log_g,
fe_h = excluded.fe_h,
src = excluded.src
""")
conn.commit()
conn.close()
@staticmethod
def query_v_mag():
from astroquery.vizier import Vizier
from tqdm import tqdm
v = Vizier(catalog="B/pastel/pastel", columns=["ID", "Vmag"], row_limit=-1)
conn = sqlite3.connect(Stars.STARDB)
cursor_r = conn.cursor()
cursor_w = conn.cursor()
cond = f"(substr(src,2,1) = '{Stars.SOURCE_HIPPARCHOS}')"
N_tot = cursor_r.execute(f"SELECT count(*) FROM deep_sky_objects WHERE {cond}").fetchone()[0]
f_id, f_hip, f_hd, f_sim, f_mag_v, f_src = range(6)
results = cursor_r.execute("""
SELECT id, hip, hd, simbad, mag_v, src
FROM deep_sky_objects
WHERE %s
ORDER BY mag_v ASC
""" % cond)
r = v.query_constraints()[0]
r.add_index('ID')
N = 40
pbar = tqdm(total=N_tot)
while True:
rows = results.fetchmany(N)
if rows is None or len(rows) == 0:
break
ids = {row[f_id]: [i, row[f_src]] for i, row in enumerate(rows)}
insert = {}
for i, row in enumerate(rows):
k = 'HIP %6d' % int(row[f_hip])
if get(r, k) is None and row[f_hd]:
k = 'HD %6d' % int(row[f_hd])
if get(r, k) is None and row[f_sim]:
k = row[f_sim]
if get(r, k) is None and row[f_sim]:
k = row[f_sim] + ' A'
dr = get(r, k)
if dr is not None:
v_mag, *_ = median(dr, ('Vmag',), null='null')
if v_mag != 'null':
src = row[f_src]
src = src[:1] + Stars.SOURCE_PASTEL + src[2:]
insert[row[f_id]] = [v_mag, src]
ids.pop(row[f_id])
if len(insert) > 0:
values = [f"({id}, 0, 0, 0, '{src}', 0, 0, 0, 0, 0, {v_mag})" for id, (v_mag, src) in insert.items()]
cursor_w.execute("INSERT INTO deep_sky_objects (id, t_eff, log_g, fe_h, src, ra, dec, x, y, z, mag_v) "
"VALUES " + ','.join(values) + " "
"ON CONFLICT(id) DO UPDATE SET "
" mag_v = excluded.mag_v, "
" src = excluded.src")
conn.commit()
pbar.set_postfix({'v_mag': np.max([float(row[f_mag_v]) for row in rows])})
pbar.update(len(rows))
conn.close()
@staticmethod
def correct_supplement_data():
conn = sqlite3.connect(Stars.STARDB)
cursor = conn.cursor()
def insert_mags(hips):
res = Stars.get_hip_mag_bv([h[0] for h in hips.values()])
insert = ["('%s', %f, %f, %f, %f, %f, %f, %f)" %
(t, h[1], h[2], h[3], h[4], h[5], res[h[0]][0], res[h[0]][1])
for t, h in hips.items() if h[0] in res and -10 < res[h[0]][1] < Stars.MAG_CUTOFF]
if len(insert) > 0:
cursor.execute("""
INSERT INTO deep_sky_objects (tycho, ra, dec, x, y, z, mag_b, mag_v) VALUES
""" + ','.join(insert) + """
ON CONFLICT(tycho) DO UPDATE SET mag_b = excluded.mag_b, mag_v = excluded.mag_v """)
conn.commit()
file = 'suppl_1.dat'
N = 30
rx = re.compile(r'0*(\d+)')
with open(os.path.join(DATA_DIR, file), 'r') as fh:
hips = {}
line = fh.readline()
while line:
c = line
line = fh.readline()
tycho, ra, dec, mag_bt, mag_vt, flag, hip = c[0:12], c[15:27], c[28:40], c[83:89], c[96:102], c[81:82], c[115:123]
tycho = tycho.replace(' ', '-')
hip = rx.findall(hip)[0] if len(hip.strip()) > 0 else False
if flag in ('H', 'V', 'B') and hip:
ra, dec = float(ra), float(dec)
x, y, z = tools.spherical2cartesian(math.radians(dec), math.radians(ra), 1)
hips[tycho] = (hip, ra, dec, x, y, z)
if len(hips) >= N:
insert_mags(hips)
hips.clear()
else:
continue
if len(hips) > 0:
insert_mags(hips)
@staticmethod
def get_hip_mag_bv(hip, v=None):
from astroquery.vizier import Vizier
Vizier.ROW_LIMIT = -1
hips = [hip] if isinstance(hip, str) else hip
v = Vizier(columns=["HIP", "Vmag", "B-V"], catalog="I/239/hip_main", row_limit=-1)
r = v.query_constraints(HIP='=,'+','.join(hips))
results = {}
if len(r):
r = r[0]
r.add_index('HIP')
for h in hips:
try:
if not np.ma.is_masked(r.loc[int(h)]['Vmag']) and not np.ma.is_masked(r.loc[int(h)]['B-V']):
mag_v, b_v = float(r.loc[int(h)]['Vmag']), float(r.loc[int(h)]['B-V'])
results[h] = (mag_v + b_v, mag_v)
except:
continue
return results.get(hip, (None, None)) if isinstance(hip, str) else results
@staticmethod
def override_betelgeuse():
conn = sqlite3.connect(Stars.STARDB)
cursor = conn.cursor()
# from "The Advanced Spectral Library (ASTRAL): Reference Spectra for Evolved M Stars",
# The Astrophysical Journal, 2018, https://iopscience.iop.org/article/10.3847/1538-4357/aaf164/pdf
#t_eff = 3650 # based on query_t_eff was 3562
#mag_v = 0.42 # based on tycho2 suppl2 was 0.58
# from CTOA observations on 2018-12-07 and 18-12-22, accessed through https://www.aavso.org database
mag_v = 0.8680
mag_b = 2.6745 # based on tycho2 suppl2 was 2.3498
t_eff = None # Stars.effective_temp(mag_b - mag_v, metal=0.006, log_g=-0.26) gives 3565K vs 3538K without log_g & metal
cursor.execute("UPDATE deep_sky_objects SET t_eff=?, mag_v=?, mag_b=? where tycho='0129-01873-1'", (t_eff, mag_v, mag_b))
conn.commit()
conn.close()
def get(r, k, d=None):
if k is None or r is None:
return d
try:
return r.loc[k]
except:
return d
def median(dr, fields, null='null'):
try:
values = [np.ma.median(dr[f]) for f in fields]
values = [(null if np.ma.is_masked(v) else v) for v in values]
except:
values = [null if np.ma.is_masked(dr[f]) or np.isnan(dr[f]) else dr[f] for f in fields]
return values
if __name__ == '__main__':
if 0:
Stars.import_stars_hip()
quit()
elif 0:
Stars.add_simbad_col()
#Stars.override_rho_ori_b()
#Stars.override_delta_ori_b()
quit()
elif 0:
Stars.query_t_eff()
quit()
elif 0:
Stars.query_v_mag()
quit()
elif 0:
img = np.zeros((1024, 1024), dtype=np.uint8)
for i in range(1000):
Stars.plot_stars(img, tools.rand_q(math.radians(180)), cam, exposure=5, gain=1)
quit()
elif 1:
conn = sqlite3.connect(Stars.STARDB)
cursor = conn.cursor()
f_id, f_hip, f_sim, f_hd, f_magv, f_magb, f_teff, f_logg, f_feh, f_src = range(10)
r = cursor.execute("""
SELECT id, hip, simbad, hd, mag_v, mag_b, t_eff, log_g, fe_h, src
FROM deep_sky_objects
WHERE hd in (48915,34085,61421,39801,35468,37128,37742,37743,44743,38771,36486,48737,36861,33111,58715)
ORDER BY mag_v
""")
rows = r.fetchall()
stars = {}
print('id\thip\tsim\thd\tmag_v\tmag_b\tt_eff\tlog_g\tfe_h\tsrc')
for row in rows:
stars[row[f_hd]] = row
print('\t'.join([str(c) for c in row]))
conn.close()
quit()
from astropy.io import fits
import matplotlib.pyplot as plt
def testf(fdat, teff, logg, feh):
sp = S.Icat('k93models', float(teff), float(feh), float(logg))\
.renorm(0, 'vegamag', S.ObsBandpass('johnson,v'))
sp_real = S.ArraySpectrum(wave=fdat[0][0], flux=fdat[0][1], fluxunits='flam')\
.renorm(0, 'vegamag', S.ObsBandpass('johnson,v'))
plt.plot(sp_real.wave, sp_real.flux)
plt.plot(sp.wave, sp.flux)
plt.xlim(3000, 10000)
plt.show()
for hd in (48737, 35468, 39801): # Lambda Orionis (HD36861) Teff too high for model (37689K)
fname = r'C:\projects\s100imgs\spectra\%s.fits' % hd
fdat = fits.getdata(fname)
teff, logg, feh = [stars[hd][f] for f in (f_teff, f_logg, f_feh)]
if teff > 30000:
logg = max(logg, 4.0)
testf(fdat, teff, logg, feh or 0)
quit()
# cam = RosettaSystemModel(focused_attenuated=False).cam
cam = DidymosSystemModel(use_narrow_cam=True).cam
# cam_q = tools.rand_q(math.radians(180))
cam_q = quaternion.one
for i in range(100):
cam_q = tools.ypr_to_q(0, np.radians(1), 0) * cam_q
flux_density = Stars.flux_density(cam_q, cam)
img = cam.sense(flux_density, exposure=2, gain=2)
img = np.clip(img*255, 0, 255).astype('uint8')
img = ImageProc.adjust_gamma(img, 1.8)
sc = min(768/cam.width, 768/cam.height)
cv2.imshow('stars', cv2.resize(img, None, fx=sc, fy=sc))
cv2.waitKey()
print('done')
|
mit
|
xkcd1253/Mimi
|
flask/lib/python2.7/site-packages/whoosh/sorting.py
|
6
|
41298
|
# Copyright 2011 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from array import array
from collections import defaultdict
from whoosh.compat import string_type
from whoosh.compat import iteritems, izip, xrange
# Faceting objects
class FacetType(object):
"""Base class for "facets", aspects that can be sorted/faceted.
"""
maptype = None
def categorizer(self, global_searcher):
"""Returns a :class:`Categorizer` corresponding to this facet.
:param global_searcher: A parent searcher. You can use this searcher if
you need global document ID references.
"""
raise NotImplementedError
def map(self, default=None):
t = self.maptype
if t is None:
t = default
if t is None:
return OrderedList()
elif type(t) is type:
return t()
else:
return t
def default_name(self):
return "facet"
class Categorizer(object):
"""Base class for categorizer objects which compute a key value for a
document based on certain criteria, for use in sorting/faceting.
Categorizers are created by FacetType objects through the
:meth:`FacetType.categorizer` method. The
:class:`whoosh.searching.Searcher` object passed to the ``categorizer``
method may be a composite searcher (that is, wrapping a multi-reader), but
categorizers are always run **per-segment**, with segment-relative document
numbers.
The collector will call a categorizer's ``set_searcher`` method as it
searches each segment to let the categorizer set up whatever
segment-specific data it needs.
``Categorizer.allow_overlap`` should be ``True`` if the caller can use the
``keys_for`` method instead of ``key_for`` to group documents into
potentially overlapping groups. The default is ``False``.
If a categorizer subclass can categorize the document using only the
document number, it should set ``Categorizer.needs_current`` to ``False``
(this is the default) and NOT USE the given matcher in the ``key_for`` or
``keys_for`` methods, since in that case ``segment_docnum`` is not
guaranteed to be consistent with the given matcher. If a categorizer
subclass needs to access information on the matcher, it should set
``needs_current`` to ``True``. This will prevent the caller from using
optimizations that might leave the matcher in an inconsistent state.
"""
allow_overlap = False
needs_current = False
def set_searcher(self, segment_searcher, docoffset):
"""Called by the collector when the collector moves to a new segment.
The ``segment_searcher`` will be atomic. The ``docoffset`` is the
offset of the segment's document numbers relative to the entire index.
You can use the offset to get absolute index docnums by adding the
offset to segment-relative docnums.
"""
pass
def key_for(self, matcher, segment_docnum):
"""Returns a key for the current match.
:param matcher: a :class:`whoosh.matching.Matcher` object. If
``self.needs_current`` is ``False``, DO NOT use this object,
since it may be inconsistent. Use the given ``segment_docnum``
instead.
:param segment_docnum: the segment-relative document number of the
current match.
"""
# Backwards compatibility
if hasattr(self, "key_for_id"):
return self.key_for_id(segment_docnum)
elif hasattr(self, "key_for_matcher"):
return self.key_for_matcher(matcher)
raise NotImplementedError(self.__class__)
def keys_for(self, matcher, segment_docnum):
"""Yields a series of keys for the current match.
This method will be called instead of ``key_for`` if
``self.allow_overlap`` is ``True``.
:param matcher: a :class:`whoosh.matching.Matcher` object. If
``self.needs_current`` is ``False``, DO NOT use this object,
since it may be inconsistent. Use the given ``segment_docnum``
instead.
:param segment_docnum: the segment-relative document number of the
current match.
"""
# Backwards compatibility
if hasattr(self, "keys_for_id"):
return self.keys_for_id(segment_docnum)
raise NotImplementedError(self.__class__)
def key_to_name(self, key):
"""Returns a representation of the key to be used as a dictionary key
in faceting. For example, the sorting key for date fields is a large
integer; this method translates it into a ``datetime`` object to make
the groupings clearer.
"""
return key
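# A minimal custom categorizer sketch (illustrative, not part of whoosh):
# bucket documents by the parity of their segment-relative docnum. It needs
# only the document number, so ``needs_current`` stays False:
#
#     class ParityCategorizer(Categorizer):
#         def key_for(self, matcher, segment_docnum):
#             return "even" if segment_docnum % 2 == 0 else "odd"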
# General field facet
class FieldFacet(FacetType):
"""Sorts/facest by the contents of a field.
For example, to sort by the contents of the "path" field in reverse order,
and facet by the contents of the "tag" field::
paths = FieldFacet("path", reverse=True)
tags = FieldFacet("tag")
results = searcher.search(myquery, sortedby=paths, groupedby=tags)
This facet returns different categorizers based on the field type.
"""
def __init__(self, fieldname, reverse=False, allow_overlap=False,
maptype=None):
"""
:param fieldname: the name of the field to sort/facet on.
:param reverse: if True, when sorting, reverse the sort order of this
facet.
:param allow_overlap: if True, when grouping, allow documents to appear
in multiple groups when they have multiple terms in the field.
"""
self.fieldname = fieldname
self.reverse = reverse
self.allow_overlap = allow_overlap
self.maptype = maptype
def default_name(self):
return self.fieldname
def categorizer(self, global_searcher):
# The searcher we're passed here may wrap a multireader, but the
# actual key functions will always be called per-segment following a
# Categorizer.set_searcher method call
fieldname = self.fieldname
fieldobj = global_searcher.schema[fieldname]
# If we're grouping with allow_overlap=True, all we can use is
# OverlappingCategorizer
if self.allow_overlap:
return OverlappingCategorizer(global_searcher, fieldname)
if global_searcher.reader().has_column(fieldname):
coltype = fieldobj.column_type
if coltype.reversible or not self.reverse:
c = ColumnCategorizer(global_searcher, fieldname, self.reverse)
else:
c = ReversedColumnCategorizer(global_searcher, fieldname)
else:
c = PostingCategorizer(global_searcher, fieldname,
self.reverse)
return c
class ColumnCategorizer(Categorizer):
def __init__(self, global_searcher, fieldname, reverse=False):
self._fieldname = fieldname
self._fieldobj = global_searcher.schema[self._fieldname]
self._reverse = reverse
def __repr__(self):
return "%s(%r, %r, reverse=%r)" % (self.__class__.__name__,
self._fieldobj, self._fieldname,
self._reverse)
def set_searcher(self, segment_searcher, docoffset):
r = segment_searcher.reader()
self._creader = r.column_reader(self._fieldname, translate=False)
def key_for(self, matcher, segment_docnum):
return self._creader.sort_key(segment_docnum, self._reverse)
def key_to_name(self, key):
return self._fieldobj.from_column_value(key)
class ReversedColumnCategorizer(ColumnCategorizer):
"""Categorizer that reverses column values for columns that aren't
naturally reversible.
"""
def __init__(self, global_searcher, fieldname):
ColumnCategorizer.__init__(self, global_searcher, fieldname)
reader = global_searcher.reader()
self._doccount = reader.doc_count_all()
global_creader = reader.column_reader(fieldname, translate=False)
self._values = sorted(set(global_creader))
def key_for(self, matcher, segment_docnum):
value = self._creader[segment_docnum]
order = self._values.index(value)
# Subtract from 0 to reverse the order
return 0 - order
def key_to_name(self, key):
# Re-reverse the key to get the index into _values
key = self._values[0 - key]
return ColumnCategorizer.key_to_name(self, key)
class OverlappingCategorizer(Categorizer):
allow_overlap = True
def __init__(self, global_searcher, fieldname):
self._fieldname = fieldname
self._fieldobj = global_searcher.schema[fieldname]
field = global_searcher.schema[fieldname]
reader = global_searcher.reader()
self._use_vectors = bool(field.vector)
self._use_column = (reader.has_column(fieldname)
and field.column_type.stores_lists())
def set_searcher(self, segment_searcher, docoffset):
fieldname = self._fieldname
self._segment_searcher = segment_searcher
reader = segment_searcher.reader()
if self._use_vectors:
pass
elif self._use_column:
self._creader = reader.column_reader(fieldname, translate=False)
else:
# Otherwise, cache the values in each document in a huge list
# of lists
dc = segment_searcher.doc_count_all()
field = segment_searcher.schema[fieldname]
from_bytes = field.from_bytes
self._lists = [[] for _ in xrange(dc)]
for btext in field.sortable_terms(reader, fieldname):
text = from_bytes(btext)
postings = reader.postings(fieldname, btext)
for docid in postings.all_ids():
self._lists[docid].append(text)
def keys_for(self, matcher, docid):
if self._use_vectors:
try:
v = self._segment_searcher.vector(docid, self._fieldname)
return list(v.all_ids())
except KeyError:
return []
elif self._use_column:
return self._creader[docid]
else:
return self._lists[docid] or [None]
def key_for(self, matcher, docid):
if self._use_vectors:
try:
v = self._segment_searcher.vector(docid, self._fieldname)
return v.id()
except KeyError:
return None
elif self._use_column:
return self._creader.sort_key(docid)
else:
ls = self._lists[docid]
if ls:
return ls[0]
else:
return None
class PostingCategorizer(Categorizer):
"""
Categorizer for fields that don't store column values. This is very
inefficient. Instead of relying on this categorizer you should plan for
which fields you'll want to sort on and set ``sortable=True`` in their
field type.
This object builds an array caching the order of all documents according to
the field, then uses the cached order as a numeric key. This is useful when
a field cache is not available, and also for reversed fields (since field
cache keys for non-numeric fields are arbitrary data, it's not possible to
"negate" them to reverse the sort order).
"""
def __init__(self, global_searcher, fieldname, reverse):
self.reverse = reverse
if fieldname in global_searcher._field_caches:
self.values, self.array = global_searcher._field_caches[fieldname]
else:
# Cache the relative positions of all docs with the given field
# across the entire index
reader = global_searcher.reader()
dc = reader.doc_count_all()
self._fieldobj = global_searcher.schema[fieldname]
from_bytes = self._fieldobj.from_bytes
self.values = []
self.array = array("i", [dc + 1] * dc)
btexts = self._fieldobj.sortable_terms(reader, fieldname)
for i, btext in enumerate(btexts):
self.values.append(from_bytes(btext))
# Get global docids from global reader
postings = reader.postings(fieldname, btext)
for docid in postings.all_ids():
self.array[docid] = i
global_searcher._field_caches[fieldname] = (self.values, self.array)
def set_searcher(self, segment_searcher, docoffset):
self._searcher = segment_searcher
self.docoffset = docoffset
def key_for(self, matcher, segment_docnum):
global_docnum = self.docoffset + segment_docnum
i = self.array[global_docnum]
if self.reverse:
i = len(self.values) - i
return i
def key_to_name(self, i):
if i >= len(self.values):
return None
if self.reverse:
i = len(self.values) - i
return self.values[i]
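# To avoid PostingCategorizer's whole-index cache, declare the field sortable
# when building the schema (a sketch using the public whoosh API; the field
# name is illustrative):
#
#     from whoosh import fields
#     schema = fields.Schema(title=fields.TEXT(sortable=True))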
# Special facet types
class QueryFacet(FacetType):
"""Sorts/facets based on the results of a series of queries.
"""
def __init__(self, querydict, other=None, allow_overlap=False,
maptype=None):
"""
:param querydict: a dictionary mapping keys to
:class:`whoosh.query.Query` objects.
:param other: the key to use for documents that don't match any of the
queries.
"""
self.querydict = querydict
self.other = other
self.maptype = maptype
self.allow_overlap = allow_overlap
def categorizer(self, global_searcher):
return self.QueryCategorizer(self.querydict, self.other, self.allow_overlap)
class QueryCategorizer(Categorizer):
def __init__(self, querydict, other, allow_overlap=False):
self.querydict = querydict
self.other = other
self.allow_overlap = allow_overlap
def set_searcher(self, segment_searcher, offset):
self.docsets = {}
for qname, q in self.querydict.items():
docset = set(q.docs(segment_searcher))
if docset:
self.docsets[qname] = docset
self.offset = offset
def key_for(self, matcher, docid):
for qname in self.docsets:
if docid in self.docsets[qname]:
return qname
return self.other
def keys_for(self, matcher, docid):
found = False
for qname in self.docsets:
if docid in self.docsets[qname]:
yield qname
found = True
if not found:
yield None
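# Usage sketch (field and terms illustrative): bucket documents by name
# initial, with an ``other`` key for everything else:
#
#     from whoosh.query import TermRange
#     qf = QueryFacet({"a-m": TermRange("name", "a", "m"),
#                      "n-z": TermRange("name", "n", "z")}, other="misc")
#     results = searcher.search(myquery, groupedby=qf)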
class RangeFacet(QueryFacet):
"""Sorts/facets based on numeric ranges. For textual ranges, use
:class:`QueryFacet`.
For example, to facet the "price" field into $100 buckets, up to $1000::
prices = RangeFacet("price", 0, 1000, 100)
results = searcher.search(myquery, groupedby=prices)
The ranges/buckets are always **inclusive** at the start and **exclusive**
at the end.
"""
def __init__(self, fieldname, start, end, gap, hardend=False,
maptype=None):
"""
:param fieldname: the numeric field to sort/facet on.
:param start: the start of the entire range.
:param end: the end of the entire range.
:param gap: the size of each "bucket" in the range. This can be a
sequence of sizes. For example, ``gap=[1,5,10]`` will use 1 as the
size of the first bucket, 5 as the size of the second bucket, and
10 as the size of all subsequent buckets.
:param hardend: if True, the end of the last bucket is clamped to the
value of ``end``. If False (the default), the last bucket is always
``gap`` sized, even if that means the end of the last bucket is
after ``end``.
"""
self.fieldname = fieldname
self.start = start
self.end = end
self.gap = gap
self.hardend = hardend
self.maptype = maptype
self._queries()
def default_name(self):
return self.fieldname
def _rangetype(self):
from whoosh import query
return query.NumericRange
def _range_name(self, startval, endval):
return (startval, endval)
def _queries(self):
if not self.gap:
raise Exception("No gap secified (%r)" % self.gap)
if isinstance(self.gap, (list, tuple)):
gaps = self.gap
gapindex = 0
else:
gaps = [self.gap]
gapindex = -1
rangetype = self._rangetype()
self.querydict = {}
cstart = self.start
while cstart < self.end:
thisgap = gaps[gapindex]
if gapindex >= 0:
gapindex += 1
if gapindex == len(gaps):
gapindex = -1
cend = cstart + thisgap
if self.hardend:
cend = min(self.end, cend)
rangename = self._range_name(cstart, cend)
q = rangetype(self.fieldname, cstart, cend, endexcl=True)
self.querydict[rangename] = q
cstart = cend
def categorizer(self, global_searcher):
return QueryFacet(self.querydict).categorizer(global_searcher)
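# Worked sketch of the gap-sequence logic above (values illustrative):
# RangeFacet("price", 0, 30, [1, 5, 10]) generates the buckets
# (0, 1), (1, 6), (6, 16), (16, 26) and, since hardend defaults to False,
# a final gap-sized bucket (26, 36) that overshoots end=30.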
class DateRangeFacet(RangeFacet):
"""Sorts/facets based on date ranges. This is the same as RangeFacet
except you are expected to use ``daterange`` objects as the start and end
of the range, and ``timedelta`` or ``relativedelta`` objects as the gap(s),
and it generates :class:`~whoosh.query.DateRange` queries instead of
:class:`~whoosh.query.TermRange` queries.
For example, to facet a "birthday" range into 5 year buckets::
from datetime import datetime
from whoosh.support.relativedelta import relativedelta
startdate = datetime(1920, 1, 1)
enddate = datetime.now()
gap = relativedelta(years=5)
bdays = DateRangeFacet("birthday", startdate, enddate, gap)
results = searcher.search(myquery, groupedby=bdays)
The ranges/buckets are always **inclusive** at the start and **exclusive**
at the end.
"""
def _rangetype(self):
from whoosh import query
return query.DateRange
class ScoreFacet(FacetType):
"""Uses a document's score as a sorting criterion.
For example, to sort by the ``tag`` field, and then within that by relative
score::
tag_score = MultiFacet(["tag", ScoreFacet()])
results = searcher.search(myquery, sortedby=tag_score)
"""
def categorizer(self, global_searcher):
return self.ScoreCategorizer(global_searcher)
class ScoreCategorizer(Categorizer):
needs_current = True
def __init__(self, global_searcher):
w = global_searcher.weighting
self.use_final = w.use_final
if w.use_final:
self.final = w.final
def set_searcher(self, segment_searcher, offset):
self.segment_searcher = segment_searcher
def key_for(self, matcher, docid):
score = matcher.score()
if self.use_final:
score = self.final(self.segment_searcher, docid, score)
# Negate the score so higher values sort first
return 0 - score
class FunctionFacet(FacetType):
"""This facet type is low-level. In most cases you should use
:class:`TranslateFacet` instead.
This facet type lets you pass an arbitrary function that will compute the
key. This may be easier than subclassing FacetType and Categorizer to set up
the desired behavior.
The function is called with the arguments ``(searcher, docid)``, where the
``searcher`` may be a composite searcher, and the ``docid`` is an absolute
index document number (not segment-relative).
For example, to use the number of words in the document's "content" field
as the sorting/faceting key::
fn = lambda s, docid: s.doc_field_length(docid, "content")
lengths = FunctionFacet(fn)
"""
def __init__(self, fn, maptype=None):
self.fn = fn
self.maptype = maptype
def categorizer(self, global_searcher):
return self.FunctionCategorizer(global_searcher, self.fn)
class FunctionCategorizer(Categorizer):
def __init__(self, global_searcher, fn):
self.global_searcher = global_searcher
self.fn = fn
def set_searcher(self, segment_searcher, docoffset):
self.offset = docoffset
def key_for(self, matcher, docid):
return self.fn(self.global_searcher, docid + self.offset)
class TranslateFacet(FacetType):
"""Lets you specify a function to compute the key based on a key generated
by a wrapped facet.
This is useful if you want to use a custom ordering of a sortable field. For
example, if you want to use an implementation of the Unicode Collation
Algorithm (UCA) to sort a field using the rules from a particular language::
from pyuca import Collator
# The Collator object has a sort_key() method which takes a unicode
# string and returns a sort key
c = Collator("allkeys.txt")
# Make a facet object for the field you want to sort on
facet = sorting.FieldFacet("name")
# Wrap the facet in a TranslateFacet with the translation function
# (the Collator object's sort_key method)
facet = sorting.TranslateFacet(c.sort_key, facet)
# Use the facet to sort the search results
results = searcher.search(myquery, sortedby=facet)
You can pass multiple facets to the constructor; for each matching
document the function is then called with one key per facet.
"""
def __init__(self, fn, *facets):
"""
:param fn: The function to apply. For each matching document, this
function will be called with the values of the given facets as
arguments.
:param facets: One or more :class:`FacetType` objects. These facets are
used to compute facet value(s) for a matching document, and then the
value(s) is/are passed to the function.
"""
self.fn = fn
self.facets = facets
self.maptype = None
def categorizer(self, global_searcher):
catters = [facet.categorizer(global_searcher) for facet in self.facets]
return self.TranslateCategorizer(self.fn, catters)
class TranslateCategorizer(Categorizer):
def __init__(self, fn, catters):
self.fn = fn
self.catters = catters
def set_searcher(self, segment_searcher, docoffset):
for catter in self.catters:
catter.set_searcher(segment_searcher, docoffset)
def key_for(self, matcher, segment_docnum):
keys = [catter.key_for(matcher, segment_docnum)
for catter in self.catters]
return self.fn(*keys)
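# Sketch of wrapping multiple facets (field names illustrative): the function
# receives one key per wrapped facet for each matching document:
#
#     area = TranslateFacet(lambda w, h: w * h,
#                           FieldFacet("width"), FieldFacet("height"))
#     results = searcher.search(myquery, sortedby=area)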
class StoredFieldFacet(FacetType):
"""Lets you sort/group using the value in an unindexed, stored field (e.g.
:class:`whoosh.fields.STORED`). This is usually slower than using an indexed
field.
For fields where the stored value is a space-separated list of keywords,
(e.g. ``"tag1 tag2 tag3"``), you can use the ``allow_overlap`` keyword
argument to allow overlapped faceting on the result of calling the
``split()`` method on the field value (or calling a custom split function
if one is supplied).
"""
def __init__(self, fieldname, allow_overlap=False, split_fn=None,
maptype=None):
"""
:param fieldname: the name of the stored field.
:param allow_overlap: if True, when grouping, allow documents to appear
in multiple groups when they have multiple terms in the field. The
categorizer uses ``string.split()`` or the custom ``split_fn`` to
convert the stored value into a list of facet values.
:param split_fn: a custom function to split a stored field value into
multiple facet values when ``allow_overlap`` is True. If not
supplied, the categorizer simply calls the value's ``split()``
method.
"""
self.fieldname = fieldname
self.allow_overlap = allow_overlap
self.split_fn = split_fn
self.maptype = maptype
def default_name(self):
return self.fieldname
def categorizer(self, global_searcher):
return self.StoredFieldCategorizer(self.fieldname, self.allow_overlap,
self.split_fn)
class StoredFieldCategorizer(Categorizer):
def __init__(self, fieldname, allow_overlap, split_fn):
self.fieldname = fieldname
self.allow_overlap = allow_overlap
self.split_fn = split_fn
def set_searcher(self, segment_searcher, docoffset):
self.segment_searcher = segment_searcher
def keys_for(self, matcher, docid):
d = self.segment_searcher.stored_fields(docid)
value = d.get(self.fieldname)
if self.split_fn:
return self.split_fn(value)
else:
return value.split()
def key_for(self, matcher, docid):
d = self.segment_searcher.stored_fields(docid)
return d.get(self.fieldname)
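# Usage sketch (field name illustrative): overlapped grouping on a stored
# field whose value is a space-separated keyword string such as "tag1 tag2":
#
#     facet = StoredFieldFacet("tags", allow_overlap=True)
#     results = searcher.search(myquery, groupedby=facet)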
class MultiFacet(FacetType):
"""Sorts/facets by the combination of multiple "sub-facets".
For example, to sort by the value of the "tag" field, and then (for
documents where the tag is the same) by the value of the "path" field::
facet = MultiFacet([FieldFacet("tag"), FieldFacet("path")])
results = searcher.search(myquery, sortedby=facet)
As a shortcut, you can use strings to refer to field names, and they will
be assumed to be field names and turned into FieldFacet objects::
facet = MultiFacet("tag", "path")
You can also use the ``add_*`` methods to add criteria to the multifacet::
facet = MultiFacet()
facet.add_field("tag")
facet.add_field("path", reverse=True)
facet.add_query({"a-m": TermRange("name", "a", "m"),
"n-z": TermRange("name", "n", "z")})
"""
def __init__(self, items=None, maptype=None):
self.facets = []
if items:
for item in items:
self._add(item)
self.maptype = maptype
@classmethod
def from_sortedby(cls, sortedby):
multi = cls()
if isinstance(sortedby, string_type):
multi._add(sortedby)
elif (isinstance(sortedby, (list, tuple))
or hasattr(sortedby, "__iter__")):
for item in sortedby:
multi._add(item)
else:
multi._add(sortedby)
return multi
def _add(self, item):
if isinstance(item, FacetType):
self.add_facet(item)
elif isinstance(item, string_type):
self.add_field(item)
else:
raise Exception("Don't know what to do with facet %r" % (item,))
def add_field(self, fieldname, reverse=False):
self.facets.append(FieldFacet(fieldname, reverse=reverse))
return self
def add_query(self, querydict, other=None, allow_overlap=False):
self.facets.append(QueryFacet(querydict, other=other,
allow_overlap=allow_overlap))
return self
def add_score(self):
self.facets.append(ScoreFacet())
return self
def add_facet(self, facet):
if not isinstance(facet, FacetType):
raise TypeError("%r is not a facet object, perhaps you meant "
"add_field()" % (facet,))
self.facets.append(facet)
return self
def categorizer(self, global_searcher):
if not self.facets:
raise Exception("No facets")
elif len(self.facets) == 1:
catter = self.facets[0].categorizer(global_searcher)
else:
catter = self.MultiCategorizer([facet.categorizer(global_searcher)
for facet in self.facets])
return catter
class MultiCategorizer(Categorizer):
def __init__(self, catters):
self.catters = catters
@property
def needs_current(self):
return any(c.needs_current for c in self.catters)
def set_searcher(self, segment_searcher, docoffset):
for catter in self.catters:
catter.set_searcher(segment_searcher, docoffset)
def key_for(self, matcher, docid):
return tuple(catter.key_for(matcher, docid)
for catter in self.catters)
def key_to_name(self, key):
return tuple(catter.key_to_name(keypart)
for catter, keypart
in izip(self.catters, key))
class Facets(object):
"""Maps facet names to :class:`FacetType` objects, for creating multiple
groupings of documents.
For example, to group by tag, and **also** group by price range::
facets = Facets()
facets.add_field("tag")
facets.add_facet("price", RangeFacet("price", 0, 1000, 100))
results = searcher.search(myquery, groupedby=facets)
tag_groups = results.groups("tag")
price_groups = results.groups("price")
(To group by the combination of multiple facets, use :class:`MultiFacet`.)
"""
def __init__(self, x=None):
self.facets = {}
if x:
self.add_facets(x)
@classmethod
def from_groupedby(cls, groupedby):
facets = cls()
if isinstance(groupedby, (cls, dict)):
facets.add_facets(groupedby)
elif isinstance(groupedby, string_type):
facets.add_field(groupedby)
elif isinstance(groupedby, FacetType):
facets.add_facet(groupedby.default_name(), groupedby)
elif isinstance(groupedby, (list, tuple)):
for item in groupedby:
facets.add_facets(cls.from_groupedby(item))
else:
raise Exception("Don't know what to do with groupedby=%r"
% groupedby)
return facets
def names(self):
"""Returns an iterator of the facet names in this object.
"""
return iter(self.facets)
def items(self):
"""Returns a list of (facetname, facetobject) tuples for the facets in
this object.
"""
return self.facets.items()
def add_field(self, fieldname, **kwargs):
"""Adds a :class:`FieldFacet` for the given field name (the field name
is automatically used as the facet name).
"""
self.facets[fieldname] = FieldFacet(fieldname, **kwargs)
return self
def add_query(self, name, querydict, **kwargs):
"""Adds a :class:`QueryFacet` under the given ``name``.
:param name: a name for the facet.
:param querydict: a dictionary mapping keys to
:class:`whoosh.query.Query` objects.
"""
self.facets[name] = QueryFacet(querydict, **kwargs)
return self
def add_facet(self, name, facet):
"""Adds a :class:`FacetType` object under the given ``name``.
"""
if not isinstance(facet, FacetType):
raise Exception("%r:%r is not a facet" % (name, facet))
self.facets[name] = facet
return self
def add_facets(self, facets, replace=True):
"""Adds the contents of the given ``Facets`` or ``dict`` object to this
object.
"""
if not isinstance(facets, (dict, Facets)):
raise Exception("%r is not a Facets object or dict" % facets)
for name, facet in facets.items():
if replace or name not in self.facets:
self.facets[name] = facet
return self
# Objects for holding facet groups
class FacetMap(object):
"""Base class for objects holding the results of grouping search results by
a Facet. Use an object's ``as_dict()`` method to access the results.
You can pass a subclass of this to the ``maptype`` keyword argument when
creating a ``FacetType`` object to specify what information the facet
should record about the group. For example::
# Record each document in each group in its sorted order
myfacet = FieldFacet("size", maptype=OrderedList)
# Record only the count of documents in each group
myfacet = FieldFacet("size", maptype=Count)
"""
def add(self, groupname, docid, sortkey):
"""Adds a document to the facet results.
:param groupname: the name of the group to add this document to.
:param docid: the document number of the document to add.
:param sortkey: a value representing the sort position of the document
in the full results.
"""
raise NotImplementedError
def as_dict(self):
"""Returns a dictionary object mapping group names to
implementation-specific values. For example, the value might be a list
of document numbers, or an integer representing the number of documents
in the group.
"""
raise NotImplementedError
class OrderedList(FacetMap):
"""Stores a list of document numbers for each group, in the same order as
they appear in the search results.
The ``as_dict`` method returns a dictionary mapping group names to lists
of document numbers.
"""
def __init__(self):
self.dict = defaultdict(list)
def __repr__(self):
return "<%s %r>" % (self.__class__.__name__, self.dict)
def add(self, groupname, docid, sortkey):
self.dict[groupname].append((sortkey, docid))
def as_dict(self):
d = {}
for key, items in iteritems(self.dict):
d[key] = [docnum for _, docnum in sorted(items)]
return d
class UnorderedList(FacetMap):
"""Stores a list of document numbers for each group, in arbitrary order.
This is slightly faster and uses less memory than
:class:`OrderedList` if you don't care about the ordering of the
documents within groups.
The ``as_dict`` method returns a dictionary mapping group names to lists
of document numbers.
"""
def __init__(self):
self.dict = defaultdict(list)
def __repr__(self):
return "<%s %r>" % (self.__class__.__name__, self.dict)
def add(self, groupname, docid, sortkey):
self.dict[groupname].append(docid)
def as_dict(self):
return dict(self.dict)
class Count(FacetMap):
"""Stores the number of documents in each group.
The ``as_dict`` method returns a dictionary mapping group names to
integers.
"""
def __init__(self):
self.dict = defaultdict(int)
def __repr__(self):
return "<%s %r>" % (self.__class__.__name__, self.dict)
def add(self, groupname, docid, sortkey):
self.dict[groupname] += 1
def as_dict(self):
return dict(self.dict)
class Best(FacetMap):
"""Stores the "best" document in each group (that is, the one with the
highest sort key).
The ``as_dict`` method returns a dictionary mapping group names to
document numbers.
"""
def __init__(self):
self.bestids = {}
self.bestkeys = {}
def __repr__(self):
return "<%s %r>" % (self.__class__.__name__, self.bestids)
def add(self, groupname, docid, sortkey):
if groupname not in self.bestids or sortkey < self.bestkeys[groupname]:
self.bestids[groupname] = docid
self.bestkeys[groupname] = sortkey
def as_dict(self):
return self.bestids
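# Sketch of choosing a map type (field name illustrative): record only the
# top-sorted document of each group instead of full member lists:
#
#     myfacet = FieldFacet("size", maptype=Best)
#     results = searcher.search(myquery, groupedby=myfacet)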
# Helper functions
def add_sortable(writer, fieldname, facet, column=None):
"""Adds a per-document value column to an existing field which was created
without the ``sortable`` keyword argument.
>>> from whoosh import index, sorting
>>> ix = index.open_dir("indexdir")
>>> with ix.writer() as w:
... facet = sorting.FieldFacet("price")
... sorting.add_sortable(w, "price", facet)
...
:param writer: a :class:`whoosh.writing.IndexWriter` object.
:param fieldname: the name of the field to add the per-document sortable
values to. If this field doesn't exist in the writer's schema, the
function will add a :class:`whoosh.fields.COLUMN` field to the schema,
and you must specify the column object using the ``column`` keyword
argument.
:param facet: a :class:`FacetType` object to use to generate the
per-document values.
:param column: a :class:`whoosh.columns.ColumnType` object to use to store
the per-document values. If you don't specify a column object, the
function will use the default column type for the given field.
"""
storage = writer.storage
schema = writer.schema
field = None
if fieldname in schema:
field = schema[fieldname]
if field.column_type:
raise Exception("%r field is already sortable" % fieldname)
if column:
if fieldname not in schema:
from whoosh.fields import COLUMN
field = COLUMN(column)
schema.add(fieldname, field)
else:
if fieldname in schema:
column = field.default_column()
else:
raise Exception("Field %r does not exist" % fieldname)
searcher = writer.searcher()
catter = facet.categorizer(searcher)
for subsearcher, docoffset in searcher.leaf_searchers():
catter.set_searcher(subsearcher, docoffset)
reader = subsearcher.reader()
if reader.has_column(fieldname):
raise Exception("%r field already has a column" % fieldname)
codec = reader.codec()
segment = reader.segment()
colname = codec.column_filename(segment, fieldname)
colfile = storage.create_file(colname)
try:
colwriter = column.writer(colfile)
for docnum in reader.all_doc_ids():
v = catter.key_to_name(catter.key_for(None, docnum))
cv = field.to_column_value(v)
colwriter.add(docnum, cv)
colwriter.finish(reader.doc_count_all())
finally:
colfile.close()
field.column_type = column
|
gpl-2.0
|
open-mmlab/mmdetection
|
mmdet/models/dense_heads/reppoints_head.py
|
1
|
34356
|
import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.ops import DeformConv2d
from mmdet.core import (build_assigner, build_sampler, images_to_levels,
multi_apply, multiclass_nms, unmap)
from mmdet.core.anchor.point_generator import MlvlPointGenerator
from ..builder import HEADS, build_loss
from .anchor_free_head import AnchorFreeHead
@HEADS.register_module()
class RepPointsHead(AnchorFreeHead):
"""RepPoint head.
Args:
point_feat_channels (int): Number of channels of points features.
gradient_mul (float): The multiplier to gradients from
points refinement and recognition.
point_strides (Iterable): points strides.
point_base_scale (int): bbox scale for assigning labels.
loss_cls (dict): Config of classification loss.
loss_bbox_init (dict): Config of initial points loss.
loss_bbox_refine (dict): Config of points loss in refinement.
use_grid_points (bool): If we use the bounding box representation, the
reppoints are represented as grid points on the bounding box.
center_init (bool): Whether to use center point assignment.
transform_method (str): The methods to transform RepPoints to bbox.
init_cfg (dict or list[dict], optional): Initialization config dict.
""" # noqa: W605
def __init__(self,
num_classes,
in_channels,
point_feat_channels=256,
num_points=9,
gradient_mul=0.1,
point_strides=[8, 16, 32, 64, 128],
point_base_scale=4,
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox_init=dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=0.5),
loss_bbox_refine=dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),
use_grid_points=False,
center_init=True,
transform_method='moment',
moment_mul=0.01,
init_cfg=dict(
type='Normal',
layer='Conv2d',
std=0.01,
override=dict(
type='Normal',
name='reppoints_cls_out',
std=0.01,
bias_prob=0.01)),
**kwargs):
self.num_points = num_points
self.point_feat_channels = point_feat_channels
self.use_grid_points = use_grid_points
self.center_init = center_init
# we use deform conv to extract points features
self.dcn_kernel = int(np.sqrt(num_points))
self.dcn_pad = int((self.dcn_kernel - 1) / 2)
assert self.dcn_kernel * self.dcn_kernel == num_points, \
'The points number should be a square number.'
assert self.dcn_kernel % 2 == 1, \
'The points number should be an odd square number.'
dcn_base = np.arange(-self.dcn_pad,
self.dcn_pad + 1).astype(np.float64)
dcn_base_y = np.repeat(dcn_base, self.dcn_kernel)
dcn_base_x = np.tile(dcn_base, self.dcn_kernel)
dcn_base_offset = np.stack([dcn_base_y, dcn_base_x], axis=1).reshape(
(-1))
self.dcn_base_offset = torch.tensor(dcn_base_offset).view(1, -1, 1, 1)
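# Worked sketch for the default num_points=9: dcn_kernel=3, dcn_pad=1,
# dcn_base is [-1, 0, 1], and dcn_base_offset flattens the 3x3 grid of
# (y, x) pairs: [-1,-1, -1,0, -1,1, 0,-1, 0,0, 0,1, 1,-1, 1,0, 1,1].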
super().__init__(
num_classes,
in_channels,
loss_cls=loss_cls,
init_cfg=init_cfg,
**kwargs)
self.gradient_mul = gradient_mul
self.point_base_scale = point_base_scale
self.point_strides = point_strides
self.point_generator = MlvlPointGenerator(
self.point_strides, offset=0.)
self.sampling = loss_cls['type'] not in ['FocalLoss']
if self.train_cfg:
self.init_assigner = build_assigner(self.train_cfg.init.assigner)
self.refine_assigner = build_assigner(
self.train_cfg.refine.assigner)
# use PseudoSampler when sampling is False
if self.sampling and hasattr(self.train_cfg, 'sampler'):
sampler_cfg = self.train_cfg.sampler
else:
sampler_cfg = dict(type='PseudoSampler')
self.sampler = build_sampler(sampler_cfg, context=self)
self.transform_method = transform_method
if self.transform_method == 'moment':
self.moment_transfer = nn.Parameter(
data=torch.zeros(2), requires_grad=True)
self.moment_mul = moment_mul
self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
if self.use_sigmoid_cls:
self.cls_out_channels = self.num_classes
else:
self.cls_out_channels = self.num_classes + 1
self.loss_bbox_init = build_loss(loss_bbox_init)
self.loss_bbox_refine = build_loss(loss_bbox_refine)
def _init_layers(self):
"""Initialize layers of the head."""
self.relu = nn.ReLU(inplace=True)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
pts_out_dim = 4 if self.use_grid_points else 2 * self.num_points
self.reppoints_cls_conv = DeformConv2d(self.feat_channels,
self.point_feat_channels,
self.dcn_kernel, 1,
self.dcn_pad)
self.reppoints_cls_out = nn.Conv2d(self.point_feat_channels,
self.cls_out_channels, 1, 1, 0)
self.reppoints_pts_init_conv = nn.Conv2d(self.feat_channels,
self.point_feat_channels, 3,
1, 1)
self.reppoints_pts_init_out = nn.Conv2d(self.point_feat_channels,
pts_out_dim, 1, 1, 0)
self.reppoints_pts_refine_conv = DeformConv2d(self.feat_channels,
self.point_feat_channels,
self.dcn_kernel, 1,
self.dcn_pad)
self.reppoints_pts_refine_out = nn.Conv2d(self.point_feat_channels,
pts_out_dim, 1, 1, 0)
def points2bbox(self, pts, y_first=True):
"""Converting the points set into bounding box.
:param pts: the input points sets (fields), each points
set (fields) is represented as 2n scalar.
:param y_first: if y_first=True, the point set is represented as
[y1, x1, y2, x2 ... yn, xn], otherwise the point set is
represented as [x1, y1, x2, y2 ... xn, yn].
:return: each points set is converting to a bbox [x1, y1, x2, y2].
"""
pts_reshape = pts.view(pts.shape[0], -1, 2, *pts.shape[2:])
pts_y = pts_reshape[:, :, 0, ...] if y_first else pts_reshape[:, :, 1,
...]
pts_x = pts_reshape[:, :, 1, ...] if y_first else pts_reshape[:, :, 0,
...]
if self.transform_method == 'minmax':
bbox_left = pts_x.min(dim=1, keepdim=True)[0]
bbox_right = pts_x.max(dim=1, keepdim=True)[0]
bbox_up = pts_y.min(dim=1, keepdim=True)[0]
bbox_bottom = pts_y.max(dim=1, keepdim=True)[0]
bbox = torch.cat([bbox_left, bbox_up, bbox_right, bbox_bottom],
dim=1)
elif self.transform_method == 'partial_minmax':
pts_y = pts_y[:, :4, ...]
pts_x = pts_x[:, :4, ...]
bbox_left = pts_x.min(dim=1, keepdim=True)[0]
bbox_right = pts_x.max(dim=1, keepdim=True)[0]
bbox_up = pts_y.min(dim=1, keepdim=True)[0]
bbox_bottom = pts_y.max(dim=1, keepdim=True)[0]
bbox = torch.cat([bbox_left, bbox_up, bbox_right, bbox_bottom],
dim=1)
elif self.transform_method == 'moment':
pts_y_mean = pts_y.mean(dim=1, keepdim=True)
pts_x_mean = pts_x.mean(dim=1, keepdim=True)
pts_y_std = torch.std(pts_y - pts_y_mean, dim=1, keepdim=True)
pts_x_std = torch.std(pts_x - pts_x_mean, dim=1, keepdim=True)
moment_transfer = (self.moment_transfer * self.moment_mul) + (
self.moment_transfer.detach() * (1 - self.moment_mul))
moment_width_transfer = moment_transfer[0]
moment_height_transfer = moment_transfer[1]
half_width = pts_x_std * torch.exp(moment_width_transfer)
half_height = pts_y_std * torch.exp(moment_height_transfer)
bbox = torch.cat([
pts_x_mean - half_width, pts_y_mean - half_height,
pts_x_mean + half_width, pts_y_mean + half_height
],
dim=1)
else:
raise NotImplementedError
return bbox
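# Toy check of the 'minmax' transform above (values illustrative): for a
# single point set (y1, x1, y2, x2) = (1, 2, 5, 4) with y_first=True, the
# resulting bbox [x1, y1, x2, y2] is [2, 1, 4, 5].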
def gen_grid_from_reg(self, reg, previous_boxes):
"""Base on the previous bboxes and regression values, we compute the
regressed bboxes and generate the grids on the bboxes.
:param reg: the regression value to previous bboxes.
:param previous_boxes: previous bboxes.
:return: generate grids on the regressed bboxes.
"""
b, _, h, w = reg.shape
bxy = (previous_boxes[:, :2, ...] + previous_boxes[:, 2:, ...]) / 2.
bwh = (previous_boxes[:, 2:, ...] -
previous_boxes[:, :2, ...]).clamp(min=1e-6)
grid_topleft = bxy + bwh * reg[:, :2, ...] - 0.5 * bwh * torch.exp(
reg[:, 2:, ...])
grid_wh = bwh * torch.exp(reg[:, 2:, ...])
grid_left = grid_topleft[:, [0], ...]
grid_top = grid_topleft[:, [1], ...]
grid_width = grid_wh[:, [0], ...]
grid_height = grid_wh[:, [1], ...]
interval = torch.linspace(0., 1., self.dcn_kernel).view(
1, self.dcn_kernel, 1, 1).type_as(reg)
grid_x = grid_left + grid_width * interval
grid_x = grid_x.unsqueeze(1).repeat(1, self.dcn_kernel, 1, 1, 1)
grid_x = grid_x.view(b, -1, h, w)
grid_y = grid_top + grid_height * interval
grid_y = grid_y.unsqueeze(2).repeat(1, 1, self.dcn_kernel, 1, 1)
grid_y = grid_y.view(b, -1, h, w)
grid_yx = torch.stack([grid_y, grid_x], dim=2)
grid_yx = grid_yx.view(b, -1, h, w)
regressed_bbox = torch.cat([
grid_left, grid_top, grid_left + grid_width, grid_top + grid_height
], 1)
return grid_yx, regressed_bbox
def forward(self, feats):
return multi_apply(self.forward_single, feats)
def forward_single(self, x):
"""Forward feature map of a single FPN level."""
dcn_base_offset = self.dcn_base_offset.type_as(x)
# If we use center_init, the initial reppoints are the center points.
# If we use the bounding box representation, the initial reppoints
# come from a regular grid placed on a pre-defined bbox.
if self.use_grid_points or not self.center_init:
scale = self.point_base_scale / 2
points_init = dcn_base_offset / dcn_base_offset.max() * scale
bbox_init = x.new_tensor([-scale, -scale, scale,
scale]).view(1, 4, 1, 1)
else:
points_init = 0
cls_feat = x
pts_feat = x
for cls_conv in self.cls_convs:
cls_feat = cls_conv(cls_feat)
for reg_conv in self.reg_convs:
pts_feat = reg_conv(pts_feat)
# initialize reppoints
pts_out_init = self.reppoints_pts_init_out(
self.relu(self.reppoints_pts_init_conv(pts_feat)))
if self.use_grid_points:
pts_out_init, bbox_out_init = self.gen_grid_from_reg(
pts_out_init, bbox_init.detach())
else:
pts_out_init = pts_out_init + points_init
# refine and classify reppoints
pts_out_init_grad_mul = (1 - self.gradient_mul) * pts_out_init.detach(
) + self.gradient_mul * pts_out_init
dcn_offset = pts_out_init_grad_mul - dcn_base_offset
cls_out = self.reppoints_cls_out(
self.relu(self.reppoints_cls_conv(cls_feat, dcn_offset)))
pts_out_refine = self.reppoints_pts_refine_out(
self.relu(self.reppoints_pts_refine_conv(pts_feat, dcn_offset)))
if self.use_grid_points:
pts_out_refine, bbox_out_refine = self.gen_grid_from_reg(
pts_out_refine, bbox_out_init.detach())
else:
pts_out_refine = pts_out_refine + pts_out_init.detach()
return cls_out, pts_out_init, pts_out_refine
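# Per-level output shapes (a sketch, N = batch size): cls_out is
# (N, cls_out_channels, H, W); both point outputs carry 2 * num_points
# channels (grid-based regression is expanded to the same layout by
# gen_grid_from_reg).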
def get_points(self, featmap_sizes, img_metas, device):
"""Get points according to feature map sizes.
Args:
featmap_sizes (list[tuple]): Multi-level feature map sizes.
img_metas (list[dict]): Image meta info.
Returns:
tuple: points of each image, valid flags of each image
"""
num_imgs = len(img_metas)
# since the feature map sizes of all images are the same, we compute
# the point centers only once
multi_level_points = self.point_generator.grid_priors(
featmap_sizes, device, with_stride=True)
points_list = [[point.clone() for point in multi_level_points]
for _ in range(num_imgs)]
# for each image, we compute valid flags of multi level grids
valid_flag_list = []
for img_id, img_meta in enumerate(img_metas):
multi_level_flags = self.point_generator.valid_flags(
featmap_sizes, img_meta['pad_shape'])
valid_flag_list.append(multi_level_flags)
return points_list, valid_flag_list
def centers_to_bboxes(self, point_list):
"""Get bboxes according to center points.
Only used in :class:`MaxIoUAssigner`.
"""
bbox_list = []
for i_img, point in enumerate(point_list):
bbox = []
for i_lvl in range(len(self.point_strides)):
scale = self.point_base_scale * self.point_strides[i_lvl] * 0.5
bbox_shift = torch.Tensor([-scale, -scale, scale,
scale]).view(1, 4).type_as(point[0])
bbox_center = torch.cat(
[point[i_lvl][:, :2], point[i_lvl][:, :2]], dim=1)
bbox.append(bbox_center + bbox_shift)
bbox_list.append(bbox)
return bbox_list
def offset_to_pts(self, center_list, pred_list):
"""Change from point offset to point coordinate."""
pts_list = []
for i_lvl in range(len(self.point_strides)):
pts_lvl = []
for i_img in range(len(center_list)):
pts_center = center_list[i_img][i_lvl][:, :2].repeat(
1, self.num_points)
pts_shift = pred_list[i_lvl][i_img]
yx_pts_shift = pts_shift.permute(1, 2, 0).view(
-1, 2 * self.num_points)
y_pts_shift = yx_pts_shift[..., 0::2]
x_pts_shift = yx_pts_shift[..., 1::2]
xy_pts_shift = torch.stack([x_pts_shift, y_pts_shift], -1)
xy_pts_shift = xy_pts_shift.view(*yx_pts_shift.shape[:-1], -1)
pts = xy_pts_shift * self.point_strides[i_lvl] + pts_center
pts_lvl.append(pts)
pts_lvl = torch.stack(pts_lvl, 0)
pts_list.append(pts_lvl)
return pts_list
def _point_target_single(self,
flat_proposals,
valid_flags,
gt_bboxes,
gt_bboxes_ignore,
gt_labels,
stage='init',
unmap_outputs=True):
inside_flags = valid_flags
if not inside_flags.any():
return (None, ) * 7
# assign gt and sample proposals
proposals = flat_proposals[inside_flags, :]
if stage == 'init':
assigner = self.init_assigner
pos_weight = self.train_cfg.init.pos_weight
else:
assigner = self.refine_assigner
pos_weight = self.train_cfg.refine.pos_weight
assign_result = assigner.assign(proposals, gt_bboxes, gt_bboxes_ignore,
None if self.sampling else gt_labels)
sampling_result = self.sampler.sample(assign_result, proposals,
gt_bboxes)
num_valid_proposals = proposals.shape[0]
bbox_gt = proposals.new_zeros([num_valid_proposals, 4])
pos_proposals = torch.zeros_like(proposals)
proposals_weights = proposals.new_zeros([num_valid_proposals, 4])
labels = proposals.new_full((num_valid_proposals, ),
self.num_classes,
dtype=torch.long)
label_weights = proposals.new_zeros(
num_valid_proposals, dtype=torch.float)
pos_inds = sampling_result.pos_inds
neg_inds = sampling_result.neg_inds
if len(pos_inds) > 0:
pos_gt_bboxes = sampling_result.pos_gt_bboxes
bbox_gt[pos_inds, :] = pos_gt_bboxes
pos_proposals[pos_inds, :] = proposals[pos_inds, :]
proposals_weights[pos_inds, :] = 1.0
if gt_labels is None:
# Only rpn gives gt_labels as None
# Foreground is the first class
labels[pos_inds] = 0
else:
labels[pos_inds] = gt_labels[
sampling_result.pos_assigned_gt_inds]
if pos_weight <= 0:
label_weights[pos_inds] = 1.0
else:
label_weights[pos_inds] = pos_weight
if len(neg_inds) > 0:
label_weights[neg_inds] = 1.0
# map up to original set of proposals
if unmap_outputs:
num_total_proposals = flat_proposals.size(0)
labels = unmap(labels, num_total_proposals, inside_flags)
label_weights = unmap(label_weights, num_total_proposals,
inside_flags)
bbox_gt = unmap(bbox_gt, num_total_proposals, inside_flags)
pos_proposals = unmap(pos_proposals, num_total_proposals,
inside_flags)
proposals_weights = unmap(proposals_weights, num_total_proposals,
inside_flags)
return (labels, label_weights, bbox_gt, pos_proposals,
proposals_weights, pos_inds, neg_inds)
def get_targets(self,
proposals_list,
valid_flag_list,
gt_bboxes_list,
img_metas,
gt_bboxes_ignore_list=None,
gt_labels_list=None,
stage='init',
label_channels=1,
unmap_outputs=True):
"""Compute corresponding GT box and classification targets for
proposals.
Args:
proposals_list (list[list]): Multi level points/bboxes of each
image.
valid_flag_list (list[list]): Multi level valid flags of each
image.
gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
img_metas (list[dict]): Meta info of each image.
gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be
ignored.
gt_labels_list (list[Tensor]): Ground truth labels of each box.
stage (str): `init` or `refine`. Generate target for init stage or
refine stage
label_channels (int): Channel of label.
unmap_outputs (bool): Whether to map outputs back to the original
set of anchors.
Returns:
tuple:
- labels_list (list[Tensor]): Labels of each level.
- label_weights_list (list[Tensor]): Label weights of each level. # noqa: E501
- bbox_gt_list (list[Tensor]): Ground truth bbox of each level.
- proposal_list (list[Tensor]): Proposals(points/bboxes) of each level. # noqa: E501
- proposal_weights_list (list[Tensor]): Proposal weights of each level. # noqa: E501
- num_total_pos (int): Number of positive samples in all images. # noqa: E501
- num_total_neg (int): Number of negative samples in all images. # noqa: E501
"""
assert stage in ['init', 'refine']
num_imgs = len(img_metas)
assert len(proposals_list) == len(valid_flag_list) == num_imgs
# points number of multi levels
num_level_proposals = [points.size(0) for points in proposals_list[0]]
# concat all level points and flags to a single tensor
for i in range(num_imgs):
assert len(proposals_list[i]) == len(valid_flag_list[i])
proposals_list[i] = torch.cat(proposals_list[i])
valid_flag_list[i] = torch.cat(valid_flag_list[i])
# compute targets for each image
if gt_bboxes_ignore_list is None:
gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
if gt_labels_list is None:
gt_labels_list = [None for _ in range(num_imgs)]
(all_labels, all_label_weights, all_bbox_gt, all_proposals,
all_proposal_weights, pos_inds_list, neg_inds_list) = multi_apply(
self._point_target_single,
proposals_list,
valid_flag_list,
gt_bboxes_list,
gt_bboxes_ignore_list,
gt_labels_list,
stage=stage,
unmap_outputs=unmap_outputs)
# no valid points
if any([labels is None for labels in all_labels]):
return None
# sampled points of all images
num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
labels_list = images_to_levels(all_labels, num_level_proposals)
label_weights_list = images_to_levels(all_label_weights,
num_level_proposals)
bbox_gt_list = images_to_levels(all_bbox_gt, num_level_proposals)
proposals_list = images_to_levels(all_proposals, num_level_proposals)
proposal_weights_list = images_to_levels(all_proposal_weights,
num_level_proposals)
return (labels_list, label_weights_list, bbox_gt_list, proposals_list,
proposal_weights_list, num_total_pos, num_total_neg)
def loss_single(self, cls_score, pts_pred_init, pts_pred_refine, labels,
label_weights, bbox_gt_init, bbox_weights_init,
bbox_gt_refine, bbox_weights_refine, stride,
num_total_samples_init, num_total_samples_refine):
# classification loss
labels = labels.reshape(-1)
label_weights = label_weights.reshape(-1)
cls_score = cls_score.permute(0, 2, 3,
1).reshape(-1, self.cls_out_channels)
cls_score = cls_score.contiguous()
loss_cls = self.loss_cls(
cls_score,
labels,
label_weights,
avg_factor=num_total_samples_refine)
# points loss
bbox_gt_init = bbox_gt_init.reshape(-1, 4)
bbox_weights_init = bbox_weights_init.reshape(-1, 4)
bbox_pred_init = self.points2bbox(
pts_pred_init.reshape(-1, 2 * self.num_points), y_first=False)
bbox_gt_refine = bbox_gt_refine.reshape(-1, 4)
bbox_weights_refine = bbox_weights_refine.reshape(-1, 4)
bbox_pred_refine = self.points2bbox(
pts_pred_refine.reshape(-1, 2 * self.num_points), y_first=False)
normalize_term = self.point_base_scale * stride
loss_pts_init = self.loss_bbox_init(
bbox_pred_init / normalize_term,
bbox_gt_init / normalize_term,
bbox_weights_init,
avg_factor=num_total_samples_init)
loss_pts_refine = self.loss_bbox_refine(
bbox_pred_refine / normalize_term,
bbox_gt_refine / normalize_term,
bbox_weights_refine,
avg_factor=num_total_samples_refine)
return loss_cls, loss_pts_init, loss_pts_refine
def loss(self,
cls_scores,
pts_preds_init,
pts_preds_refine,
gt_bboxes,
gt_labels,
img_metas,
gt_bboxes_ignore=None):
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
device = cls_scores[0].device
label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
# target for initial stage
center_list, valid_flag_list = self.get_points(featmap_sizes,
img_metas, device)
pts_coordinate_preds_init = self.offset_to_pts(center_list,
pts_preds_init)
if self.train_cfg.init.assigner['type'] == 'PointAssigner':
# Assign target for center list
candidate_list = center_list
else:
# transform center list to bbox list and
# assign target for bbox list
bbox_list = self.centers_to_bboxes(center_list)
candidate_list = bbox_list
cls_reg_targets_init = self.get_targets(
candidate_list,
valid_flag_list,
gt_bboxes,
img_metas,
gt_bboxes_ignore_list=gt_bboxes_ignore,
gt_labels_list=gt_labels,
stage='init',
label_channels=label_channels)
(*_, bbox_gt_list_init, candidate_list_init, bbox_weights_list_init,
num_total_pos_init, num_total_neg_init) = cls_reg_targets_init
num_total_samples_init = (
num_total_pos_init +
num_total_neg_init if self.sampling else num_total_pos_init)
# target for refinement stage
center_list, valid_flag_list = self.get_points(featmap_sizes,
img_metas, device)
pts_coordinate_preds_refine = self.offset_to_pts(
center_list, pts_preds_refine)
bbox_list = []
for i_img, center in enumerate(center_list):
bbox = []
for i_lvl in range(len(pts_preds_refine)):
bbox_preds_init = self.points2bbox(
pts_preds_init[i_lvl].detach())
bbox_shift = bbox_preds_init * self.point_strides[i_lvl]
bbox_center = torch.cat(
[center[i_lvl][:, :2], center[i_lvl][:, :2]], dim=1)
bbox.append(bbox_center +
bbox_shift[i_img].permute(1, 2, 0).reshape(-1, 4))
bbox_list.append(bbox)
cls_reg_targets_refine = self.get_targets(
bbox_list,
valid_flag_list,
gt_bboxes,
img_metas,
gt_bboxes_ignore_list=gt_bboxes_ignore,
gt_labels_list=gt_labels,
stage='refine',
label_channels=label_channels)
(labels_list, label_weights_list, bbox_gt_list_refine,
candidate_list_refine, bbox_weights_list_refine, num_total_pos_refine,
num_total_neg_refine) = cls_reg_targets_refine
num_total_samples_refine = (
num_total_pos_refine +
num_total_neg_refine if self.sampling else num_total_pos_refine)
# compute loss
losses_cls, losses_pts_init, losses_pts_refine = multi_apply(
self.loss_single,
cls_scores,
pts_coordinate_preds_init,
pts_coordinate_preds_refine,
labels_list,
label_weights_list,
bbox_gt_list_init,
bbox_weights_list_init,
bbox_gt_list_refine,
bbox_weights_list_refine,
self.point_strides,
num_total_samples_init=num_total_samples_init,
num_total_samples_refine=num_total_samples_refine)
loss_dict_all = {
'loss_cls': losses_cls,
'loss_pts_init': losses_pts_init,
'loss_pts_refine': losses_pts_refine
}
return loss_dict_all
def get_bboxes(self,
cls_scores,
pts_preds_init,
pts_preds_refine,
img_metas,
cfg=None,
rescale=False,
with_nms=True):
assert len(cls_scores) == len(pts_preds_refine)
device = cls_scores[0].device
bbox_preds_refine = [
self.points2bbox(pts_pred_refine)
for pts_pred_refine in pts_preds_refine
]
num_levels = len(cls_scores)
featmap_sizes = [
cls_scores[i].size()[-2:] for i in range(len(cls_scores))
]
multi_level_points = self.point_generator.grid_priors(
featmap_sizes, device)
result_list = []
for img_id in range(len(img_metas)):
cls_score_list = [cls_scores[i][img_id] for i in range(num_levels)]
bbox_pred_list = [
bbox_preds_refine[i][img_id] for i in range(num_levels)
]
img_shape = img_metas[img_id]['img_shape']
scale_factor = img_metas[img_id]['scale_factor']
proposals = self._get_bboxes_single(cls_score_list, bbox_pred_list,
multi_level_points, img_shape,
scale_factor, cfg, rescale,
with_nms)
result_list.append(proposals)
return result_list
def _get_bboxes_single(self,
cls_scores,
bbox_preds,
mlvl_points,
img_shape,
scale_factor,
cfg,
rescale=False,
with_nms=True):
cfg = self.test_cfg if cfg is None else cfg
assert len(cls_scores) == len(bbox_preds) == len(mlvl_points)
mlvl_bboxes = []
mlvl_scores = []
for i_lvl, (cls_score, bbox_pred, points) in enumerate(
zip(cls_scores, bbox_preds, mlvl_points)):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
cls_score = cls_score.permute(1, 2,
0).reshape(-1, self.cls_out_channels)
if self.use_sigmoid_cls:
scores = cls_score.sigmoid()
else:
scores = cls_score.softmax(-1)
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
nms_pre = cfg.get('nms_pre', -1)
if nms_pre > 0 and scores.shape[0] > nms_pre:
if self.use_sigmoid_cls:
max_scores, _ = scores.max(dim=1)
else:
# remind that we set FG labels to [0, num_class-1]
# since mmdet v2.0
# BG cat_id: num_class
max_scores, _ = scores[:, :-1].max(dim=1)
_, topk_inds = max_scores.topk(nms_pre)
points = points[topk_inds, :]
bbox_pred = bbox_pred[topk_inds, :]
scores = scores[topk_inds, :]
bbox_pos_center = torch.cat([points[:, :2], points[:, :2]], dim=1)
bboxes = bbox_pred * self.point_strides[i_lvl] + bbox_pos_center
x1 = bboxes[:, 0].clamp(min=0, max=img_shape[1])
y1 = bboxes[:, 1].clamp(min=0, max=img_shape[0])
x2 = bboxes[:, 2].clamp(min=0, max=img_shape[1])
y2 = bboxes[:, 3].clamp(min=0, max=img_shape[0])
bboxes = torch.stack([x1, y1, x2, y2], dim=-1)
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
mlvl_bboxes = torch.cat(mlvl_bboxes)
if rescale:
mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
mlvl_scores = torch.cat(mlvl_scores)
if self.use_sigmoid_cls:
# Add a dummy background class to the backend when using sigmoid
# remind that we set FG labels to [0, num_class-1] since mmdet v2.0
# BG cat_id: num_class
padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)
if with_nms:
det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores,
cfg.score_thr, cfg.nms,
cfg.max_per_img)
return det_bboxes, det_labels
else:
return mlvl_bboxes, mlvl_scores
|
apache-2.0
|
LarryHillyer/PoolHost
|
PoolHost/env/Lib/site-packages/django/core/handlers/base.py
|
96
|
10769
|
from __future__ import unicode_literals
import logging
import sys
import types
import warnings
from django.conf import settings
from django.core import signals
from django.core.exceptions import ImproperlyConfigured, MiddlewareNotUsed
from django.db import connections, transaction
from django.urls import get_resolver, get_urlconf, set_urlconf
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.module_loading import import_string
from .exception import (
convert_exception_to_response, get_exception_response,
handle_uncaught_exception,
)
logger = logging.getLogger('django.request')
class BaseHandler(object):
def __init__(self):
self._request_middleware = None
self._view_middleware = None
self._template_response_middleware = None
self._response_middleware = None
self._exception_middleware = None
self._middleware_chain = None
def load_middleware(self):
"""
Populate middleware lists from settings.MIDDLEWARE (or the deprecated
MIDDLEWARE_CLASSES).
Must be called after the environment is fixed (see __call__ in subclasses).
"""
self._request_middleware = []
self._view_middleware = []
self._template_response_middleware = []
self._response_middleware = []
self._exception_middleware = []
if settings.MIDDLEWARE is None:
warnings.warn(
"Old-style middleware using settings.MIDDLEWARE_CLASSES is "
"deprecated. Update your middleware and use settings.MIDDLEWARE "
"instead.", RemovedInDjango20Warning
)
handler = convert_exception_to_response(self._legacy_get_response)
for middleware_path in settings.MIDDLEWARE_CLASSES:
mw_class = import_string(middleware_path)
try:
mw_instance = mw_class()
except MiddlewareNotUsed as exc:
if settings.DEBUG:
if six.text_type(exc):
logger.debug('MiddlewareNotUsed(%r): %s', middleware_path, exc)
else:
logger.debug('MiddlewareNotUsed: %r', middleware_path)
continue
if hasattr(mw_instance, 'process_request'):
self._request_middleware.append(mw_instance.process_request)
if hasattr(mw_instance, 'process_view'):
self._view_middleware.append(mw_instance.process_view)
if hasattr(mw_instance, 'process_template_response'):
self._template_response_middleware.insert(0, mw_instance.process_template_response)
if hasattr(mw_instance, 'process_response'):
self._response_middleware.insert(0, mw_instance.process_response)
if hasattr(mw_instance, 'process_exception'):
self._exception_middleware.insert(0, mw_instance.process_exception)
else:
handler = convert_exception_to_response(self._get_response)
for middleware_path in reversed(settings.MIDDLEWARE):
middleware = import_string(middleware_path)
try:
mw_instance = middleware(handler)
except MiddlewareNotUsed as exc:
if settings.DEBUG:
if six.text_type(exc):
logger.debug('MiddlewareNotUsed(%r): %s', middleware_path, exc)
else:
logger.debug('MiddlewareNotUsed: %r', middleware_path)
continue
if mw_instance is None:
raise ImproperlyConfigured(
'Middleware factory %s returned None.' % middleware_path
)
if hasattr(mw_instance, 'process_view'):
self._view_middleware.insert(0, mw_instance.process_view)
if hasattr(mw_instance, 'process_template_response'):
self._template_response_middleware.append(mw_instance.process_template_response)
if hasattr(mw_instance, 'process_exception'):
self._exception_middleware.append(mw_instance.process_exception)
handler = convert_exception_to_response(mw_instance)
# We only assign to this when initialization is complete as it is used
# as a flag for initialization being complete.
self._middleware_chain = handler
def make_view_atomic(self, view):
non_atomic_requests = getattr(view, '_non_atomic_requests', set())
for db in connections.all():
if db.settings_dict['ATOMIC_REQUESTS'] and db.alias not in non_atomic_requests:
view = transaction.atomic(using=db.alias)(view)
return view
def get_exception_response(self, request, resolver, status_code, exception):
return get_exception_response(request, resolver, status_code, exception, self.__class__)
def get_response(self, request):
"""Return an HttpResponse object for the given HttpRequest."""
# Setup default url resolver for this thread
set_urlconf(settings.ROOT_URLCONF)
response = self._middleware_chain(request)
# This block is only needed for legacy MIDDLEWARE_CLASSES; if
# MIDDLEWARE is used, self._response_middleware will be empty.
try:
# Apply response middleware, regardless of the response
for middleware_method in self._response_middleware:
response = middleware_method(request, response)
# Complain if the response middleware returned None (a common error).
if response is None:
raise ValueError(
"%s.process_response didn't return an "
"HttpResponse object. It returned None instead."
% (middleware_method.__self__.__class__.__name__))
except Exception: # Any exception should be gathered and handled
signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, get_resolver(get_urlconf()), sys.exc_info())
response._closable_objects.append(request)
# If the exception handler returns a TemplateResponse that has not
# been rendered, force it to be rendered.
if not getattr(response, 'is_rendered', True) and callable(getattr(response, 'render', None)):
response = response.render()
if response.status_code == 404:
logger.warning(
'Not Found: %s', request.path,
extra={'status_code': 404, 'request': request},
)
return response
def _get_response(self, request):
"""
Resolve and call the view, then apply view, exception, and
template_response middleware. This method is everything that happens
inside the request/response middleware.
"""
response = None
if hasattr(request, 'urlconf'):
urlconf = request.urlconf
set_urlconf(urlconf)
resolver = get_resolver(urlconf)
else:
resolver = get_resolver()
resolver_match = resolver.resolve(request.path_info)
callback, callback_args, callback_kwargs = resolver_match
request.resolver_match = resolver_match
# Apply view middleware
for middleware_method in self._view_middleware:
response = middleware_method(request, callback, callback_args, callback_kwargs)
if response:
break
if response is None:
wrapped_callback = self.make_view_atomic(callback)
try:
response = wrapped_callback(request, *callback_args, **callback_kwargs)
except Exception as e:
response = self.process_exception_by_middleware(e, request)
# Complain if the view returned None (a common error).
if response is None:
if isinstance(callback, types.FunctionType): # FBV
view_name = callback.__name__
else: # CBV
view_name = callback.__class__.__name__ + '.__call__'
raise ValueError(
"The view %s.%s didn't return an HttpResponse object. It "
"returned None instead." % (callback.__module__, view_name)
)
# If the response supports deferred rendering, apply template
# response middleware and then render the response
elif hasattr(response, 'render') and callable(response.render):
for middleware_method in self._template_response_middleware:
response = middleware_method(request, response)
# Complain if the template response middleware returned None (a common error).
if response is None:
raise ValueError(
"%s.process_template_response didn't return an "
"HttpResponse object. It returned None instead."
% (middleware_method.__self__.__class__.__name__)
)
try:
response = response.render()
except Exception as e:
response = self.process_exception_by_middleware(e, request)
return response
def process_exception_by_middleware(self, exception, request):
"""
Pass the exception to the exception middleware. If no middleware
return a response for this exception, raise it.
"""
for middleware_method in self._exception_middleware:
response = middleware_method(request, exception)
if response:
return response
raise
def handle_uncaught_exception(self, request, resolver, exc_info):
"""Allow subclasses to override uncaught exception handling."""
return handle_uncaught_exception(request, resolver, exc_info)
def _legacy_get_response(self, request):
"""
Apply process_request() middleware and call the main _get_response(),
if needed. Used only for legacy MIDDLEWARE_CLASSES.
"""
response = None
# Apply request middleware
for middleware_method in self._request_middleware:
response = middleware_method(request)
if response:
break
if response is None:
response = self._get_response(request)
return response
|
gpl-3.0
|
skirsdeda/django
|
tests/postgres_tests/test_hstore.py
|
25
|
7536
|
import json
import unittest
from django.contrib.postgres import forms
from django.contrib.postgres.fields import HStoreField
from django.contrib.postgres.validators import KeysValidator
from django.core import exceptions, serializers
from django.db import connection
from django.test import TestCase
from .models import HStoreModel
@unittest.skipUnless(connection.vendor == 'postgresql', 'PostgreSQL required')
class SimpleTests(TestCase):
apps = ['django.contrib.postgres']
def test_save_load_success(self):
value = {'a': 'b'}
instance = HStoreModel(field=value)
instance.save()
reloaded = HStoreModel.objects.get()
self.assertEqual(reloaded.field, value)
def test_null(self):
instance = HStoreModel(field=None)
instance.save()
reloaded = HStoreModel.objects.get()
self.assertEqual(reloaded.field, None)
def test_value_null(self):
value = {'a': None}
instance = HStoreModel(field=value)
instance.save()
reloaded = HStoreModel.objects.get()
self.assertEqual(reloaded.field, value)
@unittest.skipUnless(connection.vendor == 'postgresql', 'PostgreSQL required')
class TestQuerying(TestCase):
def setUp(self):
self.objs = [
HStoreModel.objects.create(field={'a': 'b'}),
HStoreModel.objects.create(field={'a': 'b', 'c': 'd'}),
HStoreModel.objects.create(field={'c': 'd'}),
HStoreModel.objects.create(field={}),
HStoreModel.objects.create(field=None),
]
def test_exact(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__exact={'a': 'b'}),
self.objs[:1]
)
def test_contained_by(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__contained_by={'a': 'b', 'c': 'd'}),
self.objs[:4]
)
def test_contains(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__contains={'a': 'b'}),
self.objs[:2]
)
def test_has_key(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__has_key='c'),
self.objs[1:3]
)
def test_has_keys(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__has_keys=['a', 'c']),
self.objs[1:2]
)
def test_key_transform(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__a='b'),
self.objs[:2]
)
def test_keys(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__keys=['a']),
self.objs[:1]
)
def test_values(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__values=['b']),
self.objs[:1]
)
def test_field_chaining(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__a__contains='b'),
self.objs[:2]
)
def test_keys_contains(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__keys__contains=['a']),
self.objs[:2]
)
def test_values_overlap(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__values__overlap=['b', 'd']),
self.objs[:3]
)
@unittest.skipUnless(connection.vendor == 'postgresql', 'PostgreSQL required')
class TestSerialization(TestCase):
test_data = '[{"fields": {"field": "{\\"a\\": \\"b\\"}"}, "model": "postgres_tests.hstoremodel", "pk": null}]'
def test_dumping(self):
instance = HStoreModel(field={'a': 'b'})
data = serializers.serialize('json', [instance])
self.assertEqual(json.loads(data), json.loads(self.test_data))
def test_loading(self):
instance = list(serializers.deserialize('json', self.test_data))[0].object
self.assertEqual(instance.field, {'a': 'b'})
class TestValidation(TestCase):
def test_not_a_string(self):
field = HStoreField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean({'a': 1}, None)
self.assertEqual(cm.exception.code, 'not_a_string')
self.assertEqual(cm.exception.message % cm.exception.params, 'The value of "a" is not a string.')
class TestFormField(TestCase):
def test_valid(self):
field = forms.HStoreField()
value = field.clean('{"a": "b"}')
self.assertEqual(value, {'a': 'b'})
def test_invalid_json(self):
field = forms.HStoreField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('{"a": "b"')
self.assertEqual(cm.exception.messages[0], 'Could not load JSON data.')
self.assertEqual(cm.exception.code, 'invalid_json')
def test_not_string_values(self):
field = forms.HStoreField()
value = field.clean('{"a": 1}')
self.assertEqual(value, {'a': '1'})
def test_empty(self):
field = forms.HStoreField(required=False)
value = field.clean('')
self.assertEqual(value, {})
def test_model_field_formfield(self):
model_field = HStoreField()
form_field = model_field.formfield()
self.assertIsInstance(form_field, forms.HStoreField)
class TestValidator(TestCase):
def test_simple_valid(self):
validator = KeysValidator(keys=['a', 'b'])
validator({'a': 'foo', 'b': 'bar', 'c': 'baz'})
def test_missing_keys(self):
validator = KeysValidator(keys=['a', 'b'])
with self.assertRaises(exceptions.ValidationError) as cm:
validator({'a': 'foo', 'c': 'baz'})
self.assertEqual(cm.exception.messages[0], 'Some keys were missing: b')
self.assertEqual(cm.exception.code, 'missing_keys')
def test_strict_valid(self):
validator = KeysValidator(keys=['a', 'b'], strict=True)
validator({'a': 'foo', 'b': 'bar'})
def test_extra_keys(self):
validator = KeysValidator(keys=['a', 'b'], strict=True)
with self.assertRaises(exceptions.ValidationError) as cm:
validator({'a': 'foo', 'b': 'bar', 'c': 'baz'})
self.assertEqual(cm.exception.messages[0], 'Some unknown keys were provided: c')
self.assertEqual(cm.exception.code, 'extra_keys')
def test_custom_messages(self):
messages = {
'missing_keys': 'Foobar',
}
validator = KeysValidator(keys=['a', 'b'], strict=True, messages=messages)
with self.assertRaises(exceptions.ValidationError) as cm:
validator({'a': 'foo', 'c': 'baz'})
self.assertEqual(cm.exception.messages[0], 'Foobar')
self.assertEqual(cm.exception.code, 'missing_keys')
with self.assertRaises(exceptions.ValidationError) as cm:
validator({'a': 'foo', 'b': 'bar', 'c': 'baz'})
self.assertEqual(cm.exception.messages[0], 'Some unknown keys were provided: c')
self.assertEqual(cm.exception.code, 'extra_keys')
def test_deconstruct(self):
messages = {
'missing_keys': 'Foobar',
}
validator = KeysValidator(keys=['a', 'b'], strict=True, messages=messages)
path, args, kwargs = validator.deconstruct()
self.assertEqual(path, 'django.contrib.postgres.validators.KeysValidator')
self.assertEqual(args, ())
self.assertEqual(kwargs, {'keys': ['a', 'b'], 'strict': True, 'messages': messages})
|
bsd-3-clause
|
architecture-building-systems/CityEnergyAnalyst
|
cea/tests/__init__.py
|
2
|
1234
|
"""
Run the CEA scripts and unit tests as part of our CI efforts (cf. The Jenkins)
"""
import os
import shutil
import tempfile
import cea.config
import cea.inputlocator
import cea.workflows.workflow
__author__ = "Daren Thomas"
__copyright__ = "Copyright 2020, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Daren Thomas"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "cea@arch.ethz.ch"
__status__ = "Production"
def main(config):
workflow_yml = os.path.join(os.path.dirname(__file__), "workflow_{workflow}.yml".format(workflow=config.test.workflow))
default_config = cea.config.Configuration(cea.config.DEFAULT_CONFIG)
default_config.project = os.path.expandvars("${TEMP}/reference-case-open")
default_config.workflow.workflow = workflow_yml
default_config.workflow.resume = False
default_config.workflow.resume_file = tempfile.mktemp("resume.yml") # force resume file to be temporary
if os.path.exists(default_config.project):
# make sure we're working on a clean slate
shutil.rmtree(default_config.project)
cea.workflows.workflow.main(default_config)
if __name__ == '__main__':
main(cea.config.Configuration())
|
mit
|
jpush/jbox
|
Server/venv/lib/python3.5/site-packages/requests/packages/chardet/chardetect.py
|
1786
|
2504
|
#!/usr/bin/env python
"""
Script which takes one or more file paths and reports on their detected
encodings
Example::
% chardetect somefile someotherfile
somefile: windows-1252 with confidence 0.5
someotherfile: ascii with confidence 1.0
If no paths are provided, it takes its input from stdin.
"""
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import sys
from io import open
from chardet import __version__
from chardet.universaldetector import UniversalDetector
def description_of(lines, name='stdin'):
"""
Return a string describing the probable encoding of a file or
list of strings.
:param lines: The lines to get the encoding of.
:type lines: Iterable of bytes
:param name: Name of file or collection of lines
:type name: str
"""
u = UniversalDetector()
for line in lines:
u.feed(line)
u.close()
result = u.result
if result['encoding']:
return '{0}: {1} with confidence {2}'.format(name, result['encoding'],
result['confidence'])
else:
return '{0}: no result'.format(name)
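# Example (a sketch): for pure-ASCII input such as [b'hello world'],
# description_of() would typically report "ascii with confidence 1.0",
# matching the example in the module docstring above.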
def main(argv=None):
'''
Handles command line arguments and gets things started.
:param argv: List of arguments, as if specified on the command-line.
If None, ``sys.argv[1:]`` is used instead.
:type argv: list of str
'''
# Get command line arguments
parser = argparse.ArgumentParser(
description="Takes one or more file paths and reports their detected \
encodings",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
conflict_handler='resolve')
parser.add_argument('input',
help='File whose encoding we would like to determine.',
type=argparse.FileType('rb'), nargs='*',
default=[sys.stdin])
parser.add_argument('--version', action='version',
version='%(prog)s {0}'.format(__version__))
args = parser.parse_args(argv)
for f in args.input:
if f.isatty():
print("You are running chardetect interactively. Press " +
"CTRL-D twice at the start of a blank line to signal the " +
"end of your input. If you want help, run chardetect " +
"--help\n", file=sys.stderr)
print(description_of(f, f.name))
if __name__ == '__main__':
main()
|
mit
|
ChienliMa/Blends
|
blends.py
|
1
|
11784
|
"""
Implement different image blend modes.
Author: Chienli Ma
Date: 2015.01.08
Incentive:
It's hard to find an image blending tool in Python.
The few tools that exist either support only a handful of modes or are outdated and hard to install.
Therefore I just implement all the methods available on the Internet.
Recommended usage:
import blends
result1 = blends.blend( top, base, 'mode' )
result2 = blends.blend( top, base, ['mode_1', ... ,'mode_n'])
Not recommended usage:
import blends
result1 = blends.mode_name( top, base )
Reference:
MSDN:
http://msdn.microsoft.com/en-us/library/hh706313.aspx
WikiPeida:
http://en.wikipedia.org/wiki/Blend_modes
http://en.wikipedia.org/wiki/HSL_and_HSV
Todo:
Replace lambda with np.where and see if efficiency improves
"""
import numpy as np
import pdb
###########
# lambdas #
###########
darker_color_func = lambda x,y,m,n: x if m<n else y
lighter_color_func = lambda x,y,m,n: x if m>n else y
dissolve_func = lambda x, y, m, n : x/m if m>0 and m>n else y
overlay_func = lambda x,y: 2*x*y if x <0.5 else 1- 2*(1-x)*(1-y)
hard_light_func = lambda x,y: 2*x*y if x<0.5 else 1 - 2*(1-x)*(1-y)
vivid_light_func = lambda x,y: (2*x+y-1)/(2*x+10e-5) if x<0.5 else y/(2*(1-x+10e-5))
pin_light_func = lambda x,y: max(x,2*y-1) if x>0.5 else min(x,2*y)
color_dodge_func = lambda x,y: np.clip( x/(1-y) if y<1 else 1, 0 , 1 )
color_burn_func = lambda x,y: 1-(1-x)/y if y>0 else 0
def blend( top, base, mode, top_alpha = None, base_alpha = None, mask = None ):
"""
Blend top to base using specific mode
Parameters:
top - 3 dimension numpy.ndarray with 3 channels are [ width, height, \
channel ]
base - same as top, has same shape as top
mode - string of mode name
top_alpha - 3 dimension numpy.ndarray, the last channel represents \
the alpha channel and should have same value
base_alpha - same as top_alpha
mask - 3 dimension numpy.ndarray, where mask != 0 will be blended
Returns:
3 dimension numpy.ndarray with same shape as top and base
"""
assert top.dtype == base.dtype
assert top.shape == base.shape
w, h, c = top.shape
    mode = globals().get( mode )
    # `mask or ...` is ambiguous for numpy arrays; test for None explicitly
    mask = ( np.ones( top.shape ) if mask is None else mask ).flatten()
    if top.dtype == np.float:  # float input, assumed already in [0, 1]
        # flatten arrays for the 'map' operation
        top = top.flatten()
        base = base.flatten()  # bug fix: was `result = result.flatten()`
        if top_alpha is not None:
            top_alpha = top_alpha.flatten()
        if base_alpha is not None:
            base_alpha = base_alpha.flatten()
result = copy( mode(top, base, top_alpha, base_alpha), base, mask)
else:
        # flatten arrays for the 'map' operation
        top = top.flatten() / 255.0
        base = base.flatten() / 255.0
        if top_alpha is not None:
            top_alpha = top_alpha.flatten()/255.0
        if base_alpha is not None:
            base_alpha = base_alpha.flatten()/255.0
result = copy( mode(top, base, top_alpha, base_alpha), base, mask)
result = (result*255).astype('uint8')
return result.reshape( [w, h, c] )
def random_blend( top, base, top_alpha = None , base_alpha = None, modes = None ):
"""
Blend top to base using random mode. Parameters are similar to blend.
Parameters:
modes - List of mode names,Optional. This function will randomly pick \
a mode in modes to blend images. If it is not specified, this \
method will pick a mode in all available modes.
"""
    if modes is None:
        modes = all_modes()
    if isinstance( modes, str ):
        modes = modes.split()
    mode = modes[np.random.randint(0, len(modes))]
    # bug fix: forward the caller's alpha channels instead of hardcoding None
    return blend( top, base, mode, top_alpha = top_alpha, base_alpha = base_alpha )
########################
# Single blend methods #
########################
def normal( top, base, top_alpha = None, base_alpha = None ):
"""
    Normal mode simply replaces the base layer with the top layer
    """
    return top  # bug fix: returning `base` made this mode a no-op
def darken( top, base, top_alpha = None, base_alpha = None ):
"""
    Darken: per-channel minimum of the two layers, weighted by alpha.
"""
    if top_alpha is None:
        top_alpha = 1
    if base_alpha is None:
        base_alpha = 1
return np.minimum( (1-top_alpha)*base+top, (1-base_alpha)*top+base )
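# Worked example: with both alphas at 1 the formula reduces to the
# per-channel minimum, e.g. darken(0.3, 0.7) -> min(0.3, 0.7) = 0.3.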
def lighten( top, base, top_alpha = None, base_alpha = None ):
"""
    Lighten: per-channel maximum of the two layers, weighted by alpha.
    P.S.:
        In the MSDN document, darken and lighten share the same formula;
        here maximum replaces the minimum used in darken.
"""
    if top_alpha is None:
        top_alpha = 1
    if base_alpha is None:
        base_alpha = 1
tmp = np.maximum( (1-top_alpha)*base+top, (1-base_alpha)*top+base )
return np.clip( tmp, 0, 1 )
def darker_color( top, base, top_alpha = None, base_alpha = None ):
    """
    Darker color mode simply takes the pixel with the lower luminance
"""
top_lum = lum( top )
base_lum = lum( base )
return np.array( map(darker_color_func, base, top, base_lum, top_lum) )
def lighter_color( top, base, top_alpha = None, base_alpha = None ):
    """
    Lighter color mode simply takes the pixel with the higher luminance
"""
top_lum = lum( top )
base_lum = lum( base )
return np.array( map(lighter_color_func, base, top, base_lum, top_lum) )
def dissolve( top, base, top_alpha = None, base_alpha = None):
"""
The dissolve mode takes random pixels from both layers. With high \
opacity, most pixels are taken from the top layer. With low opacity \
most pixels are taken from the bottom layer.
"""
    if top_alpha is None:
        top_alpha = np.ones(top.size)
rand = np.random.uniform( 0, 1, top.size )
return np.array( map( dissolve_func, top, base, top_alpha,rand ) )
def add( top, base, top_alpha = None, base_alpha = None ):
"""
Add mode simply sums two layers and clips the result to [0,1]
"""
    if top_alpha is None:
        top_alpha = 1
    if base_alpha is None:
        base_alpha = 1
return np.clip( top * top_alpha + base * base_alpha, 0, 1 )
def substract( top, base, top_alpha = None, base_alpha = None ):
"""
    Subtract mode simply subtracts the top layer from the base layer and clips the result to [0,1]
"""
return np.clip( base - top , 0, 1 )
def multiply( top, base, top_alpha = None, base_alpha = None ):
"""
    Multiply mode simply multiplies the two layers, compensating for alpha
"""
    if top_alpha is None:
        top_alpha = 1
    if base_alpha is None:
        base_alpha = 1
return top*base + top*(1-base_alpha) + base*(1-top_alpha)
def divide( top, base, top_alpha = None, base_alpha = None ):
"""
    Divide mode simply divides the base layer by the top layer and clips
    the result to [0,1]. Dividing by black yields white.
"""
return np.clip( base / (top+10e-7) , 0, 1 )
def screen( top, base, top_alpha = None, base_alpha = None ):
"""
Screen blend mode inverts both layers, multiplies them, and then inverts \
that result.
"""
return 1 - ( 1 - top ) * ( 1 - base )
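# Worked example: screen(0.5, 0.5) = 1 - 0.5 * 0.5 = 0.75, so blending
# mid-gray over mid-gray brightens; screening with white always gives white.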
def overlay( top, base, top_alpha = None, base_alpha = None ):
"""
Overlay combines Multiply and Screen blend modes.[3] The parts of the top \
layer where base layer is light become lighter, the parts where the base \
layer is dark become darker.
"""
return np.array( map( overlay_func, base, top ))
def hard_light( top, base, top_alpha = None, base_alpha = None ):
"""
    Hard light: like overlay with the layers swapped; the top layer
    selects between multiply and screen.
"""
return np.array( map( hard_light_func, top, base ) )
def soft_light( top, base, top_alpha = None, base_alpha = None ):
"""
    Soft light mode: a gentler version of hard light.
    P.S.:
        There are many different formulas for implementing soft light mode.
        Here I use Pegtop's formula; for more details see soft light in:
http://en.wikipedia.org/wiki/Blend_modes#Soft_Light
"""
return ( 1 - 2*top )*base**2 + 2*base*top
def vivid_light( top, base, top_alpha = None, base_alpha = None ):
"""
Vivid Light:
This blend mode combines Color Dodge and Color Burn \
(rescaled so that neutral colors become middle gray). Dodge applies \
when values in the top layer are lighter than middle gray, and burn \
to darker values.
P.S.:
        For a simpler implementation, a small epsilon keeps the denominator \
        away from 0 when top approaches 0 or 1, so there may be a small \
        difference between the actual and the expected result.
"""
return np.clip( map( vivid_light_func, top, base ), 0 ,1 )
def linear_light( top, base, top_alpha = None, base_alpha = None ):
"""
Linear Light: this blend mode combines Linear Dodge and Linear Burn \
(rescaled so that neutral colors become middle gray). Dodge applies to \
values of top layer lighter than middle gray, and burn to darker values. \
The calculation simplifies to the sum of bottom layer and twice the top \
layer, subtract 1.
"""
return np.clip( base + 2 * top - 1, 0, 1)
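# Worked example: linear_light(top=0.6, base=0.4) = 0.4 + 2 * 0.6 - 1 = 0.6;
# a neutral top of 0.5 leaves the base unchanged.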
def pin_light( top, base, top_alpha = None, base_alpha = None ):
"""
    Pin light mode combines Darken and Lighten; see the blend modes
    reference in the module docstring.
"""
return np.clip( map( pin_light_func, base, top ), 0 ,1 )
def color_dodge( top, base, top_alpha = None, base_alpha = None ):
"""
Color Dodge blend mode divides the bottom layer by the inverted top layer.
"""
return np.array( map( color_dodge_func, base, top ) )
def linear_dodge( top, base, top_alpha = None, base_alpha = None ):
"""
Linear Dodge blend mode simply sums the values in the two layers. \
Blending with white gives white.
"""
return add( top, base )
def color_burn( top, base, top_alpha = None, base_alpha = None ):
"""
Color Burn mode divides the inverted bottom layer by the top layer,
and then inverts the result.
"""
return np.clip( np.array( map( color_burn_func, base, top ) ), 0, 1 )
def linear_burn( top, base, top_alpha = None, base_alpha = None ):
"""
Linear Burn mode sums the value in the two layers and subtracts 1
"""
return np.clip( top+base-1, 0, 1 )
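# Worked example: linear_burn(0.3, 0.5) = clip(0.3 + 0.5 - 1) = 0; blending
# with white (1.0) leaves the other layer unchanged.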
def dark_only( top, base, top_alpha = None, base_alpha = None ):
"""
    Pixel values are given by (min(r1,r2), min(g1,g2), min(b1,b2))
"""
return np.minimum( top, base)
def light_only( top, base, top_alpha = None, base_alpha = None ):
"""
    Pixel values are given by (max(r1,r2), max(g1,g2), max(b1,b2))
"""
return np.maximum( top, base )
# Auxiliary functions
def lum( src ):
"""
    Luminance here is computed as the mean of the R, G and B channels
    (note: not the HSL lightness 0.5*min(R,G,B) + 0.5*max(R,G,B)).
    input: flattened image with 3 channels
    output: luminance map with 3 channels. Channels are the same.
Reference:
http://en.wikipedia.org/wiki/HSL_and_HSV#Lightness
"""
lum = np.array( [ src[0::3],
src[1::3],
src[2::3] ] )
return lum.mean(axis=0).repeat(3)
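# Worked example: for a single RGB pixel flattened as [0.2, 0.4, 0.6],
# lum() returns [0.4, 0.4, 0.4] -- the channel mean repeated per channel.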
def copy( src, dst, mask ):
"""
Behave like cv.Copy. src, dst and mask should have exact same size.
"""
assert src.shape == dst.shape
assert src.shape == mask.shape
result = dst.copy()
result[ np.where( mask!=0 )] = src[ np.where( mask!=0 )]
return result
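# Worked example (with numpy arrays): copy(src=[1, 2, 3], dst=[9, 9, 9],
# mask=[0, 1, 0]) yields [9, 2, 9] -- src is taken only where mask is nonzero.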
def help():
print """
    Recommended usage:
        blends.blend( top, base, mode, top_alpha, base_alpha, mask )
    Call all_modes() to view all available blend modes.
"""
def all_modes():
return """
normal
add substract multiply divide
dissolve overlay screen pin_light
linear_light soft_light vivid_light hard_light
linear_dodge color_dodge linear_burn color_burn
light_only dark_only lighten darken
lighter_color darker_color
"""
|
gpl-2.0
|
pabigot/pyxb
|
tests/trac/trac-0202/check.py
|
2
|
1553
|
# -*- coding: utf-8 -*-
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
import unittest
import pyxb
import sample
from pyxb.namespace.builtin import XMLSchema_instance as xsi
class TestTrac0202 (unittest.TestCase):
def tearDown (self):
pyxb.utils.domutils.BindingDOMSupport.SetDefaultNamespace(sample.Namespace)
Expectedt = """<?xml version="1.0" encoding="utf-8"?>
<samplerootelement xmlns="http://sample" xmlns:ns1="http://sample" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="c:\sample.xsd">
\t<sampleelement>
\t\t<ValueAmount ns1:currencyID="abc">100.0</ValueAmount>
\t</sampleelement>
</samplerootelement>
"""
Expectedd = Expectedt.encode('utf-8')
def testIssue (self):
elm = sample.sampleelementType()
elm.ValueAmount = '100'
elm.ValueAmount.currencyID = 'abc'
sam = sample.samplerootelement()
sam.sampleelement.append(elm)
bds = pyxb.utils.domutils.BindingDOMSupport()
bds.setDefaultNamespace(sample.Namespace)
bds.declareNamespace(xsi)
samdom = sam.toDOM(bds)
bds.addAttribute(samdom.documentElement, xsi.createExpandedName('schemaLocation'), "c:\sample.xsd")
# xsi is probably not referenced elsewhere, so add the XMLNS declaration too
bds.addXMLNSDeclaration(samdom.documentElement, xsi)
xmld = samdom.toprettyxml(encoding = "utf-8")
self.assertEqual(self.Expectedd, xmld)
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
canassa/django-rest-framework
|
tests/test_response.py
|
68
|
10811
|
from __future__ import unicode_literals
from django.conf.urls import include, url
from django.test import TestCase
from django.utils import six
from rest_framework import generics, routers, serializers, status, viewsets
from rest_framework.renderers import (
BaseRenderer, BrowsableAPIRenderer, JSONRenderer
)
from rest_framework.response import Response
from rest_framework.settings import api_settings
from rest_framework.views import APIView
from tests.models import BasicModel
# Serializer used to test BasicModel
class BasicModelSerializer(serializers.ModelSerializer):
class Meta:
model = BasicModel
class MockPickleRenderer(BaseRenderer):
media_type = 'application/pickle'
class MockJsonRenderer(BaseRenderer):
media_type = 'application/json'
class MockTextMediaRenderer(BaseRenderer):
media_type = 'text/html'
DUMMYSTATUS = status.HTTP_200_OK
DUMMYCONTENT = 'dummycontent'
def RENDERER_A_SERIALIZER(x):
return ('Renderer A: %s' % x).encode('ascii')
def RENDERER_B_SERIALIZER(x):
return ('Renderer B: %s' % x).encode('ascii')
class RendererA(BaseRenderer):
media_type = 'mock/renderera'
format = "formata"
def render(self, data, media_type=None, renderer_context=None):
return RENDERER_A_SERIALIZER(data)
class RendererB(BaseRenderer):
media_type = 'mock/rendererb'
format = "formatb"
def render(self, data, media_type=None, renderer_context=None):
return RENDERER_B_SERIALIZER(data)
class RendererC(RendererB):
media_type = 'mock/rendererc'
format = 'formatc'
charset = "rendererc"
class MockView(APIView):
renderer_classes = (RendererA, RendererB, RendererC)
def get(self, request, **kwargs):
return Response(DUMMYCONTENT, status=DUMMYSTATUS)
class MockViewSettingContentType(APIView):
renderer_classes = (RendererA, RendererB, RendererC)
def get(self, request, **kwargs):
return Response(DUMMYCONTENT, status=DUMMYSTATUS, content_type='setbyview')
class HTMLView(APIView):
renderer_classes = (BrowsableAPIRenderer, )
def get(self, request, **kwargs):
return Response('text')
class HTMLView1(APIView):
renderer_classes = (BrowsableAPIRenderer, JSONRenderer)
def get(self, request, **kwargs):
return Response('text')
class HTMLNewModelViewSet(viewsets.ModelViewSet):
serializer_class = BasicModelSerializer
queryset = BasicModel.objects.all()
class HTMLNewModelView(generics.ListCreateAPIView):
renderer_classes = (BrowsableAPIRenderer,)
permission_classes = []
serializer_class = BasicModelSerializer
queryset = BasicModel.objects.all()
new_model_viewset_router = routers.DefaultRouter()
new_model_viewset_router.register(r'', HTMLNewModelViewSet)
urlpatterns = [
url(r'^setbyview$', MockViewSettingContentType.as_view(renderer_classes=[RendererA, RendererB, RendererC])),
url(r'^.*\.(?P<format>.+)$', MockView.as_view(renderer_classes=[RendererA, RendererB, RendererC])),
url(r'^$', MockView.as_view(renderer_classes=[RendererA, RendererB, RendererC])),
url(r'^html$', HTMLView.as_view()),
url(r'^html1$', HTMLView1.as_view()),
url(r'^html_new_model$', HTMLNewModelView.as_view()),
url(r'^html_new_model_viewset', include(new_model_viewset_router.urls)),
url(r'^restframework', include('rest_framework.urls', namespace='rest_framework'))
]
# TODO: Clean tests below - remove duplicates with above, better unit testing, ...
class RendererIntegrationTests(TestCase):
"""
End-to-end testing of renderers using an ResponseMixin on a generic view.
"""
urls = 'tests.test_response'
def test_default_renderer_serializes_content(self):
"""If the Accept header is not set the default renderer should serialize the response."""
resp = self.client.get('/')
self.assertEqual(resp['Content-Type'], RendererA.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_A_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
def test_head_method_serializes_no_content(self):
"""No response must be included in HEAD requests."""
resp = self.client.head('/')
self.assertEqual(resp.status_code, DUMMYSTATUS)
self.assertEqual(resp['Content-Type'], RendererA.media_type + '; charset=utf-8')
self.assertEqual(resp.content, six.b(''))
def test_default_renderer_serializes_content_on_accept_any(self):
"""If the Accept header is set to */* the default renderer should serialize the response."""
resp = self.client.get('/', HTTP_ACCEPT='*/*')
self.assertEqual(resp['Content-Type'], RendererA.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_A_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
def test_specified_renderer_serializes_content_default_case(self):
"""If the Accept header is set the specified renderer should serialize the response.
(In this case we check that works for the default renderer)"""
resp = self.client.get('/', HTTP_ACCEPT=RendererA.media_type)
self.assertEqual(resp['Content-Type'], RendererA.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_A_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
def test_specified_renderer_serializes_content_non_default_case(self):
"""If the Accept header is set the specified renderer should serialize the response.
(In this case we check that works for a non-default renderer)"""
resp = self.client.get('/', HTTP_ACCEPT=RendererB.media_type)
self.assertEqual(resp['Content-Type'], RendererB.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
def test_specified_renderer_serializes_content_on_accept_query(self):
"""The '_accept' query string should behave in the same way as the Accept header."""
param = '?%s=%s' % (
api_settings.URL_ACCEPT_OVERRIDE,
RendererB.media_type
)
resp = self.client.get('/' + param)
self.assertEqual(resp['Content-Type'], RendererB.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
def test_specified_renderer_serializes_content_on_format_query(self):
"""If a 'format' query is specified, the renderer with the matching
format attribute should serialize the response."""
resp = self.client.get('/?format=%s' % RendererB.format)
self.assertEqual(resp['Content-Type'], RendererB.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
def test_specified_renderer_serializes_content_on_format_kwargs(self):
"""If a 'format' keyword arg is specified, the renderer with the matching
format attribute should serialize the response."""
resp = self.client.get('/something.formatb')
self.assertEqual(resp['Content-Type'], RendererB.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
def test_specified_renderer_is_used_on_format_query_with_matching_accept(self):
"""If both a 'format' query and a matching Accept header specified,
the renderer with the matching format attribute should serialize the response."""
resp = self.client.get('/?format=%s' % RendererB.format,
HTTP_ACCEPT=RendererB.media_type)
self.assertEqual(resp['Content-Type'], RendererB.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
class Issue122Tests(TestCase):
"""
Tests that covers #122.
"""
urls = 'tests.test_response'
def test_only_html_renderer(self):
"""
Test if no infinite recursion occurs.
"""
self.client.get('/html')
def test_html_renderer_is_first(self):
"""
Test if no infinite recursion occurs.
"""
self.client.get('/html1')
class Issue467Tests(TestCase):
"""
Tests for #467
"""
urls = 'tests.test_response'
def test_form_has_label_and_help_text(self):
resp = self.client.get('/html_new_model')
self.assertEqual(resp['Content-Type'], 'text/html; charset=utf-8')
# self.assertContains(resp, 'Text comes here')
# self.assertContains(resp, 'Text description.')
class Issue807Tests(TestCase):
"""
Covers #807
"""
urls = 'tests.test_response'
def test_does_not_append_charset_by_default(self):
"""
Renderers don't include a charset unless set explicitly.
"""
headers = {"HTTP_ACCEPT": RendererA.media_type}
resp = self.client.get('/', **headers)
expected = "{0}; charset={1}".format(RendererA.media_type, 'utf-8')
self.assertEqual(expected, resp['Content-Type'])
def test_if_there_is_charset_specified_on_renderer_it_gets_appended(self):
"""
If renderer class has charset attribute declared, it gets appended
to Response's Content-Type
"""
headers = {"HTTP_ACCEPT": RendererC.media_type}
resp = self.client.get('/', **headers)
expected = "{0}; charset={1}".format(RendererC.media_type, RendererC.charset)
self.assertEqual(expected, resp['Content-Type'])
def test_content_type_set_explicitly_on_response(self):
"""
The content type may be set explicitly on the response.
"""
headers = {"HTTP_ACCEPT": RendererC.media_type}
resp = self.client.get('/setbyview', **headers)
self.assertEqual('setbyview', resp['Content-Type'])
def test_viewset_label_help_text(self):
param = '?%s=%s' % (
api_settings.URL_ACCEPT_OVERRIDE,
'text/html'
)
resp = self.client.get('/html_new_model_viewset/' + param)
self.assertEqual(resp['Content-Type'], 'text/html; charset=utf-8')
# self.assertContains(resp, 'Text comes here')
# self.assertContains(resp, 'Text description.')
def test_form_has_label_and_help_text(self):
resp = self.client.get('/html_new_model')
self.assertEqual(resp['Content-Type'], 'text/html; charset=utf-8')
# self.assertContains(resp, 'Text comes here')
# self.assertContains(resp, 'Text description.')
|
bsd-2-clause
|
cuongnv23/ansible
|
test/units/plugins/connection/test_ssh.py
|
25
|
29424
|
# -*- coding: utf-8 -*-
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from io import StringIO
import pytest
from ansible import constants as C
from ansible.compat.selectors import SelectorKey, EVENT_READ
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock, PropertyMock
from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
from ansible.module_utils.six.moves import shlex_quote
from ansible.module_utils._text import to_bytes
from ansible.playbook.play_context import PlayContext
from ansible.plugins.connection import ssh
class TestConnectionBaseClass(unittest.TestCase):
def test_plugins_connection_ssh_basic(self):
pc = PlayContext()
new_stdin = StringIO()
conn = ssh.Connection(pc, new_stdin)
# connect just returns self, so assert that
res = conn._connect()
self.assertEqual(conn, res)
ssh.SSHPASS_AVAILABLE = False
self.assertFalse(conn._sshpass_available())
ssh.SSHPASS_AVAILABLE = True
self.assertTrue(conn._sshpass_available())
with patch('subprocess.Popen') as p:
ssh.SSHPASS_AVAILABLE = None
p.return_value = MagicMock()
self.assertTrue(conn._sshpass_available())
ssh.SSHPASS_AVAILABLE = None
p.return_value = None
p.side_effect = OSError()
self.assertFalse(conn._sshpass_available())
conn.close()
self.assertFalse(conn._connected)
def test_plugins_connection_ssh__build_command(self):
pc = PlayContext()
new_stdin = StringIO()
conn = ssh.Connection(pc, new_stdin)
conn._build_command('ssh')
def test_plugins_connection_ssh_exec_command(self):
pc = PlayContext()
new_stdin = StringIO()
conn = ssh.Connection(pc, new_stdin)
conn._build_command = MagicMock()
conn._build_command.return_value = 'ssh something something'
conn._run = MagicMock()
conn._run.return_value = (0, 'stdout', 'stderr')
res, stdout, stderr = conn.exec_command('ssh')
res, stdout, stderr = conn.exec_command('ssh', 'this is some data')
def test_plugins_connection_ssh__examine_output(self):
pc = PlayContext()
new_stdin = StringIO()
conn = ssh.Connection(pc, new_stdin)
conn.check_password_prompt = MagicMock()
conn.check_become_success = MagicMock()
conn.check_incorrect_password = MagicMock()
conn.check_missing_password = MagicMock()
def _check_password_prompt(line):
if b'foo' in line:
return True
return False
def _check_become_success(line):
if b'BECOME-SUCCESS-abcdefghijklmnopqrstuvxyz' in line:
return True
return False
def _check_incorrect_password(line):
if b'incorrect password' in line:
return True
return False
def _check_missing_password(line):
if b'bad password' in line:
return True
return False
conn.check_password_prompt.side_effect = _check_password_prompt
conn.check_become_success.side_effect = _check_become_success
conn.check_incorrect_password.side_effect = _check_incorrect_password
conn.check_missing_password.side_effect = _check_missing_password
# test examining output for prompt
conn._flags = dict(
become_prompt=False,
become_success=False,
become_error=False,
become_nopasswd_error=False,
)
pc.prompt = True
output, unprocessed = conn._examine_output(u'source', u'state', b'line 1\nline 2\nfoo\nline 3\nthis should be the remainder', False)
self.assertEqual(output, b'line 1\nline 2\nline 3\n')
self.assertEqual(unprocessed, b'this should be the remainder')
self.assertTrue(conn._flags['become_prompt'])
self.assertFalse(conn._flags['become_success'])
self.assertFalse(conn._flags['become_error'])
self.assertFalse(conn._flags['become_nopasswd_error'])
# test examining output for become prompt
conn._flags = dict(
become_prompt=False,
become_success=False,
become_error=False,
become_nopasswd_error=False,
)
pc.prompt = False
pc.success_key = u'BECOME-SUCCESS-abcdefghijklmnopqrstuvxyz'
output, unprocessed = conn._examine_output(u'source', u'state', b'line 1\nline 2\nBECOME-SUCCESS-abcdefghijklmnopqrstuvxyz\nline 3\n', False)
self.assertEqual(output, b'line 1\nline 2\nline 3\n')
self.assertEqual(unprocessed, b'')
self.assertFalse(conn._flags['become_prompt'])
self.assertTrue(conn._flags['become_success'])
self.assertFalse(conn._flags['become_error'])
self.assertFalse(conn._flags['become_nopasswd_error'])
# test examining output for become failure
conn._flags = dict(
become_prompt=False,
become_success=False,
become_error=False,
become_nopasswd_error=False,
)
pc.prompt = False
pc.success_key = None
output, unprocessed = conn._examine_output(u'source', u'state', b'line 1\nline 2\nincorrect password\n', True)
self.assertEqual(output, b'line 1\nline 2\nincorrect password\n')
self.assertEqual(unprocessed, b'')
self.assertFalse(conn._flags['become_prompt'])
self.assertFalse(conn._flags['become_success'])
self.assertTrue(conn._flags['become_error'])
self.assertFalse(conn._flags['become_nopasswd_error'])
# test examining output for missing password
conn._flags = dict(
become_prompt=False,
become_success=False,
become_error=False,
become_nopasswd_error=False,
)
pc.prompt = False
pc.success_key = None
output, unprocessed = conn._examine_output(u'source', u'state', b'line 1\nbad password\n', True)
self.assertEqual(output, b'line 1\nbad password\n')
self.assertEqual(unprocessed, b'')
self.assertFalse(conn._flags['become_prompt'])
self.assertFalse(conn._flags['become_success'])
self.assertFalse(conn._flags['become_error'])
self.assertTrue(conn._flags['become_nopasswd_error'])
@patch('time.sleep')
@patch('os.path.exists')
def test_plugins_connection_ssh_put_file(self, mock_ospe, mock_sleep):
pc = PlayContext()
new_stdin = StringIO()
conn = ssh.Connection(pc, new_stdin)
conn._build_command = MagicMock()
conn._bare_run = MagicMock()
mock_ospe.return_value = True
conn._build_command.return_value = 'some command to run'
conn._bare_run.return_value = (0, '', '')
conn.host = "some_host"
C.ANSIBLE_SSH_RETRIES = 9
# Test with C.DEFAULT_SCP_IF_SSH set to smart
# Test when SFTP works
C.DEFAULT_SCP_IF_SSH = 'smart'
expected_in_data = b' '.join((b'put', to_bytes(shlex_quote('/path/to/in/file')), to_bytes(shlex_quote('/path/to/dest/file')))) + b'\n'
conn.put_file('/path/to/in/file', '/path/to/dest/file')
conn._bare_run.assert_called_with('some command to run', expected_in_data, checkrc=False)
# Test when SFTP doesn't work but SCP does
conn._bare_run.side_effect = [(1, 'stdout', 'some errors'), (0, '', '')]
conn.put_file('/path/to/in/file', '/path/to/dest/file')
conn._bare_run.assert_called_with('some command to run', None, checkrc=False)
conn._bare_run.side_effect = None
# test with C.DEFAULT_SCP_IF_SSH enabled
C.DEFAULT_SCP_IF_SSH = True
conn.put_file('/path/to/in/file', '/path/to/dest/file')
conn._bare_run.assert_called_with('some command to run', None, checkrc=False)
conn.put_file(u'/path/to/in/file/with/unicode-fö〩', u'/path/to/dest/file/with/unicode-fö〩')
conn._bare_run.assert_called_with('some command to run', None, checkrc=False)
# test with C.DEFAULT_SCP_IF_SSH disabled
C.DEFAULT_SCP_IF_SSH = False
expected_in_data = b' '.join((b'put', to_bytes(shlex_quote('/path/to/in/file')), to_bytes(shlex_quote('/path/to/dest/file')))) + b'\n'
conn.put_file('/path/to/in/file', '/path/to/dest/file')
conn._bare_run.assert_called_with('some command to run', expected_in_data, checkrc=False)
expected_in_data = b' '.join((b'put',
to_bytes(shlex_quote('/path/to/in/file/with/unicode-fö〩')),
to_bytes(shlex_quote('/path/to/dest/file/with/unicode-fö〩')))) + b'\n'
conn.put_file(u'/path/to/in/file/with/unicode-fö〩', u'/path/to/dest/file/with/unicode-fö〩')
conn._bare_run.assert_called_with('some command to run', expected_in_data, checkrc=False)
# test that a non-zero rc raises an error
conn._bare_run.return_value = (1, 'stdout', 'some errors')
self.assertRaises(AnsibleError, conn.put_file, '/path/to/bad/file', '/remote/path/to/file')
# test that a not-found path raises an error
mock_ospe.return_value = False
conn._bare_run.return_value = (0, 'stdout', '')
self.assertRaises(AnsibleFileNotFound, conn.put_file, '/path/to/bad/file', '/remote/path/to/file')
@patch('time.sleep')
def test_plugins_connection_ssh_fetch_file(self, mock_sleep):
pc = PlayContext()
new_stdin = StringIO()
conn = ssh.Connection(pc, new_stdin)
conn._build_command = MagicMock()
conn._bare_run = MagicMock()
conn._build_command.return_value = 'some command to run'
conn._bare_run.return_value = (0, '', '')
conn.host = "some_host"
C.ANSIBLE_SSH_RETRIES = 9
# Test with C.DEFAULT_SCP_IF_SSH set to smart
# Test when SFTP works
C.DEFAULT_SCP_IF_SSH = 'smart'
expected_in_data = b' '.join((b'get', to_bytes(shlex_quote('/path/to/in/file')), to_bytes(shlex_quote('/path/to/dest/file')))) + b'\n'
conn.fetch_file('/path/to/in/file', '/path/to/dest/file')
conn._bare_run.assert_called_with('some command to run', expected_in_data, checkrc=False)
# Test when SFTP doesn't work but SCP does
conn._bare_run.side_effect = [(1, 'stdout', 'some errors'), (0, '', '')]
conn.fetch_file('/path/to/in/file', '/path/to/dest/file')
conn._bare_run.assert_called_with('some command to run', None, checkrc=False)
conn._bare_run.side_effect = None
# test with C.DEFAULT_SCP_IF_SSH enabled
C.DEFAULT_SCP_IF_SSH = True
conn.fetch_file('/path/to/in/file', '/path/to/dest/file')
conn._bare_run.assert_called_with('some command to run', None, checkrc=False)
conn.fetch_file(u'/path/to/in/file/with/unicode-fö〩', u'/path/to/dest/file/with/unicode-fö〩')
conn._bare_run.assert_called_with('some command to run', None, checkrc=False)
# test with C.DEFAULT_SCP_IF_SSH disabled
C.DEFAULT_SCP_IF_SSH = False
expected_in_data = b' '.join((b'get', to_bytes(shlex_quote('/path/to/in/file')), to_bytes(shlex_quote('/path/to/dest/file')))) + b'\n'
conn.fetch_file('/path/to/in/file', '/path/to/dest/file')
conn._bare_run.assert_called_with('some command to run', expected_in_data, checkrc=False)
expected_in_data = b' '.join((b'get',
to_bytes(shlex_quote('/path/to/in/file/with/unicode-fö〩')),
to_bytes(shlex_quote('/path/to/dest/file/with/unicode-fö〩')))) + b'\n'
conn.fetch_file(u'/path/to/in/file/with/unicode-fö〩', u'/path/to/dest/file/with/unicode-fö〩')
conn._bare_run.assert_called_with('some command to run', expected_in_data, checkrc=False)
# test that a non-zero rc raises an error
conn._bare_run.return_value = (1, 'stdout', 'some errors')
self.assertRaises(AnsibleError, conn.fetch_file, '/path/to/bad/file', '/remote/path/to/file')
class MockSelector(object):
def __init__(self):
self.files_watched = 0
self.register = MagicMock(side_effect=self._register)
self.unregister = MagicMock(side_effect=self._unregister)
self.close = MagicMock()
self.get_map = MagicMock(side_effect=self._get_map)
self.select = MagicMock()
def _register(self, *args, **kwargs):
self.files_watched += 1
def _unregister(self, *args, **kwargs):
self.files_watched -= 1
def _get_map(self, *args, **kwargs):
return self.files_watched
@pytest.fixture
def mock_run_env(request, mocker):
pc = PlayContext()
new_stdin = StringIO()
conn = ssh.Connection(pc, new_stdin)
conn._send_initial_data = MagicMock()
conn._examine_output = MagicMock()
conn._terminate_process = MagicMock()
conn.sshpass_pipe = [MagicMock(), MagicMock()]
request.cls.pc = pc
request.cls.conn = conn
mock_popen_res = MagicMock()
mock_popen_res.poll = MagicMock()
mock_popen_res.wait = MagicMock()
mock_popen_res.stdin = MagicMock()
mock_popen_res.stdin.fileno.return_value = 1000
mock_popen_res.stdout = MagicMock()
mock_popen_res.stdout.fileno.return_value = 1001
mock_popen_res.stderr = MagicMock()
mock_popen_res.stderr.fileno.return_value = 1002
mock_popen_res.returncode = 0
request.cls.mock_popen_res = mock_popen_res
mock_popen = mocker.patch('subprocess.Popen', return_value=mock_popen_res)
request.cls.mock_popen = mock_popen
request.cls.mock_selector = MockSelector()
mocker.patch('ansible.compat.selectors.DefaultSelector', lambda: request.cls.mock_selector)
request.cls.mock_openpty = mocker.patch('pty.openpty')
mocker.patch('fcntl.fcntl')
mocker.patch('os.write')
mocker.patch('os.close')
@pytest.mark.usefixtures('mock_run_env')
class TestSSHConnectionRun(object):
# FIXME:
# These tests are little more than a smoketest. Need to enhance them
# a bit to check that they're calling the relevant functions and making
# complete coverage of the code paths
def test_no_escalation(self):
self.mock_popen_res.stdout.read.side_effect = [b"my_stdout\n", b"second_line"]
self.mock_popen_res.stderr.read.side_effect = [b"my_stderr"]
self.mock_selector.select.side_effect = [
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[]]
self.mock_selector.get_map.side_effect = lambda: True
return_code, b_stdout, b_stderr = self.conn._run("ssh", "this is input data")
assert return_code == 0
assert b_stdout == b'my_stdout\nsecond_line'
assert b_stderr == b'my_stderr'
assert self.mock_selector.register.called is True
assert self.mock_selector.register.call_count == 2
assert self.conn._send_initial_data.called is True
assert self.conn._send_initial_data.call_count == 1
assert self.conn._send_initial_data.call_args[0][1] == 'this is input data'
def test_with_password(self):
# test with a password set to trigger the sshpass write
self.pc.password = '12345'
self.mock_popen_res.stdout.read.side_effect = [b"some data", b"", b""]
self.mock_popen_res.stderr.read.side_effect = [b""]
self.mock_selector.select.side_effect = [
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[]]
self.mock_selector.get_map.side_effect = lambda: True
return_code, b_stdout, b_stderr = self.conn._run(["ssh", "is", "a", "cmd"], "this is more data")
assert return_code == 0
assert b_stdout == b'some data'
assert b_stderr == b''
assert self.mock_selector.register.called is True
assert self.mock_selector.register.call_count == 2
assert self.conn._send_initial_data.called is True
assert self.conn._send_initial_data.call_count == 1
assert self.conn._send_initial_data.call_args[0][1] == 'this is more data'
    def _password_with_prompt_examine_output(self, source, state, b_chunk, sudoable):
if state == 'awaiting_prompt':
self.conn._flags['become_prompt'] = True
elif state == 'awaiting_escalation':
self.conn._flags['become_success'] = True
return (b'', b'')
def test_password_with_prompt(self):
# test with password prompting enabled
self.pc.password = None
self.pc.prompt = b'Password:'
self.conn._examine_output.side_effect = self._password_with_prompt_examine_output
self.mock_popen_res.stdout.read.side_effect = [b"Password:", b"Success", b""]
self.mock_popen_res.stderr.read.side_effect = [b""]
self.mock_selector.select.side_effect = [
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ),
(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[]]
self.mock_selector.get_map.side_effect = lambda: True
return_code, b_stdout, b_stderr = self.conn._run("ssh", "this is input data")
assert return_code == 0
assert b_stdout == b''
assert b_stderr == b''
assert self.mock_selector.register.called is True
assert self.mock_selector.register.call_count == 2
assert self.conn._send_initial_data.called is True
assert self.conn._send_initial_data.call_count == 1
assert self.conn._send_initial_data.call_args[0][1] == 'this is input data'
def test_password_with_become(self):
# test with some become settings
self.pc.prompt = b'Password:'
self.pc.become = True
self.pc.success_key = 'BECOME-SUCCESS-abcdefg'
self.conn._examine_output.side_effect = self._password_with_prompt_examine_output
self.mock_popen_res.stdout.read.side_effect = [b"Password:", b"BECOME-SUCCESS-abcdefg", b"abc"]
self.mock_popen_res.stderr.read.side_effect = [b"123"]
self.mock_selector.select.side_effect = [
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[]]
self.mock_selector.get_map.side_effect = lambda: True
return_code, b_stdout, b_stderr = self.conn._run("ssh", "this is input data")
assert return_code == 0
assert b_stdout == b'abc'
assert b_stderr == b'123'
assert self.mock_selector.register.called is True
assert self.mock_selector.register.call_count == 2
assert self.conn._send_initial_data.called is True
assert self.conn._send_initial_data.call_count == 1
assert self.conn._send_initial_data.call_args[0][1] == 'this is input data'
    def test_password_without_data(self):
# simulate no data input
self.mock_openpty.return_value = (98, 99)
self.mock_popen_res.stdout.read.side_effect = [b"some data", b"", b""]
self.mock_popen_res.stderr.read.side_effect = [b""]
self.mock_selector.select.side_effect = [
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[]]
self.mock_selector.get_map.side_effect = lambda: True
return_code, b_stdout, b_stderr = self.conn._run("ssh", "")
assert return_code == 0
assert b_stdout == b'some data'
assert b_stderr == b''
assert self.mock_selector.register.called is True
assert self.mock_selector.register.call_count == 2
assert self.conn._send_initial_data.called is False
def test_password_without_data_popen_failure(self):
# simulate no data input, with the first Popen call on the new ptys failing
self.mock_popen.return_value = None
self.mock_popen.side_effect = [OSError(), self.mock_popen_res]
# simulate no data input
self.mock_openpty.return_value = (98, 99)
self.mock_popen_res.stdout.read.side_effect = [b"some data", b"", b""]
self.mock_popen_res.stderr.read.side_effect = [b""]
self.mock_selector.select.side_effect = [
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[]]
self.mock_selector.get_map.side_effect = lambda: True
return_code, b_stdout, b_stderr = self.conn._run("ssh", "")
assert return_code == 0
assert b_stdout == b'some data'
assert b_stderr == b''
assert self.mock_selector.register.called is True
assert self.mock_selector.register.call_count == 2
assert self.conn._send_initial_data.called is False
@pytest.mark.usefixtures('mock_run_env')
class TestSSHConnectionRetries(object):
def test_retry_then_success(self, monkeypatch):
monkeypatch.setattr(C, 'HOST_KEY_CHECKING', False)
monkeypatch.setattr(C, 'ANSIBLE_SSH_RETRIES', 3)
monkeypatch.setattr('time.sleep', lambda x: None)
self.mock_popen_res.stdout.read.side_effect = [b"", b"my_stdout\n", b"second_line"]
self.mock_popen_res.stderr.read.side_effect = [b"", b"my_stderr"]
type(self.mock_popen_res).returncode = PropertyMock(side_effect=[255] * 3 + [0] * 4)
self.mock_selector.select.side_effect = [
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[]
]
self.mock_selector.get_map.side_effect = lambda: True
self.conn._build_command = MagicMock()
self.conn._build_command.return_value = 'ssh'
return_code, b_stdout, b_stderr = self.conn.exec_command('ssh', 'some data')
assert return_code == 0
assert b_stdout == b'my_stdout\nsecond_line'
assert b_stderr == b'my_stderr'
def test_multiple_failures(self, monkeypatch):
monkeypatch.setattr(C, 'HOST_KEY_CHECKING', False)
monkeypatch.setattr(C, 'ANSIBLE_SSH_RETRIES', 9)
monkeypatch.setattr('time.sleep', lambda x: None)
self.mock_popen_res.stdout.read.side_effect = [b""] * 10
self.mock_popen_res.stderr.read.side_effect = [b""] * 10
type(self.mock_popen_res).returncode = PropertyMock(side_effect=[255] * 30)
self.mock_selector.select.side_effect = [
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[],
] * 10
self.mock_selector.get_map.side_effect = lambda: True
self.conn._build_command = MagicMock()
self.conn._build_command.return_value = 'ssh'
pytest.raises(AnsibleConnectionFailure, self.conn.exec_command, 'ssh', 'some data')
assert self.mock_popen.call_count == 10
def test_abitrary_exceptions(self, monkeypatch):
monkeypatch.setattr(C, 'HOST_KEY_CHECKING', False)
monkeypatch.setattr(C, 'ANSIBLE_SSH_RETRIES', 9)
monkeypatch.setattr('time.sleep', lambda x: None)
self.conn._build_command = MagicMock()
self.conn._build_command.return_value = 'ssh'
self.mock_popen.side_effect = [Exception('bad')] * 10
pytest.raises(Exception, self.conn.exec_command, 'ssh', 'some data')
assert self.mock_popen.call_count == 10
def test_put_file_retries(self, monkeypatch):
monkeypatch.setattr(C, 'HOST_KEY_CHECKING', False)
monkeypatch.setattr(C, 'ANSIBLE_SSH_RETRIES', 3)
monkeypatch.setattr('time.sleep', lambda x: None)
monkeypatch.setattr('ansible.plugins.connection.ssh.os.path.exists', lambda x: True)
self.mock_popen_res.stdout.read.side_effect = [b"", b"my_stdout\n", b"second_line"]
self.mock_popen_res.stderr.read.side_effect = [b"", b"my_stderr"]
type(self.mock_popen_res).returncode = PropertyMock(side_effect=[255] * 4 + [0] * 4)
self.mock_selector.select.side_effect = [
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[]
]
self.mock_selector.get_map.side_effect = lambda: True
self.conn._build_command = MagicMock()
self.conn._build_command.return_value = 'sftp'
return_code, b_stdout, b_stderr = self.conn.put_file('/path/to/in/file', '/path/to/dest/file')
assert return_code == 0
assert b_stdout == b"my_stdout\nsecond_line"
assert b_stderr == b"my_stderr"
assert self.mock_popen.call_count == 2
def test_fetch_file_retries(self, monkeypatch):
monkeypatch.setattr(C, 'HOST_KEY_CHECKING', False)
monkeypatch.setattr(C, 'ANSIBLE_SSH_RETRIES', 3)
monkeypatch.setattr('time.sleep', lambda x: None)
monkeypatch.setattr('ansible.plugins.connection.ssh.os.path.exists', lambda x: True)
self.mock_popen_res.stdout.read.side_effect = [b"", b"my_stdout\n", b"second_line"]
self.mock_popen_res.stderr.read.side_effect = [b"", b"my_stderr"]
type(self.mock_popen_res).returncode = PropertyMock(side_effect=[255] * 4 + [0] * 4)
self.mock_selector.select.side_effect = [
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[]
]
self.mock_selector.get_map.side_effect = lambda: True
self.conn._build_command = MagicMock()
self.conn._build_command.return_value = 'sftp'
return_code, b_stdout, b_stderr = self.conn.fetch_file('/path/to/in/file', '/path/to/dest/file')
assert return_code == 0
assert b_stdout == b"my_stdout\nsecond_line"
assert b_stderr == b"my_stderr"
assert self.mock_popen.call_count == 2
|
gpl-3.0
|
omasanori/gyp
|
test/subdirectory/gyptest-top-all.py
|
261
|
1373
|
#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies building a target and a subsidiary dependent target from a
.gyp file in a subdirectory, without specifying an explicit output build
directory, and using the generated solution or project file at the top
of the tree as the entry point.
There is a difference here in the default behavior of the underlying
build tools. Specifically, when building the entire "solution", Xcode
puts the output of each project relative to the .xcodeproj directory,
while Visual Studio (and our implementation of Make) put it
in a build directory relative to the "solution"--that is, the entry-point
from which you built the entire tree.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('prog1.gyp', chdir='src')
test.relocate('src', 'relocate/src')
test.build('prog1.gyp', test.ALL, chdir='relocate/src')
test.run_built_executable('prog1',
stdout="Hello from prog1.c\n",
chdir='relocate/src')
if test.format == 'xcode':
chdir = 'relocate/src/subdir'
else:
chdir = 'relocate/src'
test.run_built_executable('prog2',
chdir=chdir,
stdout="Hello from prog2.c\n")
test.pass_test()
|
bsd-3-clause
|
InspectorIncognito/visualization
|
AndroidRequests/migrations/0904_transformation_eventforbusv2_half_hour_period.py
|
1
|
2254
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils import timezone
from django.db import models, migrations
def fill_tables(apps, schema_editor):
eventsforbusv2 = apps.get_model('AndroidRequests', 'EventForBusv2')
eventsforbusstop = apps.get_model('AndroidRequests', 'EventForBusStop')
hhperiods = apps.get_model('AndroidRequests', 'HalfHourPeriod')
for ev in eventsforbusv2.objects.all():
creationTime = timezone.localtime(ev.timeCreation).time().replace(microsecond=0)
hhperiod = hhperiods.objects.get(initial_time__lte=creationTime, end_time__gte=creationTime)
ev.halfHourPeriod = hhperiod
ev.save()
for ev in eventsforbusstop.objects.all():
creationTime = timezone.localtime(ev.timeCreation).time().replace(microsecond=0)
hhperiod = hhperiods.objects.get(initial_time__lte=creationTime, end_time__gte=creationTime)
ev.halfHourPeriod = hhperiod
ev.save()
class Migration(migrations.Migration):
dependencies = [
('AndroidRequests', '0903_transformation_halfhourperiod'),
]
operations = [
migrations.AddField(
model_name='eventforbusv2',
name='halfHourPeriod',
field=models.ForeignKey(verbose_name=b'Half Hour Period', to='AndroidRequests.HalfHourPeriod', null=True),
preserve_default=False,
),
migrations.AddField(
model_name='eventforbusstop',
name='halfHourPeriod',
field=models.ForeignKey(verbose_name=b'Half Hour Period', to='AndroidRequests.HalfHourPeriod', null=True),
preserve_default=False,
),
migrations.RunPython(fill_tables, reverse_code=migrations.RunPython.noop),
migrations.AlterField(
model_name='eventforbusv2',
name='halfHourPeriod',
field=models.ForeignKey(verbose_name=b'Half Hour Period', to='AndroidRequests.HalfHourPeriod', null=False),
),
migrations.AlterField(
model_name='eventforbusstop',
name='halfHourPeriod',
field=models.ForeignKey(verbose_name=b'Half Hour Period', to='AndroidRequests.HalfHourPeriod', null=False),
),
]
|
gpl-3.0
|
Beauhurst/django
|
django/contrib/gis/gdal/field.py
|
62
|
6699
|
from ctypes import byref, c_int
from datetime import date, datetime, time
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.gdal.prototypes import ds as capi
from django.utils.encoding import force_text
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr__api_8h.html
#
# The OGR_Fld_* routines are relevant here.
class Field(GDALBase):
"""
Wrap an OGR Field. Needs to be instantiated from a Feature object.
"""
def __init__(self, feat, index):
"""
Initialize on the feature object and the integer index of
the field within the feature.
"""
# Setting the feature pointer and index.
self._feat = feat
self._index = index
# Getting the pointer for this field.
fld_ptr = capi.get_feat_field_defn(feat.ptr, index)
if not fld_ptr:
raise GDALException('Cannot create OGR Field, invalid pointer given.')
self.ptr = fld_ptr
# Setting the class depending upon the OGR Field Type (OFT)
self.__class__ = OGRFieldTypes[self.type]
# OFTReal with no precision should be an OFTInteger.
if isinstance(self, OFTReal) and self.precision == 0:
self.__class__ = OFTInteger
self._double = True
def __str__(self):
"Return the string representation of the Field."
return str(self.value).strip()
# #### Field Methods ####
def as_double(self):
"Retrieve the Field's value as a double (float)."
return capi.get_field_as_double(self._feat.ptr, self._index)
def as_int(self, is_64=False):
"Retrieve the Field's value as an integer."
if is_64:
return capi.get_field_as_integer64(self._feat.ptr, self._index)
else:
return capi.get_field_as_integer(self._feat.ptr, self._index)
def as_string(self):
"Retrieve the Field's value as a string."
string = capi.get_field_as_string(self._feat.ptr, self._index)
return force_text(string, encoding=self._feat.encoding, strings_only=True)
def as_datetime(self):
"Retrieve the Field's value as a tuple of date & time components."
yy, mm, dd, hh, mn, ss, tz = [c_int() for i in range(7)]
status = capi.get_field_as_datetime(
self._feat.ptr, self._index, byref(yy), byref(mm), byref(dd),
byref(hh), byref(mn), byref(ss), byref(tz))
if status:
return (yy, mm, dd, hh, mn, ss, tz)
else:
raise GDALException('Unable to retrieve date & time information from the field.')
# #### Field Properties ####
@property
def name(self):
"Return the name of this Field."
name = capi.get_field_name(self.ptr)
return force_text(name, encoding=self._feat.encoding, strings_only=True)
@property
def precision(self):
"Return the precision of this Field."
return capi.get_field_precision(self.ptr)
@property
def type(self):
"Return the OGR type of this Field."
return capi.get_field_type(self.ptr)
@property
def type_name(self):
"Return the OGR field type name for this Field."
return capi.get_field_type_name(self.type)
@property
def value(self):
"Return the value of this Field."
# Default is to get the field as a string.
return self.as_string()
@property
def width(self):
"Return the width of this Field."
return capi.get_field_width(self.ptr)
# ### The Field sub-classes for each OGR Field type. ###
class OFTInteger(Field):
_double = False
_bit64 = False
@property
def value(self):
"Return an integer contained in this field."
if self._double:
# If this is really from an OFTReal field with no precision,
# read as a double and cast as Python int (to prevent overflow).
return int(self.as_double())
else:
return self.as_int(self._bit64)
@property
def type(self):
"""
GDAL uses OFTReals to represent OFTIntegers in created
shapefiles -- forcing the type here since the underlying field
type may actually be OFTReal.
"""
return 0
class OFTReal(Field):
@property
def value(self):
"Return a float contained in this field."
return self.as_double()
# String & Binary fields, just subclasses
class OFTString(Field):
pass
class OFTWideString(Field):
pass
class OFTBinary(Field):
pass
# OFTDate, OFTTime, OFTDateTime fields.
class OFTDate(Field):
@property
def value(self):
"Return a Python `date` object for the OFTDate field."
try:
yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
return date(yy.value, mm.value, dd.value)
except (ValueError, GDALException):
return None
class OFTDateTime(Field):
@property
def value(self):
"Return a Python `datetime` object for this OFTDateTime field."
# TODO: Adapt timezone information.
# See http://lists.osgeo.org/pipermail/gdal-dev/2006-February/007990.html
# The `tz` variable has values of: 0=unknown, 1=localtime (ambiguous),
# 100=GMT, 104=GMT+1, 80=GMT-5, etc.
try:
yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
return datetime(yy.value, mm.value, dd.value, hh.value, mn.value, ss.value)
except (ValueError, GDALException):
return None
class OFTTime(Field):
@property
def value(self):
"Return a Python `time` object for this OFTTime field."
try:
yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
return time(hh.value, mn.value, ss.value)
except (ValueError, GDALException):
return None
class OFTInteger64(OFTInteger):
_bit64 = True
# List fields are also just subclasses
class OFTIntegerList(Field):
pass
class OFTRealList(Field):
pass
class OFTStringList(Field):
pass
class OFTWideStringList(Field):
pass
class OFTInteger64List(Field):
pass
# Class mapping dictionary for OFT Types and reverse mapping.
OGRFieldTypes = {
0: OFTInteger,
1: OFTIntegerList,
2: OFTReal,
3: OFTRealList,
4: OFTString,
5: OFTStringList,
6: OFTWideString,
7: OFTWideStringList,
8: OFTBinary,
9: OFTDate,
10: OFTTime,
11: OFTDateTime,
# New 64-bit integer types in GDAL 2
12: OFTInteger64,
13: OFTInteger64List,
}
ROGRFieldTypes = {cls: num for num, cls in OGRFieldTypes.items()}
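# A minimal usage sketch (assumptions: DataSource from this same package and a
# hypothetical shapefile path). Iterating a Feature yields instances of the
# Field subclasses defined above:
#
#   from django.contrib.gis.gdal import DataSource
#   ds = DataSource('/path/to/cities.shp')  # hypothetical file
#   layer = ds[0]
#   for feat in layer:
#       for field in feat:  # e.g. OFTString, OFTReal, OFTDate instances
#           print(field.name, field.type_name, field.value)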
|
bsd-3-clause
|
tjsavage/rototutor_djangononrel
|
django/core/cache/backends/base.py
|
15
|
5118
|
"Base Cache class."
import warnings
from django.core.exceptions import ImproperlyConfigured, DjangoRuntimeWarning
class InvalidCacheBackendError(ImproperlyConfigured):
pass
class CacheKeyWarning(DjangoRuntimeWarning):
pass
# Memcached does not accept keys longer than this.
MEMCACHE_MAX_KEY_LENGTH = 250
class BaseCache(object):
def __init__(self, params):
timeout = params.get('timeout', 300)
try:
timeout = int(timeout)
except (ValueError, TypeError):
timeout = 300
self.default_timeout = timeout
max_entries = params.get('max_entries', 300)
try:
self._max_entries = int(max_entries)
except (ValueError, TypeError):
self._max_entries = 300
cull_frequency = params.get('cull_frequency', 3)
try:
self._cull_frequency = int(cull_frequency)
except (ValueError, TypeError):
self._cull_frequency = 3
def add(self, key, value, timeout=None):
"""
Set a value in the cache if the key does not already exist. If
timeout is given, that timeout will be used for the key; otherwise
the default cache timeout will be used.
Returns True if the value was stored, False otherwise.
"""
raise NotImplementedError
def get(self, key, default=None):
"""
Fetch a given key from the cache. If the key does not exist, return
default, which itself defaults to None.
"""
raise NotImplementedError
def set(self, key, value, timeout=None):
"""
Set a value in the cache. If timeout is given, that timeout will be
used for the key; otherwise the default cache timeout will be used.
"""
raise NotImplementedError
def delete(self, key):
"""
Delete a key from the cache, failing silently.
"""
raise NotImplementedError
def get_many(self, keys):
"""
Fetch a bunch of keys from the cache. For certain backends (memcached,
pgsql) this can be *much* faster when fetching multiple values.
Returns a dict mapping each key in keys to its value. If the given
key is missing, it will be missing from the response dict.
"""
d = {}
for k in keys:
val = self.get(k)
if val is not None:
d[k] = val
return d
def has_key(self, key):
"""
Returns True if the key is in the cache and has not expired.
"""
return self.get(key) is not None
def incr(self, key, delta=1):
"""
Add delta to value in the cache. If the key does not exist, raise a
ValueError exception.
"""
if key not in self:
raise ValueError("Key '%s' not found" % key)
new_value = self.get(key) + delta
self.set(key, new_value)
return new_value
def decr(self, key, delta=1):
"""
Subtract delta from value in the cache. If the key does not exist, raise
a ValueError exception.
"""
return self.incr(key, -delta)
def __contains__(self, key):
"""
Returns True if the key is in the cache and has not expired.
"""
# This is a separate method, rather than just a copy of has_key(),
# so that it always has the same functionality as has_key(), even
# if a subclass overrides it.
return self.has_key(key)
def set_many(self, data, timeout=None):
"""
Set a bunch of values in the cache at once from a dict of key/value
pairs. For certain backends (memcached), this is much more efficient
than calling set() multiple times.
If timeout is given, that timeout will be used for the key; otherwise
the default cache timeout will be used.
"""
for key, value in data.items():
self.set(key, value, timeout)
def delete_many(self, keys):
"""
Delete a bunch of keys from the cache at once. For certain backends
(memcached), this is much more efficient than calling delete() multiple
times.
"""
for key in keys:
self.delete(key)
def clear(self):
"""Remove *all* values from the cache at once."""
raise NotImplementedError
def validate_key(self, key):
"""
Warn about keys that would not be portable to the memcached
backend. This encourages (but does not force) writing backend-portable
cache code.
"""
if len(key) > MEMCACHE_MAX_KEY_LENGTH:
warnings.warn('Cache key will cause errors if used with memcached: '
'%s (longer than %s)' % (key, MEMCACHE_MAX_KEY_LENGTH),
CacheKeyWarning)
for char in key:
if ord(char) < 33 or ord(char) == 127:
warnings.warn('Cache key contains characters that will cause '
'errors if used with memcached: %r' % key,
CacheKeyWarning)
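# A minimal sketch (not part of Django) of a concrete backend built on
# BaseCache: only get/set/add/delete/clear are provided, and the generic
# helpers above (incr, get_many, has_key, set_many, ...) work on top of
# them. Timeouts are ignored for brevity; the class name is hypothetical.
class DictCacheSketch(BaseCache):
    def __init__(self, params):
        BaseCache.__init__(self, params)
        self._store = {}

    def add(self, key, value, timeout=None):
        self.validate_key(key)
        if key in self._store:
            return False
        self._store[key] = value
        return True

    def get(self, key, default=None):
        return self._store.get(key, default)

    def set(self, key, value, timeout=None):
        self.validate_key(key)
        self._store[key] = value

    def delete(self, key):
        self._store.pop(key, None)

    def clear(self):
        self._store.clear()

# Example (illustrative):
#   cache = DictCacheSketch({'timeout': 60})
#   cache.set('hits', 1)
#   cache.incr('hits')        # -> 2, via the generic incr() above
#   cache.get_many(['hits'])  # -> {'hits': 2}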
|
bsd-3-clause
|
GrimDerp/numpy
|
numpy/f2py/diagnose.py
|
188
|
5295
|
#!/usr/bin/env python
from __future__ import division, absolute_import, print_function
import os
import sys
import tempfile
def run_command(cmd):
print('Running %r:' % (cmd))
os.system(cmd)
print('------')
def run():
_path = os.getcwd()
os.chdir(tempfile.gettempdir())
print('------')
print('os.name=%r' % (os.name))
print('------')
print('sys.platform=%r' % (sys.platform))
print('------')
print('sys.version:')
print(sys.version)
print('------')
print('sys.prefix:')
print(sys.prefix)
print('------')
print('sys.path=%r' % (':'.join(sys.path)))
print('------')
try:
import numpy
has_newnumpy = 1
except ImportError:
print('Failed to import new numpy:', sys.exc_info()[1])
has_newnumpy = 0
try:
from numpy.f2py import f2py2e
has_f2py2e = 1
except ImportError:
print('Failed to import f2py2e:', sys.exc_info()[1])
has_f2py2e = 0
try:
import numpy.distutils
has_numpy_distutils = 2
except ImportError:
try:
import numpy_distutils
has_numpy_distutils = 1
except ImportError:
print('Failed to import numpy_distutils:', sys.exc_info()[1])
has_numpy_distutils = 0
if has_newnumpy:
try:
print('Found new numpy version %r in %s' %
(numpy.__version__, numpy.__file__))
except Exception as msg:
print('error:', msg)
print('------')
if has_f2py2e:
try:
print('Found f2py2e version %r in %s' %
(f2py2e.__version__.version, f2py2e.__file__))
except Exception as msg:
print('error:', msg)
print('------')
if has_numpy_distutils:
try:
if has_numpy_distutils == 2:
print('Found numpy.distutils version %r in %r' % (
numpy.distutils.__version__,
numpy.distutils.__file__))
else:
print('Found numpy_distutils version %r in %r' % (
numpy_distutils.numpy_distutils_version.numpy_distutils_version,
numpy_distutils.__file__))
print('------')
except Exception as msg:
print('error:', msg)
print('------')
try:
if has_numpy_distutils == 1:
print(
'Importing numpy_distutils.command.build_flib ...', end=' ')
import numpy_distutils.command.build_flib as build_flib
print('ok')
print('------')
try:
print(
'Checking availability of supported Fortran compilers:')
for compiler_class in build_flib.all_compilers:
compiler_class(verbose=1).is_available()
print('------')
except Exception as msg:
print('error:', msg)
print('------')
except Exception as msg:
print(
'error:', msg, '(ignore it, build_flib is obsolete for numpy.distutils 0.2.2 and up)')
print('------')
try:
if has_numpy_distutils == 2:
print('Importing numpy.distutils.fcompiler ...', end=' ')
import numpy.distutils.fcompiler as fcompiler
else:
print('Importing numpy_distutils.fcompiler ...', end=' ')
import numpy_distutils.fcompiler as fcompiler
print('ok')
print('------')
try:
print('Checking availability of supported Fortran compilers:')
fcompiler.show_fcompilers()
print('------')
except Exception as msg:
print('error:', msg)
print('------')
except Exception as msg:
print('error:', msg)
print('------')
try:
if has_numpy_distutils == 2:
print('Importing numpy.distutils.cpuinfo ...', end=' ')
from numpy.distutils.cpuinfo import cpuinfo
print('ok')
print('------')
else:
try:
print(
'Importing numpy_distutils.command.cpuinfo ...', end=' ')
from numpy_distutils.command.cpuinfo import cpuinfo
print('ok')
print('------')
except Exception as msg:
print('error:', msg, '(ignore it)')
print('Importing numpy_distutils.cpuinfo ...', end=' ')
from numpy_distutils.cpuinfo import cpuinfo
print('ok')
print('------')
cpu = cpuinfo()
print('CPU information:', end=' ')
for name in dir(cpuinfo):
if name[0] == '_' and name[1] != '_' and getattr(cpu, name[1:])():
print(name[1:], end=' ')
print('------')
except Exception as msg:
print('error:', msg)
print('------')
os.chdir(_path)
if __name__ == "__main__":
run()
|
bsd-3-clause
|
MagazinnikIvan/pywinauto
|
pywinauto/tests/overlapping.py
|
1
|
7707
|
# GUI Application automation and testing library
# Copyright (C) 2006-2017 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Overlapping Test
**What is checked**
The overlapping test checks for controls that occupy the same space as some
other control in the dialog.
+ If the reference controls are available check for each pair of controls:
- If controls are exactly the same size and position in the reference then
make sure that they are also in the localised dialog.
- If a reference control is wholly contained in another make sure that the
same happens for the controls being tested.
+ If the reference controls are not available only the following check can
be done:
- If controls are overlapped in the localised dialog, report a bug (if the
reference is available it is used just to say whether the overlapping
happens in the reference also)
**How is it checked**
Various tests are performed on each pair of controls to see if any of the
above conditions are met. The most specific tests that can be performed are
done first so that the bugs reported are as specific as possible. I.e. we
report that 2 controls are not exactly overlapped when they should be,
rather than just reporting that they are overlapped, which conveys less
information.
**When is a bug reported**
A bug is reported when:
- controls are overlapped (but not contained wholly, and not exactly
overlapped)
- reference controls are exactly overlapped but they are not in tested
dialog
- one reference control is wholly contained in another but not in
tested dialog
**Bug Extra Information**
This test produces 3 different types of bug:
**BugType - "Overlapping"**
Name Description
OverlappedRect The rectangle of the region in which the two controls overlap
**BugType - "NotContainedOverlap"**
There is no extra information associated with this bug type
**BugType - "NotExactOverlap"**
There is no extra information associated with this bug type
**Is Reference dialog needed**
For checking whether controls should be exactly overlapped and whether they
should be wholly contained the reference controls are necessary. If the
reference controls are not available then only simple overlapping of controls
will be checked.
**False positive bug reports**
If there are controls in the dialog that are not visible or are moved
dynamically it may cause bugs to be reported that do not need to be logged.
If necessary filter out bugs with hidden controls.
**Test Identifier**
The identifier for this test is "Overlapping"
"""
testname = "Overlapping"
#====================================================================
def OverlappingTest(windows):
"""Return the repeated hotkey errors"""
bugs = []
for i, first in enumerate(windows[:-1]):
first_rect = first.rectangle()
if first.ref:
first_ref_rect = first.ref.rectangle()
for second in windows[i+1:]:
second_rect = second.rectangle()
# if the reference controls are available
if first.ref and second.ref:
second_ref_rect = second.ref.rectangle()
if first_ref_rect == second_ref_rect and \
not first_rect == second_rect:
bugs.append(([first, second], {}, "NotExactOverlap", 0))
elif _ContainedInOther(first_ref_rect,second_ref_rect) and \
not _ContainedInOther(first_rect, second_rect):
bugs.append(
([first, second], {}, "NotContainedOverlap", 0))
if _Overlapped(first_rect, second_rect) and \
not _ContainedInOther(first_rect, second_rect) and \
not first_rect == second_rect:
ovlRect = _OverlapRect(first_rect, second_rect)
isInRef = -1
if first.ref and second.ref:
isInRef = 0
if _Overlapped(first_ref_rect, second_ref_rect):
isInRef = 1
bugs.append((
[first, second],
{"OverlappedRect":ovlRect},
testname,
isInRef))
return bugs
#====================================================================
def _ContainedInOther(rect1, rect2):
"""Return true if one rectangle completely contains the other"""
# check if rect1 is inside rect2
if rect1.left >= rect2.left and \
rect1.top >= rect2.top and \
rect1.right <= rect2.right and \
rect1.bottom <= rect2.bottom:
return True
# check if rect2 is inside rect1
elif rect2.left >= rect1.left and \
rect2.top >= rect1.top and \
rect2.right <= rect1.right and \
rect2.bottom <= rect1.bottom:
return True
# no previous return - so must not be included
return False
def _Overlapped(rect1, rect2):
"""Return true if the two rectangles are overlapped"""
ovlRect = _OverlapRect(rect1, rect2)
# a non-empty intersection (left < right and top < bottom) means
# the rectangles genuinely overlap
if ovlRect.left < ovlRect.right and ovlRect.top < ovlRect.bottom:
return True
return False
# Case 1: L2 between L1 and R1 -> max(L1, L2) < min(R1, R2)
#
# L1 R1
# ---------------
# L2 R2
# --------------
#
# Case 2: R2 outside L1 and R1 -> NOT max(L1, L2) < min(R1, R2)
#
# L1 R1
# -------------
# L2 R2
# ------------
#
class OptRect(object): pass  # simple holder for left/top/right/bottom attributes
def _OverlapRect(rect1, rect2):
"""Return the intersection rectangle of the two rectangles"""
ovlRect = OptRect()
ovlRect.left = max(rect1.left, rect2.left)
ovlRect.right = min(rect1.right, rect2.right)
ovlRect.top = max(rect1.top, rect2.top)
ovlRect.bottom = min(rect1.bottom, rect2.bottom)
return ovlRect
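# A minimal sketch of the helpers above (the rectangle type below is
# hypothetical; any object with left/top/right/bottom attributes works):
#
#   class _Rect(object):
#       def __init__(self, left, top, right, bottom):
#           self.left, self.top = left, top
#           self.right, self.bottom = right, bottom
#
#   a = _Rect(0, 0, 10, 10)
#   b = _Rect(5, 5, 15, 15)
#   _Overlapped(a, b)        # True - they share the square (5, 5)-(10, 10)
#   _ContainedInOther(a, b)  # False - neither wholly contains the other
#   r = _OverlapRect(a, b)   # r.left, r.top, r.right, r.bottom == 5, 5, 10, 10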
|
bsd-3-clause
|
orbitfp7/horizon
|
openstack_dashboard/dashboards/identity/users/forms.py
|
5
|
9681
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.forms import ValidationError # noqa
from django import http
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.debug import sensitive_variables # noqa
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils import validators
from openstack_dashboard import api
LOG = logging.getLogger(__name__)
PROJECT_REQUIRED = api.keystone.VERSIONS.active < 3
class BaseUserForm(forms.SelfHandlingForm):
def __init__(self, request, *args, **kwargs):
super(BaseUserForm, self).__init__(request, *args, **kwargs)
# Populate project choices
project_choices = []
# If the user is already set (update action), list only projects which
# the user has access to.
user_id = kwargs['initial'].get('id', None)
domain_id = kwargs['initial'].get('domain_id', None)
projects, has_more = api.keystone.tenant_list(request,
domain=domain_id,
user=user_id)
for project in projects:
if project.enabled:
project_choices.append((project.id, project.name))
if not project_choices:
project_choices.insert(0, ('', _("No available projects")))
elif len(project_choices) > 1:
project_choices.insert(0, ('', _("Select a project")))
self.fields['project'].choices = project_choices
def clean(self):
'''Check to make sure password fields match.'''
data = super(forms.Form, self).clean()
if 'password' in data:
if data['password'] != data.get('confirm_password', None):
raise ValidationError(_('Passwords do not match.'))
return data
ADD_PROJECT_URL = "horizon:identity:projects:create"
class CreateUserForm(BaseUserForm):
# Hide the domain_id and domain_name by default
domain_id = forms.CharField(label=_("Domain ID"),
required=False,
widget=forms.HiddenInput())
domain_name = forms.CharField(label=_("Domain Name"),
required=False,
widget=forms.HiddenInput())
name = forms.CharField(max_length=255, label=_("User Name"))
email = forms.EmailField(
label=_("Email"),
required=False)
password = forms.RegexField(
label=_("Password"),
widget=forms.PasswordInput(render_value=False),
regex=validators.password_validator(),
error_messages={'invalid': validators.password_validator_msg()})
confirm_password = forms.CharField(
label=_("Confirm Password"),
widget=forms.PasswordInput(render_value=False))
project = forms.DynamicChoiceField(label=_("Primary Project"),
required=PROJECT_REQUIRED,
add_item_link=ADD_PROJECT_URL)
role_id = forms.ChoiceField(label=_("Role"),
required=PROJECT_REQUIRED)
no_autocomplete = True
def __init__(self, *args, **kwargs):
roles = kwargs.pop('roles')
super(CreateUserForm, self).__init__(*args, **kwargs)
role_choices = [(role.id, role.name) for role in roles]
self.fields['role_id'].choices = role_choices
# For keystone V3, display the two fields in read-only
if api.keystone.VERSIONS.active >= 3:
readonlyInput = forms.TextInput(attrs={'readonly': 'readonly'})
self.fields["domain_id"].widget = readonlyInput
self.fields["domain_name"].widget = readonlyInput
# We have to protect the entire "data" dict because it contains the
# password and confirm_password strings.
@sensitive_variables('data')
def handle(self, request, data):
domain = api.keystone.get_default_domain(self.request)
try:
LOG.info('Creating user with name "%s"' % data['name'])
if "email" in data:
data['email'] = data['email'] or None
new_user = api.keystone.user_create(request,
name=data['name'],
email=data['email'],
password=data['password'],
project=data['project'],
enabled=True,
domain=domain.id)
messages.success(request,
_('User "%s" was successfully created.')
% data['name'])
if data['project'] and data['role_id']:
roles = api.keystone.roles_for_user(request,
new_user.id,
data['project']) or []
assigned = [role for role in roles if role.id == str(
data['role_id'])]
if not assigned:
try:
api.keystone.add_tenant_user_role(request,
data['project'],
new_user.id,
data['role_id'])
except Exception:
exceptions.handle(request,
_('Unable to add user '
'to primary project.'))
return new_user
except exceptions.Conflict:
msg = _('User name "%s" is already used.') % data['name']
messages.error(request, msg)
except Exception:
exceptions.handle(request, _('Unable to create user.'))
class UpdateUserForm(BaseUserForm):
# Hide the domain_id and domain_name by default
domain_id = forms.CharField(label=_("Domain ID"),
required=False,
widget=forms.HiddenInput())
domain_name = forms.CharField(label=_("Domain Name"),
required=False,
widget=forms.HiddenInput())
id = forms.CharField(label=_("ID"), widget=forms.HiddenInput)
name = forms.CharField(max_length=255, label=_("User Name"))
email = forms.EmailField(
label=_("Email"),
required=False)
password = forms.RegexField(
label=_("Password"),
widget=forms.PasswordInput(render_value=False),
regex=validators.password_validator(),
required=False,
error_messages={'invalid': validators.password_validator_msg()})
confirm_password = forms.CharField(
label=_("Confirm Password"),
widget=forms.PasswordInput(render_value=False),
required=False)
project = forms.ChoiceField(label=_("Primary Project"),
required=PROJECT_REQUIRED)
no_autocomplete = True
def __init__(self, request, *args, **kwargs):
super(UpdateUserForm, self).__init__(request, *args, **kwargs)
if api.keystone.keystone_can_edit_user() is False:
for field in ('name', 'email', 'password', 'confirm_password'):
self.fields.pop(field)
# For keystone V3, display the two fields in read-only
if api.keystone.VERSIONS.active >= 3:
readonlyInput = forms.TextInput(attrs={'readonly': 'readonly'})
self.fields["domain_id"].widget = readonlyInput
self.fields["domain_name"].widget = readonlyInput
# We have to protect the entire "data" dict because it contains the
# password and confirm_password strings.
@sensitive_variables('data', 'password')
def handle(self, request, data):
user = data.pop('id')
# Throw away the password confirmation, we're done with it.
data.pop('confirm_password', None)
data.pop('domain_id')
data.pop('domain_name')
try:
if "email" in data:
data['email'] = data['email'] or None
response = api.keystone.user_update(request, user, **data)
messages.success(request,
_('User has been updated successfully.'))
except exceptions.Conflict:
msg = _('User name "%s" is already used.') % data['name']
messages.error(request, msg)
return False
except Exception:
response = exceptions.handle(request, ignore=True)
messages.error(request, _('Unable to update the user.'))
if isinstance(response, http.HttpResponse):
return response
else:
return True
|
apache-2.0
|
memtoko/django
|
tests/template_tests/filter_tests/test_chaining.py
|
345
|
3940
|
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class ChainingTests(SimpleTestCase):
"""
Chaining safeness-preserving filters should not alter the safe status.
"""
@setup({'chaining01': '{{ a|capfirst|center:"7" }}.{{ b|capfirst|center:"7" }}'})
def test_chaining01(self):
output = self.engine.render_to_string('chaining01', {'a': 'a < b', 'b': mark_safe('a < b')})
self.assertEqual(output, ' A &lt; b . A < b ')
@setup({'chaining02':
'{% autoescape off %}{{ a|capfirst|center:"7" }}.{{ b|capfirst|center:"7" }}{% endautoescape %}'})
def test_chaining02(self):
output = self.engine.render_to_string('chaining02', {'a': 'a < b', 'b': mark_safe('a < b')})
self.assertEqual(output, ' A < b . A < b ')
# Using a filter that forces a string back to unsafe:
@setup({'chaining03': '{{ a|cut:"b"|capfirst }}.{{ b|cut:"b"|capfirst }}'})
def test_chaining03(self):
output = self.engine.render_to_string('chaining03', {'a': 'a < b', 'b': mark_safe('a < b')})
self.assertEqual(output, 'A &lt; .A < ')
@setup({'chaining04':
'{% autoescape off %}{{ a|cut:"b"|capfirst }}.{{ b|cut:"b"|capfirst }}{% endautoescape %}'})
def test_chaining04(self):
output = self.engine.render_to_string('chaining04', {'a': 'a < b', 'b': mark_safe('a < b')})
self.assertEqual(output, 'A < .A < ')
# Using a filter that forces safeness does not lead to double-escaping
@setup({'chaining05': '{{ a|escape|capfirst }}'})
def test_chaining05(self):
output = self.engine.render_to_string('chaining05', {'a': 'a < b'})
self.assertEqual(output, 'A &lt; b')
@setup({'chaining06': '{% autoescape off %}{{ a|escape|capfirst }}{% endautoescape %}'})
def test_chaining06(self):
output = self.engine.render_to_string('chaining06', {'a': 'a < b'})
self.assertEqual(output, 'A &lt; b')
# Force to safe, then back (also showing why using force_escape too
# early in a chain can lead to unexpected results).
@setup({'chaining07': '{{ a|force_escape|cut:";" }}'})
def test_chaining07(self):
output = self.engine.render_to_string('chaining07', {'a': 'a < b'})
self.assertEqual(output, 'a &amp;lt b')
@setup({'chaining08': '{% autoescape off %}{{ a|force_escape|cut:";" }}{% endautoescape %}'})
def test_chaining08(self):
output = self.engine.render_to_string('chaining08', {'a': 'a < b'})
self.assertEqual(output, 'a &lt b')
@setup({'chaining09': '{{ a|cut:";"|force_escape }}'})
def test_chaining09(self):
output = self.engine.render_to_string('chaining09', {'a': 'a < b'})
self.assertEqual(output, 'a &lt; b')
@setup({'chaining10': '{% autoescape off %}{{ a|cut:";"|force_escape }}{% endautoescape %}'})
def test_chaining10(self):
output = self.engine.render_to_string('chaining10', {'a': 'a < b'})
self.assertEqual(output, 'a &lt; b')
@setup({'chaining11': '{{ a|cut:"b"|safe }}'})
def test_chaining11(self):
output = self.engine.render_to_string('chaining11', {'a': 'a < b'})
self.assertEqual(output, 'a < ')
@setup({'chaining12': '{% autoescape off %}{{ a|cut:"b"|safe }}{% endautoescape %}'})
def test_chaining12(self):
output = self.engine.render_to_string('chaining12', {'a': 'a < b'})
self.assertEqual(output, 'a < ')
@setup({'chaining13': '{{ a|safe|force_escape }}'})
def test_chaining13(self):
output = self.engine.render_to_string('chaining13', {"a": "a < b"})
self.assertEqual(output, 'a &lt; b')
@setup({'chaining14': '{% autoescape off %}{{ a|safe|force_escape }}{% endautoescape %}'})
def test_chaining14(self):
output = self.engine.render_to_string('chaining14', {"a": "a < b"})
self.assertEqual(output, 'a &lt; b')
|
bsd-3-clause
|
matmutant/sl4a
|
python/src/Lib/json/tests/test_scanstring.py
|
50
|
3738
|
import sys
import decimal
from unittest import TestCase
import json
import json.decoder
class TestScanString(TestCase):
def test_py_scanstring(self):
self._test_scanstring(json.decoder.py_scanstring)
def test_c_scanstring(self):
self._test_scanstring(json.decoder.c_scanstring)
def _test_scanstring(self, scanstring):
self.assertEquals(
scanstring('"z\\ud834\\udd20x"', 1, None, True),
(u'z\U0001d120x', 16))
if sys.maxunicode == 65535:
self.assertEquals(
scanstring(u'"z\U0001d120x"', 1, None, True),
(u'z\U0001d120x', 6))
else:
self.assertEquals(
scanstring(u'"z\U0001d120x"', 1, None, True),
(u'z\U0001d120x', 5))
self.assertEquals(
scanstring('"\\u007b"', 1, None, True),
(u'{', 8))
self.assertEquals(
scanstring('"A JSON payload should be an object or array, not a string."', 1, None, True),
(u'A JSON payload should be an object or array, not a string.', 60))
self.assertEquals(
scanstring('["Unclosed array"', 2, None, True),
(u'Unclosed array', 17))
self.assertEquals(
scanstring('["extra comma",]', 2, None, True),
(u'extra comma', 14))
self.assertEquals(
scanstring('["double extra comma",,]', 2, None, True),
(u'double extra comma', 21))
self.assertEquals(
scanstring('["Comma after the close"],', 2, None, True),
(u'Comma after the close', 24))
self.assertEquals(
scanstring('["Extra close"]]', 2, None, True),
(u'Extra close', 14))
self.assertEquals(
scanstring('{"Extra comma": true,}', 2, None, True),
(u'Extra comma', 14))
self.assertEquals(
scanstring('{"Extra value after close": true} "misplaced quoted value"', 2, None, True),
(u'Extra value after close', 26))
self.assertEquals(
scanstring('{"Illegal expression": 1 + 2}', 2, None, True),
(u'Illegal expression', 21))
self.assertEquals(
scanstring('{"Illegal invocation": alert()}', 2, None, True),
(u'Illegal invocation', 21))
self.assertEquals(
scanstring('{"Numbers cannot have leading zeroes": 013}', 2, None, True),
(u'Numbers cannot have leading zeroes', 37))
self.assertEquals(
scanstring('{"Numbers cannot be hex": 0x14}', 2, None, True),
(u'Numbers cannot be hex', 24))
self.assertEquals(
scanstring('[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]', 21, None, True),
(u'Too deep', 30))
self.assertEquals(
scanstring('{"Missing colon" null}', 2, None, True),
(u'Missing colon', 16))
self.assertEquals(
scanstring('{"Double colon":: null}', 2, None, True),
(u'Double colon', 15))
self.assertEquals(
scanstring('{"Comma instead of colon", null}', 2, None, True),
(u'Comma instead of colon', 25))
self.assertEquals(
scanstring('["Colon instead of comma": false]', 2, None, True),
(u'Colon instead of comma', 25))
self.assertEquals(
scanstring('["Bad value", truth]', 2, None, True),
(u'Bad value', 12))
def test_issue3623(self):
self.assertRaises(ValueError, json.decoder.scanstring, b"xxx", 1,
"xxx")
self.assertRaises(UnicodeDecodeError,
json.encoder.encode_basestring_ascii, b"xx\xff")
|
apache-2.0
|
saiwing-yeung/scikit-learn
|
examples/calibration/plot_calibration.py
|
66
|
4795
|
"""
======================================
Probability calibration of classifiers
======================================
When performing classification you often want to predict not only
the class label, but also the associated probability. This probability
gives you some kind of confidence on the prediction. However, not all
classifiers provide well-calibrated probabilities, some being over-confident
while others are under-confident. Thus, a separate calibration of predicted
probabilities is often desirable as a postprocessing step. This example illustrates
two different methods for this calibration and evaluates the quality of the
returned probabilities using Brier's score
(see https://en.wikipedia.org/wiki/Brier_score).
Compared are the estimated probability using a Gaussian naive Bayes classifier
without calibration, with a sigmoid calibration, and with a non-parametric
isotonic calibration. One can observe that only the non-parametric model is able
to provide a probability calibration that returns probabilities close to the
expected 0.5 for most of the samples belonging to the middle cluster with
heterogeneous labels. This results in a significantly improved Brier score.
"""
print(__doc__)
# Author: Mathieu Blondel <mathieu@mblondel.org>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Balazs Kegl <balazs.kegl@gmail.com>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from sklearn.datasets import make_blobs
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import brier_score_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.model_selection import train_test_split
n_samples = 50000
n_bins = 3 # use 3 bins for calibration_curve as we have 3 clusters here
# Generate 3 blobs with 2 classes where the second blob contains
# half positive samples and half negative samples. Probability in this
# blob is therefore 0.5.
centers = [(-5, -5), (0, 0), (5, 5)]
X, y = make_blobs(n_samples=n_samples, n_features=2, cluster_std=1.0,
centers=centers, shuffle=False, random_state=42)
y[:n_samples // 2] = 0
y[n_samples // 2:] = 1
sample_weight = np.random.RandomState(42).rand(y.shape[0])
# split train, test for calibration
X_train, X_test, y_train, y_test, sw_train, sw_test = \
train_test_split(X, y, sample_weight, test_size=0.9, random_state=42)
# Gaussian Naive-Bayes with no calibration
clf = GaussianNB()
clf.fit(X_train, y_train) # GaussianNB itself does not support sample-weights
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with isotonic calibration
clf_isotonic = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_isotonic.fit(X_train, y_train, sw_train)
prob_pos_isotonic = clf_isotonic.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with sigmoid calibration
clf_sigmoid = CalibratedClassifierCV(clf, cv=2, method='sigmoid')
clf_sigmoid.fit(X_train, y_train, sw_train)
prob_pos_sigmoid = clf_sigmoid.predict_proba(X_test)[:, 1]
print("Brier scores: (the smaller the better)")
clf_score = brier_score_loss(y_test, prob_pos_clf, sw_test)
print("No calibration: %1.3f" % clf_score)
clf_isotonic_score = brier_score_loss(y_test, prob_pos_isotonic, sw_test)
print("With isotonic calibration: %1.3f" % clf_isotonic_score)
clf_sigmoid_score = brier_score_loss(y_test, prob_pos_sigmoid, sw_test)
print("With sigmoid calibration: %1.3f" % clf_sigmoid_score)
###############################################################################
# Plot the data and the predicted probabilities
plt.figure()
y_unique = np.unique(y)
colors = cm.rainbow(np.linspace(0.0, 1.0, y_unique.size))
for this_y, color in zip(y_unique, colors):
this_X = X_train[y_train == this_y]
this_sw = sw_train[y_train == this_y]
plt.scatter(this_X[:, 0], this_X[:, 1], s=this_sw * 50, c=color, alpha=0.5,
label="Class %s" % this_y)
plt.legend(loc="best")
plt.title("Data")
plt.figure()
order = np.lexsort((prob_pos_clf, ))
plt.plot(prob_pos_clf[order], 'r', label='No calibration (%1.3f)' % clf_score)
plt.plot(prob_pos_isotonic[order], 'g', linewidth=3,
label='Isotonic calibration (%1.3f)' % clf_isotonic_score)
plt.plot(prob_pos_sigmoid[order], 'b', linewidth=3,
label='Sigmoid calibration (%1.3f)' % clf_sigmoid_score)
plt.plot(np.linspace(0, y_test.size, 51)[1::2],
y_test[order].reshape(25, -1).mean(1),
'k', linewidth=3, label=r'Empirical')
plt.ylim([-0.05, 1.05])
plt.xlabel("Instances sorted according to predicted probability "
"(uncalibrated GNB)")
plt.ylabel("P(y=1)")
plt.legend(loc="upper left")
plt.title("Gaussian naive Bayes probabilities")
plt.show()
|
bsd-3-clause
|
foss-transportationmodeling/rettina-server
|
.env/lib/python2.7/site-packages/sqlalchemy/sql/expression.py
|
21
|
5668
|
# sql/expression.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Defines the public namespace for SQL expression constructs.
Prior to version 0.9, this module contained all of "elements", "dml",
"default_comparator" and "selectable". The module was broken up
and most "factory" functions were moved to be grouped with their associated
class.
"""
__all__ = [
'Alias', 'ClauseElement', 'ColumnCollection', 'ColumnElement',
'CompoundSelect', 'Delete', 'FromClause', 'Insert', 'Join', 'Select',
'Selectable', 'TableClause', 'Update', 'alias', 'and_', 'asc', 'between',
'bindparam', 'case', 'cast', 'column', 'delete', 'desc', 'distinct',
'except_', 'except_all', 'exists', 'extract', 'func', 'modifier',
'collate', 'insert', 'intersect', 'intersect_all', 'join', 'label',
'literal', 'literal_column', 'not_', 'null', 'nullsfirst', 'nullslast',
'or_', 'outparam', 'outerjoin', 'over', 'select', 'subquery',
'table', 'text',
'tuple_', 'type_coerce', 'union', 'union_all', 'update']
from .visitors import Visitable
from .functions import func, modifier, FunctionElement, Function
from ..util.langhelpers import public_factory
from .elements import ClauseElement, ColumnElement,\
BindParameter, UnaryExpression, BooleanClauseList, \
Label, Cast, Case, ColumnClause, TextClause, Over, Null, \
True_, False_, BinaryExpression, Tuple, TypeClause, Extract, \
Grouping, not_, \
collate, literal_column, between,\
literal, outparam, type_coerce, ClauseList
from .elements import SavepointClause, RollbackToSavepointClause, \
ReleaseSavepointClause
from .base import ColumnCollection, Generative, Executable, \
PARSE_AUTOCOMMIT
from .selectable import Alias, Join, Select, Selectable, TableClause, \
CompoundSelect, CTE, FromClause, FromGrouping, SelectBase, \
alias, GenerativeSelect, \
subquery, HasPrefixes, Exists, ScalarSelect, TextAsFrom
from .dml import Insert, Update, Delete, UpdateBase, ValuesBase
# factory functions - these pull class-bound constructors and classmethods
# from SQL elements and selectables into public functions. This allows
# the functions to be available in the sqlalchemy.sql.* namespace and
# to be auto-cross-documenting from the function to the class itself.
and_ = public_factory(BooleanClauseList.and_, ".expression.and_")
or_ = public_factory(BooleanClauseList.or_, ".expression.or_")
bindparam = public_factory(BindParameter, ".expression.bindparam")
select = public_factory(Select, ".expression.select")
text = public_factory(TextClause._create_text, ".expression.text")
table = public_factory(TableClause, ".expression.table")
column = public_factory(ColumnClause, ".expression.column")
over = public_factory(Over, ".expression.over")
label = public_factory(Label, ".expression.label")
case = public_factory(Case, ".expression.case")
cast = public_factory(Cast, ".expression.cast")
extract = public_factory(Extract, ".expression.extract")
tuple_ = public_factory(Tuple, ".expression.tuple_")
except_ = public_factory(CompoundSelect._create_except, ".expression.except_")
except_all = public_factory(
CompoundSelect._create_except_all, ".expression.except_all")
intersect = public_factory(
CompoundSelect._create_intersect, ".expression.intersect")
intersect_all = public_factory(
CompoundSelect._create_intersect_all, ".expression.intersect_all")
union = public_factory(CompoundSelect._create_union, ".expression.union")
union_all = public_factory(
CompoundSelect._create_union_all, ".expression.union_all")
exists = public_factory(Exists, ".expression.exists")
nullsfirst = public_factory(
UnaryExpression._create_nullsfirst, ".expression.nullsfirst")
nullslast = public_factory(
UnaryExpression._create_nullslast, ".expression.nullslast")
asc = public_factory(UnaryExpression._create_asc, ".expression.asc")
desc = public_factory(UnaryExpression._create_desc, ".expression.desc")
distinct = public_factory(
UnaryExpression._create_distinct, ".expression.distinct")
true = public_factory(True_._singleton, ".expression.true")
false = public_factory(False_._singleton, ".expression.false")
null = public_factory(Null._singleton, ".expression.null")
join = public_factory(Join._create_join, ".expression.join")
outerjoin = public_factory(Join._create_outerjoin, ".expression.outerjoin")
insert = public_factory(Insert, ".expression.insert")
update = public_factory(Update, ".expression.update")
delete = public_factory(Delete, ".expression.delete")
# internal functions still being called from tests and the ORM,
# these might be better off in some other namespace
from .base import _from_objects
from .elements import _literal_as_text, _clause_element_as_expr,\
_is_column, _labeled, _only_column_elements, _string_or_unprintable, \
_truncated_label, _clone, _cloned_difference, _cloned_intersection,\
_column_as_key, _literal_as_binds, _select_iterables, \
_corresponding_column_or_error
from .selectable import _interpret_as_from
# old names for compatibility
_Executable = Executable
_BindParamClause = BindParameter
_Label = Label
_SelectBase = SelectBase
_BinaryExpression = BinaryExpression
_Cast = Cast
_Null = Null
_False = False_
_True = True_
_TextClause = TextClause
_UnaryExpression = UnaryExpression
_Case = Case
_Tuple = Tuple
_Over = Over
_Generative = Generative
_TypeClause = TypeClause
_Extract = Extract
_Exists = Exists
_Grouping = Grouping
_FromGrouping = FromGrouping
_ScalarSelect = ScalarSelect
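# A minimal sketch of how the public factories above compose (assumptions: an
# ad-hoc table built from the lightweight table()/column() constructs; the
# names are hypothetical):
#
#   users = table('users', column('id'), column('name'))
#   stmt = select([users.c.name]).where(and_(users.c.id > 2, users.c.id < 10))
#   print(stmt)
#   # SELECT users.name FROM users
#   # WHERE users.id > :id_1 AND users.id < :id_2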
|
apache-2.0
|
Bionetbook/bionetbook
|
bnbapp/bionetbook/wsgi.py
|
2
|
1142
|
"""
WSGI config for bionetbook project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bionetbook.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
mit
|
Hellowlol/plexpy
|
lib/enum/test_enum.py
|
95
|
62283
|
import enum
import sys
import unittest
from enum import Enum, IntEnum, unique, EnumMeta
from pickle import dumps, loads, PicklingError, HIGHEST_PROTOCOL
pyver = float('%s.%s' % sys.version_info[:2])
try:
any
except NameError:
def any(iterable):
for element in iterable:
if element:
return True
return False
try:
unicode
except NameError:
unicode = str
try:
from collections import OrderedDict
except ImportError:
OrderedDict = None
# for pickle tests
try:
class Stooges(Enum):
LARRY = 1
CURLY = 2
MOE = 3
except Exception:
Stooges = sys.exc_info()[1]
try:
class IntStooges(int, Enum):
LARRY = 1
CURLY = 2
MOE = 3
except Exception:
IntStooges = sys.exc_info()[1]
try:
class FloatStooges(float, Enum):
LARRY = 1.39
CURLY = 2.72
MOE = 3.142596
except Exception:
FloatStooges = sys.exc_info()[1]
# for pickle test and subclass tests
try:
class StrEnum(str, Enum):
'accepts only string values'
class Name(StrEnum):
BDFL = 'Guido van Rossum'
FLUFL = 'Barry Warsaw'
except Exception:
Name = sys.exc_info()[1]
try:
Question = Enum('Question', 'who what when where why', module=__name__)
except Exception:
Question = sys.exc_info()[1]
try:
Answer = Enum('Answer', 'him this then there because')
except Exception:
Answer = sys.exc_info()[1]
try:
Theory = Enum('Theory', 'rule law supposition', qualname='spanish_inquisition')
except Exception:
Theory = sys.exc_info()[1]
# for doctests
try:
class Fruit(Enum):
tomato = 1
banana = 2
cherry = 3
except Exception:
pass
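# Illustrative note (not part of the original suite): the try/except blocks
# above keep a broken fixture definition from aborting the import of the
# whole module. The exception instance is stored in place of the class and
# re-raised inside the individual test that needs it, e.g.:
#
#     def test_pickle_enum(self):
#         if isinstance(Stooges, Exception):
#             raise Stooges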
def test_pickle_dump_load(assertion, source, target=None,
protocol=(0, HIGHEST_PROTOCOL)):
start, stop = protocol
failures = []
for protocol in range(start, stop+1):
try:
if target is None:
assertion(loads(dumps(source, protocol=protocol)) is source)
else:
assertion(loads(dumps(source, protocol=protocol)), target)
except Exception:
exc, tb = sys.exc_info()[1:]
failures.append('%2d: %s' %(protocol, exc))
if failures:
raise ValueError('Failed with protocols: %s' % ', '.join(failures))
def test_pickle_exception(assertion, exception, obj,
protocol=(0, HIGHEST_PROTOCOL)):
start, stop = protocol
failures = []
for protocol in range(start, stop+1):
try:
assertion(exception, dumps, obj, protocol=protocol)
except Exception:
exc = sys.exc_info()[1]
failures.append('%d: %s %s' % (protocol, exc.__class__.__name__, exc))
if failures:
raise ValueError('Failed with protocols: %s' % ', '.join(failures))
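# Illustrative usage (not part of the original suite): both helpers above
# run one assertion across a whole range of pickle protocols and collect
# every failing protocol into a single ValueError, e.g. from inside a
# unittest.TestCase method:
#
#     test_pickle_dump_load(self.assertTrue, Stooges.CURLY)   # identity survives
#     test_pickle_dump_load(self.assertEqual, NI5, 5)         # equality survives
#     test_pickle_exception(self.assertRaises, PicklingError, BadPickle)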
class TestHelpers(unittest.TestCase):
# _is_descriptor, _is_sunder, _is_dunder
def test_is_descriptor(self):
class foo:
pass
for attr in ('__get__','__set__','__delete__'):
obj = foo()
self.assertFalse(enum._is_descriptor(obj))
setattr(obj, attr, 1)
self.assertTrue(enum._is_descriptor(obj))
def test_is_sunder(self):
for s in ('_a_', '_aa_'):
self.assertTrue(enum._is_sunder(s))
for s in ('a', 'a_', '_a', '__a', 'a__', '__a__', '_a__', '__a_', '_',
'__', '___', '____', '_____',):
self.assertFalse(enum._is_sunder(s))
def test_is_dunder(self):
for s in ('__a__', '__aa__'):
self.assertTrue(enum._is_dunder(s))
for s in ('a', 'a_', '_a', '__a', 'a__', '_a_', '_a__', '__a_', '_',
'__', '___', '____', '_____',):
self.assertFalse(enum._is_dunder(s))
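# For reference (illustrative, not part of the original suite): a "sunder"
# name has exactly one leading and one trailing underscore (e.g. '_value_')
# and is reserved by the enum machinery, while a "dunder" has two (e.g.
# '__members__') and is skipped during member collection:
#
#     enum._is_sunder('_value_')       # True
#     enum._is_dunder('__members__')   # True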
class TestEnum(unittest.TestCase):
def setUp(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = 3
WINTER = 4
self.Season = Season
class Konstants(float, Enum):
E = 2.7182818
PI = 3.1415926
TAU = 2 * PI
self.Konstants = Konstants
class Grades(IntEnum):
A = 5
B = 4
C = 3
D = 2
F = 0
self.Grades = Grades
class Directional(str, Enum):
EAST = 'east'
WEST = 'west'
NORTH = 'north'
SOUTH = 'south'
self.Directional = Directional
from datetime import date
class Holiday(date, Enum):
NEW_YEAR = 2013, 1, 1
IDES_OF_MARCH = 2013, 3, 15
self.Holiday = Holiday
if pyver >= 2.6: # cannot specify custom `dir` on previous versions
def test_dir_on_class(self):
Season = self.Season
self.assertEqual(
set(dir(Season)),
set(['__class__', '__doc__', '__members__', '__module__',
'SPRING', 'SUMMER', 'AUTUMN', 'WINTER']),
)
def test_dir_on_item(self):
Season = self.Season
self.assertEqual(
set(dir(Season.WINTER)),
set(['__class__', '__doc__', '__module__', 'name', 'value']),
)
def test_dir_on_sub_with_behavior_on_super(self):
# see issue22506
class SuperEnum(Enum):
def invisible(self):
return "did you see me?"
class SubEnum(SuperEnum):
sample = 5
self.assertEqual(
set(dir(SubEnum.sample)),
set(['__class__', '__doc__', '__module__', 'name', 'value', 'invisible']),
)
if pyver >= 2.7: # OrderedDict first available here
def test_members_is_ordereddict_if_ordered(self):
class Ordered(Enum):
__order__ = 'first second third'
first = 'bippity'
second = 'boppity'
third = 'boo'
self.assertTrue(type(Ordered.__members__) is OrderedDict)
def test_members_is_ordereddict_if_not_ordered(self):
class Unordered(Enum):
this = 'that'
these = 'those'
self.assertTrue(type(Unordered.__members__) is OrderedDict)
if pyver >= 3.0: # all objects are ordered in Python 2.x
def test_members_is_always_ordered(self):
class AlwaysOrdered(Enum):
first = 1
second = 2
third = 3
self.assertTrue(type(AlwaysOrdered.__members__) is OrderedDict)
def test_comparisons(self):
def bad_compare():
Season.SPRING > 4
Season = self.Season
self.assertNotEqual(Season.SPRING, 1)
self.assertRaises(TypeError, bad_compare)
class Part(Enum):
SPRING = 1
CLIP = 2
BARREL = 3
self.assertNotEqual(Season.SPRING, Part.SPRING)
def bad_compare():
Season.SPRING < Part.CLIP
self.assertRaises(TypeError, bad_compare)
def test_enum_in_enum_out(self):
Season = self.Season
self.assertTrue(Season(Season.WINTER) is Season.WINTER)
def test_enum_value(self):
Season = self.Season
self.assertEqual(Season.SPRING.value, 1)
def test_intenum_value(self):
self.assertEqual(IntStooges.CURLY.value, 2)
def test_enum(self):
Season = self.Season
lst = list(Season)
self.assertEqual(len(lst), len(Season))
self.assertEqual(len(Season), 4, Season)
self.assertEqual(
[Season.SPRING, Season.SUMMER, Season.AUTUMN, Season.WINTER], lst)
for i, season in enumerate('SPRING SUMMER AUTUMN WINTER'.split()):
i += 1
e = Season(i)
self.assertEqual(e, getattr(Season, season))
self.assertEqual(e.value, i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, season)
self.assertTrue(e in Season)
self.assertTrue(type(e) is Season)
self.assertTrue(isinstance(e, Season))
self.assertEqual(str(e), 'Season.' + season)
self.assertEqual(
repr(e),
'<Season.%s: %s>' % (season, i),
)
def test_value_name(self):
Season = self.Season
self.assertEqual(Season.SPRING.name, 'SPRING')
self.assertEqual(Season.SPRING.value, 1)
def set_name(obj, new_value):
obj.name = new_value
def set_value(obj, new_value):
obj.value = new_value
self.assertRaises(AttributeError, set_name, Season.SPRING, 'invierno', )
self.assertRaises(AttributeError, set_value, Season.SPRING, 2)
def test_attribute_deletion(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = 3
WINTER = 4
def spam(cls):
pass
self.assertTrue(hasattr(Season, 'spam'))
del Season.spam
self.assertFalse(hasattr(Season, 'spam'))
self.assertRaises(AttributeError, delattr, Season, 'SPRING')
self.assertRaises(AttributeError, delattr, Season, 'DRY')
self.assertRaises(AttributeError, delattr, Season.SPRING, 'name')
def test_invalid_names(self):
def create_bad_class_1():
class Wrong(Enum):
mro = 9
def create_bad_class_2():
class Wrong(Enum):
_reserved_ = 3
self.assertRaises(ValueError, create_bad_class_1)
self.assertRaises(ValueError, create_bad_class_2)
def test_contains(self):
Season = self.Season
self.assertTrue(Season.AUTUMN in Season)
self.assertTrue(3 not in Season)
val = Season(3)
self.assertTrue(val in Season)
class OtherEnum(Enum):
one = 1; two = 2
self.assertTrue(OtherEnum.two not in Season)
if pyver >= 2.6: # when `format` came into being
def test_format_enum(self):
Season = self.Season
self.assertEqual('{0}'.format(Season.SPRING),
'{0}'.format(str(Season.SPRING)))
self.assertEqual( '{0:}'.format(Season.SPRING),
'{0:}'.format(str(Season.SPRING)))
self.assertEqual('{0:20}'.format(Season.SPRING),
'{0:20}'.format(str(Season.SPRING)))
self.assertEqual('{0:^20}'.format(Season.SPRING),
'{0:^20}'.format(str(Season.SPRING)))
self.assertEqual('{0:>20}'.format(Season.SPRING),
'{0:>20}'.format(str(Season.SPRING)))
self.assertEqual('{0:<20}'.format(Season.SPRING),
'{0:<20}'.format(str(Season.SPRING)))
def test_format_enum_custom(self):
class TestFloat(float, Enum):
one = 1.0
two = 2.0
def __format__(self, spec):
return 'TestFloat success!'
self.assertEqual('{0}'.format(TestFloat.one), 'TestFloat success!')
def assertFormatIsValue(self, spec, member):
self.assertEqual(spec.format(member), spec.format(member.value))
def test_format_enum_date(self):
Holiday = self.Holiday
self.assertFormatIsValue('{0}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{0:}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{0:20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{0:^20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{0:>20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{0:<20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{0:%Y %m}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{0:%Y %m %M:00}', Holiday.IDES_OF_MARCH)
def test_format_enum_float(self):
Konstants = self.Konstants
self.assertFormatIsValue('{0}', Konstants.TAU)
self.assertFormatIsValue('{0:}', Konstants.TAU)
self.assertFormatIsValue('{0:20}', Konstants.TAU)
self.assertFormatIsValue('{0:^20}', Konstants.TAU)
self.assertFormatIsValue('{0:>20}', Konstants.TAU)
self.assertFormatIsValue('{0:<20}', Konstants.TAU)
self.assertFormatIsValue('{0:n}', Konstants.TAU)
self.assertFormatIsValue('{0:5.2}', Konstants.TAU)
self.assertFormatIsValue('{0:f}', Konstants.TAU)
def test_format_enum_int(self):
Grades = self.Grades
self.assertFormatIsValue('{0}', Grades.C)
self.assertFormatIsValue('{0:}', Grades.C)
self.assertFormatIsValue('{0:20}', Grades.C)
self.assertFormatIsValue('{0:^20}', Grades.C)
self.assertFormatIsValue('{0:>20}', Grades.C)
self.assertFormatIsValue('{0:<20}', Grades.C)
self.assertFormatIsValue('{0:+}', Grades.C)
self.assertFormatIsValue('{0:08X}', Grades.C)
self.assertFormatIsValue('{0:b}', Grades.C)
def test_format_enum_str(self):
Directional = self.Directional
self.assertFormatIsValue('{0}', Directional.WEST)
self.assertFormatIsValue('{0:}', Directional.WEST)
self.assertFormatIsValue('{0:20}', Directional.WEST)
self.assertFormatIsValue('{0:^20}', Directional.WEST)
self.assertFormatIsValue('{0:>20}', Directional.WEST)
self.assertFormatIsValue('{0:<20}', Directional.WEST)
def test_hash(self):
Season = self.Season
dates = {}
dates[Season.WINTER] = '1225'
dates[Season.SPRING] = '0315'
dates[Season.SUMMER] = '0704'
dates[Season.AUTUMN] = '1031'
self.assertEqual(dates[Season.AUTUMN], '1031')
def test_enum_duplicates(self):
__order__ = "SPRING SUMMER AUTUMN WINTER"
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = FALL = 3
WINTER = 4
ANOTHER_SPRING = 1
lst = list(Season)
self.assertEqual(
lst,
[Season.SPRING, Season.SUMMER,
Season.AUTUMN, Season.WINTER,
])
self.assertTrue(Season.FALL is Season.AUTUMN)
self.assertEqual(Season.FALL.value, 3)
self.assertEqual(Season.AUTUMN.value, 3)
self.assertTrue(Season(3) is Season.AUTUMN)
self.assertTrue(Season(1) is Season.SPRING)
self.assertEqual(Season.FALL.name, 'AUTUMN')
self.assertEqual(
set([k for k,v in Season.__members__.items() if v.name != k]),
set(['FALL', 'ANOTHER_SPRING']),
)
if pyver >= 3.0:
cls = vars()
result = {'Enum':Enum}
exec("""def test_duplicate_name(self):
with self.assertRaises(TypeError):
class Color(Enum):
red = 1
green = 2
blue = 3
red = 4
with self.assertRaises(TypeError):
class Color(Enum):
red = 1
green = 2
blue = 3
def red(self):
return 'red'
with self.assertRaises(TypeError):
class Color(Enum):
@property
def red(self):
return 'redder'
red = 1
green = 2
blue = 3""",
result)
cls['test_duplicate_name'] = result['test_duplicate_name']
def test_enum_with_value_name(self):
class Huh(Enum):
name = 1
value = 2
self.assertEqual(
list(Huh),
[Huh.name, Huh.value],
)
self.assertTrue(type(Huh.name) is Huh)
self.assertEqual(Huh.name.name, 'name')
self.assertEqual(Huh.name.value, 1)
def test_intenum_from_scratch(self):
class phy(int, Enum):
pi = 3
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_intenum_inherited(self):
class IntEnum(int, Enum):
pass
class phy(IntEnum):
pi = 3
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_floatenum_from_scratch(self):
class phy(float, Enum):
pi = 3.1415926
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_floatenum_inherited(self):
class FloatEnum(float, Enum):
pass
class phy(FloatEnum):
pi = 3.1415926
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_strenum_from_scratch(self):
class phy(str, Enum):
pi = 'Pi'
tau = 'Tau'
self.assertTrue(phy.pi < phy.tau)
def test_strenum_inherited(self):
class StrEnum(str, Enum):
pass
class phy(StrEnum):
pi = 'Pi'
tau = 'Tau'
self.assertTrue(phy.pi < phy.tau)
def test_intenum(self):
class WeekDay(IntEnum):
SUNDAY = 1
MONDAY = 2
TUESDAY = 3
WEDNESDAY = 4
THURSDAY = 5
FRIDAY = 6
SATURDAY = 7
self.assertEqual(['a', 'b', 'c'][WeekDay.MONDAY], 'c')
self.assertEqual([i for i in range(WeekDay.TUESDAY)], [0, 1, 2])
lst = list(WeekDay)
self.assertEqual(len(lst), len(WeekDay))
self.assertEqual(len(WeekDay), 7)
target = 'SUNDAY MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY'
target = target.split()
for i, weekday in enumerate(target):
i += 1
e = WeekDay(i)
self.assertEqual(e, i)
self.assertEqual(int(e), i)
self.assertEqual(e.name, weekday)
self.assertTrue(e in WeekDay)
self.assertEqual(lst.index(e)+1, i)
self.assertTrue(0 < e < 8)
self.assertTrue(type(e) is WeekDay)
self.assertTrue(isinstance(e, int))
self.assertTrue(isinstance(e, Enum))
def test_intenum_duplicates(self):
class WeekDay(IntEnum):
__order__ = 'SUNDAY MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY'
SUNDAY = 1
MONDAY = 2
TUESDAY = TEUSDAY = 3
WEDNESDAY = 4
THURSDAY = 5
FRIDAY = 6
SATURDAY = 7
self.assertTrue(WeekDay.TEUSDAY is WeekDay.TUESDAY)
self.assertEqual(WeekDay(3).name, 'TUESDAY')
self.assertEqual([k for k,v in WeekDay.__members__.items()
if v.name != k], ['TEUSDAY', ])
def test_pickle_enum(self):
if isinstance(Stooges, Exception):
raise Stooges
test_pickle_dump_load(self.assertTrue, Stooges.CURLY)
test_pickle_dump_load(self.assertTrue, Stooges)
def test_pickle_int(self):
if isinstance(IntStooges, Exception):
raise IntStooges
test_pickle_dump_load(self.assertTrue, IntStooges.CURLY)
test_pickle_dump_load(self.assertTrue, IntStooges)
def test_pickle_float(self):
if isinstance(FloatStooges, Exception):
raise FloatStooges
test_pickle_dump_load(self.assertTrue, FloatStooges.CURLY)
test_pickle_dump_load(self.assertTrue, FloatStooges)
def test_pickle_enum_function(self):
if isinstance(Answer, Exception):
raise Answer
test_pickle_dump_load(self.assertTrue, Answer.him)
test_pickle_dump_load(self.assertTrue, Answer)
def test_pickle_enum_function_with_module(self):
if isinstance(Question, Exception):
raise Question
test_pickle_dump_load(self.assertTrue, Question.who)
test_pickle_dump_load(self.assertTrue, Question)
if pyver >= 3.4:
def test_class_nested_enum_and_pickle_protocol_four(self):
# would normally just have this directly in the class namespace
class NestedEnum(Enum):
twigs = 'common'
shiny = 'rare'
self.__class__.NestedEnum = NestedEnum
self.NestedEnum.__qualname__ = '%s.NestedEnum' % self.__class__.__name__
test_pickle_exception(
self.assertRaises, PicklingError, self.NestedEnum.twigs,
protocol=(0, 3))
test_pickle_dump_load(self.assertTrue, self.NestedEnum.twigs,
protocol=(4, HIGHEST_PROTOCOL))
def test_exploding_pickle(self):
BadPickle = Enum('BadPickle', 'dill sweet bread-n-butter')
enum._make_class_unpicklable(BadPickle)
globals()['BadPickle'] = BadPickle
test_pickle_exception(self.assertRaises, TypeError, BadPickle.dill)
test_pickle_exception(self.assertRaises, PicklingError, BadPickle)
def test_string_enum(self):
class SkillLevel(str, Enum):
master = 'what is the sound of one hand clapping?'
journeyman = 'why did the chicken cross the road?'
apprentice = 'knock, knock!'
self.assertEqual(SkillLevel.apprentice, 'knock, knock!')
def test_getattr_getitem(self):
class Period(Enum):
morning = 1
noon = 2
evening = 3
night = 4
self.assertTrue(Period(2) is Period.noon)
self.assertTrue(getattr(Period, 'night') is Period.night)
self.assertTrue(Period['morning'] is Period.morning)
def test_getattr_dunder(self):
Season = self.Season
self.assertTrue(getattr(Season, '__hash__'))
def test_iteration_order(self):
class Season(Enum):
__order__ = 'SUMMER WINTER AUTUMN SPRING'
SUMMER = 2
WINTER = 4
AUTUMN = 3
SPRING = 1
self.assertEqual(
list(Season),
[Season.SUMMER, Season.WINTER, Season.AUTUMN, Season.SPRING],
)
def test_iteration_order_with_unorderable_values(self):
class Complex(Enum):
a = complex(7, 9)
b = complex(3.14, 2)
c = complex(1, -1)
d = complex(-77, 32)
self.assertEqual(
list(Complex),
[Complex.a, Complex.b, Complex.c, Complex.d],
)
def test_programatic_function_string(self):
SummerMonth = Enum('SummerMonth', 'june july august')
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split()):
i += 1
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programatic_function_string_list(self):
SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'])
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split()):
i += 1
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programatic_function_iterable(self):
SummerMonth = Enum(
'SummerMonth',
(('june', 1), ('july', 2), ('august', 3))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split()):
i += 1
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programatic_function_from_dict(self):
SummerMonth = Enum(
'SummerMonth',
dict((('june', 1), ('july', 2), ('august', 3)))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
if pyver < 3.0:
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split()):
i += 1
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programatic_function_type(self):
SummerMonth = Enum('SummerMonth', 'june july august', type=int)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split()):
i += 1
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programatic_function_type_from_subclass(self):
SummerMonth = IntEnum('SummerMonth', 'june july august')
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split()):
i += 1
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programatic_function_unicode(self):
SummerMonth = Enum('SummerMonth', unicode('june july august'))
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate(unicode('june july august').split()):
i += 1
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programatic_function_unicode_list(self):
SummerMonth = Enum('SummerMonth', [unicode('june'), unicode('july'), unicode('august')])
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate(unicode('june july august').split()):
i += 1
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programatic_function_unicode_iterable(self):
SummerMonth = Enum(
'SummerMonth',
((unicode('june'), 1), (unicode('july'), 2), (unicode('august'), 3))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate(unicode('june july august').split()):
i += 1
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programatic_function_from_unicode_dict(self):
SummerMonth = Enum(
'SummerMonth',
dict(((unicode('june'), 1), (unicode('july'), 2), (unicode('august'), 3)))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
if pyver < 3.0:
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate(unicode('june july august').split()):
i += 1
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programatic_function_unicode_type(self):
SummerMonth = Enum('SummerMonth', unicode('june july august'), type=int)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate(unicode('june july august').split()):
i += 1
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programatic_function_unicode_type_from_subclass(self):
SummerMonth = IntEnum('SummerMonth', unicode('june july august'))
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate(unicode('june july august').split()):
i += 1
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programmatic_function_unicode_class(self):
if pyver < 3.0:
class_names = unicode('SummerMonth'), 'S\xfcmm\xe9rM\xf6nth'.decode('latin1')
else:
class_names = 'SummerMonth', 'S\xfcmm\xe9rM\xf6nth'
for i, class_name in enumerate(class_names):
if pyver < 3.0 and i == 1:
self.assertRaises(TypeError, Enum, class_name, unicode('june july august'))
else:
SummerMonth = Enum(class_name, unicode('june july august'))
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate(unicode('june july august').split()):
i += 1
e = SummerMonth(i)
self.assertEqual(e.value, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_subclassing(self):
if isinstance(Name, Exception):
raise Name
self.assertEqual(Name.BDFL, 'Guido van Rossum')
self.assertTrue(Name.BDFL is Name('Guido van Rossum'))
self.assertTrue(Name.BDFL is getattr(Name, 'BDFL'))
test_pickle_dump_load(self.assertTrue, Name.BDFL)
def test_extending(self):
def bad_extension():
class Color(Enum):
red = 1
green = 2
blue = 3
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
self.assertRaises(TypeError, bad_extension)
def test_exclude_methods(self):
class whatever(Enum):
this = 'that'
these = 'those'
def really(self):
return 'no, not %s' % self.value
self.assertFalse(type(whatever.really) is whatever)
self.assertEqual(whatever.this.really(), 'no, not that')
def test_wrong_inheritance_order(self):
def wrong_inherit():
class Wrong(Enum, str):
NotHere = 'error before this point'
self.assertRaises(TypeError, wrong_inherit)
def test_intenum_transitivity(self):
class number(IntEnum):
one = 1
two = 2
three = 3
class numero(IntEnum):
uno = 1
dos = 2
tres = 3
self.assertEqual(number.one, numero.uno)
self.assertEqual(number.two, numero.dos)
self.assertEqual(number.three, numero.tres)
def test_introspection(self):
class Number(IntEnum):
one = 100
two = 200
self.assertTrue(Number.one._member_type_ is int)
self.assertTrue(Number._member_type_ is int)
class String(str, Enum):
yarn = 'soft'
rope = 'rough'
wire = 'hard'
self.assertTrue(String.yarn._member_type_ is str)
self.assertTrue(String._member_type_ is str)
class Plain(Enum):
vanilla = 'white'
one = 1
self.assertTrue(Plain.vanilla._member_type_ is object)
self.assertTrue(Plain._member_type_ is object)
def test_wrong_enum_in_call(self):
class Monochrome(Enum):
black = 0
white = 1
class Gender(Enum):
male = 0
female = 1
self.assertRaises(ValueError, Monochrome, Gender.male)
def test_wrong_enum_in_mixed_call(self):
class Monochrome(IntEnum):
black = 0
white = 1
class Gender(Enum):
male = 0
female = 1
self.assertRaises(ValueError, Monochrome, Gender.male)
def test_mixed_enum_in_call_1(self):
class Monochrome(IntEnum):
black = 0
white = 1
class Gender(IntEnum):
male = 0
female = 1
self.assertTrue(Monochrome(Gender.female) is Monochrome.white)
def test_mixed_enum_in_call_2(self):
class Monochrome(Enum):
black = 0
white = 1
class Gender(IntEnum):
male = 0
female = 1
self.assertTrue(Monochrome(Gender.male) is Monochrome.black)
def test_flufl_enum(self):
class Fluflnum(Enum):
def __int__(self):
return int(self.value)
class MailManOptions(Fluflnum):
option1 = 1
option2 = 2
option3 = 3
self.assertEqual(int(MailManOptions.option1), 1)
def test_no_such_enum_member(self):
class Color(Enum):
red = 1
green = 2
blue = 3
self.assertRaises(ValueError, Color, 4)
self.assertRaises(KeyError, Color.__getitem__, 'chartreuse')
def test_new_repr(self):
class Color(Enum):
red = 1
green = 2
blue = 3
def __repr__(self):
return "don't you just love shades of %s?" % self.name
self.assertEqual(
repr(Color.blue),
"don't you just love shades of blue?",
)
def test_inherited_repr(self):
class MyEnum(Enum):
def __repr__(self):
return "My name is %s." % self.name
class MyIntEnum(int, MyEnum):
this = 1
that = 2
theother = 3
self.assertEqual(repr(MyIntEnum.that), "My name is that.")
def test_multiple_mixin_mro(self):
class auto_enum(EnumMeta):
def __new__(metacls, cls, bases, classdict):
original_dict = classdict
classdict = enum._EnumDict()
for k, v in original_dict.items():
classdict[k] = v
temp = type(classdict)()
names = set(classdict._member_names)
i = 0
for k in classdict._member_names:
v = classdict[k]
if v == ():
v = i
else:
i = v
i += 1
temp[k] = v
for k, v in classdict.items():
if k not in names:
temp[k] = v
return super(auto_enum, metacls).__new__(
metacls, cls, bases, temp)
AutoNumberedEnum = auto_enum('AutoNumberedEnum', (Enum,), {})
AutoIntEnum = auto_enum('AutoIntEnum', (IntEnum,), {})
class TestAutoNumber(AutoNumberedEnum):
a = ()
b = 3
c = ()
class TestAutoInt(AutoIntEnum):
a = ()
b = 3
c = ()
def test_subclasses_with_getnewargs(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
if len(args) < 1:
raise TypeError("name and value must be specified")
name, args = args[0], args[1:]
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __getnewargs__(self):
return self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "%s(%r, %s)" % (type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'(%s + %s)' % (self.__name__, other.__name__),
temp)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertTrue(NEI.__new__ is Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertTrue, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertTrue, NEI.y)
if pyver >= 3.4:
def test_subclasses_with_getnewargs_ex(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
if len(args) < 2:
raise TypeError("name and value must be specified")
name, args = args[0], args[1:]
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __getnewargs_ex__(self):
return self._args, {}
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5, protocol=(4, HIGHEST_PROTOCOL))
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertTrue, NEI.y, protocol=(4, HIGHEST_PROTOCOL))
def test_subclasses_with_reduce(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
if len(args) < 1:
raise TypeError("name and value must be specified")
name, args = args[0], args[1:]
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __reduce__(self):
return self.__class__, self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "%s(%r, %s)" % (type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'(%s + %s)' % (self.__name__, other.__name__),
temp)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertTrue(NEI.__new__ is Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertTrue, NEI.y)
def test_subclasses_with_reduce_ex(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
if len(args) < 1:
raise TypeError("name and value must be specified")
name, args = args[0], args[1:]
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __reduce_ex__(self, proto):
return self.__class__, self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "%s(%r, %s)" % (type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'(%s + %s)' % (self.__name__, other.__name__),
temp)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertTrue(NEI.__new__ is Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertTrue, NEI.y)
def test_subclasses_without_direct_pickle_support(self):
class NamedInt(int):
__qualname__ = 'NamedInt'
def __new__(cls, *args):
_args = args
name, args = args[0], args[1:]
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "%s(%r, %s)" % (type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'(%s + %s)' % (self.__name__, other.__name__),
temp)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI'
x = ('the-x', 1)
y = ('the-y', 2)
self.assertTrue(NEI.__new__ is Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_exception(self.assertRaises, TypeError, NEI.x)
test_pickle_exception(self.assertRaises, PicklingError, NEI)
def test_subclasses_without_direct_pickle_support_using_name(self):
class NamedInt(int):
__qualname__ = 'NamedInt'
def __new__(cls, *args):
_args = args
name, args = args[0], args[1:]
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "%s(%r, %s)" % (type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'(%s + %s)' % (self.__name__, other.__name__),
temp)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI'
x = ('the-x', 1)
y = ('the-y', 2)
def __reduce_ex__(self, proto):
return getattr, (self.__class__, self._name_)
self.assertTrue(NEI.__new__ is Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertTrue, NEI.y)
test_pickle_dump_load(self.assertTrue, NEI)
def test_tuple_subclass(self):
class SomeTuple(tuple, Enum):
__qualname__ = 'SomeTuple'
first = (1, 'for the money')
second = (2, 'for the show')
third = (3, 'for the music')
self.assertTrue(type(SomeTuple.first) is SomeTuple)
self.assertTrue(isinstance(SomeTuple.second, tuple))
self.assertEqual(SomeTuple.third, (3, 'for the music'))
globals()['SomeTuple'] = SomeTuple
test_pickle_dump_load(self.assertTrue, SomeTuple.first)
def test_duplicate_values_give_unique_enum_items(self):
class AutoNumber(Enum):
__order__ = 'enum_m enum_d enum_y'
enum_m = ()
enum_d = ()
enum_y = ()
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def __int__(self):
return int(self._value_)
self.assertEqual(int(AutoNumber.enum_d), 2)
self.assertEqual(AutoNumber.enum_y.value, 3)
self.assertTrue(AutoNumber(1) is AutoNumber.enum_m)
self.assertEqual(
list(AutoNumber),
[AutoNumber.enum_m, AutoNumber.enum_d, AutoNumber.enum_y],
)
def test_inherited_new_from_enhanced_enum(self):
class AutoNumber2(Enum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def __int__(self):
return int(self._value_)
class Color(AutoNumber2):
__order__ = 'red green blue'
red = ()
green = ()
blue = ()
self.assertEqual(len(Color), 3, "wrong number of elements: %d (should be %d)" % (len(Color), 3))
self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
if pyver >= 3.0:
self.assertEqual(list(map(int, Color)), [1, 2, 3])
def test_inherited_new_from_mixed_enum(self):
class AutoNumber3(IntEnum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = int.__new__(cls, value)
obj._value_ = value
return obj
class Color(AutoNumber3):
red = ()
green = ()
blue = ()
self.assertEqual(len(Color), 3, "wrong number of elements: %d (should be %d)" % (len(Color), 3))
Color.red
Color.green
Color.blue
def test_ordered_mixin(self):
class OrderedEnum(Enum):
def __ge__(self, other):
if self.__class__ is other.__class__:
return self._value_ >= other._value_
return NotImplemented
def __gt__(self, other):
if self.__class__ is other.__class__:
return self._value_ > other._value_
return NotImplemented
def __le__(self, other):
if self.__class__ is other.__class__:
return self._value_ <= other._value_
return NotImplemented
def __lt__(self, other):
if self.__class__ is other.__class__:
return self._value_ < other._value_
return NotImplemented
class Grade(OrderedEnum):
__order__ = 'A B C D F'
A = 5
B = 4
C = 3
D = 2
F = 1
self.assertEqual(list(Grade), [Grade.A, Grade.B, Grade.C, Grade.D, Grade.F])
self.assertTrue(Grade.A > Grade.B)
self.assertTrue(Grade.F <= Grade.C)
self.assertTrue(Grade.D < Grade.A)
self.assertTrue(Grade.B >= Grade.B)
def test_extending2(self):
def bad_extension():
class Shade(Enum):
def shade(self):
print(self.name)
class Color(Shade):
red = 1
green = 2
blue = 3
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
self.assertRaises(TypeError, bad_extension)
def test_extending3(self):
class Shade(Enum):
def shade(self):
return self.name
class Color(Shade):
def hex(self):
return '%s hexlified!' % self.value
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
self.assertEqual(MoreColor.magenta.hex(), '5 hexlified!')
def test_no_duplicates(self):
def bad_duplicates():
class UniqueEnum(Enum):
def __init__(self, *args):
cls = self.__class__
if any(self.value == e.value for e in cls):
a = self.name
e = cls(self.value).name
raise ValueError(
"aliases not allowed in UniqueEnum: %r --> %r"
% (a, e)
)
class Color(UniqueEnum):
red = 1
green = 2
blue = 3
class Color(UniqueEnum):
red = 1
green = 2
blue = 3
grene = 2
self.assertRaises(ValueError, bad_duplicates)
def test_reversed(self):
self.assertEqual(
list(reversed(self.Season)),
[self.Season.WINTER, self.Season.AUTUMN, self.Season.SUMMER,
self.Season.SPRING]
)
def test_init(self):
class Planet(Enum):
MERCURY = (3.303e+23, 2.4397e6)
VENUS = (4.869e+24, 6.0518e6)
EARTH = (5.976e+24, 6.37814e6)
MARS = (6.421e+23, 3.3972e6)
JUPITER = (1.9e+27, 7.1492e7)
SATURN = (5.688e+26, 6.0268e7)
URANUS = (8.686e+25, 2.5559e7)
NEPTUNE = (1.024e+26, 2.4746e7)
def __init__(self, mass, radius):
self.mass = mass # in kilograms
self.radius = radius # in meters
@property
def surface_gravity(self):
# universal gravitational constant (m3 kg-1 s-2)
G = 6.67300E-11
return G * self.mass / (self.radius * self.radius)
self.assertEqual(round(Planet.EARTH.surface_gravity, 2), 9.80)
self.assertEqual(Planet.EARTH.value, (5.976e+24, 6.37814e6))
def test_nonhash_value(self):
class AutoNumberInAList(Enum):
def __new__(cls):
value = [len(cls.__members__) + 1]
obj = object.__new__(cls)
obj._value_ = value
return obj
class ColorInAList(AutoNumberInAList):
__order__ = 'red green blue'
red = ()
green = ()
blue = ()
self.assertEqual(list(ColorInAList), [ColorInAList.red, ColorInAList.green, ColorInAList.blue])
self.assertEqual(ColorInAList.red.value, [1])
self.assertEqual(ColorInAList([1]), ColorInAList.red)
def test_conflicting_types_resolved_in_new(self):
class LabelledIntEnum(int, Enum):
def __new__(cls, *args):
value, label = args
obj = int.__new__(cls, value)
obj.label = label
obj._value_ = value
return obj
class LabelledList(LabelledIntEnum):
unprocessed = (1, "Unprocessed")
payment_complete = (2, "Payment Complete")
self.assertEqual(list(LabelledList), [LabelledList.unprocessed, LabelledList.payment_complete])
self.assertEqual(LabelledList.unprocessed, 1)
self.assertEqual(LabelledList(1), LabelledList.unprocessed)
class TestUnique(unittest.TestCase):
"""2.4 doesn't allow class decorators, use function syntax."""
def test_unique_clean(self):
class Clean(Enum):
one = 1
two = 'dos'
tres = 4.0
unique(Clean)
class Cleaner(IntEnum):
single = 1
double = 2
triple = 3
unique(Cleaner)
def test_unique_dirty(self):
try:
class Dirty(Enum):
__order__ = 'one two tres'
one = 1
two = 'dos'
tres = 1
unique(Dirty)
except ValueError:
exc = sys.exc_info()[1]
message = exc.args[0]
self.assertTrue('tres -> one' in message)
try:
class Dirtier(IntEnum):
__order__ = 'single double triple turkey'
single = 1
double = 1
triple = 3
turkey = 3
unique(Dirtier)
except ValueError:
exc = sys.exc_info()[1]
message = exc.args[0]
self.assertTrue('double -> single' in message)
self.assertTrue('turkey -> triple' in message)
class TestMe(unittest.TestCase):
pass
if __name__ == '__main__':
unittest.main()
|
gpl-3.0
|
houlixin/BBB-TISDK
|
linux-devkit/sysroots/cortexa8t2hf-vfp-neon-linux-gnueabi/usr/lib/python2.7/site-packages/dbus/mainloop/glib.py
|
10
|
1773
|
# Copyright (C) 2004 Anders Carlsson
# Copyright (C) 2004-2006 Red Hat Inc. <http://www.redhat.com/>
# Copyright (C) 2005-2006 Collabora Ltd. <http://www.collabora.co.uk/>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""GLib main loop integration using libdbus-glib."""
__all__ = ('DBusGMainLoop', 'threads_init')
from _dbus_glib_bindings import DBusGMainLoop, gthreads_init
_dbus_gthreads_initialized = False
def threads_init():
"""Initialize threads in dbus-glib, if this has not already been done.
This must be called before creating a second thread in a program that
uses this module.
"""
global _dbus_gthreads_initialized
if not _dbus_gthreads_initialized:
gthreads_init()
_dbus_gthreads_initialized = True
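# A minimal usage sketch (illustrative, not part of the original module):
#
#     import dbus
#     from dbus.mainloop.glib import DBusGMainLoop, threads_init
#
#     threads_init()                      # before any second thread exists
#     DBusGMainLoop(set_as_default=True)  # install GLib as the default loop
#     bus = dbus.SessionBus()             # connections now use the GLib loop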
|
gpl-2.0
|
darkenk/scripts
|
android/project_creator/venv/lib/python3.5/site-packages/pip/_vendor/pkg_resources/__init__.py
|
320
|
103230
|
# coding: utf-8
"""
Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof. The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is. Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files. It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""
from __future__ import absolute_import
import sys
import os
import io
import time
import re
import types
import zipfile
import zipimport
import warnings
import stat
import functools
import pkgutil
import operator
import platform
import collections
import plistlib
import email.parser
import tempfile
import textwrap
import itertools
from pkgutil import get_importer
try:
import _imp
except ImportError:
# Python 3.2 compatibility
import imp as _imp
from pip._vendor import six
from pip._vendor.six.moves import urllib, map, filter
# capture these to bypass sandboxing
from os import utime
try:
from os import mkdir, rename, unlink
WRITE_SUPPORT = True
except ImportError:
# no write support, probably under GAE
WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
try:
import importlib.machinery as importlib_machinery
# access attribute to force import under delayed import mechanisms.
importlib_machinery.__name__
except ImportError:
importlib_machinery = None
from pip._vendor import appdirs
from pip._vendor import packaging
__import__('pip._vendor.packaging.version')
__import__('pip._vendor.packaging.specifiers')
__import__('pip._vendor.packaging.requirements')
__import__('pip._vendor.packaging.markers')
if (3, 0) < sys.version_info < (3, 3):
msg = (
"Support for Python 3.0-3.2 has been dropped. Future versions "
"will fail here."
)
warnings.warn(msg)
# declare some globals that will be defined later to
# satisfy the linters.
require = None
working_set = None
class PEP440Warning(RuntimeWarning):
"""
Used when there is an issue with a version or specifier not complying with
PEP 440.
"""
class _SetuptoolsVersionMixin(object):
def __hash__(self):
return super(_SetuptoolsVersionMixin, self).__hash__()
def __lt__(self, other):
if isinstance(other, tuple):
return tuple(self) < other
else:
return super(_SetuptoolsVersionMixin, self).__lt__(other)
def __le__(self, other):
if isinstance(other, tuple):
return tuple(self) <= other
else:
return super(_SetuptoolsVersionMixin, self).__le__(other)
def __eq__(self, other):
if isinstance(other, tuple):
return tuple(self) == other
else:
return super(_SetuptoolsVersionMixin, self).__eq__(other)
def __ge__(self, other):
if isinstance(other, tuple):
return tuple(self) >= other
else:
return super(_SetuptoolsVersionMixin, self).__ge__(other)
def __gt__(self, other):
if isinstance(other, tuple):
return tuple(self) > other
else:
return super(_SetuptoolsVersionMixin, self).__gt__(other)
def __ne__(self, other):
if isinstance(other, tuple):
return tuple(self) != other
else:
return super(_SetuptoolsVersionMixin, self).__ne__(other)
def __getitem__(self, key):
return tuple(self)[key]
def __iter__(self):
component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
replace = {
'pre': 'c',
'preview': 'c',
'-': 'final-',
'rc': 'c',
'dev': '@',
}.get
def _parse_version_parts(s):
for part in component_re.split(s):
part = replace(part, part)
if not part or part == '.':
continue
if part[:1] in '0123456789':
# pad for numeric comparison
yield part.zfill(8)
else:
yield '*' + part
# ensure that alpha/beta/candidate are before final
yield '*final'
def old_parse_version(s):
parts = []
for part in _parse_version_parts(s.lower()):
if part.startswith('*'):
# remove '-' before a prerelease tag
if part < '*final':
while parts and parts[-1] == '*final-':
parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1] == '00000000':
parts.pop()
parts.append(part)
return tuple(parts)
# Warn for use of this function
warnings.warn(
"You have iterated over the result of "
"pkg_resources.parse_version. This is a legacy behavior which is "
"inconsistent with the new version class introduced in setuptools "
"8.0. In most cases, conversion to a tuple is unnecessary. For "
"comparison of versions, sort the Version instances directly. If "
"you have another use case requiring the tuple, please file a "
"bug with the setuptools project describing that need.",
RuntimeWarning,
stacklevel=1,
)
for part in old_parse_version(str(self)):
yield part
class SetuptoolsVersion(_SetuptoolsVersionMixin, packaging.version.Version):
pass
class SetuptoolsLegacyVersion(_SetuptoolsVersionMixin,
packaging.version.LegacyVersion):
pass
def parse_version(v):
try:
return SetuptoolsVersion(v)
except packaging.version.InvalidVersion:
return SetuptoolsLegacyVersion(v)
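# Illustrative sketch (not part of the original module): parse_version
# returns PEP 440-aware objects that compare correctly even where plain
# string comparison would not:
#
#     parse_version('1.10') > parse_version('1.9')    # True ('1.10' < '1.9' as strings)
#     parse_version('1.0rc1') < parse_version('1.0')  # True (prereleases sort first)
#
# Non-conforming versions fall back to the legacy scheme:
#
#     isinstance(parse_version('junk-1.x'), SetuptoolsLegacyVersion)  # True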
_state_vars = {}
def _declare_state(vartype, **kw):
globals().update(kw)
_state_vars.update(dict.fromkeys(kw, vartype))
def __getstate__():
state = {}
g = globals()
for k, v in _state_vars.items():
state[k] = g['_sget_' + v](g[k])
return state
def __setstate__(state):
g = globals()
for k, v in state.items():
g['_sset_' + _state_vars[k]](k, g[k], v)
return state
def _sget_dict(val):
return val.copy()
def _sset_dict(key, ob, state):
ob.clear()
ob.update(state)
def _sget_object(val):
return val.__getstate__()
def _sset_object(key, ob, state):
ob.__setstate__(state)
_sget_none = _sset_none = lambda *args: None
def get_supported_platform():
"""Return this platform's maximum compatible version.
distutils.util.get_platform() normally reports the minimum version
of Mac OS X that would be required to *use* extensions produced by
distutils. But what we want when checking compatibility is to know the
version of Mac OS X that we are *running*. To allow usage of packages that
explicitly require a newer version of Mac OS X, we must also know the
current version of the OS.
If this condition occurs for any other platform with a version in its
platform strings, this function should be extended accordingly.
"""
plat = get_build_platform()
m = macosVersionString.match(plat)
if m is not None and sys.platform == "darwin":
try:
plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
except ValueError:
# not Mac OS X
pass
return plat
__all__ = [
# Basic resource access and distribution/entry point discovery
'require', 'run_script', 'get_provider', 'get_distribution',
'load_entry_point', 'get_entry_map', 'get_entry_info',
'iter_entry_points',
'resource_string', 'resource_stream', 'resource_filename',
'resource_listdir', 'resource_exists', 'resource_isdir',
# Environmental control
'declare_namespace', 'working_set', 'add_activation_listener',
'find_distributions', 'set_extraction_path', 'cleanup_resources',
'get_default_cache',
# Primary implementation classes
'Environment', 'WorkingSet', 'ResourceManager',
'Distribution', 'Requirement', 'EntryPoint',
# Exceptions
'ResolutionError', 'VersionConflict', 'DistributionNotFound',
'UnknownExtra', 'ExtractionError',
# Warnings
'PEP440Warning',
# Parsing functions and string utilities
'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
# filesystem utilities
'ensure_directory', 'normalize_path',
# Distribution "precedence" constants
'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
# "Provider" interfaces, implementations, and registration/lookup APIs
'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
'register_finder', 'register_namespace_handler', 'register_loader_type',
'fixup_namespace_packages', 'get_importer',
# Deprecated/backward compatibility only
'run_main', 'AvailableDistributions',
]
class ResolutionError(Exception):
"""Abstract base for dependency resolution errors"""
def __repr__(self):
return self.__class__.__name__ + repr(self.args)
class VersionConflict(ResolutionError):
"""
An already-installed version conflicts with the requested version.
Should be initialized with the installed Distribution and the requested
Requirement.
"""
_template = "{self.dist} is installed but {self.req} is required"
@property
def dist(self):
return self.args[0]
@property
def req(self):
return self.args[1]
def report(self):
return self._template.format(**locals())
def with_context(self, required_by):
"""
If required_by is non-empty, return a version of self that is a
ContextualVersionConflict.
"""
if not required_by:
return self
args = self.args + (required_by,)
return ContextualVersionConflict(*args)
class ContextualVersionConflict(VersionConflict):
"""
A VersionConflict that accepts a third parameter, the set of the
requirements that required the installed Distribution.
"""
_template = VersionConflict._template + ' by {self.required_by}'
@property
def required_by(self):
return self.args[2]
class DistributionNotFound(ResolutionError):
"""A requested distribution was not found"""
_template = ("The '{self.req}' distribution was not found "
"and is required by {self.requirers_str}")
@property
def req(self):
return self.args[0]
@property
def requirers(self):
return self.args[1]
@property
def requirers_str(self):
if not self.requirers:
return 'the application'
return ', '.join(self.requirers)
def report(self):
return self._template.format(**locals())
def __str__(self):
return self.report()
class UnknownExtra(ResolutionError):
"""Distribution doesn't have an "extra feature" of the given name"""
_provider_factories = {}
PY_MAJOR = sys.version[:3]
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
"""Register `provider_factory` to make providers for `loader_type`
`loader_type` is the type or class of a PEP 302 ``module.__loader__``,
and `provider_factory` is a function that, passed a *module* object,
returns an ``IResourceProvider`` for that module.
"""
_provider_factories[loader_type] = provider_factory
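# Illustrative sketch (not part of the original module): a provider factory
# is any one-argument callable taking a module, so registering support for
# a custom loader type looks like this (`MyLoader` is a hypothetical name):
#
#     def my_provider_factory(module):
#         return DefaultProvider(module)   # or any IResourceProvider
#
#     register_loader_type(MyLoader, my_provider_factory)
#
# get_provider() below then uses _find_adapter() to pick the factory whose
# registered loader type matches the module's __loader__.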
def get_provider(moduleOrReq):
"""Return an IResourceProvider for the named module or requirement"""
if isinstance(moduleOrReq, Requirement):
return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
try:
module = sys.modules[moduleOrReq]
except KeyError:
__import__(moduleOrReq)
module = sys.modules[moduleOrReq]
loader = getattr(module, '__loader__', None)
return _find_adapter(_provider_factories, loader)(module)
def _macosx_vers(_cache=[]):
if not _cache:
version = platform.mac_ver()[0]
# fallback for MacPorts
if version == '':
plist = '/System/Library/CoreServices/SystemVersion.plist'
if os.path.exists(plist):
if hasattr(plistlib, 'readPlist'):
plist_content = plistlib.readPlist(plist)
if 'ProductVersion' in plist_content:
version = plist_content['ProductVersion']
_cache.append(version.split('.'))
return _cache[0]
def _macosx_arch(machine):
return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
def get_build_platform():
"""Return this platform's string for platform-specific distributions
XXX Currently this is the same as ``distutils.util.get_platform()``, but it
needs some hacks for Linux and Mac OS X.
"""
try:
# Python 2.7 or >=3.2
from sysconfig import get_platform
except ImportError:
from distutils.util import get_platform
plat = get_platform()
if sys.platform == "darwin" and not plat.startswith('macosx-'):
try:
version = _macosx_vers()
machine = os.uname()[4].replace(" ", "_")
return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]),
_macosx_arch(machine))
except ValueError:
# if someone is running a non-Mac darwin system, this will fall
# through to the default implementation
pass
return plat
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
# XXX backward compat
get_platform = get_build_platform
def compatible_platforms(provided, required):
"""Can code for the `provided` platform run on the `required` platform?
Returns true if either platform is ``None``, or the platforms are equal.
XXX Needs compatibility checks for Linux and other unixy OSes.
"""
if provided is None or required is None or provided == required:
# easy case
return True
# Mac OS X special cases
reqMac = macosVersionString.match(required)
if reqMac:
provMac = macosVersionString.match(provided)
# is this a Mac package?
if not provMac:
# this is backwards compatibility for packages built before
# setuptools 0.6. All packages built after this point will
# use the new macosx designation.
provDarwin = darwinVersionString.match(provided)
if provDarwin:
dversion = int(provDarwin.group(1))
macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
if dversion == 7 and macosversion >= "10.3" or \
dversion == 8 and macosversion >= "10.4":
return True
# egg isn't macosx or legacy darwin
return False
# are they the same major version and machine type?
if provMac.group(1) != reqMac.group(1) or \
provMac.group(3) != reqMac.group(3):
return False
# is the required OS major update >= the provided one?
if int(provMac.group(2)) > int(reqMac.group(2)):
return False
return True
# XXX Linux and other platforms' special cases should go here
return False
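# --- Illustrative sketch, not part of the original module ---
# Expected outcomes of ``compatible_platforms`` for the macosx special case
# documented above:
def _demo_compatible_platforms():
    assert compatible_platforms(None, 'linux-x86_64')  # None matches anything
    # an egg built on an older OS X minor release runs on a newer one...
    assert compatible_platforms('macosx-10.4-ppc', 'macosx-10.9-ppc')
    # ...but not the other way around, and never across machine types
    assert not compatible_platforms('macosx-10.9-ppc', 'macosx-10.4-ppc')
    assert not compatible_platforms('macosx-10.4-ppc', 'macosx-10.4-i386')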
def run_script(dist_spec, script_name):
"""Locate distribution `dist_spec` and run its `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
require(dist_spec)[0].run_script(script_name, ns)
# backward compatibility
run_main = run_script
def get_distribution(dist):
"""Return a current distribution object for a Requirement or string"""
if isinstance(dist, six.string_types):
dist = Requirement.parse(dist)
if isinstance(dist, Requirement):
dist = get_provider(dist)
if not isinstance(dist, Distribution):
raise TypeError("Expected string, Requirement, or Distribution", dist)
return dist
def load_entry_point(dist, group, name):
"""Return `name` entry point of `group` for `dist` or raise ImportError"""
return get_distribution(dist).load_entry_point(group, name)
def get_entry_map(dist, group=None):
"""Return the entry point map for `group`, or the full entry map"""
return get_distribution(dist).get_entry_map(group)
def get_entry_info(dist, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return get_distribution(dist).get_entry_info(group, name)
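# --- Illustrative sketch, not part of the original module ---
# The helpers above are thin wrappers over ``Distribution`` methods. Both
# "mypkg" and "mytool" below are hypothetical names, assuming a distribution
# that advertises a console_scripts entry point:
def _demo_entry_point_helpers():
    info = get_entry_info('mypkg', 'console_scripts', 'mytool')  # EntryPoint or None
    if info is not None:
        # imports mypkg and returns the advertised callable
        return load_entry_point('mypkg', 'console_scripts', 'mytool')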
class IMetadataProvider:
def has_metadata(name):
"""Does the package's distribution contain the named metadata?"""
def get_metadata(name):
"""The named metadata resource as a string"""
def get_metadata_lines(name):
"""Yield named metadata resource as list of non-blank non-comment lines
Leading and trailing whitespace is stripped from each line, and lines
with ``#`` as the first non-blank character are omitted."""
def metadata_isdir(name):
"""Is the named metadata a directory? (like ``os.path.isdir()``)"""
def metadata_listdir(name):
"""List of metadata names in the directory (like ``os.listdir()``)"""
def run_script(script_name, namespace):
"""Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
"""An object that provides access to package resources"""
def get_resource_filename(manager, resource_name):
"""Return a true filesystem path for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_stream(manager, resource_name):
"""Return a readable file-like object for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_string(manager, resource_name):
"""Return a string containing the contents of `resource_name`
`manager` must be an ``IResourceManager``"""
def has_resource(resource_name):
"""Does the package contain the named resource?"""
def resource_isdir(resource_name):
"""Is the named resource a directory? (like ``os.path.isdir()``)"""
def resource_listdir(resource_name):
"""List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet(object):
"""A collection of active distributions on sys.path (or a similar list)"""
def __init__(self, entries=None):
"""Create working set from list of path entries (default=sys.path)"""
self.entries = []
self.entry_keys = {}
self.by_key = {}
self.callbacks = []
if entries is None:
entries = sys.path
for entry in entries:
self.add_entry(entry)
@classmethod
def _build_master(cls):
"""
Prepare the master working set.
"""
ws = cls()
try:
from __main__ import __requires__
except ImportError:
# The main program does not list any requirements
return ws
# ensure the requirements are met
try:
ws.require(__requires__)
except VersionConflict:
return cls._build_from_requirements(__requires__)
return ws
@classmethod
def _build_from_requirements(cls, req_spec):
"""
Build a working set from a requirement spec. Rewrites sys.path.
"""
# try it without defaults already on sys.path
# by starting with an empty path
ws = cls([])
reqs = parse_requirements(req_spec)
dists = ws.resolve(reqs, Environment())
for dist in dists:
ws.add(dist)
# add any missing entries from sys.path
for entry in sys.path:
if entry not in ws.entries:
ws.add_entry(entry)
# then copy back to sys.path
sys.path[:] = ws.entries
return ws
def add_entry(self, entry):
"""Add a path item to ``.entries``, finding any distributions on it
``find_distributions(entry, True)`` is used to find distributions
corresponding to the path entry, and they are added. `entry` is
always appended to ``.entries``, even if it is already present.
(This is because ``sys.path`` can contain the same value more than
once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
equal ``sys.path``.)
"""
self.entry_keys.setdefault(entry, [])
self.entries.append(entry)
for dist in find_distributions(entry, True):
self.add(dist, entry, False)
def __contains__(self, dist):
"""True if `dist` is the active distribution for its project"""
return self.by_key.get(dist.key) == dist
def find(self, req):
"""Find a distribution matching requirement `req`
If there is an active distribution for the requested project, this
returns it as long as it meets the version requirement specified by
`req`. But, if there is an active distribution for the project and it
does *not* meet the `req` requirement, ``VersionConflict`` is raised.
If there is no active distribution for the requested project, ``None``
is returned.
"""
dist = self.by_key.get(req.key)
if dist is not None and dist not in req:
# XXX add more info
raise VersionConflict(dist, req)
return dist
def iter_entry_points(self, group, name=None):
"""Yield entry point objects from `group` matching `name`
If `name` is None, yields all entry points in `group` from all
distributions in the working set, otherwise only ones matching
both `group` and `name` are yielded (in distribution order).
"""
for dist in self:
entries = dist.get_entry_map(group)
if name is None:
for ep in entries.values():
yield ep
elif name in entries:
yield entries[name]
def run_script(self, requires, script_name):
"""Locate distribution for `requires` and run `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
self.require(requires)[0].run_script(script_name, ns)
def __iter__(self):
"""Yield distributions for non-duplicate projects in the working set
The yield order is the order in which the items' path entries were
added to the working set.
"""
seen = {}
for item in self.entries:
if item not in self.entry_keys:
# workaround a cache issue
continue
for key in self.entry_keys[item]:
if key not in seen:
seen[key] = 1
yield self.by_key[key]
def add(self, dist, entry=None, insert=True, replace=False):
"""Add `dist` to working set, associated with `entry`
If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
On exit from this routine, `entry` is added to the end of the working
set's ``.entries`` (if it wasn't already present).
`dist` is only added to the working set if it's for a project that
doesn't already have a distribution in the set, unless `replace=True`.
If it's added, any callbacks registered with the ``subscribe()`` method
will be called.
"""
if insert:
dist.insert_on(self.entries, entry, replace=replace)
if entry is None:
entry = dist.location
keys = self.entry_keys.setdefault(entry, [])
keys2 = self.entry_keys.setdefault(dist.location, [])
if not replace and dist.key in self.by_key:
# ignore hidden distros
return
self.by_key[dist.key] = dist
if dist.key not in keys:
keys.append(dist.key)
if dist.key not in keys2:
keys2.append(dist.key)
self._added_new(dist)
def resolve(self, requirements, env=None, installer=None,
replace_conflicting=False):
"""List all distributions needed to (recursively) meet `requirements`
`requirements` must be a sequence of ``Requirement`` objects. `env`,
if supplied, should be an ``Environment`` instance. If
not supplied, it defaults to all distributions available within any
entry or distribution in the working set. `installer`, if supplied,
will be invoked with each requirement that cannot be met by an
already-installed distribution; it should return a ``Distribution`` or
``None``.
Unless `replace_conflicting=True`, raises a VersionConflict exception if
any requirements are found on the path that have the correct name but
the wrong version. Otherwise, if an `installer` is supplied it will be
invoked to obtain the correct version of the requirement and activate
it.
"""
# set up the stack
requirements = list(requirements)[::-1]
# set of processed requirements
processed = {}
# key -> dist
best = {}
to_activate = []
req_extras = _ReqExtras()
# Mapping of requirement to set of distributions that required it;
# useful for reporting info about conflicts.
required_by = collections.defaultdict(set)
while requirements:
# process dependencies breadth-first
req = requirements.pop(0)
if req in processed:
# Ignore cyclic or redundant dependencies
continue
if not req_extras.markers_pass(req):
continue
dist = best.get(req.key)
if dist is None:
# Find the best distribution and add it to the map
dist = self.by_key.get(req.key)
if dist is None or (dist not in req and replace_conflicting):
ws = self
if env is None:
if dist is None:
env = Environment(self.entries)
else:
# Use an empty environment and workingset to avoid
# any further conflicts with the conflicting
# distribution
env = Environment([])
ws = WorkingSet([])
dist = best[req.key] = env.best_match(req, ws, installer)
if dist is None:
requirers = required_by.get(req, None)
raise DistributionNotFound(req, requirers)
to_activate.append(dist)
if dist not in req:
# Oops, the "best" so far conflicts with a dependency
dependent_req = required_by[req]
raise VersionConflict(dist, req).with_context(dependent_req)
# push the new requirements onto the stack
new_requirements = dist.requires(req.extras)[::-1]
requirements.extend(new_requirements)
# Register the new requirements needed by req
for new_requirement in new_requirements:
required_by[new_requirement].add(req.project_name)
req_extras[new_requirement] = req.extras
processed[req] = True
# return list of distros to activate
return to_activate
def find_plugins(self, plugin_env, full_env=None, installer=None,
fallback=True):
"""Find all activatable distributions in `plugin_env`
Example usage::
distributions, errors = working_set.find_plugins(
Environment(plugin_dirlist)
)
# add plugins+libs to sys.path
map(working_set.add, distributions)
# display errors
print('Could not load', errors)
The `plugin_env` should be an ``Environment`` instance that contains
only distributions that are in the project's "plugin directory" or
        directories. The `full_env`, if supplied, should be an ``Environment``
        that contains all currently-available distributions. If `full_env` is not
supplied, one is created automatically from the ``WorkingSet`` this
method is called on, which will typically mean that every directory on
``sys.path`` will be scanned for distributions.
`installer` is a standard installer callback as used by the
``resolve()`` method. The `fallback` flag indicates whether we should
attempt to resolve older versions of a plugin if the newest version
cannot be resolved.
This method returns a 2-tuple: (`distributions`, `error_info`), where
`distributions` is a list of the distributions found in `plugin_env`
that were loadable, along with any other distributions that are needed
to resolve their dependencies. `error_info` is a dictionary mapping
unloadable plugin distributions to an exception instance describing the
error that occurred. Usually this will be a ``DistributionNotFound`` or
``VersionConflict`` instance.
"""
plugin_projects = list(plugin_env)
# scan project names in alphabetic order
plugin_projects.sort()
error_info = {}
distributions = {}
if full_env is None:
env = Environment(self.entries)
env += plugin_env
else:
env = full_env + plugin_env
shadow_set = self.__class__([])
# put all our entries in shadow_set
list(map(shadow_set.add, self))
for project_name in plugin_projects:
for dist in plugin_env[project_name]:
req = [dist.as_requirement()]
try:
resolvees = shadow_set.resolve(req, env, installer)
except ResolutionError as v:
# save error info
error_info[dist] = v
if fallback:
# try the next older version of project
continue
else:
# give up on this project, keep going
break
else:
list(map(shadow_set.add, resolvees))
distributions.update(dict.fromkeys(resolvees))
# success, no need to try any more versions of this project
break
distributions = list(distributions)
distributions.sort()
return distributions, error_info
def require(self, *requirements):
"""Ensure that distributions matching `requirements` are activated
`requirements` must be a string or a (possibly-nested) sequence
thereof, specifying the distributions and versions required. The
return value is a sequence of the distributions that needed to be
activated to fulfill the requirements; all relevant distributions are
included, even if they were already activated in this working set.
"""
needed = self.resolve(parse_requirements(requirements))
for dist in needed:
self.add(dist)
return needed
def subscribe(self, callback, existing=True):
"""Invoke `callback` for all distributions
        If `existing=True` (default), the callback is also invoked for all
        distributions already in this working set.
"""
if callback in self.callbacks:
return
self.callbacks.append(callback)
if not existing:
return
for dist in self:
callback(dist)
def _added_new(self, dist):
for callback in self.callbacks:
callback(dist)
def __getstate__(self):
return (
self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
self.callbacks[:]
)
def __setstate__(self, e_k_b_c):
entries, keys, by_key, callbacks = e_k_b_c
self.entries = entries[:]
self.entry_keys = keys.copy()
self.by_key = by_key.copy()
self.callbacks = callbacks[:]
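# --- Illustrative sketch, not part of the original module ---
# Typical WorkingSet usage: snapshot sys.path, resolve a requirement, and
# activate what was found. ``parse_requirements`` is defined later in this
# module; "six" is just an example project name and may not be installed:
def _demo_working_set():
    ws = WorkingSet()  # defaults to scanning sys.path
    # raises DistributionNotFound/VersionConflict if it cannot be satisfied
    dists = ws.resolve(parse_requirements(['six']))
    for dist in dists:
        ws.add(dist)  # activates the dist and fires subscribe() callbacks
    return [str(d) for d in ws]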
class _ReqExtras(dict):
"""
Map each requirement to the extras that demanded it.
"""
def markers_pass(self, req):
"""
Evaluate markers for req against each extra that
demanded it.
Return False if the req has a marker and fails
evaluation. Otherwise, return True.
"""
extra_evals = (
req.marker.evaluate({'extra': extra})
for extra in self.get(req, ()) + (None,)
)
return not req.marker or any(extra_evals)
class Environment(object):
"""Searchable snapshot of distributions on a search path"""
def __init__(self, search_path=None, platform=get_supported_platform(),
python=PY_MAJOR):
"""Snapshot distributions available on a search path
Any distributions found on `search_path` are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used.
`platform` is an optional string specifying the name of the platform
that platform-specific distributions must be compatible with. If
unspecified, it defaults to the current platform. `python` is an
optional string naming the desired version of Python (e.g. ``'3.3'``);
it defaults to the current version.
You may explicitly set `platform` (and/or `python`) to ``None`` if you
wish to map *all* distributions, not just those compatible with the
running platform or Python version.
"""
self._distmap = {}
self.platform = platform
self.python = python
self.scan(search_path)
def can_add(self, dist):
"""Is distribution `dist` acceptable for this environment?
The distribution must match the platform and python version
requirements specified when this environment was created, or False
is returned.
"""
return (self.python is None or dist.py_version is None
or dist.py_version == self.python) \
and compatible_platforms(dist.platform, self.platform)
def remove(self, dist):
"""Remove `dist` from the environment"""
self._distmap[dist.key].remove(dist)
def scan(self, search_path=None):
"""Scan `search_path` for distributions usable in this environment
Any distributions found are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used. Only distributions conforming to
the platform/python version defined at initialization are added.
"""
if search_path is None:
search_path = sys.path
for item in search_path:
for dist in find_distributions(item):
self.add(dist)
def __getitem__(self, project_name):
"""Return a newest-to-oldest list of distributions for `project_name`
Uses case-insensitive `project_name` comparison, assuming all the
project's distributions use their project's name converted to all
lowercase as their key.
"""
distribution_key = project_name.lower()
return self._distmap.get(distribution_key, [])
def add(self, dist):
"""Add `dist` if we ``can_add()`` it and it has not already been added
"""
if self.can_add(dist) and dist.has_version():
dists = self._distmap.setdefault(dist.key, [])
if dist not in dists:
dists.append(dist)
dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
def best_match(self, req, working_set, installer=None):
"""Find distribution best matching `req` and usable on `working_set`
This calls the ``find(req)`` method of the `working_set` to see if a
suitable distribution is already active. (This may raise
``VersionConflict`` if an unsuitable version of the project is already
active in the specified `working_set`.) If a suitable distribution
isn't active, this method returns the newest distribution in the
environment that meets the ``Requirement`` in `req`. If no suitable
distribution is found, and `installer` is supplied, then the result of
calling the environment's ``obtain(req, installer)`` method will be
returned.
"""
dist = working_set.find(req)
if dist is not None:
return dist
for dist in self[req.key]:
if dist in req:
return dist
# try to download/install
return self.obtain(req, installer)
def obtain(self, requirement, installer=None):
"""Obtain a distribution matching `requirement` (e.g. via download)
Obtain a distro that matches requirement (e.g. via download). In the
base ``Environment`` class, this routine just returns
``installer(requirement)``, unless `installer` is None, in which case
None is returned instead. This method is a hook that allows subclasses
to attempt other ways of obtaining a distribution before falling back
to the `installer` argument."""
if installer is not None:
return installer(requirement)
def __iter__(self):
"""Yield the unique project names of the available distributions"""
for key in self._distmap.keys():
if self[key]:
yield key
def __iadd__(self, other):
"""In-place addition of a distribution or environment"""
if isinstance(other, Distribution):
self.add(other)
elif isinstance(other, Environment):
for project in other:
for dist in other[project]:
self.add(dist)
else:
raise TypeError("Can't add %r to environment" % (other,))
return self
def __add__(self, other):
"""Add an environment or distribution to an environment"""
new = self.__class__([], platform=None, python=None)
for env in self, other:
new += env
return new
# XXX backward compatibility
AvailableDistributions = Environment
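# --- Illustrative sketch, not part of the original module ---
# An Environment indexes distributions by project key and answers
# ``best_match`` queries. The plugin directory and project name below are
# hypothetical; ``Requirement`` is defined later in this module:
def _demo_environment():
    env = Environment(['/path/to/plugins'])  # snapshot a single directory
    req = Requirement.parse('SomePlugin>=1.0')
    # newest compatible distribution, or None if nothing matches
    return env.best_match(req, WorkingSet([]))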
class ExtractionError(RuntimeError):
"""An error occurred extracting a resource
The following attributes are available from instances of this exception:
manager
The resource manager that raised this exception
cache_path
The base directory for resource extraction
original_error
The exception instance that caused extraction to fail
"""
class ResourceManager:
"""Manage resource extraction and packages"""
extraction_path = None
def __init__(self):
self.cached_files = {}
def resource_exists(self, package_or_requirement, resource_name):
"""Does the named resource exist?"""
return get_provider(package_or_requirement).has_resource(resource_name)
def resource_isdir(self, package_or_requirement, resource_name):
"""Is the named resource an existing directory?"""
return get_provider(package_or_requirement).resource_isdir(
resource_name
)
def resource_filename(self, package_or_requirement, resource_name):
"""Return a true filesystem path for specified resource"""
return get_provider(package_or_requirement).get_resource_filename(
self, resource_name
)
def resource_stream(self, package_or_requirement, resource_name):
"""Return a readable file-like object for specified resource"""
return get_provider(package_or_requirement).get_resource_stream(
self, resource_name
)
def resource_string(self, package_or_requirement, resource_name):
"""Return specified resource as a string"""
return get_provider(package_or_requirement).get_resource_string(
self, resource_name
)
def resource_listdir(self, package_or_requirement, resource_name):
"""List the contents of the named resource directory"""
return get_provider(package_or_requirement).resource_listdir(
resource_name
)
def extraction_error(self):
"""Give an error message for problems extracting file(s)"""
old_exc = sys.exc_info()[1]
cache_path = self.extraction_path or get_default_cache()
tmpl = textwrap.dedent("""
Can't extract file(s) to egg cache
The following error occurred while trying to extract file(s) to the Python egg
cache:
{old_exc}
The Python egg cache directory is currently set to:
{cache_path}
Perhaps your account does not have write access to this directory? You can
change the cache directory by setting the PYTHON_EGG_CACHE environment
variable to point to an accessible directory.
""").lstrip()
err = ExtractionError(tmpl.format(**locals()))
err.manager = self
err.cache_path = cache_path
err.original_error = old_exc
raise err
def get_cache_path(self, archive_name, names=()):
"""Return absolute location in cache for `archive_name` and `names`
The parent directory of the resulting path will be created if it does
not already exist. `archive_name` should be the base filename of the
enclosing egg (which may not be the name of the enclosing zipfile!),
including its ".egg" extension. `names`, if provided, should be a
sequence of path name parts "under" the egg's extraction location.
This method should only be called by resource providers that need to
obtain an extraction location, and only for names they intend to
extract, as it tracks the generated names for possible cleanup later.
"""
extract_path = self.extraction_path or get_default_cache()
target_path = os.path.join(extract_path, archive_name + '-tmp', *names)
try:
_bypass_ensure_directory(target_path)
        except Exception:
self.extraction_error()
self._warn_unsafe_extraction_path(extract_path)
self.cached_files[target_path] = 1
return target_path
@staticmethod
def _warn_unsafe_extraction_path(path):
"""
If the default extraction path is overridden and set to an insecure
location, such as /tmp, it opens up an opportunity for an attacker to
replace an extracted file with an unauthorized payload. Warn the user
if a known insecure location is used.
See Distribute #375 for more details.
"""
if os.name == 'nt' and not path.startswith(os.environ['windir']):
# On Windows, permissions are generally restrictive by default
# and temp directories are not writable by other users, so
# bypass the warning.
return
mode = os.stat(path).st_mode
if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
msg = ("%s is writable by group/others and vulnerable to attack "
"when "
"used with get_resource_filename. Consider a more secure "
"location (set with .set_extraction_path or the "
"PYTHON_EGG_CACHE environment variable)." % path)
warnings.warn(msg, UserWarning)
def postprocess(self, tempname, filename):
"""Perform any platform-specific postprocessing of `tempname`
This is where Mac header rewrites should be done; other platforms don't
have anything special they should do.
Resource providers should call this method ONLY after successfully
extracting a compressed resource. They must NOT call it on resources
that are already in the filesystem.
`tempname` is the current (temporary) name of the file, and `filename`
is the name it will be renamed to by the caller after this routine
returns.
"""
if os.name == 'posix':
# Make the resource executable
mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
os.chmod(tempname, mode)
def set_extraction_path(self, path):
"""Set the base path where resources will be extracted to, if needed.
If you do not call this routine before any extractions take place, the
path defaults to the return value of ``get_default_cache()``. (Which
is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
platform-specific fallbacks. See that routine's documentation for more
details.)
Resources are extracted to subdirectories of this path based upon
information given by the ``IResourceProvider``. You may set this to a
temporary directory, but then you must call ``cleanup_resources()`` to
delete the extracted files when done. There is no guarantee that
``cleanup_resources()`` will be able to remove all extracted files.
(Note: you may not change the extraction path for a given resource
manager once resources have been extracted, unless you first call
``cleanup_resources()``.)
"""
if self.cached_files:
raise ValueError(
"Can't change extraction path, files already extracted"
)
self.extraction_path = path
def cleanup_resources(self, force=False):
"""
Delete all extracted resource files and directories, returning a list
of the file and directory names that could not be successfully removed.
This function does not have any concurrency protection, so it should
generally only be called when the extraction path is a temporary
directory exclusive to a single process. This method is not
automatically called; you must call it explicitly or register it as an
``atexit`` function if you wish to ensure cleanup of a temporary
directory used for extractions.
"""
# XXX
def get_default_cache():
"""
Return the ``PYTHON_EGG_CACHE`` environment variable
or a platform-relevant user cache dir for an app
named "Python-Eggs".
"""
return (
os.environ.get('PYTHON_EGG_CACHE')
or appdirs.user_cache_dir(appname='Python-Eggs')
)
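# --- Illustrative sketch, not part of the original module ---
# ``ResourceManager`` is normally used via module-level wrappers bound near
# the end of this file; direct use looks like this ("mypkg" and its data
# file are hypothetical, assumed installed):
def _demo_resource_manager():
    manager = ResourceManager()
    if manager.resource_exists('mypkg', 'data/config.txt'):
        # extracted transparently from a zipped egg when necessary
        return manager.resource_string('mypkg', 'data/config.txt')
    return None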
def safe_name(name):
"""Convert an arbitrary string to a standard distribution name
Any runs of non-alphanumeric/. characters are replaced with a single '-'.
"""
return re.sub('[^A-Za-z0-9.]+', '-', name)
def safe_version(version):
"""
Convert an arbitrary string to a standard version string
"""
try:
# normalize the version
return str(packaging.version.Version(version))
except packaging.version.InvalidVersion:
version = version.replace(' ', '.')
return re.sub('[^A-Za-z0-9.]+', '-', version)
def safe_extra(extra):
"""Convert an arbitrary string to a standard 'extra' name
Any runs of non-alphanumeric characters are replaced with a single '_',
and the result is always lowercased.
"""
return re.sub('[^A-Za-z0-9.-]+', '_', extra).lower()
def to_filename(name):
"""Convert a project or version name to its filename-escaped form
Any '-' characters are currently replaced with '_'.
"""
return name.replace('-', '_')
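# --- Illustrative sketch, not part of the original module ---
# Expected behavior of the normalizers above:
def _demo_name_utils():
    assert safe_name('my pkg!') == 'my-pkg-'
    assert safe_version('1.0beta') == '1.0b0'  # valid PEP 440, normalized
    assert safe_version('1.0 alpha') == '1.0.alpha'  # non-PEP-440 fallback path
    assert safe_extra('Feature One') == 'feature_one'
    assert to_filename('my-pkg') == 'my_pkg'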
def invalid_marker(text):
"""
Validate text as a PEP 508 environment marker; return an exception
    instance if it is invalid, or ``False`` otherwise.
"""
try:
evaluate_marker(text)
except SyntaxError as e:
e.filename = None
e.lineno = None
return e
return False
def evaluate_marker(text, extra=None):
"""
Evaluate a PEP 508 environment marker.
Return a boolean indicating the marker result in this environment.
Raise SyntaxError if marker is invalid.
This implementation uses the 'pyparsing' module.
"""
try:
marker = packaging.markers.Marker(text)
return marker.evaluate()
except packaging.markers.InvalidMarker as e:
raise SyntaxError(e)
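# --- Illustrative sketch, not part of the original module ---
# ``evaluate_marker`` returns a bool for a valid marker and raises
# SyntaxError otherwise; ``invalid_marker`` converts that into a return
# value:
def _demo_markers():
    assert evaluate_marker('python_version >= "2.6"') in (True, False)
    assert invalid_marker('python_version >= "2.6"') is False
    assert isinstance(invalid_marker('not a marker'), SyntaxError)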
class NullProvider:
"""Try to implement resources and metadata for arbitrary PEP 302 loaders"""
egg_name = None
egg_info = None
loader = None
def __init__(self, module):
self.loader = getattr(module, '__loader__', None)
self.module_path = os.path.dirname(getattr(module, '__file__', ''))
def get_resource_filename(self, manager, resource_name):
return self._fn(self.module_path, resource_name)
def get_resource_stream(self, manager, resource_name):
return io.BytesIO(self.get_resource_string(manager, resource_name))
def get_resource_string(self, manager, resource_name):
return self._get(self._fn(self.module_path, resource_name))
def has_resource(self, resource_name):
return self._has(self._fn(self.module_path, resource_name))
def has_metadata(self, name):
return self.egg_info and self._has(self._fn(self.egg_info, name))
def get_metadata(self, name):
if not self.egg_info:
return ""
value = self._get(self._fn(self.egg_info, name))
return value.decode('utf-8') if six.PY3 else value
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
def resource_isdir(self, resource_name):
return self._isdir(self._fn(self.module_path, resource_name))
def metadata_isdir(self, name):
return self.egg_info and self._isdir(self._fn(self.egg_info, name))
def resource_listdir(self, resource_name):
return self._listdir(self._fn(self.module_path, resource_name))
def metadata_listdir(self, name):
if self.egg_info:
return self._listdir(self._fn(self.egg_info, name))
return []
def run_script(self, script_name, namespace):
script = 'scripts/' + script_name
if not self.has_metadata(script):
raise ResolutionError("No script named %r" % script_name)
script_text = self.get_metadata(script).replace('\r\n', '\n')
script_text = script_text.replace('\r', '\n')
script_filename = self._fn(self.egg_info, script)
namespace['__file__'] = script_filename
if os.path.exists(script_filename):
source = open(script_filename).read()
code = compile(source, script_filename, 'exec')
exec(code, namespace, namespace)
else:
from linecache import cache
cache[script_filename] = (
len(script_text), 0, script_text.split('\n'), script_filename
)
script_code = compile(script_text, script_filename, 'exec')
exec(script_code, namespace, namespace)
def _has(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _isdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _listdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _fn(self, base, resource_name):
if resource_name:
return os.path.join(base, *resource_name.split('/'))
return base
def _get(self, path):
if hasattr(self.loader, 'get_data'):
return self.loader.get_data(path)
raise NotImplementedError(
"Can't perform this operation for loaders without 'get_data()'"
)
register_loader_type(object, NullProvider)
class EggProvider(NullProvider):
"""Provider based on a virtual filesystem"""
def __init__(self, module):
NullProvider.__init__(self, module)
self._setup_prefix()
def _setup_prefix(self):
# we assume here that our metadata may be nested inside a "basket"
# of multiple eggs; that's why we use module_path instead of .archive
path = self.module_path
old = None
while path != old:
if _is_unpacked_egg(path):
self.egg_name = os.path.basename(path)
self.egg_info = os.path.join(path, 'EGG-INFO')
self.egg_root = path
break
old = path
path, base = os.path.split(path)
class DefaultProvider(EggProvider):
"""Provides access to package resources in the filesystem"""
def _has(self, path):
return os.path.exists(path)
def _isdir(self, path):
return os.path.isdir(path)
def _listdir(self, path):
return os.listdir(path)
def get_resource_stream(self, manager, resource_name):
return open(self._fn(self.module_path, resource_name), 'rb')
def _get(self, path):
with open(path, 'rb') as stream:
return stream.read()
@classmethod
def _register(cls):
loader_cls = getattr(importlib_machinery, 'SourceFileLoader',
type(None))
register_loader_type(loader_cls, cls)
DefaultProvider._register()
class EmptyProvider(NullProvider):
"""Provider that returns nothing for all requests"""
_isdir = _has = lambda self, path: False
_get = lambda self, path: ''
_listdir = lambda self, path: []
module_path = None
def __init__(self):
pass
empty_provider = EmptyProvider()
class ZipManifests(dict):
"""
zip manifest builder
"""
@classmethod
def build(cls, path):
"""
Build a dictionary similar to the zipimport directory
caches, except instead of tuples, store ZipInfo objects.
Use a platform-specific path separator (os.sep) for the path keys
for compatibility with pypy on Windows.
"""
with ContextualZipFile(path) as zfile:
items = (
(
name.replace('/', os.sep),
zfile.getinfo(name),
)
for name in zfile.namelist()
)
return dict(items)
load = build
class MemoizedZipManifests(ZipManifests):
"""
Memoized zipfile manifests.
"""
manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')
def load(self, path):
"""
Load a manifest at path or return a suitable manifest already loaded.
"""
path = os.path.normpath(path)
mtime = os.stat(path).st_mtime
if path not in self or self[path].mtime != mtime:
manifest = self.build(path)
self[path] = self.manifest_mod(manifest, mtime)
return self[path].manifest
class ContextualZipFile(zipfile.ZipFile):
"""
Supplement ZipFile class to support context manager for Python 2.6
"""
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __new__(cls, *args, **kwargs):
"""
Construct a ZipFile or ContextualZipFile as appropriate
"""
if hasattr(zipfile.ZipFile, '__exit__'):
return zipfile.ZipFile(*args, **kwargs)
return super(ContextualZipFile, cls).__new__(cls)
class ZipProvider(EggProvider):
"""Resource support for zips and eggs"""
eagers = None
_zip_manifests = MemoizedZipManifests()
def __init__(self, module):
EggProvider.__init__(self, module)
self.zip_pre = self.loader.archive + os.sep
def _zipinfo_name(self, fspath):
# Convert a virtual filename (full path to file) into a zipfile subpath
# usable with the zipimport directory cache for our target archive
if fspath.startswith(self.zip_pre):
return fspath[len(self.zip_pre):]
raise AssertionError(
"%s is not a subpath of %s" % (fspath, self.zip_pre)
)
def _parts(self, zip_path):
# Convert a zipfile subpath into an egg-relative path part list.
# pseudo-fs path
fspath = self.zip_pre + zip_path
if fspath.startswith(self.egg_root + os.sep):
return fspath[len(self.egg_root) + 1:].split(os.sep)
raise AssertionError(
"%s is not a subpath of %s" % (fspath, self.egg_root)
)
@property
def zipinfo(self):
return self._zip_manifests.load(self.loader.archive)
def get_resource_filename(self, manager, resource_name):
if not self.egg_name:
raise NotImplementedError(
"resource_filename() only supported for .egg, not .zip"
)
# no need to lock for extraction, since we use temp names
zip_path = self._resource_to_zip(resource_name)
eagers = self._get_eager_resources()
if '/'.join(self._parts(zip_path)) in eagers:
for name in eagers:
self._extract_resource(manager, self._eager_to_zip(name))
return self._extract_resource(manager, zip_path)
@staticmethod
def _get_date_and_size(zip_stat):
size = zip_stat.file_size
# ymdhms+wday, yday, dst
date_time = zip_stat.date_time + (0, 0, -1)
# 1980 offset already done
timestamp = time.mktime(date_time)
return timestamp, size
def _extract_resource(self, manager, zip_path):
if zip_path in self._index():
for name in self._index()[zip_path]:
last = self._extract_resource(
manager, os.path.join(zip_path, name)
)
# return the extracted directory name
return os.path.dirname(last)
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not WRITE_SUPPORT:
raise IOError('"os.rename" and "os.unlink" are not supported '
'on this platform')
try:
real_path = manager.get_cache_path(
self.egg_name, self._parts(zip_path)
)
if self._is_current(real_path, zip_path):
return real_path
outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path))
os.write(outf, self.loader.get_data(zip_path))
os.close(outf)
utime(tmpnam, (timestamp, timestamp))
manager.postprocess(tmpnam, real_path)
try:
rename(tmpnam, real_path)
except os.error:
if os.path.isfile(real_path):
if self._is_current(real_path, zip_path):
# the file became current since it was checked above,
# so proceed.
return real_path
# Windows, del old file and retry
elif os.name == 'nt':
unlink(real_path)
rename(tmpnam, real_path)
return real_path
raise
except os.error:
# report a user-friendly error
manager.extraction_error()
return real_path
def _is_current(self, file_path, zip_path):
"""
Return True if the file_path is current for this zip_path
"""
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not os.path.isfile(file_path):
return False
stat = os.stat(file_path)
if stat.st_size != size or stat.st_mtime != timestamp:
return False
# check that the contents match
zip_contents = self.loader.get_data(zip_path)
with open(file_path, 'rb') as f:
file_contents = f.read()
return zip_contents == file_contents
def _get_eager_resources(self):
if self.eagers is None:
eagers = []
for name in ('native_libs.txt', 'eager_resources.txt'):
if self.has_metadata(name):
eagers.extend(self.get_metadata_lines(name))
self.eagers = eagers
return self.eagers
def _index(self):
try:
return self._dirindex
except AttributeError:
ind = {}
for path in self.zipinfo:
parts = path.split(os.sep)
while parts:
parent = os.sep.join(parts[:-1])
if parent in ind:
ind[parent].append(parts[-1])
break
else:
ind[parent] = [parts.pop()]
self._dirindex = ind
return ind
def _has(self, fspath):
zip_path = self._zipinfo_name(fspath)
return zip_path in self.zipinfo or zip_path in self._index()
def _isdir(self, fspath):
return self._zipinfo_name(fspath) in self._index()
def _listdir(self, fspath):
return list(self._index().get(self._zipinfo_name(fspath), ()))
def _eager_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.egg_root, resource_name))
def _resource_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.module_path, resource_name))
register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
"""Metadata handler for standalone PKG-INFO files
Usage::
metadata = FileMetadata("/path/to/PKG-INFO")
This provider rejects all data and metadata requests except for PKG-INFO,
which is treated as existing, and will be the contents of the file at
the provided location.
"""
def __init__(self, path):
self.path = path
def has_metadata(self, name):
return name == 'PKG-INFO' and os.path.isfile(self.path)
def get_metadata(self, name):
if name != 'PKG-INFO':
raise KeyError("No metadata except PKG-INFO is available")
with io.open(self.path, encoding='utf-8', errors="replace") as f:
metadata = f.read()
self._warn_on_replacement(metadata)
return metadata
def _warn_on_replacement(self, metadata):
# Python 2.6 and 3.2 compat for: replacement_char = '�'
replacement_char = b'\xef\xbf\xbd'.decode('utf-8')
if replacement_char in metadata:
tmpl = "{self.path} could not be properly decoded in UTF-8"
msg = tmpl.format(**locals())
warnings.warn(msg)
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
class PathMetadata(DefaultProvider):
"""Metadata provider for egg directories
Usage::
# Development eggs:
egg_info = "/path/to/PackageName.egg-info"
base_dir = os.path.dirname(egg_info)
metadata = PathMetadata(base_dir, egg_info)
dist_name = os.path.splitext(os.path.basename(egg_info))[0]
        dist = Distribution(base_dir, project_name=dist_name, metadata=metadata)
# Unpacked egg directories:
egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
dist = Distribution.from_filename(egg_path, metadata=metadata)
"""
def __init__(self, path, egg_info):
self.module_path = path
self.egg_info = egg_info
class EggMetadata(ZipProvider):
"""Metadata provider for .egg files"""
def __init__(self, importer):
"""Create a metadata provider from a zipimporter"""
self.zip_pre = importer.archive + os.sep
self.loader = importer
if importer.prefix:
self.module_path = os.path.join(importer.archive, importer.prefix)
else:
self.module_path = importer.archive
self._setup_prefix()
_declare_state('dict', _distribution_finders={})
def register_finder(importer_type, distribution_finder):
"""Register `distribution_finder` to find distributions in sys.path items
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `distribution_finder` is a callable that, passed a path
item and the importer instance, yields ``Distribution`` instances found on
that path item. See ``pkg_resources.find_on_path`` for an example."""
_distribution_finders[importer_type] = distribution_finder
def find_distributions(path_item, only=False):
"""Yield distributions accessible via `path_item`"""
importer = get_importer(path_item)
finder = _find_adapter(_distribution_finders, importer)
return finder(importer, path_item, only)
def find_eggs_in_zip(importer, path_item, only=False):
"""
Find eggs in zip files; possibly multiple nested eggs.
"""
if importer.archive.endswith('.whl'):
# wheels are not supported with this finder
# they don't have PKG-INFO metadata, and won't ever contain eggs
return
metadata = EggMetadata(importer)
if metadata.has_metadata('PKG-INFO'):
yield Distribution.from_filename(path_item, metadata=metadata)
if only:
# don't yield nested distros
return
for subitem in metadata.resource_listdir('/'):
if _is_unpacked_egg(subitem):
subpath = os.path.join(path_item, subitem)
for dist in find_eggs_in_zip(zipimport.zipimporter(subpath), subpath):
yield dist
register_finder(zipimport.zipimporter, find_eggs_in_zip)
def find_nothing(importer, path_item, only=False):
return ()
register_finder(object, find_nothing)
def _by_version_descending(names):
"""
Given a list of filenames, return them in descending order
by version number.
>>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg'
>>> _by_version_descending(names)
['Python-2.7.10.egg', 'Python-2.7.2.egg', 'foo', 'bar']
>>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg'
>>> _by_version_descending(names)
['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg']
>>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg'
>>> _by_version_descending(names)
['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg']
"""
def _by_version(name):
"""
Parse each component of the filename
"""
name, ext = os.path.splitext(name)
parts = itertools.chain(name.split('-'), [ext])
return [packaging.version.parse(part) for part in parts]
return sorted(names, key=_by_version, reverse=True)
def find_on_path(importer, path_item, only=False):
"""Yield distributions accessible on a sys.path directory"""
path_item = _normalize_cached(path_item)
if os.path.isdir(path_item) and os.access(path_item, os.R_OK):
if _is_unpacked_egg(path_item):
yield Distribution.from_filename(
path_item, metadata=PathMetadata(
path_item, os.path.join(path_item, 'EGG-INFO')
)
)
else:
# scan for .egg and .egg-info in directory
path_item_entries = _by_version_descending(os.listdir(path_item))
for entry in path_item_entries:
lower = entry.lower()
if lower.endswith('.egg-info') or lower.endswith('.dist-info'):
fullpath = os.path.join(path_item, entry)
if os.path.isdir(fullpath):
# egg-info directory, allow getting metadata
if len(os.listdir(fullpath)) == 0:
# Empty egg directory, skip.
continue
metadata = PathMetadata(path_item, fullpath)
else:
metadata = FileMetadata(fullpath)
yield Distribution.from_location(
path_item, entry, metadata, precedence=DEVELOP_DIST
)
elif not only and _is_unpacked_egg(entry):
dists = find_distributions(os.path.join(path_item, entry))
for dist in dists:
yield dist
elif not only and lower.endswith('.egg-link'):
with open(os.path.join(path_item, entry)) as entry_file:
entry_lines = entry_file.readlines()
for line in entry_lines:
if not line.strip():
continue
path = os.path.join(path_item, line.rstrip())
dists = find_distributions(path)
for item in dists:
yield item
break
register_finder(pkgutil.ImpImporter, find_on_path)
if hasattr(importlib_machinery, 'FileFinder'):
register_finder(importlib_machinery.FileFinder, find_on_path)
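# --- Illustrative sketch, not part of the original module ---
# With the finders above registered, ``find_distributions`` dispatches on
# the importer type of each path entry (zipimporter -> find_eggs_in_zip,
# plain directories -> find_on_path):
def _demo_find_distributions():
    names = []
    for entry in sys.path:
        names.extend(dist.project_name for dist in find_distributions(entry))
    return names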
_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})
def register_namespace_handler(importer_type, namespace_handler):
"""Register `namespace_handler` to declare namespace packages
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `namespace_handler` is a callable like this::
def namespace_handler(importer, path_entry, moduleName, module):
# return a path_entry to use for child packages
Namespace handlers are only called if the importer object has already
agreed that it can handle the relevant path item, and they should only
return a subpath if the module __path__ does not already contain an
equivalent subpath. For an example namespace handler, see
``pkg_resources.file_ns_handler``.
"""
_namespace_handlers[importer_type] = namespace_handler
def _handle_ns(packageName, path_item):
"""Ensure that named package includes a subpath of path_item (if needed)"""
importer = get_importer(path_item)
if importer is None:
return None
loader = importer.find_module(packageName)
if loader is None:
return None
module = sys.modules.get(packageName)
if module is None:
module = sys.modules[packageName] = types.ModuleType(packageName)
module.__path__ = []
_set_parent_ns(packageName)
elif not hasattr(module, '__path__'):
raise TypeError("Not a package:", packageName)
handler = _find_adapter(_namespace_handlers, importer)
subpath = handler(importer, path_item, packageName, module)
if subpath is not None:
path = module.__path__
path.append(subpath)
loader.load_module(packageName)
_rebuild_mod_path(path, packageName, module)
return subpath
def _rebuild_mod_path(orig_path, package_name, module):
"""
Rebuild module.__path__ ensuring that all entries are ordered
corresponding to their sys.path order
"""
sys_path = [_normalize_cached(p) for p in sys.path]
def safe_sys_path_index(entry):
"""
Workaround for #520 and #513.
"""
try:
return sys_path.index(entry)
except ValueError:
return float('inf')
def position_in_sys_path(path):
"""
Return the ordinal of the path based on its position in sys.path
"""
path_parts = path.split(os.sep)
module_parts = package_name.count('.') + 1
parts = path_parts[:-module_parts]
return safe_sys_path_index(_normalize_cached(os.sep.join(parts)))
orig_path.sort(key=position_in_sys_path)
module.__path__[:] = [_normalize_cached(p) for p in orig_path]
def declare_namespace(packageName):
"""Declare that package 'packageName' is a namespace package"""
_imp.acquire_lock()
try:
if packageName in _namespace_packages:
return
path, parent = sys.path, None
if '.' in packageName:
parent = '.'.join(packageName.split('.')[:-1])
declare_namespace(parent)
if parent not in _namespace_packages:
__import__(parent)
try:
path = sys.modules[parent].__path__
except AttributeError:
raise TypeError("Not a package:", parent)
# Track what packages are namespaces, so when new path items are added,
# they can be updated
_namespace_packages.setdefault(parent, []).append(packageName)
_namespace_packages.setdefault(packageName, [])
for path_item in path:
# Ensure all the parent's path items are reflected in the child,
# if they apply
_handle_ns(packageName, path_item)
finally:
_imp.release_lock()
def fixup_namespace_packages(path_item, parent=None):
"""Ensure that previously-declared namespace packages include path_item"""
_imp.acquire_lock()
try:
for package in _namespace_packages.get(parent, ()):
subpath = _handle_ns(package, path_item)
if subpath:
fixup_namespace_packages(subpath, package)
finally:
_imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
"""Compute an ns-package subpath for a filesystem or zipfile importer"""
subpath = os.path.join(path_item, packageName.split('.')[-1])
normalized = _normalize_cached(subpath)
for item in module.__path__:
if _normalize_cached(item) == normalized:
break
else:
# Only return the path if it's not already there
return subpath
register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
register_namespace_handler(zipimport.zipimporter, file_ns_handler)
if hasattr(importlib_machinery, 'FileFinder'):
register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
return None
register_namespace_handler(object, null_ns_handler)
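# --- Illustrative sketch, not part of the original module ---
# ``declare_namespace`` is normally invoked from a namespace package's
# __init__.py as ``__import__('pkg_resources').declare_namespace(__name__)``.
# It records the package and runs _handle_ns() over every sys.path entry so
# matching subdirectories are merged into the package __path__.
# "examplens" is a hypothetical package name:
def _demo_declare_namespace():
    declare_namespace('examplens')
    return _namespace_packages.get('examplens')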
def normalize_path(filename):
"""Normalize a file/dir name for comparison purposes"""
return os.path.normcase(os.path.realpath(filename))
def _normalize_cached(filename, _cache={}):
try:
return _cache[filename]
except KeyError:
_cache[filename] = result = normalize_path(filename)
return result
def _is_unpacked_egg(path):
"""
    Determine if the given path appears to be an unpacked egg.
"""
return (
path.lower().endswith('.egg')
)
def _set_parent_ns(packageName):
parts = packageName.split('.')
name = parts.pop()
if parts:
parent = '.'.join(parts)
setattr(sys.modules[parent], name, sys.modules[packageName])
def yield_lines(strs):
"""Yield non-empty/non-comment lines of a string or sequence"""
if isinstance(strs, six.string_types):
for s in strs.splitlines():
s = s.strip()
# skip blank lines/comments
if s and not s.startswith('#'):
yield s
else:
for ss in strs:
for s in yield_lines(ss):
yield s
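# --- Illustrative sketch, not part of the original module ---
# ``yield_lines`` flattens strings and nested sequences alike:
def _demo_yield_lines():
    text = "one\n# a comment\n\n  two  "
    assert list(yield_lines(text)) == ['one', 'two']
    assert list(yield_lines([text, 'three'])) == ['one', 'two', 'three']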
MODULE = re.compile(r"\w+(\.\w+)*$").match
EGG_NAME = re.compile(
r"""
(?P<name>[^-]+) (
-(?P<ver>[^-]+) (
-py(?P<pyver>[^-]+) (
-(?P<plat>.+)
)?
)?
)?
""",
re.VERBOSE | re.IGNORECASE,
).match
class EntryPoint(object):
"""Object representing an advertised importable object"""
def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
if not MODULE(module_name):
raise ValueError("Invalid module name", module_name)
self.name = name
self.module_name = module_name
self.attrs = tuple(attrs)
self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras
self.dist = dist
def __str__(self):
s = "%s = %s" % (self.name, self.module_name)
if self.attrs:
s += ':' + '.'.join(self.attrs)
if self.extras:
s += ' [%s]' % ','.join(self.extras)
return s
def __repr__(self):
return "EntryPoint.parse(%r)" % str(self)
def load(self, require=True, *args, **kwargs):
"""
Require packages for this EntryPoint, then resolve it.
"""
if not require or args or kwargs:
warnings.warn(
"Parameters to load are deprecated. Call .resolve and "
".require separately.",
DeprecationWarning,
stacklevel=2,
)
if require:
self.require(*args, **kwargs)
return self.resolve()
def resolve(self):
"""
Resolve the entry point from its module and attrs.
"""
module = __import__(self.module_name, fromlist=['__name__'], level=0)
try:
return functools.reduce(getattr, self.attrs, module)
except AttributeError as exc:
raise ImportError(str(exc))
def require(self, env=None, installer=None):
if self.extras and not self.dist:
raise UnknownExtra("Can't require() without a distribution", self)
reqs = self.dist.requires(self.extras)
items = working_set.resolve(reqs, env, installer)
list(map(working_set.add, items))
pattern = re.compile(
r'\s*'
r'(?P<name>.+?)\s*'
r'=\s*'
r'(?P<module>[\w.]+)\s*'
r'(:\s*(?P<attr>[\w.]+))?\s*'
r'(?P<extras>\[.*\])?\s*$'
)
@classmethod
def parse(cls, src, dist=None):
"""Parse a single entry point from string `src`
Entry point syntax follows the form::
name = some.module:some.attr [extra1, extra2]
The entry name and module name are required, but the ``:attrs`` and
``[extras]`` parts are optional
"""
m = cls.pattern.match(src)
if not m:
msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
raise ValueError(msg, src)
res = m.groupdict()
extras = cls._parse_extras(res['extras'])
attrs = res['attr'].split('.') if res['attr'] else ()
return cls(res['name'], res['module'], attrs, extras, dist)
@classmethod
def _parse_extras(cls, extras_spec):
if not extras_spec:
return ()
req = Requirement.parse('x' + extras_spec)
if req.specs:
raise ValueError()
return req.extras
@classmethod
def parse_group(cls, group, lines, dist=None):
"""Parse an entry point group"""
if not MODULE(group):
raise ValueError("Invalid group name", group)
this = {}
for line in yield_lines(lines):
ep = cls.parse(line, dist)
if ep.name in this:
raise ValueError("Duplicate entry point", group, ep.name)
this[ep.name] = ep
return this
@classmethod
def parse_map(cls, data, dist=None):
"""Parse a map of entry point groups"""
if isinstance(data, dict):
data = data.items()
else:
data = split_sections(data)
maps = {}
for group, lines in data:
if group is None:
if not lines:
continue
raise ValueError("Entry points must be listed in groups")
group = group.strip()
if group in maps:
raise ValueError("Duplicate group name", group)
maps[group] = cls.parse_group(group, lines, dist)
return maps
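# --- Illustrative sketch, not part of the original module ---
# Round-tripping an entry point specification through ``EntryPoint.parse``
# ("mypkg.cli" is hypothetical; parsing does not import it -- only
# ``resolve()`` would):
def _demo_entry_point_parse():
    ep = EntryPoint.parse('mytool = mypkg.cli:main [extra1]')
    assert ep.name == 'mytool'
    assert ep.module_name == 'mypkg.cli'
    assert ep.attrs == ('main',)
    assert ep.extras == ('extra1',)
    return str(ep)  # "mytool = mypkg.cli:main [extra1]"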
def _remove_md5_fragment(location):
if not location:
return ''
parsed = urllib.parse.urlparse(location)
if parsed[-1].startswith('md5='):
return urllib.parse.urlunparse(parsed[:-1] + ('',))
return location
def _version_from_file(lines):
"""
Given an iterable of lines from a Metadata file, return
the value of the Version field, if present, or None otherwise.
"""
is_version_line = lambda line: line.lower().startswith('version:')
version_lines = filter(is_version_line, lines)
line = next(iter(version_lines), '')
_, _, value = line.partition(':')
return safe_version(value.strip()) or None
class Distribution(object):
"""Wrap an actual or potential sys.path entry w/metadata"""
PKG_INFO = 'PKG-INFO'
def __init__(self, location=None, metadata=None, project_name=None,
version=None, py_version=PY_MAJOR, platform=None,
precedence=EGG_DIST):
self.project_name = safe_name(project_name or 'Unknown')
if version is not None:
self._version = safe_version(version)
self.py_version = py_version
self.platform = platform
self.location = location
self.precedence = precedence
self._provider = metadata or empty_provider
@classmethod
def from_location(cls, location, basename, metadata=None, **kw):
project_name, version, py_version, platform = [None] * 4
basename, ext = os.path.splitext(basename)
if ext.lower() in _distributionImpl:
cls = _distributionImpl[ext.lower()]
match = EGG_NAME(basename)
if match:
project_name, version, py_version, platform = match.group(
'name', 'ver', 'pyver', 'plat'
)
return cls(
location, metadata, project_name=project_name, version=version,
py_version=py_version, platform=platform, **kw
)._reload_version()
def _reload_version(self):
return self
@property
def hashcmp(self):
return (
self.parsed_version,
self.precedence,
self.key,
_remove_md5_fragment(self.location),
self.py_version or '',
self.platform or '',
)
def __hash__(self):
return hash(self.hashcmp)
def __lt__(self, other):
return self.hashcmp < other.hashcmp
def __le__(self, other):
return self.hashcmp <= other.hashcmp
def __gt__(self, other):
return self.hashcmp > other.hashcmp
def __ge__(self, other):
return self.hashcmp >= other.hashcmp
def __eq__(self, other):
if not isinstance(other, self.__class__):
# It's not a Distribution, so they are not equal
return False
return self.hashcmp == other.hashcmp
def __ne__(self, other):
return not self == other
# These properties have to be lazy so that we don't have to load any
# metadata until/unless it's actually needed. (i.e., some distributions
# may not know their name or version without loading PKG-INFO)
@property
def key(self):
try:
return self._key
except AttributeError:
self._key = key = self.project_name.lower()
return key
@property
def parsed_version(self):
if not hasattr(self, "_parsed_version"):
self._parsed_version = parse_version(self.version)
return self._parsed_version
def _warn_legacy_version(self):
LV = packaging.version.LegacyVersion
is_legacy = isinstance(self._parsed_version, LV)
if not is_legacy:
return
# While an empty version is technically a legacy version and
# is not a valid PEP 440 version, it's also unlikely to
# actually come from someone and instead it is more likely that
# it comes from setuptools attempting to parse a filename and
# including it in the list. So for that we'll gate this warning
# on if the version is anything at all or not.
if not self.version:
return
tmpl = textwrap.dedent("""
'{project_name} ({version})' is being parsed as a legacy,
non PEP 440,
version. You may find odd behavior and sort order.
In particular it will be sorted as less than 0.0. It
is recommended to migrate to PEP 440 compatible
versions.
""").strip().replace('\n', ' ')
warnings.warn(tmpl.format(**vars(self)), PEP440Warning)
@property
def version(self):
try:
return self._version
except AttributeError:
version = _version_from_file(self._get_metadata(self.PKG_INFO))
if version is None:
tmpl = "Missing 'Version:' header and/or %s file"
raise ValueError(tmpl % self.PKG_INFO, self)
return version
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
dm = self.__dep_map = {None: []}
for name in 'requires.txt', 'depends.txt':
for extra, reqs in split_sections(self._get_metadata(name)):
if extra:
if ':' in extra:
extra, marker = extra.split(':', 1)
if invalid_marker(marker):
# XXX warn
reqs = []
elif not evaluate_marker(marker):
reqs = []
extra = safe_extra(extra) or None
dm.setdefault(extra, []).extend(parse_requirements(reqs))
return dm
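# Illustrative requires.txt layout consumed by _dep_map above (comment only):
#
#   docutils>=0.3                  -> dm[None]
#   [pdf]                          -> dm['pdf']
#   reportlab>=1.2
#   [reST:sys_platform=="win32"]   -> dm['reST']; requirements in this
#   pywin32                           section are kept only if the marker
#                                     evaluates true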
def requires(self, extras=()):
"""List of Requirements needed for this distro if `extras` are used"""
dm = self._dep_map
deps = []
deps.extend(dm.get(None, ()))
for ext in extras:
try:
deps.extend(dm[safe_extra(ext)])
except KeyError:
raise UnknownExtra(
"%s has no such extra feature %r" % (self, ext)
)
return deps
def _get_metadata(self, name):
if self.has_metadata(name):
for line in self.get_metadata_lines(name):
yield line
def activate(self, path=None, replace=False):
"""Ensure distribution is importable on `path` (default=sys.path)"""
if path is None:
path = sys.path
self.insert_on(path, replace=replace)
if path is sys.path:
fixup_namespace_packages(self.location)
for pkg in self._get_metadata('namespace_packages.txt'):
if pkg in sys.modules:
declare_namespace(pkg)
def egg_name(self):
"""Return what this distribution's standard .egg filename should be"""
filename = "%s-%s-py%s" % (
to_filename(self.project_name), to_filename(self.version),
self.py_version or PY_MAJOR
)
if self.platform:
filename += '-' + self.platform
return filename
def __repr__(self):
if self.location:
return "%s (%s)" % (self, self.location)
else:
return str(self)
def __str__(self):
try:
version = getattr(self, 'version', None)
except ValueError:
version = None
version = version or "[unknown version]"
return "%s %s" % (self.project_name, version)
def __getattr__(self, attr):
"""Delegate all unrecognized public attributes to .metadata provider"""
if attr.startswith('_'):
raise AttributeError(attr)
return getattr(self._provider, attr)
@classmethod
def from_filename(cls, filename, metadata=None, **kw):
return cls.from_location(
_normalize_cached(filename), os.path.basename(filename), metadata,
**kw
)
def as_requirement(self):
"""Return a ``Requirement`` that matches this distribution exactly"""
if isinstance(self.parsed_version, packaging.version.Version):
spec = "%s==%s" % (self.project_name, self.parsed_version)
else:
spec = "%s===%s" % (self.project_name, self.parsed_version)
return Requirement.parse(spec)
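# Illustrative behavior of as_requirement() (comment only): a PEP 440
# version yields '==', while a legacy version falls back to '===', e.g.
#
#   Distribution(project_name='foo', version='1.0').as_requirement()
#   # -> Requirement.parse('foo==1.0')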
def load_entry_point(self, group, name):
"""Return the `name` entry point of `group` or raise ImportError"""
ep = self.get_entry_info(group, name)
if ep is None:
raise ImportError("Entry point %r not found" % ((group, name),))
return ep.load()
def get_entry_map(self, group=None):
"""Return the entry point map for `group`, or the full entry map"""
try:
ep_map = self._ep_map
except AttributeError:
ep_map = self._ep_map = EntryPoint.parse_map(
self._get_metadata('entry_points.txt'), self
)
if group is not None:
return ep_map.get(group, {})
return ep_map
def get_entry_info(self, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return self.get_entry_map(group).get(name)
def insert_on(self, path, loc=None, replace=False):
"""Ensure self.location is on path
If replace=False (default):
- If location is already in path anywhere, do nothing.
- Else:
- If it's an egg and its parent directory is on path,
insert just ahead of the parent.
- Else: add to the end of path.
If replace=True:
- If location is already on path anywhere (not eggs)
or higher priority than its parent (eggs)
do nothing.
- Else:
- If it's an egg and its parent directory is on path,
insert just ahead of the parent,
removing any lower-priority entries.
- Else: add it to the front of path.
"""
loc = loc or self.location
if not loc:
return
nloc = _normalize_cached(loc)
bdir = os.path.dirname(nloc)
npath = [(p and _normalize_cached(p) or p) for p in path]
for p, item in enumerate(npath):
if item == nloc:
if replace:
break
else:
# don't modify path (even removing duplicates) if found and not replace
return
elif item == bdir and self.precedence == EGG_DIST:
# if it's an .egg, give it precedence over its directory
# UNLESS it's already been added to sys.path and replace=False
if (not replace) and nloc in npath[p:]:
return
if path is sys.path:
self.check_version_conflict()
path.insert(p, loc)
npath.insert(p, nloc)
break
else:
if path is sys.path:
self.check_version_conflict()
if replace:
path.insert(0, loc)
else:
path.append(loc)
return
# p is the spot where we found or inserted loc; now remove duplicates
while True:
try:
np = npath.index(nloc, p + 1)
except ValueError:
break
else:
del npath[np], path[np]
# ha!
p = np
return
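# Illustrative effect of insert_on() (comment only; paths hypothetical):
#
#   path = ['/site-packages', '/site-packages/foo-1.0.egg']
#   Distribution('/site-packages/bar-1.0.egg').insert_on(path)
#   # bar's egg is inserted just ahead of its parent directory:
#   # ['/site-packages/bar-1.0.egg', '/site-packages',
#   #  '/site-packages/foo-1.0.egg']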
def check_version_conflict(self):
if self.key == 'setuptools':
# ignore the inevitable setuptools self-conflicts :(
return
nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
loc = normalize_path(self.location)
for modname in self._get_metadata('top_level.txt'):
if (modname not in sys.modules or modname in nsp
or modname in _namespace_packages):
continue
if modname in ('pkg_resources', 'setuptools', 'site'):
continue
fn = getattr(sys.modules[modname], '__file__', None)
if fn and (normalize_path(fn).startswith(loc) or
fn.startswith(self.location)):
continue
issue_warning(
"Module %s was already imported from %s, but %s is being added"
" to sys.path" % (modname, fn, self.location),
)
def has_version(self):
try:
self.version
except ValueError:
issue_warning("Unbuilt egg for " + repr(self))
return False
return True
def clone(self, **kw):
"""Copy this distribution, substituting in any changed keyword args"""
names = 'project_name version py_version platform location precedence'
for attr in names.split():
kw.setdefault(attr, getattr(self, attr, None))
kw.setdefault('metadata', self._provider)
return self.__class__(**kw)
@property
def extras(self):
return [dep for dep in self._dep_map if dep]
class EggInfoDistribution(Distribution):
def _reload_version(self):
"""
Packages installed by distutils (e.g. numpy or scipy) use an old
safe_version, so their version numbers can get mangled when
converted to filenames (e.g., 1.11.0.dev0+2329eae to
1.11.0.dev0_2329eae). Such distributions will not be parsed
properly downstream by Distribution and safe_version, so take an
extra step and read the version number from the metadata file
itself instead of the filename.
"""
md_version = _version_from_file(self._get_metadata(self.PKG_INFO))
if md_version:
self._version = md_version
return self
class DistInfoDistribution(Distribution):
"""Wrap an actual or potential sys.path entry w/metadata, .dist-info style"""
PKG_INFO = 'METADATA'
EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
@property
def _parsed_pkg_info(self):
"""Parse and cache metadata"""
try:
return self._pkg_info
except AttributeError:
metadata = self.get_metadata(self.PKG_INFO)
self._pkg_info = email.parser.Parser().parsestr(metadata)
return self._pkg_info
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
self.__dep_map = self._compute_dependencies()
return self.__dep_map
def _compute_dependencies(self):
"""Recompute this distribution's dependencies."""
dm = self.__dep_map = {None: []}
reqs = []
# Including any condition expressions
for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
reqs.extend(parse_requirements(req))
def reqs_for_extra(extra):
for req in reqs:
if not req.marker or req.marker.evaluate({'extra': extra}):
yield req
common = frozenset(reqs_for_extra(None))
dm[None].extend(common)
for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
s_extra = safe_extra(extra.strip())
dm[s_extra] = list(frozenset(reqs_for_extra(extra)) - common)
return dm
_distributionImpl = {
'.egg': Distribution,
'.egg-info': EggInfoDistribution,
'.dist-info': DistInfoDistribution,
}
def issue_warning(*args, **kw):
level = 1
g = globals()
try:
# find the first stack frame that is *not* code in
# the pkg_resources module, to use for the warning
while sys._getframe(level).f_globals is g:
level += 1
except ValueError:
pass
warnings.warn(stacklevel=level + 1, *args, **kw)
class RequirementParseError(ValueError):
def __str__(self):
return ' '.join(self.args)
def parse_requirements(strs):
"""Yield ``Requirement`` objects for each specification in `strs`
`strs` must be a string, or a (possibly-nested) iterable thereof.
"""
# create a steppable iterator, so we can handle \-continuations
lines = iter(yield_lines(strs))
for line in lines:
# Drop comments -- a hash without a space may be in a URL.
if ' #' in line:
line = line[:line.find(' #')]
# If there is a line continuation, drop it, and append the next line.
if line.endswith('\\'):
line = line[:-2].strip()
line += next(lines)
yield Requirement(line)
class Requirement(packaging.requirements.Requirement):
def __init__(self, requirement_string):
"""DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
try:
super(Requirement, self).__init__(requirement_string)
except packaging.requirements.InvalidRequirement as e:
raise RequirementParseError(str(e))
self.unsafe_name = self.name
project_name = safe_name(self.name)
self.project_name, self.key = project_name, project_name.lower()
self.specs = [
(spec.operator, spec.version) for spec in self.specifier]
self.extras = tuple(map(safe_extra, self.extras))
self.hashCmp = (
self.key,
self.specifier,
frozenset(self.extras),
str(self.marker) if self.marker else None,
)
self.__hash = hash(self.hashCmp)
def __eq__(self, other):
return (
isinstance(other, Requirement) and
self.hashCmp == other.hashCmp
)
def __ne__(self, other):
return not self == other
def __contains__(self, item):
if isinstance(item, Distribution):
if item.key != self.key:
return False
item = item.version
# Allow prereleases always in order to match the previous behavior of
# this method. In the future this should be smarter and follow PEP 440
# more accurately.
return self.specifier.contains(item, prereleases=True)
def __hash__(self):
return self.__hash
def __repr__(self): return "Requirement.parse(%r)" % str(self)
@staticmethod
def parse(s):
req, = parse_requirements(s)
return req
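# Illustrative usage of Requirement.parse (comment only):
#
#   r = Requirement.parse('foo[bar]>=1.0,<2.0')
#   r.key     # -> 'foo'
#   r.extras  # -> ('bar',)
#   r.specs   # -> [('>=', '1.0'), ('<', '2.0')]  (order may vary)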
def _get_mro(cls):
"""Get an mro for a type or classic class"""
if not isinstance(cls, type):
class cls(cls, object):
pass
return cls.__mro__[1:]
return cls.__mro__
def _find_adapter(registry, ob):
"""Return an adapter factory for `ob` from `registry`"""
for t in _get_mro(getattr(ob, '__class__', type(ob))):
if t in registry:
return registry[t]
def ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
def _bypass_ensure_directory(path):
"""Sandbox-bypassing version of ensure_directory()"""
if not WRITE_SUPPORT:
raise IOError('"os.mkdir" not supported on this platform.')
dirname, filename = split(path)
if dirname and filename and not isdir(dirname):
_bypass_ensure_directory(dirname)
mkdir(dirname, 0o755)
def split_sections(s):
"""Split a string or iterable thereof into (section, content) pairs
Each ``section`` is a stripped version of the section header ("[section]")
and each ``content`` is a list of stripped lines excluding blank lines and
comment-only lines. If there are any such lines before the first section
header, they're returned in a first ``section`` of ``None``.
"""
section = None
content = []
for line in yield_lines(s):
if line.startswith("["):
if line.endswith("]"):
if section or content:
yield section, content
section = line[1:-1].strip()
content = []
else:
raise ValueError("Invalid section heading", line)
else:
content.append(line)
# wrap up last segment
yield section, content
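# Illustrative usage of split_sections (comment only):
#
#   list(split_sections(['a', '[x]', 'b', 'c', '[y]']))
#   # -> [(None, ['a']), ('x', ['b', 'c']), ('y', [])]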
def _mkstemp(*args, **kw):
old_open = os.open
try:
# temporarily bypass sandboxing
os.open = os_open
return tempfile.mkstemp(*args, **kw)
finally:
# and then put it back
os.open = old_open
# Silence the PEP440Warning by default, so that end users don't get hit by it
# randomly just because they use pkg_resources. We want to append the rule
# because we want earlier uses of filterwarnings to take precedence over this
# one.
warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
# from jaraco.functools 1.3
def _call_aside(f, *args, **kwargs):
f(*args, **kwargs)
return f
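# Illustrative note on _call_aside (comment only): as a decorator it runs
# the function once at definition time and leaves the name bound, e.g.
#
#   @_call_aside
#   def _setup():
#       ...  # executed immediately when the module is imported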
@_call_aside
def _initialize(g=globals()):
"Set up global resource manager (deliberately not state-saved)"
manager = ResourceManager()
g['_manager'] = manager
for name in dir(manager):
if not name.startswith('_'):
g[name] = getattr(manager, name)
@_call_aside
def _initialize_master_working_set():
"""
Prepare the master working set and make the ``require()``
API available.
This function has explicit effects on the global state
of pkg_resources. It is intended to be invoked once at
the initialization of this module.
Invocation by other packages is unsupported and done
at their own risk.
"""
working_set = WorkingSet._build_master()
_declare_state('object', working_set=working_set)
require = working_set.require
iter_entry_points = working_set.iter_entry_points
add_activation_listener = working_set.subscribe
run_script = working_set.run_script
# backward compatibility
run_main = run_script
# Activate all distributions already on sys.path with replace=False and
# ensure that all distributions added to the working set in the future
# (e.g. by calling ``require()``) will get activated as well,
# with higher priority (replace=True).
dist = None # ensure dist is defined for del dist below
for dist in working_set:
dist.activate(replace=False)
del dist
add_activation_listener(lambda dist: dist.activate(replace=True), existing=False)
working_set.entries = []
# match order
list(map(working_set.add_entry, sys.path))
globals().update(locals())
|
bsd-3-clause
|
s-gv/rnicu-webapp
|
ecg/ecg_visualizer_ble_PC/visualizer.py
|
3
|
1891
|
'''
This program visualizes ECG waveforms on the PC
Copyright (C) 2014 Sagar G V (sagar.writeme@gmail.com)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import CC2540ble as ble
import streamplot
def main():
print "Connecting to BLE Dongle . . ."
bt = ble.BTDongle(port='/dev/ttyACM0')
print "Discovering BLE devices in the vicinity . . ."
devs = bt.discover()
print "BLE Devices found: ", devs
print "Changing conncection parameters . . ."
bt.changeConnectionSettings()
print "Establishing link to the first device found . . ."
print bt.link(devs[0])
print "Enabling notifications . . ."
print bt.enableNotifications('0x002F')
ecgPlot = streamplot.StreamPlot(saveFileNameStart = "ecg_plot",lines = [('l','r','ecg')], exitforce=True)
ecgPlot.addDataPoint(0, [0])
Tsample = 1/400.0 # in seconds
t = 0
for evt in bt.pollNotifications():
if len(evt) == 16:
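# Each 16-byte notification packs 8 little-endian 16-bit words.
# f2s below appears to sign-extend 14-bit two's-complement samples
# (inferred from the 2**13 / 2**14 constants); words >= 65530 are
# treated as sentinel values and filtered out just below.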
f2s = lambda x: x if x < 2**13 or x >= 65530 else (-2**14 + x)
vals = [ f2s(lsb + 256*msb) for (lsb, msb) in zip(evt[::2], evt[1::2]) ]
vals = [ val for val in vals if val < 65530 ]
for val in vals:
t += Tsample
ecgPlot.addDataPoint(t, [val])
if __name__ == '__main__':
main()
|
agpl-3.0
|
opencord/xos
|
scripts/add_copyright.py
|
2
|
8212
|
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
COPYRIGHTS = {
"slash": """
/*
* Copyright 2017-present Open Networking Foundation
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
""",
"hash": """
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""",
"html": """
<!--
Copyright 2017-present Open Networking Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
""",
"jinja": """
{#
Copyright 2017-present Open Networking Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
#}
""",
"ini": """
;Copyright 2017-present Open Networking Foundation
;
;Licensed under the Apache License, Version 2.0 (the "License");
;you may not use this file except in compliance with the License.
;You may obtain a copy of the License at
;
;http://www.apache.org/licenses/LICENSE-2.0
;
;Unless required by applicable law or agreed to in writing, software
;distributed under the License is distributed on an "AS IS" BASIS,
;WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
;See the License for the specific language governing permissions and
;limitations under the License.
""",
}
EXT_MAPPING = {
".js": COPYRIGHTS["slash"],
".ts": COPYRIGHTS["slash"],
".scss": COPYRIGHTS["slash"],
".css": COPYRIGHTS["slash"],
".gradle": COPYRIGHTS["slash"],
"docker": COPYRIGHTS["hash"],
".py": COPYRIGHTS["hash"],
".model": COPYRIGHTS["hash"], # attic files
".sh": COPYRIGHTS["hash"],
".yaml": COPYRIGHTS["hash"],
".yml": COPYRIGHTS["hash"],
".m4": COPYRIGHTS["hash"],
".sql": COPYRIGHTS["hash"],
".html": COPYRIGHTS["html"],
".j2": COPYRIGHTS["jinja"],
".ini": COPYRIGHTS["ini"],
}
def get_copyright(file):
name, ext = os.path.splitext(file)
if "Dockerfile" in name:
return EXT_MAPPING["docker"]
try:
return EXT_MAPPING[ext]
except KeyError, e:
print "Missing copyright for file of type: %s" % ext
return None
def add_copyright(file):
with open(file, 'r') as original: data = original.read()
if not "Copyright 2017-present Open Networking Foundation" in data:
print "Adding copyright to: %s" % file
copy = get_copyright(file)
if copy:
with open(file, 'w') as modified: modified.write(copy + "\n\n" + data)
return
def get_files_ignore_by_git(root):
# NOTE this is not perfect, some files will still be copyrighted, but we save some time
if root == ".gitignore":
gitignore = root
else:
gitignore = os.path.join(root, ".gitignore")
exclusion_list = []
if os.path.exists(gitignore):
for line in open(gitignore).readlines():
if not "#" in line:
line = line.strip()
if line.endswith("/"):
line = line.replace("/", "")
exclusion_list.append(line)
return exclusion_list
def should_skip(entry):
# do not skip directories
if os.path.isdir(entry):
return False
if "LICENSE.txt" in entry \
or ".git" in entry \
or ".idea" in entry:
return True
name, ext = os.path.splitext(entry)
if not ext or ext == "" \
or ext ==".pyc" \
or ext == ".txt" \
or ext == ".in" \
or ext == ".crt" \
or ext == ".unused" \
or ext == ".list" \
or ext == ".README" \
or ext == ".json" \
or ext == ".log" \
or ext == ".asc" \
or ext == ".dot" \
or ext == ".do" \
or ext == ".template" \
or ext == ".svg" \
or ext == ".ttf" \
or ext == ".woff" \
or ext == ".woof2" \
or ext == ".eot" \
or ext == ".md" \
or ext == ".png" \
or ext == ".PNG" \
or ext == ".jpg" \
or ext == ".gif" \
or ext == ".ico" \
or ext == ".conf"\
or ext == ".key" \
or ext == ".proto" \
or ext == ".xproto" \
or ext == ".xtarget" \
or ext == ".otf" \
or ext == ".desc":
return True
return False
def recursive_iterate_dirs(source, apply_copyright=True):
# print "Iteranting on: %s" % source
# skipping files in the gitignore
gitignored = get_files_ignore_by_git(source)
entries = []
for entry in os.listdir(source):
if entry in gitignored:
# print "Skipping because gitignored: %s" % entry
continue
entry = os.path.join(source, entry)
if should_skip(entry):
# print "Skipping: %s" % entry
continue
if os.path.isdir(entry):
entries.append(recursive_iterate_dirs(entry, apply_copyright))
elif os.path.isfile(entry):
entries.append(entry)
if apply_copyright is True:
add_copyright(entry)
return entries
def flatten(aList):
t = []
for i in aList:
if not isinstance(i, list):
t.append(i)
else:
t.extend(flatten(i))
return t
def list_file_types(source):
file_types = []
files = flatten(recursive_iterate_dirs(source, apply_copyright=False))
for entry in files:
name, ext = os.path.splitext(entry)
if not ext in file_types:
file_types.append(ext)
print file_types
def main():
if len(sys.argv) < 2:
raise Exception("You must provide a path to the source folder as arguments to the script")
source_root = os.path.abspath(os.path.join(os.getcwd(), sys.argv[1]))
if not os.path.exists(source_root):
raise Exception("You must provide an existing the source folder")
if len(sys.argv) == 3:
list_file_types(source_root)
else:
recursive_iterate_dirs(source_root)
if __name__ == "__main__":
main()
|
apache-2.0
|
SNAPPETITE/backend
|
flask/lib/python2.7/site-packages/sqlalchemy/orm/__init__.py
|
34
|
8033
|
# orm/__init__.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Functional constructs for ORM configuration.
See the SQLAlchemy object relational tutorial and mapper configuration
documentation for an overview of how this module is used.
"""
from . import exc
from .mapper import (
Mapper,
_mapper_registry,
class_mapper,
configure_mappers,
reconstructor,
validates
)
from .interfaces import (
EXT_CONTINUE,
EXT_STOP,
PropComparator,
)
from .deprecated_interfaces import (
MapperExtension,
SessionExtension,
AttributeExtension,
)
from .util import (
aliased,
join,
object_mapper,
outerjoin,
polymorphic_union,
was_deleted,
with_parent,
with_polymorphic,
)
from .properties import ColumnProperty
from .relationships import RelationshipProperty
from .descriptor_props import (
ComparableProperty,
CompositeProperty,
SynonymProperty,
)
from .relationships import (
foreign,
remote,
)
from .session import (
Session,
object_session,
sessionmaker,
make_transient,
make_transient_to_detached
)
from .scoping import (
scoped_session
)
from . import mapper as mapperlib
from .query import AliasOption, Query, Bundle
from ..util.langhelpers import public_factory
from .. import util as _sa_util
from . import strategies as _strategies
def create_session(bind=None, **kwargs):
"""Create a new :class:`.Session`
with no automation enabled by default.
This function is used primarily for testing. The usual
route to :class:`.Session` creation is via its constructor
or the :func:`.sessionmaker` function.
:param bind: optional, a single Connectable to use for all
database access in the created
:class:`~sqlalchemy.orm.session.Session`.
:param \*\*kwargs: optional, passed through to the
:class:`.Session` constructor.
:returns: an :class:`~sqlalchemy.orm.session.Session` instance
The defaults of create_session() are the opposite of that of
:func:`sessionmaker`; ``autoflush`` and ``expire_on_commit`` are
False, ``autocommit`` is True. In this sense the session acts
more like the "classic" SQLAlchemy 0.3 session with these.
Usage::
>>> from sqlalchemy.orm import create_session
>>> session = create_session()
It is recommended to use :func:`sessionmaker` instead of
create_session().
"""
kwargs.setdefault('autoflush', False)
kwargs.setdefault('autocommit', True)
kwargs.setdefault('expire_on_commit', False)
return Session(bind=bind, **kwargs)
relationship = public_factory(RelationshipProperty, ".orm.relationship")
def relation(*arg, **kw):
"""A synonym for :func:`relationship`."""
return relationship(*arg, **kw)
def dynamic_loader(argument, **kw):
"""Construct a dynamically-loading mapper property.
This is essentially the same as
using the ``lazy='dynamic'`` argument with :func:`relationship`::
dynamic_loader(SomeClass)
# is the same as
relationship(SomeClass, lazy="dynamic")
See the section :ref:`dynamic_relationship` for more details
on dynamic loading.
"""
kw['lazy'] = 'dynamic'
return relationship(argument, **kw)
column_property = public_factory(ColumnProperty, ".orm.column_property")
composite = public_factory(CompositeProperty, ".orm.composite")
def backref(name, **kwargs):
"""Create a back reference with explicit keyword arguments, which are the
same arguments one can send to :func:`relationship`.
Used with the ``backref`` keyword argument to :func:`relationship` in
place of a string argument, e.g.::
'items':relationship(
SomeItem, backref=backref('parent', lazy='subquery'))
.. seealso::
:ref:`relationships_backref`
"""
return (name, kwargs)
def deferred(*columns, **kw):
"""Indicate a column-based mapped attribute that by default will
not load unless accessed.
:param \*columns: columns to be mapped. This is typically a single
:class:`.Column` object, however a collection is supported in order
to support multiple columns mapped under the same attribute.
:param \**kw: additional keyword arguments passed to
:class:`.ColumnProperty`.
.. seealso::
:ref:`deferred`
"""
return ColumnProperty(deferred=True, *columns, **kw)
mapper = public_factory(Mapper, ".orm.mapper")
synonym = public_factory(SynonymProperty, ".orm.synonym")
comparable_property = public_factory(ComparableProperty,
".orm.comparable_property")
@_sa_util.deprecated("0.7", message=":func:`.compile_mappers` "
"is renamed to :func:`.configure_mappers`")
def compile_mappers():
"""Initialize the inter-mapper relationships of all mappers that have
been defined.
"""
configure_mappers()
def clear_mappers():
"""Remove all mappers from all classes.
This function removes all instrumentation from classes and disposes
of their associated mappers. Once called, the classes are unmapped
and can be later re-mapped with new mappers.
:func:`.clear_mappers` is *not* for normal use, as there is literally no
valid usage for it outside of very specific testing scenarios. Normally,
mappers are permanent structural components of user-defined classes, and
are never discarded independently of their class. If a mapped class
itself is garbage collected, its mapper is automatically disposed of as
well. As such, :func:`.clear_mappers` is only for usage in test suites
that re-use the same classes with different mappings, which is itself an
extremely rare use case - the only such use case is in fact SQLAlchemy's
own test suite, and possibly the test suites of other ORM extension
libraries which intend to test various combinations of mapper construction
upon a fixed set of classes.
"""
mapperlib._CONFIGURE_MUTEX.acquire()
try:
while _mapper_registry:
try:
# can't even reliably call list(weakdict) in jython
mapper, b = _mapper_registry.popitem()
mapper.dispose()
except KeyError:
pass
finally:
mapperlib._CONFIGURE_MUTEX.release()
from . import strategy_options
joinedload = strategy_options.joinedload._unbound_fn
joinedload_all = strategy_options.joinedload._unbound_all_fn
contains_eager = strategy_options.contains_eager._unbound_fn
defer = strategy_options.defer._unbound_fn
undefer = strategy_options.undefer._unbound_fn
undefer_group = strategy_options.undefer_group._unbound_fn
load_only = strategy_options.load_only._unbound_fn
lazyload = strategy_options.lazyload._unbound_fn
lazyload_all = strategy_options.lazyload_all._unbound_all_fn
subqueryload = strategy_options.subqueryload._unbound_fn
subqueryload_all = strategy_options.subqueryload_all._unbound_all_fn
immediateload = strategy_options.immediateload._unbound_fn
noload = strategy_options.noload._unbound_fn
defaultload = strategy_options.defaultload._unbound_fn
from .strategy_options import Load
def eagerload(*args, **kwargs):
"""A synonym for :func:`joinedload()`."""
return joinedload(*args, **kwargs)
def eagerload_all(*args, **kwargs):
"""A synonym for :func:`joinedload_all()`"""
return joinedload_all(*args, **kwargs)
contains_alias = public_factory(AliasOption, ".orm.contains_alias")
def __go(lcls):
global __all__
from .. import util as sa_util
from . import dynamic
from . import events
import inspect as _inspect
__all__ = sorted(name for name, obj in lcls.items()
if not (name.startswith('_') or _inspect.ismodule(obj)))
_sa_util.dependencies.resolve_all("sqlalchemy.orm")
__go(locals())
|
mit
|
eXistenZNL/SickRage
|
lib/sqlalchemy/dialects/firebird/fdb.py
|
79
|
4365
|
# firebird/fdb.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: firebird+fdb
:name: fdb
:dbapi: fdb
:connectstring: firebird+fdb://user:password@host:port/path/to/db[?key=value&key=value...]
:url: http://pypi.python.org/pypi/fdb/
fdb is a kinterbasdb compatible DBAPI for Firebird.
.. versionadded:: 0.8 - Support for the fdb Firebird driver.
.. versionchanged:: 0.9 - The fdb dialect is now the default dialect
under the ``firebird://`` URL space, as ``fdb`` is now the official
Python driver for Firebird.
Arguments
----------
The ``fdb`` dialect is based on the :mod:`sqlalchemy.dialects.firebird.kinterbasdb`
dialect; however, it does not accept every argument that kinterbasdb does.
* ``enable_rowcount`` - True by default, setting this to False disables
the usage of "cursor.rowcount" with the
Kinterbasdb dialect, which SQLAlchemy ordinarily calls upon automatically
after any UPDATE or DELETE statement. When disabled, SQLAlchemy's
ResultProxy will return -1 for result.rowcount. The rationale here is
that Kinterbasdb requires a second round trip to the database when
.rowcount is called - since SQLA's resultproxy automatically closes
the cursor after a non-result-returning statement, rowcount must be
called, if at all, before the result object is returned. Additionally,
cursor.rowcount may not return correct results with older versions
of Firebird, and setting this flag to False will also cause the
SQLAlchemy ORM to ignore its usage. The behavior can also be controlled on a
per-execution basis using the ``enable_rowcount`` option with
:meth:`.Connection.execution_options`::
conn = engine.connect().execution_options(enable_rowcount=True)
r = conn.execute(stmt)
print r.rowcount
* ``retaining`` - False by default. Setting this to True will pass the
``retaining=True`` keyword argument to the ``.commit()`` and ``.rollback()``
methods of the DBAPI connection, which can improve performance in some
situations, but apparently with significant caveats.
Please read the fdb and/or kinterbasdb DBAPI documentation in order to
understand the implications of this flag.
.. versionadded:: 0.8.2 - ``retaining`` keyword argument specifying
transaction retaining behavior - in 0.8 it defaults to ``True``
for backwards compatibility.
.. versionchanged:: 0.9.0 - the ``retaining`` flag defaults to ``False``.
In 0.8 it defaulted to ``True``.
.. seealso::
http://pythonhosted.org/fdb/usage-guide.html#retaining-transactions - information
on the "retaining" flag.
"""
from .kinterbasdb import FBDialect_kinterbasdb
from ... import util
class FBDialect_fdb(FBDialect_kinterbasdb):
def __init__(self, enable_rowcount=True,
retaining=False, **kwargs):
super(FBDialect_fdb, self).__init__(
enable_rowcount=enable_rowcount,
retaining=retaining, **kwargs)
@classmethod
def dbapi(cls):
return __import__('fdb')
def create_connect_args(self, url):
opts = url.translate_connect_args(username='user')
if opts.get('port'):
opts['host'] = "%s/%s" % (opts['host'], opts['port'])
del opts['port']
opts.update(url.query)
util.coerce_kw_type(opts, 'type_conv', int)
return ([], opts)
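# Illustrative mapping performed by create_connect_args (comment only;
# URL values hypothetical):
#
#   firebird+fdb://sysdba:pass@localhost:3050/db.fdb
#   # -> ([], {'user': 'sysdba', 'password': 'pass',
#   #          'host': 'localhost/3050', 'database': 'db.fdb'})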
def _get_server_version_info(self, connection):
"""Get the version of the Firebird server used by a connection.
Returns a tuple of (`major`, `minor`, `build`), three integers
representing the version of the attached server.
"""
# This is the simpler approach (the other uses the services api),
# that for backward compatibility reasons returns a string like
# LI-V6.3.3.12981 Firebird 2.0
# where the first version is a fake one resembling the old
# Interbase signature.
isc_info_firebird_version = 103
fbconn = connection.connection
version = fbconn.db_info(isc_info_firebird_version)
return self._parse_version_info(version)
dialect = FBDialect_fdb
|
gpl-3.0
|
poffuomo/spark
|
examples/src/main/python/ml/n_gram_example.py
|
123
|
1545
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on$
from pyspark.ml.feature import NGram
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("NGramExample")\
.getOrCreate()
# $example on$
wordDataFrame = spark.createDataFrame([
(0, ["Hi", "I", "heard", "about", "Spark"]),
(1, ["I", "wish", "Java", "could", "use", "case", "classes"]),
(2, ["Logistic", "regression", "models", "are", "neat"])
], ["id", "words"])
ngram = NGram(n=2, inputCol="words", outputCol="ngrams")
ngramDataFrame = ngram.transform(wordDataFrame)
ngramDataFrame.select("ngrams").show(truncate=False)
# $example off$
spark.stop()
|
apache-2.0
|
Arquanite/mcshopgen
|
shopgen.py
|
1
|
2584
|
#!/bin/python3
import os
import yaml
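# Illustrative shop.yml layout this script expects (values hypothetical,
# inferred from the keys accessed below):
#
#   buy-postfix: "_buy"
#   sell-postfix: "_sell"
#   shop:
#     - name: "Tools"
#       currency: "minecraft:emerald"
#       offer:
#         - {item: "minecraft:iron_pickaxe", amount: 1, price: 3, type: 0}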
config = ''
subdir = '__output__'
with open("shop.yml", 'r') as stream:
try:
config = yaml.load(stream)
except yaml.YAMLError as exc:
print(exc)
try:
os.mkdir(subdir)
except Exception:
pass
for shop in config['shop']:
with open(os.path.join(subdir, shop['name'] + config['buy-postfix'] + '.xd'), "w") as text_file:
text_file.write('/summon Villager ~ ~1 ~ {')
text_file.write('Profession:0,')
text_file.write('CustomName:"{0}",'.format(shop['name'] + config['buy-postfix']))
text_file.write('CustomNameVisible:1,Career:1,CareerLevel:42,PersistenceRequired:1,NoAI:1,Silent:1,Invulnerable:1,')
text_file.write('Attributes:[{Name:"generic.knockbackResistance",Base:"1f"},{Name:"generic.movementSpeed",Base:"0f"},{Name:"generic.maxHealth",Base: 99999}],')
text_file.write('Offers:{Recipes:[')
for offer in shop['offer']:
text_file.write('{{buy:{{id:"{0}",Count:{1}'.format(offer['item'], offer['amount']))
if 'type' in offer:
text_file.write(',Damage:{0}'.format(offer['type']))
text_file.write('}},maxUses:9999999,sell:{{id:"{0}",Count:{1}}},rewardExp:false}}'.format(shop['currency'],offer['price']))
if not offer == shop['offer'][-1]:
text_file.write(',')
text_file.write(']},Rotation:[90f,0f]}')
with open(os.path.join(subdir, shop['name'] + config['sell-postfix'] + '.xd'), "w") as text_file:
text_file.write('/summon Villager ~ ~1 ~ {')
text_file.write('Profession:0,')
text_file.write('CustomName:"{0}",'.format(shop['name'] + config['sell-postfix']))
text_file.write('CustomNameVisible:1,Career:1,CareerLevel:42,PersistenceRequired:1,NoAI:1,Silent:1,Invulnerable:1,')
text_file.write('Attributes:[{Name:"generic.knockbackResistance",Base:"1f"},{Name:"generic.movementSpeed",Base:"0f"},{Name:"generic.maxHealth",Base: 99999}],')
text_file.write('Offers:{Recipes:[')
for offer in shop['offer']:
text_file.write('{{buy:{{id:"{0}",Count:{1}'.format(shop['currency'], offer['price']))
text_file.write('}},maxUses:9999999,sell:{{id:"{0}",Count:{1}'.format(offer['item'],offer['amount']))
if 'type' in offer:
text_file.write(',Damage:{0}'.format(offer['type']))
text_file.write('},rewardExp:false}')
if not offer == shop['offer'][-1]:
text_file.write(',')
text_file.write(']},Rotation:[90f,0f]}')
|
gpl-3.0
|
VimVincent/three.js
|
utils/exporters/blender/addons/io_three/exporter/api/material.py
|
124
|
7818
|
from bpy import data, types
from .. import constants, logger
from .constants import MULTIPLY, WIRE, IMAGE
def _material(func):
"""
:param func:
"""
def inner(name, *args, **kwargs):
"""
:param name:
:param *args:
:param **kwargs:
"""
if isinstance(name, types.Material):
material = name
else:
material = data.materials[name]
return func(material, *args, **kwargs)
return inner
@_material
def ambient_color(material):
"""
:param material:
:return: rgb value
:rtype: tuple
"""
logger.debug("material.ambient_color(%s)", material)
diffuse = diffuse_color(material)
return (material.ambient * diffuse[0],
material.ambient * diffuse[1],
material.ambient * diffuse[2])
@_material
def blending(material):
"""
:param material:
:return: THREE_blending_type value
"""
logger.debug("material.blending(%s)", material)
try:
blend = material.THREE_blending_type
except AttributeError:
logger.debug("No THREE_blending_type attribute found")
blend = constants.NORMAL_BLENDING
return blend
@_material
def bump_map(material):
"""
:param material:
:return: texture node for bump
"""
logger.debug("material.bump_map(%s)", material)
for texture in _valid_textures(material):
if texture.use_map_normal and not \
texture.texture.use_normal_map:
return texture.texture
@_material
def bump_scale(material):
"""
:param material:
:rtype: float
"""
return normal_scale(material)
@_material
def depth_test(material):
"""
:param material:
:return: THREE_depth_test value
:rtype: bool
"""
logger.debug("material.depth_test(%s)", material)
try:
test = material.THREE_depth_test
except AttributeError:
logger.debug("No THREE_depth_test attribute found")
test = True
return test
@_material
def depth_write(material):
"""
:param material:
:return: THREE_depth_write value
:rtype: bool
"""
logger.debug("material.depth_write(%s)", material)
try:
write = material.THREE_depth_write
except AttributeError:
logger.debug("No THREE_depth_write attribute found")
write = True
return write
@_material
def diffuse_color(material):
"""
:param material:
:return: rgb value
:rtype: tuple
"""
logger.debug("material.diffuse_color(%s)", material)
return (material.diffuse_intensity * material.diffuse_color[0],
material.diffuse_intensity * material.diffuse_color[1],
material.diffuse_intensity * material.diffuse_color[2])
@_material
def diffuse_map(material):
"""
:param material:
:return: texture node for map
"""
logger.debug("material.diffuse_map(%s)", material)
for texture in _valid_textures(material):
if texture.use_map_color_diffuse and not \
texture.blend_type == MULTIPLY:
return texture.texture
@_material
def emissive_color(material):
"""
:param material:
:return: rgb value
:rtype: tuple
"""
logger.debug("material.emissive_color(%s)", material)
diffuse = diffuse_color(material)
return (material.emit * diffuse[0],
material.emit * diffuse[1],
material.emit * diffuse[2])
@_material
def light_map(material):
"""
:param material:
:return: texture node for light maps
"""
logger.debug("material.light_map(%s)", material)
for texture in _valid_textures(material, strict_use=False):
if texture.use_map_color_diffuse and \
texture.blend_type == MULTIPLY:
return texture.texture
@_material
def normal_scale(material):
"""
:param material:
:rtype: float
"""
logger.debug("material.normal_scale(%s)", material)
for texture in _valid_textures(material):
if texture.use_map_normal:
return texture.normal_factor
@_material
def normal_map(material):
"""
:param material:
:return: texture node for normals
"""
logger.debug("material.normal_map(%s)", material)
for texture in _valid_textures(material):
if texture.use_map_normal and \
texture.texture.use_normal_map:
return texture.texture
@_material
def opacity(material):
"""
:param material:
:rtype: float
"""
logger.debug("material.opacity(%s)", material)
return round(material.alpha, 2)
@_material
def shading(material):
"""
:param material:
:return: shading type (phong or lambert)
"""
logger.debug("material.shading(%s)", material)
dispatch = {
True: constants.PHONG,
False: constants.LAMBERT
}
return dispatch[material.specular_intensity > 0.0]
@_material
def specular_coef(material):
"""
:param material:
:rtype: float
"""
logger.debug("material.specular_coef(%s)", material)
return material.specular_hardness
@_material
def specular_color(material):
"""
:param material:
:return: rgb value
:rtype: tuple
"""
logger.debug("material.specular_color(%s)", material)
return (material.specular_intensity * material.specular_color[0],
material.specular_intensity * material.specular_color[1],
material.specular_intensity * material.specular_color[2])
@_material
def specular_map(material):
"""
:param material:
:return: texture node for specular
"""
logger.debug("material.specular_map(%s)", material)
for texture in _valid_textures(material):
if texture.use_map_specular:
return texture.texture
@_material
def transparent(material):
"""
:param material:
:rtype: bool
"""
logger.debug("material.transparent(%s)", material)
return material.use_transparency
@_material
def type(material):
"""
:param material:
:return: THREE compatible shader type
"""
logger.debug("material.type(%s)", material)
if material.diffuse_shader != 'LAMBERT':
material_type = constants.BASIC
elif material.specular_intensity > 0:
material_type = constants.PHONG
else:
material_type = constants.LAMBERT
return material_type
@_material
def use_vertex_colors(material):
"""
:param material:
:rtype: bool
"""
logger.debug("material.use_vertex_colors(%s)", material)
return material.use_vertex_color_paint
def used_materials():
"""
:return: list of materials that are in use
:rtype: generator
"""
logger.debug("material.used_materials()")
for material in data.materials:
if material.users > 0:
yield material.name
@_material
def visible(material):
"""
:param material:
:return: THREE_visible value
:rtype: bool
"""
logger.debug("material.visible(%s)", material)
try:
vis = material.THREE_visible
except AttributeError:
logger.debug("No THREE_visible attribute found")
vis = True
return vis
@_material
def wireframe(material):
"""
:param material:
:rtype: bool
"""
logger.debug("material.wireframe(%s)", material)
return material.type == WIRE
def _valid_textures(material, strict_use=True):
"""
:param material:
:rtype: generator
"""
for texture in material.texture_slots:
if not texture:
continue
if strict_use:
in_use = texture.use
else:
in_use = True
if texture.texture.type != IMAGE or not in_use:
continue
logger.debug("Valid texture found %s", texture)
yield texture
|
mit
|
Kismuz/btgym
|
btgym/research/model_based/datafeed/base.py
|
1
|
29953
|
###############################################################################
#
# Copyright (C) 2017, 2018 Andrew Muzikin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from logbook import Logger, StreamHandler, WARNING
import datetime
import sys, os
import copy
import backtrader.feeds as btfeeds
import numpy as np
import pandas as pd
from btgym.datafeed.derivative import BTgymDataset2
from btgym.datafeed.multi import BTgymMultiData
def base_random_generator_fn(num_points=10, **kwargs):
"""
Base random uniform generating function. Provides synthetic data points.
Args:
num_points: trajectory length
kwargs: any function parameters, not used here
Returns:
1d array of generated values; here: randoms in [0,1]
"""
return np.random.random(num_points)
def base_bias_generator_fn(num_points=10, bias=1, **kwargs):
"""
Base bias generating function. Provides constant synthetic data points.
Args:
num_points: trajectory length
bias: data point constant value >=0
kwargs: any function parameters, not used here
Returns:
1d array of generated values; here: constant values equal to `bias`
"""
assert bias >= 0, 'Only positive bias allowed, got: {}'.format(bias)
return np.ones(num_points) * bias
def base_generator_parameters_fn(**kwargs):
"""
Base parameters generating function. Provides arguments for data generating function.
It itself accept arguments specified via `generator_parameters_config` dictionary;
Returns:
dictionary of kwargs consistent with generating function used.
"""
return dict()
def base_random_uniform_parameters_fn(**kwargs):
"""
Provides samples for kwargs given.
If parameter is set as float - returns exactly given value;
if parameter is set as iterable of form [a, b] - uniformly randomly samples parameters value
form given interval.
Args:
**kwargs: any kwarg specifying float or iterable of two ordered floats
Returns:
dictionary of kwargs holding sampled values
"""
samples = {}
for key, value in kwargs.items():
if type(value) in [int, float, np.float64]:
interval = [value, value]
else:
interval = list(value)
assert len(interval) == 2 and interval[0] <= interval[-1], \
'Expected parameter <{}> to be a float or an ordered interval, got: {}'.format(key, value)
samples[key] = np.random.uniform(low=interval[0], high=interval[-1])
return samples
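# Illustrative usage (comment only): floats pass through unchanged,
# intervals are sampled uniformly:
#
#   base_random_uniform_parameters_fn(bias=1.0, drift=[-0.1, 0.1])
#   # -> {'bias': 1.0, 'drift': <uniform sample from [-0.1, 0.1]>}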
def base_spread_generator_fn(num_points=10, alpha=1, beta=1, minimum=0, maximum=0):
"""
Generates spread values for a single synthetic trajectory. Samples are drawn from a parametrized beta-distribution;
if the base generated trajectory P is given, then High/Ask value = P + 1/2 * Spread; Low/Bid value = P - 1/2 * Spread
Args:
num_points: trajectory length
alpha: beta-distribution alpha param.
beta: beta-distribution beta param.
minimum: spread minimum value
maximum: spread maximum value
Returns:
1d array of generated values;
"""
assert alpha > 0 and beta > 0, 'Beta-distribution parameters should be positive, got: {},{}'.format(alpha, beta)
assert minimum <= maximum, 'Spread min/max values should form ordered pair, got: {}/{}'.format(minimum, maximum)
return minimum + np.random.beta(a=alpha, b=beta, size=num_points) * (maximum - minimum)
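# Illustrative usage (comment only): five spread values in [0.01, 0.05],
# skewed toward the minimum by the beta(2, 5) shape:
#
#   base_spread_generator_fn(num_points=5, alpha=2, beta=5,
#                            minimum=0.01, maximum=0.05)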
class BaseDataGenerator:
"""
Base synthetic data provider class.
"""
def __init__(
self,
episode_duration=None,
timeframe=1,
generator_fn=base_random_generator_fn,
generator_parameters_fn=base_generator_parameters_fn,
generator_parameters_config=None,
spread_generator_fn=None,
spread_generator_parameters=None,
name='BaseSyntheticDataGenerator',
data_names=('default_asset',),
parsing_params=None,
target_period=-1,
global_time=None,
task=0,
log_level=WARNING,
_nested_class_ref=None,
_nested_params=None,
**kwargs
):
"""
Args:
episode_duration: dict, duration of episode in days/hours/mins
generator_fn: callable, should return generated data as 1D np.array
generator_parameters_fn: callable, should return dictionary of generator_fn kwargs
generator_parameters_config: dict, generator_parameters_fn args
spread_generator_fn: callable, should return values of spread to form {High, Low}
spread_generator_parameters: dict, spread_generator_fn args
timeframe: int, data periodicity in minutes
name: str
data_names: iterable of str
target_period: int or dict, if set to -1 - disables `test` sampling
global_time: dict {y, m, d} to set custom global time (only for plotting)
task: int
log_level: logbook.Logger level
**kwargs:
"""
# Logging:
self.log_level = log_level
self.task = task
self.name = name
self.filename = self.name + '_sample'
self.target_period = target_period
self.data_names = data_names
self.data_name = self.data_names[0]
self.sample_instance = None
self.metadata = {'sample_num': 0, 'type': None, 'parent_sample_type': None}
self.data = None
self.data_stat = None
self.sample_num = 0
self.is_ready = False
if _nested_class_ref is None:
self.nested_class_ref = BaseDataGenerator
else:
self.nested_class_ref = _nested_class_ref
if _nested_params is None:
self.nested_params = dict(
episode_duration=episode_duration,
timeframe=timeframe,
generator_fn=generator_fn,
generator_parameters_fn=generator_parameters_fn,
generator_parameters_config=generator_parameters_config,
name=name,
data_names=data_names,
task=task,
log_level=log_level,
_nested_class_ref=_nested_class_ref,
_nested_params=_nested_params,
)
else:
self.nested_params = _nested_params
StreamHandler(sys.stdout).push_application()
self.log = Logger('{}_{}'.format(self.name, self.task), level=self.log_level)
# Default sample time duration:
if episode_duration is None:
self.episode_duration = dict(
days=0,
hours=23,
minutes=55,
)
else:
self.episode_duration = episode_duration
# Btfeed parsing setup:
if parsing_params is None:
self.parsing_params = dict(
names=['ask', 'bid', 'mid'],
datetime=0,
timeframe=1,
open='mid',
high='ask',
low='bid',
close='mid',
volume=-1,
openinterest=-1
)
else:
self.parsing_params = parsing_params
self.columns_map = {
'open': 'mean',
'high': 'maximum',
'low': 'minimum',
'close': 'mean',
'bid': 'minimum',
'ask': 'maximum',
'mid': 'mean',
'volume': 'nothing',
}
self.nested_params['parsing_params'] = self.parsing_params
for key, value in self.parsing_params.items():
setattr(self, key, value)
# base data feed related:
self.params = {}
if global_time is None:
self.global_time = datetime.datetime(year=2018, month=1, day=1)
else:
self.global_time = datetime.datetime(**global_time)
self.global_timestamp = self.global_time.timestamp()
# Infer time indexes and sample number of records:
self.train_index = pd.timedelta_range(
start=datetime.timedelta(days=0, hours=0, minutes=0),
end=datetime.timedelta(**self.episode_duration),
freq='{}min'.format(self.timeframe)
)
self.test_index = pd.timedelta_range(
start=self.train_index[-1] + datetime.timedelta(minutes=self.timeframe),
periods=len(self.train_index),
freq='{}min'.format(self.timeframe)
)
self.train_index += self.global_time
self.test_index += self.global_time
self.episode_num_records = len(self.train_index)
self.generator_fn = generator_fn
self.generator_parameters_fn = generator_parameters_fn
if generator_parameters_config is not None:
self.generator_parameters_config = generator_parameters_config
else:
self.generator_parameters_config = {}
self.spread_generator_fn = spread_generator_fn
if spread_generator_parameters is not None:
self.spread_generator_parameters = spread_generator_parameters
else:
self.spread_generator_parameters = {}
def set_logger(self, level=None, task=None):
"""
Sets logbook logger.
Args:
level: logbook.level, int
task: task id, int
"""
if task is not None:
self.task = task
if level is not None:
self.log = Logger('{}_{}'.format(self.name, self.task), level=level)
def reset(self, **kwargs):
self.read_csv()
self.sample_num = 0
self.is_ready = True
def read_csv(self, **kwargs):
self.data = self.generate_data(self.generator_parameters_fn(**self.generator_parameters_config))
def generate_data(self, generator_params, sample_type=0):
"""
Generates data trajectory, performs base consistency checks.
Args:
generator_params: dict, data_generating_function parameters
sample_type: 0 - generate train data | 1 - generate test data
Returns:
data as pandas dataframe
"""
assert sample_type in [0, 1],\
'Expected sample type to be either 0 (train) or 1 (test), got: {}'.format(sample_type)
# Generate data points:
data_array = self.generator_fn(num_points=self.episode_num_records, **generator_params)
assert len(data_array.shape) == 1 and data_array.shape[0] == self.episode_num_records,\
'Expected generated data to be 1D array of length {}, got data shape: {}'.format(
self.episode_num_records,
data_array.shape
)
if self.spread_generator_fn is not None:
spread_array = self.spread_generator_fn(
num_points=self.episode_num_records,
**self.spread_generator_parameters
)
assert len(spread_array.shape) == 1 and spread_array.shape[0] == self.episode_num_records, \
'Expected generated spread to be 1D array of length {}, got data shape: {}'.format(
self.episode_num_records,
spread_array.shape
)
else:
spread_array = np.zeros(self.episode_num_records)
data_dict = {
'mean': data_array,
'maximum': data_array + .5 * spread_array,
'minimum': data_array - .5 * spread_array,
'nothing': data_array * 0.0,
}
# negs = data_dict['minimum'] < 0
# if negs.any():
# self.log.warning('{} negative generated values detected'.format(negs.shape[0]))
# Make dataframe:
if sample_type:
index = self.test_index
else:
index = self.train_index
# Map dictionary of data to dataframe columns:
df = pd.DataFrame(data={name: data_dict[self.columns_map[name]] for name in self.names}, index=index)
# df = df.set_index('hh:mm:ss')
return df
def sample(self, get_new=True, sample_type=0, **kwargs):
"""
Samples continuous subset of data.
Args:
get_new (bool): not used;
sample_type (int or bool): 0 (train) or 1 (test) - get sample from train or test data subsets
respectively.
Returns:
Dataset instance with number of records ~ max_episode_len.
"""
try:
assert sample_type in [0, 1]
except AssertionError:
msg = 'Sampling attempt: expected sample type to be in {}, got: {}'.format([0, 1], sample_type)
self.log.error(msg)
raise ValueError(msg)
if self.target_period == -1 and sample_type:
msg = 'Attempt to sample type {} given disabled target_period'.format(sample_type)
self.log.error(msg)
raise ValueError(msg)
if self.metadata['type'] is not None:
if self.metadata['type'] != sample_type:
self.log.warning(
'Attempt to sample type {} given current sample type {}, overridden.'.format(
sample_type,
self.metadata['type']
)
)
sample_type = self.metadata['type']
# Get sample:
self.sample_instance = self.sample_synthetic(sample_type)
self.sample_instance.metadata['type'] = sample_type
self.sample_instance.metadata['sample_num'] = self.sample_num
self.sample_instance.metadata['parent_sample_num'] = self.metadata['sample_num']
self.sample_instance.metadata['parent_sample_type'] = self.metadata['type']
self.sample_num += 1
return self.sample_instance
def sample_synthetic(self, sample_type=0):
"""
Get data_generator instance containing synthetic data.
Args:
sample_type (int or bool): 0 (train) or 1 (test) - get sample with train or test time periods
respectively.
Returns:
nested_class_ref instance
"""
# Generate data:
generator_params = self.generator_parameters_fn(**self.generator_parameters_config)
data = self.generate_data(generator_params, sample_type=sample_type)
# Make data_class instance:
sample_instance = self.nested_class_ref(**self.nested_params)
sample_instance.filename += '_{}'.format(self.sample_num)
self.log.info('New sample id: <{}>.'.format(sample_instance.filename))
# Add data and metadata:
sample_instance.data = data
sample_instance.metadata['generator'] = generator_params
sample_instance.metadata['first_row'] = 0
sample_instance.metadata['last_row'] = self.episode_num_records
return sample_instance
def describe(self):
"""
Returns summary dataset statistic as pandas dataframe:
- records count,
- data mean,
- data std dev,
- min value,
- 25% percentile,
- 50% percentile,
- 75% percentile,
- max value
for every data column.
"""
# Pretty straightforward, using standard pandas utility.
# The only caveat here is that if actual data has not been loaded yet, need to load, describe and unload again,
# thus avoiding passing big files to BT server:
flush_data = False
try:
assert not self.data.empty
        except (AssertionError, AttributeError):
self.read_csv()
flush_data = True
self.data_stat = self.data.describe()
self.log.info('Data summary:\n{}'.format(self.data_stat.to_string()))
if flush_data:
self.data = None
self.log.info('Flushed data.')
return self.data_stat
def to_btfeed(self):
"""
Performs BTgymData-->bt.feed conversion.
Returns:
dict of type: {data_line_name: bt.datafeed instance}.
"""
try:
assert not self.data.empty
btfeed = btfeeds.PandasDirectData(
dataname=self.data,
timeframe=self.timeframe,
datetime=self.datetime,
open=self.open,
high=self.high,
low=self.low,
close=self.close,
volume=self.volume,
openinterest=self.openinterest
)
btfeed.numrecords = self.data.shape[0]
return {self.data_name: btfeed}
        except (AssertionError, AttributeError):
msg = 'Instance holds no data. Hint: forgot to call .read_csv()?'
self.log.error(msg)
raise AssertionError(msg)
def set_global_timestamp(self, timestamp):
pass
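# Illustrative sketch, not part of the original module: a minimal pair of
# user-supplied callables matching the contract BaseDataGenerator expects.
# `generator_parameters_fn` returns kwargs for `generator_fn`, which returns
# a 1D trajectory of length `num_points`; names and defaults here are
# hypothetical placeholders.
def example_generator_parameters_fn(drift=0.0, sigma=1.0):
    return {'drift': drift, 'sigma': sigma}


def example_generator_fn(num_points, drift, sigma):
    steps = np.random.normal(loc=drift, scale=sigma, size=num_points)
    return np.cumsum(steps)  # random-walk trajectory, shape: (num_points,)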
class BaseCombinedDataSet:
"""
    Data provider wrapper that combines a synthetic train data stream with a real test data stream.
"""
def __init__(
self,
train_data_config,
test_data_config,
train_class_ref=BaseDataGenerator,
test_class_ref=BTgymDataset2,
name='CombinedDataSet',
**kwargs
):
"""
Args:
            filename: str, test data filename
            parsing_params: dict, test data parsing params
            episode_duration_train: dict, duration of train episode in days/hours/mins
            episode_duration_test: dict, duration of test episode in days/hours/mins
            time_gap: dict, test episode duration tolerance
            start_00: bool, def=False
            generator_fn: callable, should return generated data as 1D np.array
generator_parameters_fn: callable, should return dictionary of generator_fn kwargs
generator_parameters_config: dict, generator_parameters_fn args
timeframe: int, data periodicity in minutes
name: str
data_names: iterable of str
global_time: dict {y, m, d} to set custom global time (here for plotting only)
task: int
log_level: logbook.Logger level
**kwargs: common kwargs
"""
self.name = name
self.log = None
try:
self.task = kwargs['task']
except KeyError:
self.task = None
self.train_data_config = train_data_config
self.test_data_config = test_data_config
self.train_data_config.update(kwargs)
self.test_data_config.update(kwargs)
self.train_data_config['name'] = self.name + '/train'
self.test_data_config['name'] = self.name + '/test'
        # Declare that all test data comes from the target domain:
self.test_data_config['target_period'] = -1
self.test_data_config['test_period'] = -1
self.streams = {
'train': train_class_ref(**self.train_data_config),
'test': test_class_ref(**self.test_data_config),
}
self.sample_instance = None
self.sample_num = 0
self.is_ready = False
# Legacy parameters, left here for BTgym API_shell:
try:
self.parsing_params = kwargs['parsing_params']
except KeyError:
self.parsing_params = dict(
sep=',',
header=0,
index_col=0,
parse_dates=True,
names=['ask', 'bid', 'mid'],
dataname=None,
datetime=0,
nullvalue=0.0,
timeframe=1,
high=1, # 'ask',
low=2, # 'bid',
open=3, # 'mid',
close=3, # 'mid',
volume=-1,
openinterest=-1,
)
try:
self.sampling_params = kwargs['sampling_params']
except KeyError:
self.sampling_params = {}
self.params = {}
self.params.update(self.parsing_params)
self.params.update(self.sampling_params)
self.set_params(self.params)
self.data_names = self.streams['test'].data_names
self.global_timestamp = 0
def set_params(self, params_dict):
"""
Batch attribute setter.
Args:
params_dict: dictionary of parameters to be set as instance attributes.
"""
for key, value in params_dict.items():
setattr(self, key, value)
def set_logger(self, *args, **kwargs):
for stream in self.streams.values():
stream.set_logger(*args, **kwargs)
self.log = self.streams['test'].log
def reset(self, *args, **kwargs):
for stream in self.streams.values():
stream.reset(*args, **kwargs)
self.task = self.streams['test'].task
self.global_timestamp = self.streams['test'].global_timestamp
self.sample_num = 0
self.is_ready = True
def read_csv(self, *args, **kwargs):
for stream in self.streams.values():
stream.read_csv(*args, **kwargs)
    def describe(self, *args, **kwargs):
return self.streams['test'].describe()
def set_global_timestamp(self, *args, **kwargs):
for stream in self.streams.values():
stream.set_global_timestamp(*args, **kwargs)
self.global_timestamp = self.streams['test'].global_timestamp
def to_btfeed(self):
raise NotImplementedError
def sample(self, sample_type=0, **kwargs):
"""
Samples continuous subset of data.
Args:
sample_type (int or bool): 0 (train) or 1 (test) - get sample from train or test data subsets
respectively.
Returns:
            Dataset instance with number of records ~ max_episode_len.
"""
try:
assert sample_type in [0, 1]
except AssertionError:
self.log.exception(
                'Sampling attempt: expected sample type to be in {}, got: {}'.\
format([0, 1], sample_type)
)
raise AssertionError
if sample_type:
self.sample_instance = self.streams['test'].sample(sample_type=sample_type, **kwargs)
self.sample_instance.metadata['generator'] = {}
else:
self.sample_instance = self.streams['train'].sample(sample_type=sample_type, **kwargs)
# Common metadata:
self.sample_instance.metadata['type'] = sample_type
self.sample_instance.metadata['sample_num'] = self.sample_num
self.sample_instance.metadata['parent_sample_num'] = 0
self.sample_instance.metadata['parent_sample_type'] = None
self.sample_num += 1
return self.sample_instance
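# Routing note (illustrative): given a BaseCombinedDataSet instance `ds`,
#   ds.sample(sample_type=0)  -> synthetic episode from streams['train'];
#   ds.sample(sample_type=1)  -> real-data episode from streams['test'].
# Both paths get the same metadata fields set, so downstream consumers need
# not care which stream produced the sample.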
class BasePairDataGenerator(BTgymMultiData):
"""
Generates pair of data streams driven by single 2-level generating process.
    TODO: make the data generating process a single stand-alone function or class method, do not use BaseDataGenerator's
"""
def __init__(
self,
data_names,
process1_config=None, # bias generator
process2_config=None, # spread generator
data_class_ref=BaseDataGenerator,
name='PairDataGenerator',
_top_level=True,
**kwargs
):
        assert len(list(data_names)) == 2, 'Expected `data_names` to be a pair of `str`, got: {}'.format(data_names)
if process1_config is None:
self.process1_config = {
'generator_fn': base_bias_generator_fn,
'generator_parameters_fn': base_generator_parameters_fn,
'generator_parameters_config': None,
}
else:
self.process1_config = process1_config
if process2_config is None:
self.process2_config = {
'generator_fn': base_random_generator_fn,
'generator_parameters_fn': base_generator_parameters_fn,
'generator_parameters_config': None,
}
else:
self.process2_config = process2_config
data_config = {name: {'filename': None, 'config': {}} for name in data_names}
# Let first asset hold p1 generating process:
self.a1_name = data_names[0]
data_config[self.a1_name]['config'].update(self.process1_config)
# Second asset will hold p2 generating process:
self.a2_name = data_names[-1]
data_config[self.a2_name]['config'].update(self.process2_config)
self.nested_kwargs = kwargs
self.get_new_sample = not _top_level
super(BasePairDataGenerator, self).__init__(
data_config=data_config,
data_names=data_names,
data_class_ref=data_class_ref,
name=name,
**kwargs
)
def sample(self, sample_type=0, **kwargs):
if self.get_new_sample:
# Get process1 trajectory:
p1_sample = self.data[self.a1_name].sample(sample_type=sample_type, **kwargs)
# Get p2 trajectory:
p2_sample = self.data[self.a2_name].sample(sample_type=sample_type, **kwargs)
idx_intersected = p1_sample.data.index.intersection(p2_sample.data.index)
self.log.info('p1/p2 shared num. records: {}'.format(len(idx_intersected)))
            # TODO: move this generating process to a stand-alone function
# Combine processes:
data1 = p1_sample.data + 0.5 * p2_sample.data
data2 = p1_sample.data - 0.5 * p2_sample.data
metadata = copy.deepcopy(p2_sample.metadata)
else:
data1 = None
data2 = None
metadata = {}
metadata.update(
            {'type': sample_type, 'sample_num': self.sample_num, 'parent_sample_num': self.sample_num, 'parent_sample_type': sample_type}
)
# Prepare empty instance of multi_stream data:
sample = BasePairDataGenerator(
data_names=self.data_names,
process1_config=self.process1_config,
process2_config=self.process2_config,
data_class_ref=self.data_class_ref,
# task=self.task,
# log_level=self.log_level,
name='sub_' + self.name,
_top_level=False,
**self.nested_kwargs
)
# TODO: maybe add p1 metadata
sample.metadata = copy.deepcopy(metadata)
# Populate sample with data:
sample.data[self.a1_name].data = data1
sample.data[self.a2_name].data = data2
sample.filename = {key: stream.filename for key, stream in self.data.items()}
self.sample_num += 1
return sample
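# Standalone sketch of the combination used in sample() above: with p1 the
# shared bias process and p2 the spread process,
#   asset1 = p1 + 0.5 * p2,   asset2 = p1 - 0.5 * p2,
# so `asset1 - asset2` recovers p2 exactly and `0.5 * (asset1 + asset2)`
# recovers p1:
def _pair_combination_demo(num_points=100):
    p1 = np.cumsum(np.random.normal(size=num_points))    # bias trajectory
    p2 = np.random.normal(scale=0.1, size=num_points)    # spread trajectory
    asset1 = p1 + 0.5 * p2
    asset2 = p1 - 0.5 * p2
    assert np.allclose(asset1 - asset2, p2)
    assert np.allclose(0.5 * (asset1 + asset2), p1)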
class BasePairCombinedDataSet(BaseCombinedDataSet):
"""
    Provides paired streams of simulated train data and real test data.
    Suited for pairs- or spread-trading setups.
"""
def __init__(
self,
assets_filenames,
process1_config=None,
process2_config=None,
train_episode_duration=None,
test_episode_duration=None,
train_class_ref=BasePairDataGenerator,
test_class_ref=BTgymMultiData,
name='PairCombinedDataSet',
**kwargs
):
assert isinstance(assets_filenames, dict),\
'Expected `assets_filenames` type `dict`, got {} '.format(type(assets_filenames))
data_names = [name for name in assets_filenames.keys()]
assert len(data_names) == 2, 'Expected exactly two assets, got: {}'.format(data_names)
train_data_config = dict(
data_names=data_names,
process1_config=process1_config,
process2_config=process2_config,
data_class_ref=BaseDataGenerator,
episode_duration=train_episode_duration,
# name=name,
)
test_data_config = dict(
data_class_ref=BTgymDataset2,
data_config={asset_name: {'filename': file_name} for asset_name, file_name in assets_filenames.items()},
episode_duration=test_episode_duration,
# name=name,
)
super(BasePairCombinedDataSet, self).__init__(
train_data_config=train_data_config,
test_data_config=test_data_config,
train_class_ref=train_class_ref,
test_class_ref=test_class_ref,
name=name,
**kwargs
)
|
lgpl-3.0
|
hcorg/thrift
|
contrib/fb303/py/fb303/FacebookBase.py
|
43
|
2038
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import time
import FacebookService
import thrift.reflection.limited
from ttypes import fb_status
class FacebookBase(FacebookService.Iface):
def __init__(self, name):
self.name = name
self.alive = int(time.time())
self.counters = {}
def getName(self, ):
return self.name
def getVersion(self, ):
return ''
def getStatus(self, ):
return fb_status.ALIVE
def getCounters(self):
return self.counters
def resetCounter(self, key):
self.counters[key] = 0
def getCounter(self, key):
    if key in self.counters:
      return self.counters[key]
return 0
def incrementCounter(self, key):
self.counters[key] = self.getCounter(key) + 1
def setOption(self, key, value):
pass
def getOption(self, key):
return ""
def getOptions(self):
return {}
def aliveSince(self):
return self.alive
def getCpuProfile(self, duration):
return ""
def getLimitedReflection(self):
return thrift.reflection.limited.Service()
def reinitialize(self):
pass
def shutdown(self):
pass
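# Usage sketch (illustrative, not part of fb303): the counter API is a plain
# dict with get/increment/reset semantics.
if __name__ == '__main__':
  service = FacebookBase('example-service')
  service.incrementCounter('requests')
  service.incrementCounter('requests')
  assert service.getCounter('requests') == 2
  service.resetCounter('requests')
  assert service.getCounter('requests') == 0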
|
apache-2.0
|
harshilasu/LinkurApp
|
y/google-cloud-sdk/.install/.backup/lib/apiclient/channel.py
|
120
|
9848
|
"""Channel notifications support.
Classes and functions to support channel subscriptions and notifications
on those channels.
Notes:
- This code is based on experimental APIs and is subject to change.
  - Notification does not do deduplication of notification ids; that's up to
    the receiver.
- Storing the Channel between calls is up to the caller.
Example setting up a channel:
# Create a new channel that gets notifications via webhook.
channel = new_webhook_channel("https://example.com/my_web_hook")
# Store the channel, keyed by 'channel.id'. Store it before calling the
# watch method because notifications may start arriving before the watch
# method returns.
...
resp = service.objects().watchAll(
bucket="some_bucket_id", body=channel.body()).execute()
channel.update(resp)
# Store the channel, keyed by 'channel.id'. Store it after being updated
# since the resource_id value will now be correct, and that's needed to
# stop a subscription.
...
An example Webhook implementation using webapp2. Note that webapp2 puts
headers in a case insensitive dictionary, as headers aren't guaranteed to
always be upper case.
id = self.request.headers[X_GOOG_CHANNEL_ID]
# Retrieve the channel by id.
channel = ...
# Parse notification from the headers, including validating the id.
n = notification_from_headers(channel, self.request.headers)
# Do app specific stuff with the notification here.
if n.resource_state == 'sync':
# Code to handle sync state.
elif n.resource_state == 'exists':
# Code to handle the exists state.
elif n.resource_state == 'not_exists':
# Code to handle the not exists state.
Example of unsubscribing.
service.channels().stop(channel.body())
"""
import datetime
import uuid
from apiclient import errors
from oauth2client import util
# The unix time epoch starts at midnight 1970.
EPOCH = datetime.datetime.utcfromtimestamp(0)
# Map the names of the parameters in the JSON channel description to
# the parameter names we use in the Channel class.
CHANNEL_PARAMS = {
'address': 'address',
'id': 'id',
'expiration': 'expiration',
'params': 'params',
'resourceId': 'resource_id',
'resourceUri': 'resource_uri',
'type': 'type',
'token': 'token',
}
X_GOOG_CHANNEL_ID = 'X-GOOG-CHANNEL-ID'
X_GOOG_MESSAGE_NUMBER = 'X-GOOG-MESSAGE-NUMBER'
X_GOOG_RESOURCE_STATE = 'X-GOOG-RESOURCE-STATE'
X_GOOG_RESOURCE_URI = 'X-GOOG-RESOURCE-URI'
X_GOOG_RESOURCE_ID = 'X-GOOG-RESOURCE-ID'
def _upper_header_keys(headers):
new_headers = {}
for k, v in headers.iteritems():
new_headers[k.upper()] = v
return new_headers
class Notification(object):
"""A Notification from a Channel.
Notifications are not usually constructed directly, but are returned
from functions like notification_from_headers().
Attributes:
message_number: int, The unique id number of this notification.
state: str, The state of the resource being monitored.
uri: str, The address of the resource being monitored.
resource_id: str, The unique identifier of the version of the resource at
this event.
"""
@util.positional(5)
def __init__(self, message_number, state, resource_uri, resource_id):
"""Notification constructor.
Args:
message_number: int, The unique id number of this notification.
state: str, The state of the resource being monitored. Can be one
of "exists", "not_exists", or "sync".
resource_uri: str, The address of the resource being monitored.
resource_id: str, The identifier of the watched resource.
"""
self.message_number = message_number
self.state = state
self.resource_uri = resource_uri
self.resource_id = resource_id
class Channel(object):
"""A Channel for notifications.
Usually not constructed directly, instead it is returned from helper
functions like new_webhook_channel().
Attributes:
type: str, The type of delivery mechanism used by this channel. For
example, 'web_hook'.
id: str, A UUID for the channel.
token: str, An arbitrary string associated with the channel that
is delivered to the target address with each event delivered
over this channel.
address: str, The address of the receiving entity where events are
delivered. Specific to the channel type.
expiration: int, The time, in milliseconds from the epoch, when this
channel will expire.
params: dict, A dictionary of string to string, with additional parameters
controlling delivery channel behavior.
resource_id: str, An opaque id that identifies the resource that is
being watched. Stable across different API versions.
resource_uri: str, The canonicalized ID of the watched resource.
"""
@util.positional(5)
def __init__(self, type, id, token, address, expiration=None,
params=None, resource_id="", resource_uri=""):
"""Create a new Channel.
In user code, this Channel constructor will not typically be called
manually since there are functions for creating channels for each specific
type with a more customized set of arguments to pass.
Args:
type: str, The type of delivery mechanism used by this channel. For
example, 'web_hook'.
id: str, A UUID for the channel.
token: str, An arbitrary string associated with the channel that
is delivered to the target address with each event delivered
over this channel.
address: str, The address of the receiving entity where events are
delivered. Specific to the channel type.
expiration: int, The time, in milliseconds from the epoch, when this
channel will expire.
params: dict, A dictionary of string to string, with additional parameters
controlling delivery channel behavior.
resource_id: str, An opaque id that identifies the resource that is
being watched. Stable across different API versions.
resource_uri: str, The canonicalized ID of the watched resource.
"""
self.type = type
self.id = id
self.token = token
self.address = address
self.expiration = expiration
self.params = params
self.resource_id = resource_id
self.resource_uri = resource_uri
def body(self):
"""Build a body from the Channel.
Constructs a dictionary that's appropriate for passing into watch()
methods as the value of body argument.
Returns:
A dictionary representation of the channel.
"""
result = {
'id': self.id,
'token': self.token,
'type': self.type,
'address': self.address
}
if self.params:
result['params'] = self.params
if self.resource_id:
result['resourceId'] = self.resource_id
if self.resource_uri:
result['resourceUri'] = self.resource_uri
if self.expiration:
result['expiration'] = self.expiration
return result
def update(self, resp):
"""Update a channel with information from the response of watch().
When a request is sent to watch() a resource, the response returned
from the watch() request is a dictionary with updated channel information,
such as the resource_id, which is needed when stopping a subscription.
Args:
resp: dict, The response from a watch() method.
"""
for json_name, param_name in CHANNEL_PARAMS.iteritems():
value = resp.get(json_name)
if value is not None:
setattr(self, param_name, value)
def notification_from_headers(channel, headers):
"""Parse a notification from the webhook request headers, validate
the notification, and return a Notification object.
Args:
channel: Channel, The channel that the notification is associated with.
headers: dict, A dictionary like object that contains the request headers
from the webhook HTTP request.
Returns:
A Notification object.
Raises:
errors.InvalidNotificationError if the notification is invalid.
ValueError if the X-GOOG-MESSAGE-NUMBER can't be converted to an int.
"""
headers = _upper_header_keys(headers)
channel_id = headers[X_GOOG_CHANNEL_ID]
if channel.id != channel_id:
raise errors.InvalidNotificationError(
'Channel id mismatch: %s != %s' % (channel.id, channel_id))
else:
message_number = int(headers[X_GOOG_MESSAGE_NUMBER])
state = headers[X_GOOG_RESOURCE_STATE]
resource_uri = headers[X_GOOG_RESOURCE_URI]
resource_id = headers[X_GOOG_RESOURCE_ID]
return Notification(message_number, state, resource_uri, resource_id)
@util.positional(2)
def new_webhook_channel(url, token=None, expiration=None, params=None):
"""Create a new webhook Channel.
Args:
url: str, URL to post notifications to.
token: str, An arbitrary string associated with the channel that
is delivered to the target address with each notification delivered
over this channel.
expiration: datetime.datetime, A time in the future when the channel
should expire. Can also be None if the subscription should use the
default expiration. Note that different services may have different
limits on how long a subscription lasts. Check the response from the
watch() method to see the value the service has set for an expiration
time.
params: dict, Extra parameters to pass on channel creation. Currently
not used for webhook channels.
"""
expiration_ms = 0
if expiration:
delta = expiration - EPOCH
expiration_ms = delta.microseconds/1000 + (
delta.seconds + delta.days*24*3600)*1000
if expiration_ms < 0:
expiration_ms = 0
return Channel('web_hook', str(uuid.uuid4()),
token, url, expiration=expiration_ms,
params=params)
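# Illustrative sketch, not part of the library: how new_webhook_channel()
# maps a datetime expiration to milliseconds since the epoch, per the code
# above.
if __name__ == '__main__':
  channel = new_webhook_channel(
      'https://example.com/my_web_hook',
      expiration=datetime.datetime(1970, 1, 2))  # one day after EPOCH
  assert channel.body()['expiration'] == 24 * 3600 * 1000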
|
gpl-3.0
|
daavery/audacity
|
lib-src/lv2/lilv/waflib/Tools/gnu_dirs.py
|
329
|
2796
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os
from waflib import Utils,Options,Context
_options=[x.split(', ')for x in'''
bindir, user executables, ${EXEC_PREFIX}/bin
sbindir, system admin executables, ${EXEC_PREFIX}/sbin
libexecdir, program executables, ${EXEC_PREFIX}/libexec
sysconfdir, read-only single-machine data, ${PREFIX}/etc
sharedstatedir, modifiable architecture-independent data, ${PREFIX}/com
localstatedir, modifiable single-machine data, ${PREFIX}/var
libdir, object code libraries, ${EXEC_PREFIX}/lib
includedir, C header files, ${PREFIX}/include
oldincludedir, C header files for non-gcc, /usr/include
datarootdir, read-only arch.-independent data root, ${PREFIX}/share
datadir, read-only architecture-independent data, ${DATAROOTDIR}
infodir, info documentation, ${DATAROOTDIR}/info
localedir, locale-dependent data, ${DATAROOTDIR}/locale
mandir, man documentation, ${DATAROOTDIR}/man
docdir, documentation root, ${DATAROOTDIR}/doc/${PACKAGE}
htmldir, html documentation, ${DOCDIR}
dvidir, dvi documentation, ${DOCDIR}
pdfdir, pdf documentation, ${DOCDIR}
psdir, ps documentation, ${DOCDIR}
'''.split('\n')if x]
def configure(conf):
def get_param(varname,default):
return getattr(Options.options,varname,'')or default
env=conf.env
env.LIBDIR=env.BINDIR=[]
env.EXEC_PREFIX=get_param('EXEC_PREFIX',env.PREFIX)
env.PACKAGE=getattr(Context.g_module,'APPNAME',None)or env.PACKAGE
complete=False
iter=0
while not complete and iter<len(_options)+1:
iter+=1
complete=True
for name,help,default in _options:
name=name.upper()
if not env[name]:
try:
env[name]=Utils.subst_vars(get_param(name,default).replace('/',os.sep),env)
except TypeError:
complete=False
if not complete:
lst=[name for name,_,_ in _options if not env[name.upper()]]
raise conf.errors.WafError('Variable substitution failure %r'%lst)
def options(opt):
inst_dir=opt.add_option_group('Installation directories','By default, "waf install" will put the files in\
"/usr/local/bin", "/usr/local/lib" etc. An installation prefix other\
than "/usr/local" can be given using "--prefix", for example "--prefix=$HOME"')
for k in('--prefix','--destdir'):
option=opt.parser.get_option(k)
if option:
opt.parser.remove_option(k)
inst_dir.add_option(option)
inst_dir.add_option('--exec-prefix',help='installation prefix [Default: ${PREFIX}]',default='',dest='EXEC_PREFIX')
dirs_options=opt.add_option_group('Pre-defined installation directories','')
for name,help,default in _options:
option_name='--'+name
str_default=default
str_help='%s [Default: %s]'%(help,str_default)
dirs_options.add_option(option_name,help=str_help,default='',dest=name.upper())
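# Illustrative sketch (not waf code): configure() above resolves ${VAR}
# references iteratively because a directory default may point at another
# directory that has not been expanded yet. A minimal standalone version of
# that fixed-point substitution:
def _subst_demo():
	import re
	env={'PREFIX':'/usr/local'}
	pending={'EXEC_PREFIX':'${PREFIX}','BINDIR':'${EXEC_PREFIX}/bin','DATAROOTDIR':'${PREFIX}/share','MANDIR':'${DATAROOTDIR}/man'}
	while pending:
		progressed=False
		for name,value in list(pending.items()):
			expanded=re.sub(r'\$\{(\w+)\}',lambda m:env.get(m.group(1),m.group(0)),value)
			if '${' not in expanded:
				env[name]=expanded
				del pending[name]
				progressed=True
		if not progressed:
			raise ValueError('Variable substitution failure %r'%list(pending))
	return env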
|
gpl-2.0
|
zhangwenyan/three.js
|
utils/exporters/blender/addons/io_three/exporter/api/mesh.py
|
124
|
23228
|
"""
Blender API for querying mesh data. Animation data is also
handled here since Three.js associates the animation (skeletal,
morph targets) with the geometry nodes.
"""
import operator
from bpy import data, types, context
from . import material, texture, animation
from . import object as object_
from .. import constants, utilities, logger, exceptions
def _mesh(func):
"""
:param func:
"""
def inner(name, *args, **kwargs):
"""
:param name:
:param *args:
:param **kwargs:
"""
if isinstance(name, types.Mesh):
mesh = name
else:
mesh = data.meshes[name]
return func(mesh, *args, **kwargs)
return inner
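# Note (illustrative): the decorator above lets every exporter function accept
# either a mesh name or a bpy Mesh object, so `vertices('Cube')` and
# `vertices(data.meshes['Cube'])` resolve to the same call.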
@_mesh
def skeletal_animation(mesh, options):
"""
:param mesh:
:param options:
:rtype: []
"""
logger.debug("mesh.animation(%s, %s)", mesh, options)
armature = _armature(mesh)
if not armature:
logger.warning("No armature found (%s)", mesh)
return []
anim_type = options.get(constants.ANIMATION)
# pose_position = armature.data.pose_position
dispatch = {
constants.POSE: animation.pose_animation,
constants.REST: animation.rest_animation
}
func = dispatch[anim_type]
# armature.data.pose_position = anim_type.upper()
animations = func(armature, options)
# armature.data.pose_position = pose_position
return animations
@_mesh
def bones(mesh, options):
"""
:param mesh:
:param options:
:rtype: [], {}
"""
logger.debug("mesh.bones(%s)", mesh)
armature = _armature(mesh)
if not armature:
return [], {}
anim_type = options.get(constants.ANIMATION)
# pose_position = armature.data.pose_position
if anim_type == constants.OFF:
logger.info("Animation type not set, defaulting "
"to using REST position for the armature.")
func = _rest_bones
# armature.data.pose_position = "REST"
else:
dispatch = {
constants.REST: _rest_bones,
constants.POSE: _pose_bones
}
logger.info("Using %s for the armature", anim_type)
func = dispatch[anim_type]
# armature.data.pose_position = anim_type.upper()
bones_, bone_map = func(armature)
# armature.data.pose_position = pose_position
return (bones_, bone_map)
@_mesh
def buffer_normal(mesh):
"""
:param mesh:
:rtype: []
"""
normals_ = []
for face in mesh.tessfaces:
vert_count = len(face.vertices)
        if vert_count != 3:
msg = "Non-triangulated face detected"
raise exceptions.BufferGeometryError(msg)
for vertex_index in face.vertices:
normal = mesh.vertices[vertex_index].normal
vector = (normal.x, normal.y, normal.z)
normals_.extend(vector)
return normals_
@_mesh
def buffer_position(mesh):
"""
:param mesh:
:rtype: []
"""
position = []
for face in mesh.tessfaces:
vert_count = len(face.vertices)
        if vert_count != 3:
msg = "Non-triangulated face detected"
raise exceptions.BufferGeometryError(msg)
for vertex_index in face.vertices:
vertex = mesh.vertices[vertex_index]
vector = (vertex.co.x, vertex.co.y, vertex.co.z)
position.extend(vector)
return position
@_mesh
def buffer_uv(mesh):
"""
:param mesh:
:rtype: []
"""
uvs_ = []
    if len(mesh.uv_layers) == 0:
return uvs_
elif len(mesh.uv_layers) > 1:
# if memory serves me correctly buffer geometry
# only uses one UV layer
logger.warning("%s has more than 1 UV layer", mesh.name)
for uv_data in mesh.uv_layers[0].data:
uv_tuple = (uv_data.uv[0], uv_data.uv[1])
uvs_.extend(uv_tuple)
return uvs_
@_mesh
def faces(mesh, options):
"""
:param mesh:
:param options:
"""
logger.debug("mesh.faces(%s, %s)", mesh, options)
vertex_uv = len(mesh.uv_textures) > 0
has_colors = len(mesh.vertex_colors) > 0
logger.info("Has UVs = %s", vertex_uv)
logger.info("Has vertex colours = %s", has_colors)
opt_colours = options[constants.COLORS] and has_colors
opt_uvs = options[constants.UVS] and vertex_uv
opt_materials = options.get(constants.FACE_MATERIALS)
opt_normals = options[constants.NORMALS]
logger.debug("Vertex colours enabled = %s", opt_colours)
logger.debug("UVS enabled = %s", opt_uvs)
logger.debug("Materials enabled = %s", opt_materials)
logger.debug("Normals enabled = %s", opt_normals)
uv_layers = _uvs(mesh) if opt_uvs else None
vertex_normals = _normals(mesh) if opt_normals else None
vertex_colours = vertex_colors(mesh) if opt_colours else None
faces_data = []
colour_indices = {}
if vertex_colours:
logger.debug("Indexing colours")
for index, colour in enumerate(vertex_colours):
colour_indices[str(colour)] = index
normal_indices = {}
if vertex_normals:
logger.debug("Indexing normals")
for index, normal in enumerate(vertex_normals):
normal_indices[str(normal)] = index
logger.info("Parsing %d faces", len(mesh.tessfaces))
for face in mesh.tessfaces:
vert_count = len(face.vertices)
if vert_count not in (3, 4):
logger.error("%d vertices for face %d detected",
vert_count,
face.index)
raise exceptions.NGonError("ngons are not supported")
mat_index = face.material_index is not None and opt_materials
mask = {
            constants.QUAD: vert_count == 4,
constants.MATERIALS: mat_index,
constants.UVS: False,
constants.NORMALS: False,
constants.COLORS: False
}
face_data = []
face_data.extend([v for v in face.vertices])
if mask[constants.MATERIALS]:
face_data.append(face.material_index)
# @TODO: this needs the same optimization as what
# was done for colours and normals
if uv_layers:
for index, uv_layer in enumerate(uv_layers):
layer = mesh.tessface_uv_textures[index]
for uv_data in layer.data[face.index].uv:
uv_tuple = (uv_data[0], uv_data[1])
face_data.append(uv_layer.index(uv_tuple))
mask[constants.UVS] = True
if vertex_normals:
for vertex in face.vertices:
normal = mesh.vertices[vertex].normal
normal = (normal.x, normal.y, normal.z)
face_data.append(normal_indices[str(normal)])
mask[constants.NORMALS] = True
if vertex_colours:
colours = mesh.tessface_vertex_colors.active.data[face.index]
for each in (colours.color1, colours.color2, colours.color3):
each = utilities.rgb2int(each)
face_data.append(colour_indices[str(each)])
mask[constants.COLORS] = True
if mask[constants.QUAD]:
colour = utilities.rgb2int(colours.color4)
face_data.append(colour_indices[str(colour)])
face_data.insert(0, utilities.bit_mask(mask))
faces_data.extend(face_data)
return faces_data
@_mesh
def morph_targets(mesh, options):
"""
:param mesh:
:param options:
"""
logger.debug("mesh.morph_targets(%s, %s)", mesh, options)
obj = object_.objects_using_mesh(mesh)[0]
original_frame = context.scene.frame_current
frame_step = options.get(constants.FRAME_STEP, 1)
scene_frames = range(context.scene.frame_start,
context.scene.frame_end+1,
frame_step)
morphs = []
for frame in scene_frames:
logger.info("Processing data at frame %d", frame)
context.scene.frame_set(frame, 0.0)
morphs.append([])
vertices_ = object_.extract_mesh(obj, options).vertices[:]
for vertex in vertices_:
morphs[-1].extend([vertex.co.x, vertex.co.y, vertex.co.z])
context.scene.frame_set(original_frame, 0.0)
morphs_detected = False
for index, each in enumerate(morphs):
        if index == 0:
continue
morphs_detected = morphs[index-1] != each
if morphs_detected:
logger.info("Valid morph target data detected")
break
else:
logger.info("No valid morph data detected")
return []
manifest = []
for index, morph in enumerate(morphs):
manifest.append({
constants.NAME: 'animation_%06d' % index,
constants.VERTICES: morph
})
return manifest
@_mesh
def materials(mesh, options):
"""
:param mesh:
:param options:
"""
logger.debug("mesh.materials(%s, %s)", mesh, options)
indices = set([face.material_index for face in mesh.tessfaces])
material_sets = [(mesh.materials[index], index) for index in indices]
materials_ = []
maps = options.get(constants.MAPS)
mix = options.get(constants.MIX_COLORS)
use_colors = options.get(constants.COLORS)
logger.info("Colour mix is set to %s", mix)
logger.info("Vertex colours set to %s", use_colors)
for mat, index in material_sets:
try:
dbg_color = constants.DBG_COLORS[index]
except IndexError:
dbg_color = constants.DBG_COLORS[0]
logger.info("Compiling attributes for %s", mat.name)
attributes = {
constants.COLOR_AMBIENT: material.ambient_color(mat),
constants.COLOR_EMISSIVE: material.emissive_color(mat),
constants.SHADING: material.shading(mat),
constants.OPACITY: material.opacity(mat),
constants.TRANSPARENT: material.transparent(mat),
constants.VISIBLE: material.visible(mat),
constants.WIREFRAME: material.wireframe(mat),
constants.BLENDING: material.blending(mat),
constants.DEPTH_TEST: material.depth_test(mat),
constants.DEPTH_WRITE: material.depth_write(mat),
constants.DBG_NAME: mat.name,
constants.DBG_COLOR: dbg_color,
constants.DBG_INDEX: index
}
if use_colors:
colors = material.use_vertex_colors(mat)
attributes[constants.VERTEX_COLORS] = colors
if (use_colors and mix) or (not use_colors):
colors = material.diffuse_color(mat)
attributes[constants.COLOR_DIFFUSE] = colors
if attributes[constants.SHADING] == constants.PHONG:
logger.info("Adding specular attributes")
attributes.update({
constants.SPECULAR_COEF: material.specular_coef(mat),
constants.COLOR_SPECULAR: material.specular_color(mat)
})
if mesh.show_double_sided:
logger.info("Double sided is on")
attributes[constants.DOUBLE_SIDED] = True
materials_.append(attributes)
if not maps:
continue
diffuse = _diffuse_map(mat)
if diffuse:
logger.info("Diffuse map found")
attributes.update(diffuse)
light = _light_map(mat)
if light:
logger.info("Light map found")
attributes.update(light)
specular = _specular_map(mat)
if specular:
logger.info("Specular map found")
attributes.update(specular)
if attributes[constants.SHADING] == constants.PHONG:
normal = _normal_map(mat)
if normal:
logger.info("Normal map found")
attributes.update(normal)
bump = _bump_map(mat)
if bump:
logger.info("Bump map found")
attributes.update(bump)
return materials_
@_mesh
def normals(mesh):
"""
:param mesh:
:rtype: []
"""
logger.debug("mesh.normals(%s)", mesh)
normal_vectors = []
for vector in _normals(mesh):
normal_vectors.extend(vector)
return normal_vectors
@_mesh
def skin_weights(mesh, bone_map, influences):
"""
:param mesh:
:param bone_map:
:param influences:
"""
logger.debug("mesh.skin_weights(%s)", mesh)
return _skinning_data(mesh, bone_map, influences, 1)
@_mesh
def skin_indices(mesh, bone_map, influences):
"""
:param mesh:
:param bone_map:
:param influences:
"""
logger.debug("mesh.skin_indices(%s)", mesh)
return _skinning_data(mesh, bone_map, influences, 0)
@_mesh
def texture_registration(mesh):
"""
:param mesh:
"""
logger.debug("mesh.texture_registration(%s)", mesh)
materials_ = mesh.materials or []
registration = {}
funcs = (
(constants.MAP_DIFFUSE, material.diffuse_map),
(constants.SPECULAR_MAP, material.specular_map),
(constants.LIGHT_MAP, material.light_map),
(constants.BUMP_MAP, material.bump_map),
(constants.NORMAL_MAP, material.normal_map)
)
def _registration(file_path, file_name):
"""
:param file_path:
:param file_name:
"""
return {
'file_path': file_path,
'file_name': file_name,
'maps': []
}
logger.info("found %d materials", len(materials_))
for mat in materials_:
for (key, func) in funcs:
tex = func(mat)
if tex is None:
continue
logger.info("%s has texture %s", key, tex.name)
file_path = texture.file_path(tex)
file_name = texture.file_name(tex)
reg = registration.setdefault(
utilities.hash(file_path),
_registration(file_path, file_name))
reg["maps"].append(key)
return registration
@_mesh
def uvs(mesh):
"""
:param mesh:
:rtype: []
"""
logger.debug("mesh.uvs(%s)", mesh)
uvs_ = []
for layer in _uvs(mesh):
uvs_.append([])
logger.info("Parsing UV layer %d", len(uvs_))
for pair in layer:
uvs_[-1].extend(pair)
return uvs_
@_mesh
def vertex_colors(mesh):
"""
:param mesh:
"""
logger.debug("mesh.vertex_colors(%s)", mesh)
vertex_colours = []
try:
vertex_colour = mesh.tessface_vertex_colors.active.data
except AttributeError:
logger.info("No vertex colours found")
return
for face in mesh.tessfaces:
colours = (vertex_colour[face.index].color1,
vertex_colour[face.index].color2,
vertex_colour[face.index].color3,
vertex_colour[face.index].color4)
for colour in colours:
colour = utilities.rgb2int((colour.r, colour.g, colour.b))
if colour not in vertex_colours:
vertex_colours.append(colour)
return vertex_colours
@_mesh
def vertices(mesh):
"""
:param mesh:
:rtype: []
"""
logger.debug("mesh.vertices(%s)", mesh)
vertices_ = []
for vertex in mesh.vertices:
vertices_.extend((vertex.co.x, vertex.co.y, vertex.co.z))
return vertices_
def _normal_map(mat):
"""
:param mat:
"""
tex = material.normal_map(mat)
if tex is None:
return
logger.info("Found normal texture map %s", tex.name)
normal = {
constants.MAP_NORMAL:
texture.file_name(tex),
constants.MAP_NORMAL_FACTOR:
material.normal_scale(mat),
constants.MAP_NORMAL_ANISOTROPY:
texture.anisotropy(tex),
constants.MAP_NORMAL_WRAP: texture.wrap(tex),
constants.MAP_NORMAL_REPEAT: texture.repeat(tex)
}
return normal
def _bump_map(mat):
"""
:param mat:
"""
tex = material.bump_map(mat)
if tex is None:
return
logger.info("Found bump texture map %s", tex.name)
bump = {
constants.MAP_BUMP:
texture.file_name(tex),
constants.MAP_BUMP_ANISOTROPY:
texture.anisotropy(tex),
constants.MAP_BUMP_WRAP: texture.wrap(tex),
constants.MAP_BUMP_REPEAT: texture.repeat(tex),
constants.MAP_BUMP_SCALE:
material.bump_scale(mat),
}
return bump
def _specular_map(mat):
"""
:param mat:
"""
tex = material.specular_map(mat)
if tex is None:
return
logger.info("Found specular texture map %s", tex.name)
specular = {
constants.MAP_SPECULAR:
texture.file_name(tex),
constants.MAP_SPECULAR_ANISOTROPY:
texture.anisotropy(tex),
constants.MAP_SPECULAR_WRAP: texture.wrap(tex),
constants.MAP_SPECULAR_REPEAT: texture.repeat(tex)
}
return specular
def _light_map(mat):
"""
:param mat:
"""
tex = material.light_map(mat)
if tex is None:
return
logger.info("Found light texture map %s", tex.name)
light = {
constants.MAP_LIGHT:
texture.file_name(tex),
constants.MAP_LIGHT_ANISOTROPY:
texture.anisotropy(tex),
constants.MAP_LIGHT_WRAP: texture.wrap(tex),
constants.MAP_LIGHT_REPEAT: texture.repeat(tex)
}
return light
def _diffuse_map(mat):
"""
:param mat:
"""
tex = material.diffuse_map(mat)
if tex is None:
return
logger.info("Found diffuse texture map %s", tex.name)
diffuse = {
constants.MAP_DIFFUSE:
texture.file_name(tex),
constants.MAP_DIFFUSE_ANISOTROPY:
texture.anisotropy(tex),
constants.MAP_DIFFUSE_WRAP: texture.wrap(tex),
constants.MAP_DIFFUSE_REPEAT: texture.repeat(tex)
}
return diffuse
def _normals(mesh):
"""
:param mesh:
:rtype: []
"""
vectors = []
vectors_ = {}
for face in mesh.tessfaces:
for vertex_index in face.vertices:
normal = mesh.vertices[vertex_index].normal
vector = (normal.x, normal.y, normal.z)
str_vec = str(vector)
try:
vectors_[str_vec]
except KeyError:
vectors.append(vector)
vectors_[str_vec] = True
return vectors
def _uvs(mesh):
"""
:param mesh:
"""
uv_layers = []
for layer in mesh.uv_layers:
uv_layers.append([])
for uv_data in layer.data:
uv_tuple = (uv_data.uv[0], uv_data.uv[1])
if uv_tuple not in uv_layers[-1]:
uv_layers[-1].append(uv_tuple)
return uv_layers
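# Standalone sketch (illustrative): _normals() and _uvs() above both build an
# order-preserving list of unique values; faces() then stores indices into
# that list instead of repeating raw tuples. The generic pattern:
def _unique_preserve_order(items):
    """
    :param items: iterable of hashable values
    :rtype: []
    """
    seen = set()
    out = []
    for item in items:
        if item not in seen:
            seen.add(item)
            out.append(item)
    return out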
def _armature(mesh):
"""
:param mesh:
"""
obj = object_.objects_using_mesh(mesh)[0]
armature = obj.find_armature()
if armature:
logger.info("Found armature %s for %s", armature.name, obj.name)
else:
logger.info("Found no armature for %s", obj.name)
return armature
def _skinning_data(mesh, bone_map, influences, array_index):
"""
:param mesh:
:param bone_map:
:param influences:
:param array_index:
"""
armature = _armature(mesh)
manifest = []
if not armature:
return manifest
obj = object_.objects_using_mesh(mesh)[0]
logger.debug("Skinned object found %s", obj.name)
for vertex in mesh.vertices:
bone_array = []
for group in vertex.groups:
bone_array.append((group.group, group.weight))
bone_array.sort(key=operator.itemgetter(1), reverse=True)
for index in range(influences):
if index >= len(bone_array):
manifest.append(0)
continue
name = obj.vertex_groups[bone_array[index][0]].name
for bone_index, bone in enumerate(armature.pose.bones):
if bone.name != name:
continue
                if array_index == 0:
entry = bone_map.get(bone_index, -1)
else:
entry = bone_array[index][1]
manifest.append(entry)
break
else:
manifest.append(0)
return manifest
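# Standalone sketch (illustrative): the per-vertex loop in _skinning_data()
# keeps the `influences` strongest (group, weight) pairs, padding with zeros
# when a vertex belongs to fewer groups than requested:
def _top_weights_demo(groups, influences=2):
    """
    :param groups: [(group_index, weight), ...]
    :param influences: int, number of influences to keep
    :rtype: []
    """
    ordered = sorted(groups, key=operator.itemgetter(1), reverse=True)
    weights = [weight for _, weight in ordered[:influences]]
    return weights + [0] * (influences - len(weights))
# e.g. _top_weights_demo([(0, 0.2), (3, 0.7), (1, 0.1)]) == [0.7, 0.2]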
def _pose_bones(armature):
"""
:param armature:
:rtype: [], {}
"""
bones_ = []
bone_map = {}
bone_count = 0
armature_matrix = armature.matrix_world
for bone_count, pose_bone in enumerate(armature.pose.bones):
armature_bone = pose_bone.bone
bone_index = None
if armature_bone.parent is None:
bone_matrix = armature_matrix * armature_bone.matrix_local
bone_index = -1
else:
parent_bone = armature_bone.parent
parent_matrix = armature_matrix * parent_bone.matrix_local
bone_matrix = armature_matrix * armature_bone.matrix_local
bone_matrix = parent_matrix.inverted() * bone_matrix
bone_index = index = 0
for pose_parent in armature.pose.bones:
armature_parent = pose_parent.bone.name
if armature_parent == parent_bone.name:
bone_index = index
index += 1
bone_map[bone_count] = bone_count
pos, rot, scl = bone_matrix.decompose()
bones_.append({
constants.PARENT: bone_index,
constants.NAME: armature_bone.name,
constants.POS: (pos.x, pos.z, -pos.y),
constants.ROTQ: (rot.x, rot.z, -rot.y, rot.w),
constants.SCL: (scl.x, scl.z, scl.y)
})
return bones_, bone_map
def _rest_bones(armature):
"""
:param armature:
:rtype: [], {}
"""
bones_ = []
bone_map = {}
bone_count = 0
bone_index_rel = 0
for bone in armature.data.bones:
logger.info("Parsing bone %s", bone.name)
if not bone.use_deform:
logger.debug("Ignoring bone %s at: %d",
bone.name, bone_index_rel)
continue
if bone.parent is None:
bone_pos = bone.head_local
bone_index = -1
else:
bone_pos = bone.head_local - bone.parent.head_local
bone_index = 0
index = 0
for parent in armature.data.bones:
if parent.name == bone.parent.name:
bone_index = bone_map.get(index)
index += 1
bone_world_pos = armature.matrix_world * bone_pos
x_axis = bone_world_pos.x
y_axis = bone_world_pos.z
z_axis = -bone_world_pos.y
logger.debug("Adding bone %s at: %s, %s",
bone.name, bone_index, bone_index_rel)
bone_map[bone_count] = bone_index_rel
bone_index_rel += 1
# @TODO: the rotq probably should not have these
# hard coded values
bones_.append({
constants.PARENT: bone_index,
constants.NAME: bone.name,
constants.POS: (x_axis, y_axis, z_axis),
constants.ROTQ: (0, 0, 0, 1)
})
bone_count += 1
return (bones_, bone_map)
|
mit
|
ad-m/django-avatar
|
avatar/templatetags/avatar_tags.py
|
4
|
3479
|
from django import template
try:
from django.urls import reverse
except ImportError:
# For Django < 1.10
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
# Issue 182: six no longer included with Django 3.0
try:
from django.utils import six
except ImportError:
import six
from django.utils.translation import ugettext as _
from django.utils.module_loading import import_string
from avatar.conf import settings
from avatar.models import Avatar
from avatar.utils import (
cache_result,
get_default_avatar_url,
get_user_model,
get_user,
)
register = template.Library()
@cache_result()
@register.simple_tag
def avatar_url(user, size=settings.AVATAR_DEFAULT_SIZE):
for provider_path in settings.AVATAR_PROVIDERS:
provider = import_string(provider_path)
avatar_url = provider.get_avatar_url(user, size)
if avatar_url:
return avatar_url
@cache_result()
@register.simple_tag
def avatar(user, size=settings.AVATAR_DEFAULT_SIZE, **kwargs):
if not isinstance(user, get_user_model()):
try:
user = get_user(user)
alt = six.text_type(user)
url = avatar_url(user, size)
except get_user_model().DoesNotExist:
url = get_default_avatar_url()
alt = _("Default Avatar")
else:
alt = six.text_type(user)
url = avatar_url(user, size)
kwargs.update({'alt': alt})
context = {
'user': user,
'url': url,
'size': size,
'kwargs': kwargs,
}
return render_to_string('avatar/avatar_tag.html', context)
@register.filter
def has_avatar(user):
if not isinstance(user, get_user_model()):
return False
return Avatar.objects.filter(user=user, primary=True).exists()
@cache_result()
@register.simple_tag
def primary_avatar(user, size=settings.AVATAR_DEFAULT_SIZE):
"""
This tag tries to get the default avatar for a user without doing any db
    requests. It achieves this by linking to a special view that will do all the
work for us. If that special view is then cached by a CDN for instance,
we will avoid many db calls.
"""
alt = six.text_type(user)
url = reverse('avatar_render_primary', kwargs={'user': user, 'size': size})
return ("""<img src="%s" alt="%s" width="%s" height="%s" />""" %
(url, alt, size, size))
@cache_result()
@register.simple_tag
def render_avatar(avatar, size=settings.AVATAR_DEFAULT_SIZE):
if not avatar.thumbnail_exists(size):
avatar.create_thumbnail(size)
return """<img src="%s" alt="%s" width="%s" height="%s" />""" % (
avatar.avatar_url(size), six.text_type(avatar), size, size)
@register.tag
def primary_avatar_object(parser, token):
split = token.split_contents()
if len(split) == 4:
return UsersAvatarObjectNode(split[1], split[3])
raise template.TemplateSyntaxError('%r tag takes three arguments.' %
split[0])
class UsersAvatarObjectNode(template.Node):
def __init__(self, user, key):
self.user = template.Variable(user)
self.key = key
def render(self, context):
user = self.user.resolve(context)
key = self.key
avatar = Avatar.objects.filter(user=user, primary=True)
if avatar:
context[key] = avatar[0]
else:
context[key] = None
return six.text_type()
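# Template usage sketch (illustrative): after `{% load avatar_tags %}`,
#   {% avatar user 80 %}                  renders avatar/avatar_tag.html
#   {% avatar_url user 80 %}              emits the bare URL from the first
#                                         provider that returns one
#   {% primary_avatar_object user as a %} stores the user's primary Avatar
#                                         (or None) in context variable `a`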
|
bsd-3-clause
|
40023256/2015cd_midterm-
|
static/Brython3.1.1-20150328-091302/Lib/binascii.py
|
620
|
24585
|
"""A pure Python implementation of binascii.
Rather slow and buggy in corner cases.
PyPy provides an RPython version too.
"""
# borrowed from https://bitbucket.org/pypy/pypy/src/f2bf94943a41/lib_pypy/binascii.py
class Error(Exception):
pass
class Done(Exception):
pass
class Incomplete(Exception):
pass
def a2b_uu(s):
if not s:
return ''
length = (ord(s[0]) - 0x20) % 64
def quadruplets_gen(s):
while s:
try:
yield ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3])
except IndexError:
                s += '   '  # pad the short tail so the final quadruplet completes
yield ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3])
return
s = s[4:]
try:
result = [''.join(
[chr((A - 0x20) << 2 | (((B - 0x20) >> 4) & 0x3)),
chr(((B - 0x20) & 0xf) << 4 | (((C - 0x20) >> 2) & 0xf)),
chr(((C - 0x20) & 0x3) << 6 | ((D - 0x20) & 0x3f))
]) for A, B, C, D in quadruplets_gen(s[1:].rstrip())]
except ValueError:
raise Error('Illegal char')
result = ''.join(result)
trailingdata = result[length:]
if trailingdata.strip('\x00'):
raise Error('Trailing garbage')
result = result[:length]
if len(result) < length:
result += ((length - len(result)) * '\x00')
return bytes(result, __BRYTHON__.charset)
def b2a_uu(s):
length = len(s)
if length > 45:
raise Error('At most 45 bytes at once')
def triples_gen(s):
while s:
try:
yield ord(s[0]), ord(s[1]), ord(s[2])
except IndexError:
s += '\0\0'
yield ord(s[0]), ord(s[1]), ord(s[2])
return
s = s[3:]
result = [''.join(
[chr(0x20 + (( A >> 2 ) & 0x3F)),
chr(0x20 + (((A << 4) | ((B >> 4) & 0xF)) & 0x3F)),
chr(0x20 + (((B << 2) | ((C >> 6) & 0x3)) & 0x3F)),
chr(0x20 + (( C ) & 0x3F))])
for A, B, C in triples_gen(s)]
return chr(ord(' ') + (length & 0o77)) + ''.join(result) + '\n'
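# Sketch: uuencoding packs 3 bytes into 4 printable characters offset from
# 0x20 and encodes the payload length in the first character of the line,
# e.g. chr(0x20 + 3) == '#' for a 3-byte payload:
if __name__ == '__main__':
    assert b2a_uu('Cat') == '#0V%T\n'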
table_a2b_base64 = {
'A': 0,
'B': 1,
'C': 2,
'D': 3,
'E': 4,
'F': 5,
'G': 6,
'H': 7,
'I': 8,
'J': 9,
'K': 10,
'L': 11,
'M': 12,
'N': 13,
'O': 14,
'P': 15,
'Q': 16,
'R': 17,
'S': 18,
'T': 19,
'U': 20,
'V': 21,
'W': 22,
'X': 23,
'Y': 24,
'Z': 25,
'a': 26,
'b': 27,
'c': 28,
'd': 29,
'e': 30,
'f': 31,
'g': 32,
'h': 33,
'i': 34,
'j': 35,
'k': 36,
'l': 37,
'm': 38,
'n': 39,
'o': 40,
'p': 41,
'q': 42,
'r': 43,
's': 44,
't': 45,
'u': 46,
'v': 47,
'w': 48,
'x': 49,
'y': 50,
'z': 51,
'0': 52,
'1': 53,
'2': 54,
'3': 55,
'4': 56,
'5': 57,
'6': 58,
'7': 59,
'8': 60,
'9': 61,
'+': 62,
'/': 63,
'=': 0,
}
def a2b_base64(s):
if not isinstance(s, (str, bytes)):
raise TypeError("expected string, got %r" % (s,))
s = s.rstrip()
# clean out all invalid characters, this also strips the final '=' padding
# check for correct padding
def next_valid_char(s, pos):
for i in range(pos + 1, len(s)):
c = s[i]
if c < '\x7f':
try:
table_a2b_base64[c]
return c
except KeyError:
pass
return None
quad_pos = 0
leftbits = 0
leftchar = 0
res = []
for i, c in enumerate(s):
if isinstance(c, int):
c = chr(c)
if c > '\x7f' or c == '\n' or c == '\r' or c == ' ':
continue
if c == '=':
if quad_pos < 2 or (quad_pos == 2 and next_valid_char(s, i) != '='):
continue
else:
leftbits = 0
break
try:
next_c = table_a2b_base64[c]
except KeyError:
continue
quad_pos = (quad_pos + 1) & 0x03
leftchar = (leftchar << 6) | next_c
leftbits += 6
if leftbits >= 8:
leftbits -= 8
res.append((leftchar >> leftbits & 0xff))
leftchar &= ((1 << leftbits) - 1)
if leftbits != 0:
raise Error('Incorrect padding')
return bytes(''.join([chr(i) for i in res]),__BRYTHON__.charset)
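# Note (illustrative): the '=' handling above only treats padding as a
# terminator at quad positions 2 and 3; at position 2 it additionally requires
# the next valid character to be another '=', otherwise the stray '=' is
# skipped as noise.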
table_b2a_base64 = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"\
"0123456789+/"
def b2a_base64(s):
length = len(s)
final_length = length % 3
def triples_gen(s):
while s:
try:
yield s[0], s[1], s[2]
except IndexError:
s += b'\0\0'
yield s[0], s[1], s[2]
return
s = s[3:]
a = triples_gen(s[ :length - final_length])
result = [''.join(
[table_b2a_base64[( A >> 2 ) & 0x3F],
table_b2a_base64[((A << 4) | ((B >> 4) & 0xF)) & 0x3F],
table_b2a_base64[((B << 2) | ((C >> 6) & 0x3)) & 0x3F],
table_b2a_base64[( C ) & 0x3F]])
for A, B, C in a]
final = s[length - final_length:]
if final_length == 0:
snippet = ''
elif final_length == 1:
a = ord(final[0])
snippet = table_b2a_base64[(a >> 2 ) & 0x3F] + \
table_b2a_base64[(a << 4 ) & 0x3F] + '=='
else:
a = ord(final[0])
b = ord(final[1])
snippet = table_b2a_base64[(a >> 2) & 0x3F] + \
table_b2a_base64[((a << 4) | (b >> 4) & 0xF) & 0x3F] + \
table_b2a_base64[(b << 2) & 0x3F] + '='
return bytes(''.join(result) + snippet + '\n',__BRYTHON__.charset)
def a2b_qp(s, header=False):
inp = 0
odata = []
while inp < len(s):
if s[inp] == '=':
inp += 1
if inp >= len(s):
break
# Soft line breaks
if (s[inp] == '\n') or (s[inp] == '\r'):
if s[inp] != '\n':
while inp < len(s) and s[inp] != '\n':
inp += 1
if inp < len(s):
inp += 1
elif s[inp] == '=':
# broken case from broken python qp
odata.append('=')
inp += 1
elif s[inp] in hex_numbers and s[inp + 1] in hex_numbers:
ch = chr(int(s[inp:inp+2], 16))
inp += 2
odata.append(ch)
else:
odata.append('=')
elif header and s[inp] == '_':
odata.append(' ')
inp += 1
else:
odata.append(s[inp])
inp += 1
return bytes(''.join(odata), __BRYTHON__.charset)
def b2a_qp(data, quotetabs=False, istext=True, header=False):
"""quotetabs=True means that tab and space characters are always
quoted.
istext=False means that \r and \n are treated as regular characters
header=True encodes space characters with '_' and requires
real '_' characters to be quoted.
"""
MAXLINESIZE = 76
# See if this string is using CRLF line ends
lf = data.find('\n')
crlf = lf > 0 and data[lf-1] == '\r'
inp = 0
linelen = 0
odata = []
while inp < len(data):
c = data[inp]
if (c > '~' or
c == '=' or
(header and c == '_') or
(c == '.' and linelen == 0 and (inp+1 == len(data) or
data[inp+1] == '\n' or
data[inp+1] == '\r')) or
(not istext and (c == '\r' or c == '\n')) or
((c == '\t' or c == ' ') and (inp + 1 == len(data))) or
(c <= ' ' and c != '\r' and c != '\n' and
(quotetabs or (not quotetabs and (c != '\t' and c != ' '))))):
linelen += 3
if linelen >= MAXLINESIZE:
odata.append('=')
if crlf: odata.append('\r')
odata.append('\n')
linelen = 3
odata.append('=' + two_hex_digits(ord(c)))
inp += 1
else:
if (istext and
(c == '\n' or (inp+1 < len(data) and c == '\r' and
data[inp+1] == '\n'))):
linelen = 0
# Protect against whitespace on end of line
if (len(odata) > 0 and
(odata[-1] == ' ' or odata[-1] == '\t')):
ch = ord(odata[-1])
odata[-1] = '='
odata.append(two_hex_digits(ch))
if crlf: odata.append('\r')
odata.append('\n')
if c == '\r':
inp += 2
else:
inp += 1
else:
if (inp + 1 < len(data) and
data[inp+1] != '\n' and
(linelen + 1) >= MAXLINESIZE):
odata.append('=')
if crlf: odata.append('\r')
odata.append('\n')
linelen = 0
linelen += 1
if header and c == ' ':
c = '_'
odata.append(c)
inp += 1
return ''.join(odata)
hex_numbers = '0123456789ABCDEF'
def hex(n):
if n == 0:
return '0'
if n < 0:
n = -n
sign = '-'
else:
sign = ''
arr = []
def hex_gen(n):
""" Yield a nibble at a time. """
while n:
yield n % 0x10
            n = n // 0x10  # integer division; '/' would yield floats in Python 3
for nibble in hex_gen(n):
arr = [hex_numbers[nibble]] + arr
return sign + ''.join(arr)
def two_hex_digits(n):
    return hex_numbers[n // 0x10] + hex_numbers[n % 0x10]
def strhex_to_int(s):
i = 0
for c in s:
i = i * 0x10 + hex_numbers.index(c)
return i
hqx_encoding = '!"#$%&\'()*+,-012345689@ABCDEFGHIJKLMNPQRSTUVXYZ[`abcdefhijklmpqr'
DONE = 0x7f
SKIP = 0x7e
FAIL = 0x7d
table_a2b_hqx = [
#^@ ^A ^B ^C ^D ^E ^F ^G
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
#\b \t \n ^K ^L \r ^N ^O
FAIL, FAIL, SKIP, FAIL, FAIL, SKIP, FAIL, FAIL,
#^P ^Q ^R ^S ^T ^U ^V ^W
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
#^X ^Y ^Z ^[ ^\ ^] ^^ ^_
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
# ! " # $ % & '
FAIL, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
#( ) * + , - . /
0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, FAIL, FAIL,
#0 1 2 3 4 5 6 7
0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, FAIL,
#8 9 : ; < = > ?
0x14, 0x15, DONE, FAIL, FAIL, FAIL, FAIL, FAIL,
#@ A B C D E F G
0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D,
#H I J K L M N O
0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23, 0x24, FAIL,
#P Q R S T U V W
0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, FAIL,
#X Y Z [ \ ] ^ _
0x2C, 0x2D, 0x2E, 0x2F, FAIL, FAIL, FAIL, FAIL,
#` a b c d e f g
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, FAIL,
#h i j k l m n o
0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, FAIL, FAIL,
#p q r s t u v w
0x3D, 0x3E, 0x3F, FAIL, FAIL, FAIL, FAIL, FAIL,
#x y z { | } ~ ^?
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
]
def a2b_hqx(s):
result = []
def quadruples_gen(s):
t = []
for c in s:
res = table_a2b_hqx[ord(c)]
if res == SKIP:
continue
elif res == FAIL:
raise Error('Illegal character')
elif res == DONE:
yield t
raise Done
else:
t.append(res)
if len(t) == 4:
yield t
t = []
yield t
done = 0
try:
for snippet in quadruples_gen(s):
length = len(snippet)
if length == 4:
result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4)))
result.append(chr(((snippet[1] & 0x0f) << 4) | (snippet[2] >> 2)))
result.append(chr(((snippet[2] & 0x03) << 6) | (snippet[3])))
elif length == 3:
result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4)))
result.append(chr(((snippet[1] & 0x0f) << 4) | (snippet[2] >> 2)))
elif length == 2:
result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4)))
except Done:
done = 1
except Error:
raise
return (''.join(result), done)
# should this return a bytes object?
#return (bytes(''.join(result), __BRYTHON__.charset), done)
def b2a_hqx(s):
result =[]
def triples_gen(s):
while s:
try:
yield ord(s[0]), ord(s[1]), ord(s[2])
except IndexError:
yield tuple([ord(c) for c in s])
s = s[3:]
for snippet in triples_gen(s):
length = len(snippet)
if length == 3:
result.append(
hqx_encoding[(snippet[0] & 0xfc) >> 2])
result.append(hqx_encoding[
((snippet[0] & 0x03) << 4) | ((snippet[1] & 0xf0) >> 4)])
result.append(hqx_encoding[
(snippet[1] & 0x0f) << 2 | ((snippet[2] & 0xc0) >> 6)])
result.append(hqx_encoding[snippet[2] & 0x3f])
elif length == 2:
result.append(
hqx_encoding[(snippet[0] & 0xfc) >> 2])
result.append(hqx_encoding[
((snippet[0] & 0x03) << 4) | ((snippet[1] & 0xf0) >> 4)])
result.append(hqx_encoding[
(snippet[1] & 0x0f) << 2])
elif length == 1:
result.append(
hqx_encoding[(snippet[0] & 0xfc) >> 2])
result.append(hqx_encoding[
((snippet[0] & 0x03) << 4)])
return ''.join(result)
crctab_hqx = [
0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0,
]
def crc_hqx(s, crc):
for c in s:
crc = ((crc << 8) & 0xff00) ^ crctab_hqx[((crc >> 8) & 0xff) ^ ord(c)]
return crc
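# Sanity check: this is CRC-16/XMODEM (CCITT polynomial 0x1021, initial
# value 0), whose standard check value for '123456789' is 0x31C3:
#   assert crc_hqx('123456789', 0) == 0x31c3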
def rlecode_hqx(s):
"""
Run length encoding for binhex4.
The CPython implementation does not do run length encoding
of \x90 characters. This implementation does.
"""
if not s:
return ''
result = []
prev = s[0]
count = 1
# Add a dummy character to get the loop to go one extra round.
# The dummy must be different from the last character of s.
# In the same step we remove the first character, which has
# already been stored in prev.
if s[-1] == '!':
s = s[1:] + '?'
else:
s = s[1:] + '!'
for c in s:
if c == prev and count < 255:
count += 1
else:
if count == 1:
if prev != '\x90':
result.append(prev)
else:
result.extend(['\x90', '\x00'])
elif count < 4:
if prev != '\x90':
result.extend([prev] * count)
else:
result.extend(['\x90', '\x00'] * count)
else:
if prev != '\x90':
result.extend([prev, '\x90', chr(count)])
else:
result.extend(['\x90', '\x00', '\x90', chr(count)])
count = 1
prev = c
return ''.join(result)
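# Examples (illustrative inputs): runs of four or more identical bytes are
# collapsed to byte + 0x90 marker + count; shorter runs stay expanded:
#   rlecode_hqx('aaaa')  # -> 'a\x90\x04'
#   rlecode_hqx('aaa')   # -> 'aaa'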
def rledecode_hqx(s):
s = s.split('\x90')
result = [s[0]]
prev = s[0]
for snippet in s[1:]:
count = ord(snippet[0])
if count > 0:
result.append(prev[-1] * (count-1))
prev = snippet
else:
result.append('\x90')
prev = '\x90'
result.append(snippet[1:])
return ''.join(result)
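# Examples (illustrative inputs), inverting rlecode_hqx; a count of zero
# escapes a literal 0x90 byte:
#   rledecode_hqx('a\x90\x04')  # -> 'aaaa'
#   rledecode_hqx('a\x90\x00')  # -> 'a\x90'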
crc_32_tab = [
0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419,
0x706af48f, 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4,
0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07,
0x90bf1d91, 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, 0x136c9856,
0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4,
0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3,
0x45df5c75, 0xdcd60dcf, 0xabd13d59, 0x26d930ac, 0x51de003a,
0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599,
0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190,
0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f,
0x9fbfe4a5, 0xe8b8d433, 0x7807c9a2, 0x0f00f934, 0x9609a88e,
0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed,
0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3,
0xfbd44c65, 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a,
0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5,
0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa, 0xbe0b1010,
0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17,
0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6,
0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615,
0x73dc1683, 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, 0xf00f9344,
0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a,
0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1,
0xa6bc5767, 0x3fb506dd, 0x48b2364b, 0xd80d2bda, 0xaf0a1b4c,
0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef,
0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe,
0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31,
0x2cd99e8b, 0x5bdeae1d, 0x9b64c2b0, 0xec63f226, 0x756aa39c,
0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b,
0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1,
0x18b74777, 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, 0xa00ae278,
0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7,
0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc, 0x40df0b66,
0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605,
0xcdd70693, 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8,
0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b,
0x2d02ef8d
]
def crc32(s, crc=0):
result = 0
crc = ~int(crc) & 0xffffffff
#crc = ~long(crc) & 0xffffffffL
for c in s:
crc = crc_32_tab[(crc ^ int(ord(c))) & 0xff] ^ (crc >> 8)
#crc = crc_32_tab[(crc ^ long(ord(c))) & 0xffL] ^ (crc >> 8)
#/* Note: (crc >> 8) MUST zero fill on left
result = crc ^ 0xffffffff
if result > 2**31:
result = ((result + 2**31) % 2**32) - 2**31
return result
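# Sanity check: the standard CRC-32 check value for '123456789' is 0xCBF43926;
# the folding above maps it into a signed 32-bit integer:
#   assert crc32('123456789') == -873187034  # == 0xcbf43926 - 2**32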
def b2a_hex(s):
result = []
for char in s:
c = (ord(char) >> 4) & 0xf
if c > 9:
c = c + ord('a') - 10
else:
c = c + ord('0')
result.append(chr(c))
c = ord(char) & 0xf
if c > 9:
c = c + ord('a') - 10
else:
c = c + ord('0')
result.append(chr(c))
return ''.join(result)
hexlify = b2a_hex
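# Example (illustrative input): each byte becomes two lowercase hex digits:
#   b2a_hex('\x01\xab')  # -> '01ab'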
table_hex = [
-1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
-1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
-1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,-1,-1, -1,-1,-1,-1,
-1,10,11,12, 13,14,15,-1, -1,-1,-1,-1, -1,-1,-1,-1,
-1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
-1,10,11,12, 13,14,15,-1, -1,-1,-1,-1, -1,-1,-1,-1,
-1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1
]
def a2b_hex(t):
result = []
def pairs_gen(s):
while s:
try:
yield table_hex[ord(s[0])], table_hex[ord(s[1])]
except IndexError:
if len(s):
raise TypeError('Odd-length string')
return
s = s[2:]
for a, b in pairs_gen(t):
if a < 0 or b < 0:
raise TypeError('Non-hexadecimal digit found')
result.append(chr((a << 4) + b))
return bytes(''.join(result), __BRYTHON__.charset)
unhexlify = a2b_hex
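# Examples (illustrative inputs); the result is a bytes object in the page
# charset, and odd-length input is rejected:
#   a2b_hex('01ab')  # -> the two bytes 0x01 0xab
#   a2b_hex('01a')   # raises TypeError('Odd-length string')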
|
gpl-3.0
|
alexforencich/python-ivi
|
ivi/tektronix/tektronixDPO5104.py
|
1
|
1552
|
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .tektronixDPO5000 import *
class tektronixDPO5104(tektronixDPO5000):
"Tektronix DPO5104 IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'DPO5104')
super(tektronixDPO5104, self).__init__(*args, **kwargs)
self._analog_channel_count = 4
self._digital_channel_count = 0
self._bandwidth = 1e9
self._init_channels()
|
mit
|
mdworks2016/work_development
|
Python/05_FirstPython/Chapter9_WebApp/fppython_develop/lib/python3.7/site-packages/pip/_vendor/pep517/check.py
|
22
|
5885
|
"""Check a project and backend by attempting to build using PEP 517 hooks.
"""
import argparse
import logging
import os
from os.path import isfile, join as pjoin
from pip._vendor.pytoml import TomlError, load as toml_load
import shutil
from subprocess import CalledProcessError
import sys
import tarfile
from tempfile import mkdtemp
import zipfile
from .colorlog import enable_colourful_output
from .envbuild import BuildEnvironment
from .wrappers import Pep517HookCaller
log = logging.getLogger(__name__)
def check_build_sdist(hooks, build_sys_requires):
with BuildEnvironment() as env:
try:
env.pip_install(build_sys_requires)
log.info('Installed static build dependencies')
except CalledProcessError:
log.error('Failed to install static build dependencies')
return False
try:
reqs = hooks.get_requires_for_build_sdist({})
log.info('Got build requires: %s', reqs)
except Exception:
log.error('Failure in get_requires_for_build_sdist', exc_info=True)
return False
try:
env.pip_install(reqs)
log.info('Installed dynamic build dependencies')
except CalledProcessError:
log.error('Failed to install dynamic build dependencies')
return False
td = mkdtemp()
log.info('Trying to build sdist in %s', td)
try:
try:
filename = hooks.build_sdist(td, {})
log.info('build_sdist returned %r', filename)
except Exception:
log.info('Failure in build_sdist', exc_info=True)
return False
if not filename.endswith('.tar.gz'):
log.error(
"Filename %s doesn't have .tar.gz extension", filename)
return False
path = pjoin(td, filename)
if isfile(path):
log.info("Output file %s exists", path)
else:
log.error("Output file %s does not exist", path)
return False
if tarfile.is_tarfile(path):
log.info("Output file is a tar file")
else:
log.error("Output file is not a tar file")
return False
finally:
shutil.rmtree(td)
return True
def check_build_wheel(hooks, build_sys_requires):
with BuildEnvironment() as env:
try:
env.pip_install(build_sys_requires)
log.info('Installed static build dependencies')
except CalledProcessError:
log.error('Failed to install static build dependencies')
return False
try:
reqs = hooks.get_requires_for_build_wheel({})
log.info('Got build requires: %s', reqs)
except Exception:
log.error('Failure in get_requires_for_build_wheel', exc_info=True)
return False
try:
env.pip_install(reqs)
log.info('Installed dynamic build dependencies')
except CalledProcessError:
log.error('Failed to install dynamic build dependencies')
return False
td = mkdtemp()
log.info('Trying to build wheel in %s', td)
try:
try:
filename = hooks.build_wheel(td, {})
log.info('build_wheel returned %r', filename)
except Exception:
log.info('Failure in build_wheel', exc_info=True)
return False
if not filename.endswith('.whl'):
log.error("Filename %s doesn't have .whl extension", filename)
return False
path = pjoin(td, filename)
if isfile(path):
log.info("Output file %s exists", path)
else:
log.error("Output file %s does not exist", path)
return False
if zipfile.is_zipfile(path):
log.info("Output file is a zip file")
else:
log.error("Output file is not a zip file")
return False
finally:
shutil.rmtree(td)
return True
def check(source_dir):
pyproject = pjoin(source_dir, 'pyproject.toml')
if isfile(pyproject):
log.info('Found pyproject.toml')
else:
log.error('Missing pyproject.toml')
return False
try:
with open(pyproject) as f:
pyproject_data = toml_load(f)
# Ensure the mandatory data can be loaded
buildsys = pyproject_data['build-system']
requires = buildsys['requires']
backend = buildsys['build-backend']
log.info('Loaded pyproject.toml')
except (TomlError, KeyError):
log.error("Invalid pyproject.toml", exc_info=True)
return False
hooks = Pep517HookCaller(source_dir, backend)
sdist_ok = check_build_sdist(hooks, requires)
wheel_ok = check_build_wheel(hooks, requires)
if not sdist_ok:
log.warning('Sdist checks failed; scroll up to see')
if not wheel_ok:
log.warning('Wheel checks failed')
return sdist_ok and wheel_ok
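# Programmatic usage sketch (hypothetical path): check() runs both the sdist
# and wheel builds against the project's declared build backend:
#   ok = check('/path/to/project')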
def main(argv=None):
ap = argparse.ArgumentParser()
ap.add_argument(
'source_dir',
help="A directory containing pyproject.toml")
args = ap.parse_args(argv)
enable_colourful_output()
ok = check(args.source_dir)
if ok:
print(ansi('Checks passed', 'green'))
else:
print(ansi('Checks failed', 'red'))
sys.exit(1)
ansi_codes = {
'reset': '\x1b[0m',
'bold': '\x1b[1m',
'red': '\x1b[31m',
'green': '\x1b[32m',
}
def ansi(s, attr):
if os.name != 'nt' and sys.stdout.isatty():
return ansi_codes[attr] + str(s) + ansi_codes['reset']
else:
return str(s)
if __name__ == '__main__':
main()
|
apache-2.0
|
tfukushima/midonet-kubernetes
|
midonet_kubernetes/plugin.py
|
2
|
25380
|
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import json
import logging
import os
import requests
import socket
import sys
import traceback
import docker
from neutronclient.common import exceptions as n_exceptions
from neutronclient.v2_0 import client as client_v2
import netaddr
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from pybrctl import pybrctl
import pyroute2
# from midonet_kubernetes import actions
import actions
import exceptions
LOG_PATH = '/var/log/midonet-kubernetes/plugin.log'
logging.basicConfig(filename=LOG_PATH, level=logging.DEBUG)
logger = logging.getLogger(__name__)
BINDING_EXECUTABLE = '/usr/bin/mm-ctl'
BIND = '--bind-port'
UNBIND = '--unbind-port'
HOST = os.environ.get('OS_HOST', '')
ENDPOINT_URL = 'http://{0}:9696'.format(HOST)
USERNAME = 'admin'
TENANT_NAME = 'admin'
PASSWORD = 'midonet'
AUTH_URL = 'http://{0}:35357/v2.0'.format(HOST)
NETNS_PREFIX = '/var/run/netns/'
PROC_TEMPLATE = '/proc/{0}/ns/net'
GLOBAL_ROUTER_NAME = 'midonet-kubernetes'
# GLOBAL_ROUTER_NAME = 'my_router'
SUBNET_RANGE = os.environ.get('SERVICE_CLUSTER_IP_RANGE', '192.168.3.0/24')
KUBE_API_SERVER_HOST = '10.240.0.12'
KUBE_API_SERVER_PORT = '8080'
KUBE_API_SERVER_URL = 'http://{0}:{1}/api/v1'.format(
KUBE_API_SERVER_HOST, KUBE_API_SERVER_PORT)
neutron = client_v2.Client(endpoint_url=ENDPOINT_URL, timeout=30,
username=USERNAME, tenant_name=TENANT_NAME,
password=PASSWORD, auth_url=AUTH_URL)
neutron.format = 'json'
docker_client = docker.Client(base_url='unix:///var/run/docker.sock')
docker_bridge = pybrctl.Bridge("docker0")
def get_hostname():
"""Returns the host name."""
return socket.gethostname()
def _get_short_docker_id(docker_id):
return docker_id[:12]
def _get_network_name(pod_namespace, host_name):
# return '-'.join([pod_namespace, host_name])
return pod_namespace
def _call_k8s_api(endpoint='/'):
response = requests.get(KUBE_API_SERVER_URL + endpoint)
return response.json()
def get_services(pod_namespace):
return _call_k8s_api('/namespaces/{0}/services'.format(pod_namespace))
def get_service(pod_namespace, service_name):
return _call_k8s_api('/namespaces/{0}/services/{1}'
.format(pod_namespace, service_name))
def _get_networks_by_attrs(unique=True, **attrs):
networks = neutron.list_networks(**attrs)
if unique and len(networks.get('networks', [])) > 1:
raise exceptions.DuplicatedResourceException(
"Multiple Neutron networks exist for the params {0}"
.format(', '.join(['{0}={1}'.format(k, v)
for k, v in attrs.items()])))
return networks['networks']
def _get_subnets_by_attrs(unique=True, **attrs):
subnets = neutron.list_subnets(**attrs)
if unique and len(subnets.get('subnets', [])) > 2: # subnets for IPv4 and/or IPv6
raise exceptions.DuplicatedResourceException(
"Multiple Neutron subnets exist for the params {0} "
.format(', '.join(['{0}={1}'.format(k, v)
for k, v in attrs.items()])))
return subnets['subnets']
def _get_ports_by_attrs(unique=True, **attrs):
ports = neutron.list_ports(**attrs)
if unique and len(ports.get('ports', [])) > 1:
raise exceptions.DuplicatedResourceException(
"Multiple Neutron ports exist for the params {0} "
.format(', '.join(['{0}={1}'.format(k, v)
for k, v in attrs.items()])))
return ports['ports']
def _get_routers_by_attrs(unique=True, **attrs):
routers = neutron.list_routers(**attrs)
if unique and len(routers.get('routers', [])) > 1:
raise exceptions.DuplicatedResourceException(
"Multiple Neutron routers exist for the params {0}"
.format(', '.join(['{0}={1}'.format(k, v)
for k, v in attrs.items()])))
return routers['routers']
def _get_vips_by_attrs(unique=True, **attrs):
vips = neutron.list_vips(**attrs)
if unique and len(vips.get('vips', [])) > 1:
raise exceptions.DuplicatedResourceException(
"Multiple Neutron vips exist for the params {0} "
.format(', '.join(['{0}={1}'.format(k, v)
for k, v in attrs.items()])))
return vips['vips']
def _get_pools_by_attrs(unique=True, **attrs):
pools = neutron.list_pools(**attrs)
if unique and len(pools.get('pools', [])) > 1:
raise exceptions.DuplicatedResourceException(
"Multiple Neutron pools exist for the params {0} "
.format(', '.join(['{0}={1}'.format(k, v)
for k, v in attrs.items()])))
return pools['pools']
def _get_members_by_attrs(unique=True, **attrs):
members = neutron.list_members(**attrs)
if unique and len(members.get('members', [])) > 1:
raise exceptions.DuplicatedResourceException(
"Multiple Neutron members exist for the params {0} "
.format(', '.join(['{0}={1}'.format(k, v)
for k, v in attrs.items()])))
return members['members']
def _get_router_ports_by_subnet_id(neutron_subnet_id, neutron_port_list):
router_ports = [
port for port in neutron_port_list
if ((neutron_subnet_id in [fip['subnet_id']
for fip in port.get('fixed_ips', [])])
or (neutron_subnet_id == port.get('subnet_id', '')))]
return router_ports
def init():
"""Initializes the network plugin.
This function is called when 'init' is given as the first argument.
"""
logger.info('Initialized the plugin')
def get_veth_name_for_container(container_info):
"""Returns the name of the veth interface associated with the container
:param container_info: the container info dictionary returned by Docker API
:returns: the veth name as string
"""
logger.info(container_info)
if not os.path.exists(NETNS_PREFIX):
os.mkdir(NETNS_PREFIX)
pid = container_info['State']['Pid']
proc_dir = PROC_TEMPLATE.format(pid)
netns_symlink_path = NETNS_PREFIX + str(pid)
veth_name = ''
try:
if not os.path.exists(netns_symlink_path):
os.symlink(proc_dir, netns_symlink_path)
logger.debug('Created a symlink {0}'.format(netns_symlink_path))
container_netns = pyroute2.IPDB(nl=pyroute2.NetNS(str(pid)))
main_netns = pyroute2.IPDB()
try:
logger.debug(container_netns.interfaces)
# logger.debug(main_netns.interfaces)
with container_netns.by_name['eth0'] as eth0:
eth0_index = eth0['index']
veth_index = eth0_index + 1
with main_netns.by_index[veth_index] as veth:
veth_name = veth['ifname']
finally:
container_netns.release()
main_netns.release()
finally:
if os.path.exists(netns_symlink_path):
os.remove(netns_symlink_path)
logger.debug('Deleted the symlink {0}'.format(netns_symlink_path))
return veth_name
def _get_or_create_subnet(container_info, neutron_network_id=''):
ip_address = container_info['NetworkSettings']['IPAddress']
prefixlen = container_info['NetworkSettings']['IPPrefixLen']
gateway_ip = container_info['NetworkSettings']['Gateway']
cidr = netaddr.IPNetwork('/'.join([ip_address, str(prefixlen)]))
subnet_network = str(cidr.network)
subnet_cidr = '/'.join([subnet_network, str(cidr.prefixlen)])
created_subnet = {}
subnets = _get_subnets_by_attrs(cidr=str(subnet_cidr),
network_id=neutron_network_id)
if not subnets:
new_subnet = {
'network_id': neutron_network_id,
'ip_version': cidr.version,
'cidr': subnet_cidr,
'gateway_ip': gateway_ip,
'enable_dhcp': False,
}
created_subnet_response = neutron.create_subnet({'subnet': new_subnet})
created_subnet = created_subnet_response['subnet']
else:
created_subnet = subnets[0]
logger.debug('Reusing the existing subnet {0}'
.format(created_subnet['id']))
return created_subnet
def _get_or_create_cluster_ip_subnet(neutron_network_id=''):
ip_network = netaddr.IPNetwork(SUBNET_RANGE)
subnets = _get_subnets_by_attrs(cidr=SUBNET_RANGE,
network_id=neutron_network_id)
if not subnets:
new_subnet = {
'network_id': neutron_network_id,
'ip_version': ip_network.version,
'cidr': SUBNET_RANGE,
'enable_dhcp': False,
}
created_subnet_response = neutron.create_subnet({'subnet': new_subnet})
created_subnet = created_subnet_response['subnet']
else:
created_subnet = subnets[0]
logger.debug('Reusing the existing subnet {0}'
.format(created_subnet['id']))
return created_subnet
def _get_or_create_router(pod_namespace):
router_name = pod_namespace
routers = _get_routers_by_attrs(name=router_name)
router = {}
if not routers:
created_router_response = neutron.create_router(
{'router': {'name': router_name}})
router = created_router_response['router']
logger.debug('Created the router {0}'.format(router))
else:
router = routers[0]
logger.debug('Reusing the router {0}'.format(router['id']))
return router
def _get_or_create_pools_and_vips(service_name, subnet_id, service_spec):
cluster_ip = service_spec['clusterIP']
ports = service_spec['ports']
pools = []
vips = []
for port in ports:
protocol = port['protocol']
protocol_port = port['targetPort']
neutron_pools = _get_pools_by_attrs(
name=service_name, protocol=protocol, subnet_id=subnet_id)
neutron_pool = {}
if not neutron_pools:
pool_request = {
'pool': {
'name': service_name,
'protocol': protocol,
'subnet_id': subnet_id,
'lb_method': 'ROUND_ROBIN',
},
}
neutron_pool_response = neutron.create_pool(pool_request)
neutron_pool = neutron_pool_response['pool']
else:
neutron_pool = neutron_pools[0]
pools.append(neutron_pool)
pool_id = neutron_pool['id']
neutron_vips = _get_vips_by_attrs(
name=service_name, protocol=protocol, subnet_id=subnet_id,
pool_id=pool_id, address=cluster_ip)
neutron_vip = {}
if not neutron_vips:
vip_request = {
'vip': {
# name is not necessarily unique; the service name is
# used to group the vips.
'name': service_name,
'pool_id': pool_id,
'subnet_id': subnet_id,
'address': cluster_ip,
'protocol': protocol,
'protocol_port': protocol_port,
},
}
neutron_vip_response = neutron.create_vip(vip_request)
neutron_vip = neutron_vip_response['vip']
else:
neutron_vip = neutron_vips[0]
vips.append(neutron_vip)
return (pools, vips)
def _get_ip_address_in_port(neutron_port):
ip_address = neutron_port.get('ip_address', '')
fixed_ips = neutron_port.get('fixed_ips', [])
if not ip_address:
for fixed_ip in fixed_ips:
ip = netaddr.IPAddress(fixed_ip['ip_address'])
if ip.version == 4:
ip_address = fixed_ip['ip_address']
break
return ip_address
def _create_port(container_info, neutron_network_id,
neutron_subnet_id, pod_name):
ip_address = container_info['NetworkSettings']['IPAddress']
mac_address = container_info['NetworkSettings']['MacAddress']
new_port = {
'name': pod_name,
'network_id': neutron_network_id,
'mac_address': mac_address,
'fixed_ips': [{
'subnet_id': neutron_subnet_id,
'ip_address': ip_address,
}],
}
created_port_response = neutron.create_port({'port': new_port})
created_port = created_port_response['port']
return created_port
def get_service_name(pod_name):
"""Returns the service name from the pod name."""
return pod_name[:-6]
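# Example (hypothetical pod name, assuming the usual hyphen plus a
# five-character suffix that K8s appends, which [:-6] strips):
#   get_service_name('frontend-a1b2c')  # -> 'frontend'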
def _emulate_kube_proxy(pod_namespace, pod_name, cluster_ip_subnet_id, neutron_port):
service_name = get_service_name(pod_name)
service = get_service(pod_namespace, service_name)
service_spec = service['spec']
pools, vips = _get_or_create_pools_and_vips(
service_name, cluster_ip_subnet_id, service_spec)
# NOTE(tfukushima): The current Neutron model assumes that a single VIP is
# created under the same subnet, which does not match the K8s model. This
# introduces the limitation that we support only a single "port" entry
# in the "ports" section of the spec file.
neutron_pool = pools[0]
neutron_vip = vips[0]
member_request = {
'member': {
'pool_id': neutron_pool['id'],
'address': _get_ip_address_in_port(neutron_port),
'protocol_port': neutron_vip['protocol_port'],
'weight': 1,
}
}
neutron_member_response = neutron.create_member(member_request)
neutron_member = neutron_member_response['member']
logger.debug('Created a new member {0} for the pool {1} associated with '
'the vip {2}'
.format(neutron_member['id'], neutron_pool['id'],
neutron_vip['id']))
def _cleanup_emulated_kube_proxy(pod_namespace, pod_name, cluster_ip_subnet_id, port):
service_name = get_service_name(pod_name)
pools = _get_pools_by_attrs(name=service_name, subnet_id=cluster_ip_subnet_id)
vips = _get_vips_by_attrs(name=service_name, subnet_id=cluster_ip_subnet_id)
if pools:
neutron_pool = pools[0]
neutron_vip = vips[0]
address = _get_ip_address_in_port(port)
pool_id = neutron_pool['id']
members = _get_members_by_attrs(address=address, pool_id=pool_id)
member = members[0]
neutron.delete_member(member['id'])
vip_id = neutron_vip['id']
try:
neutron.delete_vip(vip_id)
except n_exceptions.Conflict:
logger.info('The vip {0} is still in use.'.format(vip_id))
try:
neutron.delete_pool(pool_id)
except n_exceptions.Conflict:
logger.info('The pool {0} is still in use.'.format(pool_id))
logger.debug('Successfully cleaned up the emulated kube-proxy resources')
@lockutils.synchronized('k8s-np-lock', lock_file_prefix='k8s-np-lock',
external=True, lock_path='/tmp/')
def setup(pod_namespace, pod_name, container_id):
"""Creates the network for the container.
This function is called when 'setup' is given as the first argument.
"""
network = {}
# Map Pod's namespace into Neutron network.
network_name = _get_network_name(pod_namespace, get_hostname())
networks = _get_networks_by_attrs(name=network_name)
if not networks:
created_network_response = neutron.create_network(
{'network': {'name': network_name}})
network = created_network_response['network']
logger.debug('Created the network {0}'.format(network))
else:
network = networks[0]
logger.debug('Reusing the network {0}'.format(network['id']))
neutron_network_id = network['id']
container_info = docker_client.inspect_container(container_id)
# Create a new subnet if the corresponding one doesn't exist.
subnet = _get_or_create_subnet(container_info, network['id'])
router = _get_or_create_router(GLOBAL_ROUTER_NAME)
neutron_router_id = router['id']
neutron_subnet_id = subnet['id']
filtered_ports = _get_ports_by_attrs(
unique=False, device_owner='network:router_interface',
device_id=neutron_router_id, network_id=neutron_network_id)
router_ports = _get_router_ports_by_subnet_id(
neutron_subnet_id, filtered_ports)
if not router_ports:
neutron.add_interface_router(
neutron_router_id, {'subnet_id': neutron_subnet_id})
else:
logger.debug('The subnet {0} is already bound to the router'
.format(neutron_subnet_id))
cluster_ip_subnet = _get_or_create_cluster_ip_subnet(network['id'])
cluster_ip_subnet_id = cluster_ip_subnet['id']
cluster_ip_router_ports = _get_router_ports_by_subnet_id(
cluster_ip_subnet_id, filtered_ports)
if not cluster_ip_router_ports:
neutron.add_interface_router(
neutron_router_id, {'subnet_id': cluster_ip_subnet_id})
else:
logger.debug('The cluster IP subnet {0} is already bound to the router'
.format(cluster_ip_subnet_id))
port = _create_port(container_info, neutron_network_id,
neutron_subnet_id, pod_name)
logger.debug('Created a new port {0}'.format(port['id']))
_emulate_kube_proxy(pod_namespace, pod_name, cluster_ip_subnet_id, port)
# Getting the veth name.
veth_name = get_veth_name_for_container(container_info)
docker_bridge.delif(veth_name)
port_id = port['id']
try:
stdout, stderr = processutils.execute(
BINDING_EXECUTABLE, BIND, port_id, veth_name,
run_as_root=True)
except processutils.ProcessExecutionError as ex:
logger.error('Failed to bind the port: {0}'.format(ex))
sys.exit(-1)
logger.debug('Successfully bound the port {0} to {1}'
.format(port_id, veth_name))
@lockutils.synchronized('k8s-np-lock', lock_file_prefix='k8s-np-lock',
external=True, lock_path='/tmp/')
def teardown(pod_namespace, pod_name, container_id):
"""Destroys the network for the container.
This function is called when 'teardown' is given as the first argument.
"""
network_name = _get_network_name(pod_namespace, get_hostname())
filtered_networks = _get_networks_by_attrs(name=network_name)
neutron_network_id = filtered_networks[0]['id']
container_info = docker_client.inspect_container(container_id)
filtered_ports = _get_ports_by_attrs(name=pod_name)
if filtered_ports:
port = filtered_ports[0]
port_id = port['id']
try:
stdout, stderr = processutils.execute(
BINDING_EXECUTABLE, UNBIND, port_id, run_as_root=True)
except processutils.ProcessExecutionError as ex:
logger.error('Failed to unbind the port: {0}'.format(ex))
sys.exit(-1)
logger.debug('Successfully unbound the port {0}'.format(port_id))
cluster_ip_subnet = _get_or_create_cluster_ip_subnet(neutron_network_id)
cluster_ip_subnet_id = cluster_ip_subnet['id']
_cleanup_emulated_kube_proxy(pod_namespace, pod_name, cluster_ip_subnet_id, port)
neutron.delete_port(port_id)
logger.debug('Successfully deleted the port {0}'.format(port_id))
subnet = _get_or_create_subnet(container_info, neutron_network_id)
neutron_subnet_id = subnet['id']
router = _get_or_create_router(GLOBAL_ROUTER_NAME)
neutron_router_id = router['id']
filtered_ports = _get_ports_by_attrs(
unique=False, device_owner='network:router_interface',
device_id=neutron_router_id, network_id=neutron_network_id)
router_ports = _get_router_ports_by_subnet_id(neutron_subnet_id, filtered_ports)
if len(router_ports) == 1:
neutron.remove_interface_router(
neutron_router_id, {'subnet_id': neutron_subnet_id})
logger.debug('The subnet {0} is unbound from the router {1}'
.format(neutron_subnet_id, neutron_router_id))
try:
neutron.delete_subnet(neutron_subnet_id)
logger.debug('Deleted the subnet {0}'.format(neutron_subnet_id))
except n_exceptions.Conflict:
logger.info('The subnet {0} is still in use.'
.format(neutron_subnet_id))
cluster_ip_router_ports = _get_router_ports_by_subnet_id(
cluster_ip_subnet_id, filtered_ports)
if len(cluster_ip_router_ports) == 1:
neutron.remove_interface_router(
neutron_router_id, {'subnet_id': cluster_ip_subnet_id})
logger.debug('The cluster IP subnet {0} is unbound from the router {1}'
.format(cluster_ip_subnet_id, neutron_router_id))
try:
neutron.delete_subnet(cluster_ip_subnet_id)
logger.debug('The cluster IP subnet {0} was deleted successfully.'
.format(cluster_ip_subnet_id))
except n_exceptions.Conflict:
logger.info('The cluster IP subnet {0} is still in use.'
.format(cluster_ip_subnet_id))
try:
neutron.delete_network(neutron_network_id)
logger.debug('Deleted the network {0}'.format(neutron_network_id))
except n_exceptions.Conflict:
logger.info('The network {0} is still in use.'
.format(neutron_network_id))
def status(pod_namespace, pod_name, container_id):
"""Reports the status of the containers identifed by the given information.
This function is called when 'status' is given as the first argument.
"""
network_name = _get_network_name(pod_namespace, get_hostname())
filtered_networks = _get_networks_by_attrs(name=network_name)
if not filtered_networks:
return
network = filtered_networks[0]
neutron_network_id = network['id']
filtered_ports = _get_ports_by_attrs(
name=pod_name, network_id=neutron_network_id)
if not filtered_ports:
return
port = filtered_ports[0]
ip_address = _get_ip_address_in_port(port)
status_response = {
"apiVersion": "v1beta1",
"kind": "PodNetworkStatus",
}
status_response['ip'] = ip_address
logger.debug('Sending the status of {0}, {1}: {2}'
.format(pod_name, network_name, status_response))
sys.stdout.write(json.dumps(status_response))
def dispatch(action, pod_namespace=None, pod_name=None, container_id=None):
"""Run the actual action with the given arguments.
Currently the following actions are supported.
- init
- setup <pod_namespace> <pod_name> <container_id>
- teardown <pod_namespace> <pod_name> <container_id>
- status <pod_namespace> <pod_name> <container_id>
After executing the action, it exits with return code 0; otherwise it
exits with a non-zero return code.
See the following link for more details.
- https://godoc.org/github.com/kubernetes/kubernetes/pkg/kubelet/network/exec # noqa
"""
if action == actions.INIT:
logger.debug('init is called.')
init()
elif action == actions.SETUP:
logger.debug('setup is called.')
setup(pod_namespace, pod_name, container_id)
elif action == actions.TEARDOWN:
logger.debug('teardown is called.')
teardown(pod_namespace, pod_name, container_id)
elif action == actions.STATUS:
logger.debug('status is called.')
status(pod_namespace, pod_name, container_id)
sys.exit(0)
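# Example invocations (hypothetical identifiers), following the kubelet exec
# network plugin calling convention described in the docstring above:
#   plugin.py init
#   plugin.py setup default nginx-a1b2c 4f7d2b9c1e0a
#   plugin.py status default nginx-a1b2c 4f7d2b9c1e0a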
def _dispatch_log():
"""Dispatches the action and catch exceptions to be logged.
After executing the action, it exits with return code 0 if it succeeded to
run through. Othereise it exits with the non-zero return code.
"""
args = sys.argv
action = args[1]
pod_namespace = args[2] if len(args) > 2 else None
pod_name = args[3] if len(args) > 3 else None
container_id = args[4] if len(args) > 4 else None
logger.debug("MidoNet plugin executable was called with action: {0}, "
"pod_namespace: {1}, pod_name: {2}, container_id: {3}"
.format(action, pod_namespace, pod_name, container_id))
return_code = 0
try:
dispatch(action, pod_namespace=pod_namespace,
pod_name=pod_name,
container_id=container_id)
except SystemExit as e:
return_code = e.code
except Exception as e:
logger.error("Unhandled exception: %s", e)
logger.error(traceback.format_exc())
return_code = -1
finally:
logger.debug("MidoNet plugin succeeded to be executed: %s",
return_code)
sys.exit(return_code)
if __name__ == '__main__':
_dispatch_log()
|
apache-2.0
|
tempbottle/Firefly
|
firefly/dbentrust/mmode.py
|
8
|
9318
|
#coding:utf8
'''
Created on 2013-5-8
@author: lan (www.9miao.com)
'''
from memclient import mclient
from memobject import MemObject
import util
import time
MMODE_STATE_ORI = 0 # unchanged
MMODE_STATE_NEW = 1 # newly created
MMODE_STATE_UPDATE = 2 # updated
MMODE_STATE_DEL = 3 # deleted
TIMEOUT = 1800
def _insert(args):
record,pkname,mmname,cls = args
pk = record[pkname]
mm = cls(mmname+':%s'%pk,pkname,data=record)
mm.insert()
return pk
class PKValueError(ValueError):
"""
"""
def __init__(self, data):
ValueError.__init__(self)
self.data = data
def __str__(self):
return "new record has no 'PK': %s" % (self.data)
class MMode(MemObject):
"""内存数据模型
"""
def __init__(self, name,pk,data={}):
"""
"""
MemObject.__init__(self, name, mclient)
self._state = MMODE_STATE_ORI # object state: 0 unchanged, 1 new, 2 updated, 3 deleted
self._pk = pk
self.data = data
self._time = time.time()
def update(self, key, values):
data = self.get_multi(['data','_state'])
ntime = time.time()
data['data'].update({key:values})
if data.get('_state')==MMODE_STATE_NEW:
props = {'data':data.get('data'),'_time':ntime}
else:
props = {'_state':MMODE_STATE_UPDATE,'data':data.get('data'),'_time':ntime}
return MemObject.update_multi(self, props)
def update_multi(self, mapping):
ntime = time.time()
data = self.get_multi(['data','_state'])
data['data'].update(mapping)
if data.get('_state')==MMODE_STATE_NEW:
props = {'data':data.get('data'),'_time':ntime}
else:
props = {'_state':MMODE_STATE_UPDATE,'data':data.get('data'),'_time':ntime}
return MemObject.update_multi(self, props)
def get(self, key):
ntime = time.time()
MemObject.update(self, "_time", ntime)
return MemObject.get(self, key)
def get_multi(self, keys):
ntime = time.time()
MemObject.update(self, "_time", ntime)
return MemObject.get_multi(self, keys)
def delete(self):
'''Mark the object as deleted.
'''
return MemObject.update(self,'_state',MMODE_STATE_DEL)
def mdelete(self):
"""清理对象
"""
self.syncDB()
MemObject.mdelete(self)
def IsEffective(self):
'''Check whether the object is still valid (i.e. not marked deleted).
'''
if self.get('_state')==MMODE_STATE_DEL:
return False
return True
def syncDB(self):
"""同步到数据库
"""
state = self.get('_state')
tablename = self._name.split(':')[0]
if state==MMODE_STATE_ORI:
return
elif state==MMODE_STATE_NEW:
props = self.get('data')
pk = self.get('_pk')
result = util.InsertIntoDB(tablename, props)
elif state==MMODE_STATE_UPDATE:
props = self.get('data')
pk = self.get('_pk')
prere = {pk:props.get(pk)}
util.UpdateWithDict(tablename, props, prere)
result = True
else:
pk = self.get('_pk')
props = self.get('data')
prere = {pk:props.get(pk)}
result = util.DeleteFromDB(tablename,prere)
if result:
MemObject.update(self,'_state', MMODE_STATE_ORI)
def checkSync(self,timeout=TIMEOUT):
"""检测同步
"""
ntime = time.time()
objtime = MemObject.get(self, '_time')
if ntime -objtime>=timeout and timeout:
self.mdelete()
else:
self.syncDB()
class MFKMode(MemObject):
"""内存数据模型
"""
def __init__(self, name,pklist = []):
MemObject.__init__(self, name, mclient)
self.pklist = pklist
class MAdmin(MemObject):
def __init__(self, name,pk,timeout=TIMEOUT,**kw):
MemObject.__init__(self, name, mclient)
self._pk = pk
self._fk = kw.get('fk','')
self._incrkey = kw.get('incrkey','')
self._incrvalue = kw.get('incrvalue',0)
self._timeout = timeout
def insert(self):
if self._incrkey and not self.get("_incrvalue"):
self._incrvalue = util.GetTableIncrValue(self._name)
MemObject.insert(self)
def load(self):
'''Load records from the database into memory.
'''
mmname = self._name
recordlist = util.ReadDataFromDB(mmname)
for record in recordlist:
pk = record[self._pk]
mm = MMode(self._name+':%s'%pk,self._pk,data=record)
mm.insert()
@property
def madmininfo(self):
keys = self.__dict__.keys()
info = self.get_multi(keys)
return info
def getAllPkByFk(self,fk):
'''Get the list of primary keys for the given foreign key.
'''
name = '%s_fk:%s'%(self._name,fk)
fkmm = MFKMode(name)
pklist = fkmm.get('pklist')
if pklist is not None:
return pklist
props = {self._fk:fk}
dbkeylist = util.getAllPkByFkInDB(self._name, self._pk, props)
name = '%s_fk:%s'%(self._name,fk)
fkmm = MFKMode(name, pklist = dbkeylist)
fkmm.insert()
return dbkeylist
def getObj(self,pk):
'''Get the MMode object for the given primary key, loading from the DB on a miss.
'''
mm = MMode(self._name+':%s'%pk,self._pk)
if not mm.IsEffective():
return None
if mm.get('data'):
return mm
props = {self._pk:pk}
record = util.GetOneRecordInfo(self._name,props)
if not record:
return None
mm = MMode(self._name+':%s'%pk,self._pk,data = record)
mm.insert()
return mm
def getObjData(self,pk):
'''Get the raw data dict for the given primary key.
'''
mm = MMode(self._name+':%s'%pk,self._pk)
if not mm.IsEffective():
return None
data = mm.get('data')
if mm.get('data'):
return data
props = {self._pk:pk}
record = util.GetOneRecordInfo(self._name,props)
if not record:
return None
mm = MMode(self._name+':%s'%pk,self._pk,data = record)
mm.insert()
return record
def getObjList(self,pklist):
'''Get MMode objects for a list of primary keys, batch-loading misses from the DB.
'''
_pklist = []
objlist = []
for pk in pklist:
mm = MMode(self._name+':%s'%pk,self._pk)
if not mm.IsEffective():
continue
if mm.get('data'):
objlist.append(mm)
else:
_pklist.append(pk)
if _pklist:
recordlist = util.GetRecordList(self._name, self._pk,_pklist)
for record in recordlist:
pk = record[self._pk]
mm = MMode(self._name+':%s'%pk,self._pk,data = record)
mm.insert()
objlist.append(mm)
return objlist
def deleteMode(self,pk):
'''Delete the object for the given primary key and update the foreign-key index.
'''
mm = self.getObj(pk)
if mm:
if self._fk:
data = mm.get('data')
if data:
fk = data.get(self._fk,0)
name = '%s_fk:%s'%(self._name,fk)
fkmm = MFKMode(name)
pklist = fkmm.get('pklist')
if pklist and pk in pklist:
pklist.remove(pk)
fkmm.update('pklist', pklist)
mm.delete()
return True
def checkAll(self):
key = '%s:%s:'%(mclient._hostname,self._name)
_pklist = util.getallkeys(key, mclient.connection)
for pk in _pklist:
mm = MMode(self._name+':%s'%pk,self._pk)
if not mm.IsEffective():
mm.mdelete()
continue
if not mm.get('data'):
continue
mm.checkSync(timeout=self._timeout)
self.deleteAllFk()
def deleteAllFk(self):
"""删除所有的外键
"""
key = '%s:%s_fk:'%(mclient._hostname,self._name)
_fklist = util.getallkeys(key, mclient.connection)
for fk in _fklist:
name = '%s_fk:%s'%(self._name,fk)
fkmm = MFKMode(name)
fkmm.mdelete()
def new(self,data):
"""创建一个新的对象
"""
incrkey = self._incrkey
if incrkey:
incrvalue = self.incr('_incrvalue', 1)
data[incrkey] = incrvalue - 1
pk = data.get(self._pk)
if pk is None:
raise PKValueError(data)
mm = MMode(self._name+':%s'%pk,self._pk,data=data)
setattr(mm,incrkey,pk)
else:
pk = data.get(self._pk)
mm = MMode(self._name+':%s'%pk,self._pk,data=data)
if self._fk:
fk = data.get(self._fk,0)
name = '%s_fk:%s'%(self._name,fk)
fkmm = MFKMode(name)
pklist = fkmm.get('pklist')
if pklist is None:
pklist = self.getAllPkByFk(fk)
pklist.append(pk)
fkmm.update('pklist', pklist)
setattr(mm,'_state',MMODE_STATE_NEW)
mm.insert()
return mm
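# A minimal usage sketch (hypothetical table name, keys and data; assumes the
# memcached client and the DB helpers in `util` are configured):
#   admin = MAdmin('tb_character', 'id', fk='user_id')
#   admin.insert()
#   admin.load()                       # warm the cache from the database
#   mm = admin.new({'id': 1, 'user_id': 9, 'name': 'lan'})
#   mm.update('name', 'miao')          # flags the change for the next sync
#   admin.checkAll()                   # periodically flush dirty records back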
|
mit
|
FlashGordon95/Financial-Portfolio-Flask
|
venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/langcyrillicmodel.py
|
2762
|
17725
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# KOI8-R language model
# Character Mapping Table:
KOI8R_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206, # 80
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, # 90
223,224,225, 68,226,227,228,229,230,231,232,233,234,235,236,237, # a0
238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253, # b0
27, 3, 21, 28, 13, 2, 39, 19, 26, 4, 23, 11, 8, 12, 5, 1, # c0
15, 16, 9, 7, 6, 14, 24, 10, 17, 18, 20, 25, 30, 29, 22, 54, # d0
59, 37, 44, 58, 41, 48, 53, 46, 55, 42, 60, 36, 49, 38, 31, 34, # e0
35, 43, 45, 32, 40, 52, 56, 33, 61, 62, 51, 57, 47, 63, 50, 70, # f0
)
win1251_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
239,240,241,242,243,244,245,246, 68,247,248,249,250,251,252,253,
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
)
latin5_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255,
)
macCyrillic_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
239,240,241,242,243,244,245,246,247,248,249,250,251,252, 68, 16,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27,255,
)
IBM855_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194, 68,195,196,197,198,199,200,201,202,203,204,205,
206,207,208,209,210,211,212,213,214,215,216,217, 27, 59, 54, 70,
3, 37, 21, 44, 28, 58, 13, 41, 2, 48, 39, 53, 19, 46,218,219,
220,221,222,223,224, 26, 55, 4, 42,225,226,227,228, 23, 60,229,
230,231,232,233,234,235, 11, 36,236,237,238,239,240,241,242,243,
8, 49, 12, 38, 5, 31, 1, 34, 15,244,245,246,247, 35, 16,248,
43, 9, 45, 7, 32, 6, 40, 14, 52, 24, 56, 10, 33, 17, 61,249,
250, 18, 62, 20, 51, 25, 57, 30, 47, 29, 63, 22, 50,251,252,255,
)
IBM866_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 97.6601%
# first 1024 sequences: 2.3389%
# rest sequences: 0.1237%
# negative sequences: 0.0009%
RussianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,1,3,3,3,3,1,3,3,3,2,3,2,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,2,2,2,2,2,0,0,2,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,2,3,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,2,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,2,3,3,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1,
0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1,
0,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,2,2,2,3,1,3,3,1,3,3,3,3,2,2,3,0,2,2,2,3,3,2,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,3,3,2,2,3,2,3,3,3,2,1,2,2,0,1,2,2,2,2,2,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,3,0,2,2,3,3,2,1,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,1,2,3,2,2,3,2,3,3,3,3,2,2,3,0,3,2,2,3,1,1,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,3,3,3,3,2,2,2,0,3,3,3,2,2,2,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,2,3,2,2,0,1,3,2,1,2,2,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,2,1,1,3,0,1,1,1,1,2,1,1,0,2,2,2,1,2,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,2,2,2,2,1,3,2,3,2,3,2,1,2,2,0,1,1,2,1,2,1,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,2,3,3,3,2,2,2,2,0,2,2,2,2,3,1,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,2,3,2,2,3,3,3,3,3,3,3,3,3,1,3,2,0,0,3,3,3,3,2,3,3,3,3,2,3,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,3,2,2,3,3,0,2,1,0,3,2,3,2,3,0,0,1,2,0,0,1,0,1,2,1,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,3,0,2,3,3,3,3,2,3,3,3,3,1,2,2,0,0,2,3,2,2,2,3,2,3,2,2,3,0,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,0,2,3,2,3,0,1,2,3,3,2,0,2,3,0,0,2,3,2,2,0,1,3,1,3,2,2,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,0,2,3,3,3,3,3,3,3,3,2,1,3,2,0,0,2,2,3,3,3,2,3,3,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,2,2,2,3,3,0,0,1,1,1,1,1,2,0,0,1,1,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,3,3,3,3,3,0,3,2,3,3,2,3,2,0,2,1,0,1,1,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,2,2,2,2,3,1,3,2,3,1,1,2,1,0,2,2,2,2,1,3,1,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
2,2,3,3,3,3,3,1,2,2,1,3,1,0,3,0,0,3,0,0,0,1,1,0,1,2,1,0,0,0,0,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,2,1,1,3,3,3,2,2,1,2,2,3,1,1,2,0,0,2,2,1,3,0,0,2,1,1,2,1,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,3,3,3,1,2,2,2,1,2,1,3,3,1,1,2,1,2,1,2,2,0,2,0,0,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,3,2,1,3,2,2,3,2,0,3,2,0,3,0,1,0,1,1,0,0,1,1,1,1,0,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,3,3,3,2,2,2,3,3,1,2,1,2,1,0,1,0,1,1,0,1,0,0,2,1,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,1,1,2,1,2,3,3,2,2,1,2,2,3,0,2,1,0,0,2,2,3,2,1,2,2,2,2,2,3,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,1,1,0,1,1,2,2,1,1,3,0,0,1,3,1,1,1,0,0,0,1,0,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,3,3,3,2,0,0,0,2,1,0,1,0,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,0,2,3,2,2,2,1,2,2,2,1,2,1,0,0,1,1,1,0,2,0,1,1,1,0,0,1,1,
1,0,0,0,0,0,1,2,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,0,0,0,0,1,0,0,0,0,3,0,1,2,1,0,0,0,0,0,0,0,1,1,0,0,1,1,
1,0,1,0,1,2,0,0,1,1,2,1,0,1,1,1,1,0,1,1,1,1,0,1,0,0,1,0,0,1,1,0,
2,2,3,2,2,2,3,1,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,0,1,0,1,1,1,0,2,1,
1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,0,1,1,0,
3,3,3,2,2,2,2,3,2,2,1,1,2,2,2,2,1,1,3,1,2,1,2,0,0,1,1,0,1,0,2,1,
1,1,1,1,1,2,1,0,1,1,1,1,0,1,0,0,1,1,0,0,1,0,1,0,0,1,0,0,0,1,1,0,
2,0,0,1,0,3,2,2,2,2,1,2,1,2,1,2,0,0,0,2,1,2,2,1,1,2,2,0,1,1,0,2,
1,1,1,1,1,0,1,1,1,2,1,1,1,2,1,0,1,2,1,1,1,1,0,1,1,1,0,0,1,0,0,1,
1,3,2,2,2,1,1,1,2,3,0,0,0,0,2,0,2,2,1,0,0,0,0,0,0,1,0,0,0,0,1,1,
1,0,1,1,0,1,0,1,1,0,1,1,0,2,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,
2,3,2,3,2,1,2,2,2,2,1,0,0,0,2,0,0,1,1,0,0,0,0,0,0,0,1,1,0,0,2,1,
1,1,2,1,0,2,0,0,1,0,1,0,0,1,0,0,1,1,0,1,1,0,0,0,0,0,1,0,0,0,0,0,
3,0,0,1,0,2,2,2,3,2,2,2,2,2,2,2,0,0,0,2,1,2,1,1,1,2,2,0,0,0,1,2,
1,1,1,1,1,0,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,1,0,1,1,1,1,1,1,0,0,1,
2,3,2,3,3,2,0,1,1,1,0,0,1,0,2,0,1,1,3,1,0,0,0,0,0,0,0,1,0,0,2,1,
1,1,1,1,1,1,1,0,1,0,1,1,1,1,0,1,1,1,0,0,1,1,0,1,0,0,0,0,0,0,1,0,
2,3,3,3,3,1,2,2,2,2,0,1,1,0,2,1,1,1,2,1,0,1,1,0,0,1,0,1,0,0,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,2,0,0,1,1,2,2,1,0,0,2,0,1,1,3,0,0,1,0,0,0,0,0,1,0,1,2,1,
1,1,2,0,1,1,1,0,1,0,1,1,0,1,0,1,1,1,1,0,1,0,0,0,0,0,0,1,0,1,1,0,
1,3,2,3,2,1,0,0,2,2,2,0,1,0,2,0,1,1,1,0,1,0,0,0,3,0,1,1,0,0,2,1,
1,1,1,0,1,1,0,0,0,0,1,1,0,1,0,0,2,1,1,0,1,0,0,0,1,0,1,0,0,1,1,0,
3,1,2,1,1,2,2,2,2,2,2,1,2,2,1,1,0,0,0,2,2,2,0,0,0,1,2,1,0,1,0,1,
2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,2,1,1,1,0,1,0,1,1,0,1,1,1,0,0,1,
3,0,0,0,0,2,0,1,1,1,1,1,1,1,0,1,0,0,0,1,1,1,0,1,0,1,1,0,0,1,0,1,
1,1,0,0,1,0,0,0,1,0,1,1,0,0,1,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,1,
1,3,3,2,2,0,0,0,2,2,0,0,0,1,2,0,1,1,2,0,0,0,0,0,0,0,0,1,0,0,2,1,
0,1,1,0,0,1,1,0,0,0,1,1,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
2,3,2,3,2,0,0,0,0,1,1,0,0,0,2,0,2,0,2,0,0,0,0,0,1,0,0,1,0,0,1,1,
1,1,2,0,1,2,1,0,1,1,2,1,1,1,1,1,2,1,1,0,1,0,0,1,1,1,1,1,0,1,1,0,
1,3,2,2,2,1,0,0,2,2,1,0,1,2,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,1,1,
0,0,1,1,0,1,1,0,0,1,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,0,2,3,1,2,2,2,2,2,2,1,1,0,0,0,1,0,1,0,2,1,1,1,0,0,0,0,1,
1,1,0,1,1,0,1,1,1,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
2,0,2,0,0,1,0,3,2,1,2,1,2,2,0,1,0,0,0,2,1,0,0,2,1,1,1,1,0,2,0,2,
2,1,1,1,1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,0,0,0,1,1,1,1,0,1,0,0,1,
1,2,2,2,2,1,0,0,1,0,0,0,0,0,2,0,1,1,1,1,0,0,0,0,1,0,1,2,0,0,2,0,
1,0,1,1,1,2,1,0,1,0,1,1,0,0,1,0,1,1,1,0,1,0,0,0,1,0,0,1,0,1,1,0,
2,1,2,2,2,0,3,0,1,1,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,0,1,1,1,0,0,1,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,
1,2,2,3,2,2,0,0,1,1,2,0,1,2,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,
0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,
2,2,1,1,2,1,2,2,2,2,2,1,2,2,0,1,0,0,0,1,2,2,2,1,2,1,1,1,1,1,2,1,
1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,0,1,
1,2,2,2,2,0,1,0,2,2,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,
0,0,1,0,0,1,0,0,0,0,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,0,2,2,2,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,
0,1,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,0,0,1,0,0,1,1,2,0,0,0,0,1,0,1,0,0,1,0,0,2,0,0,0,1,
0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,1,1,2,0,2,1,1,1,1,0,2,2,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,1,
0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,1,2,0,0,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,
0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
1,0,0,0,0,2,0,1,2,1,0,1,1,1,0,1,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,1,
0,0,0,0,0,1,0,0,1,1,0,0,1,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,
2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,0,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,1,0,1,0,1,0,0,1,1,1,1,0,0,0,1,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,0,1,1,0,1,0,1,0,0,0,0,1,1,0,1,1,0,0,0,0,0,1,0,1,1,0,1,0,0,0,
0,1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
)
Koi8rModel = {
'charToOrderMap': KOI8R_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "KOI8-R"
}
Win1251CyrillicModel = {
'charToOrderMap': win1251_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "windows-1251"
}
Latin5CyrillicModel = {
'charToOrderMap': latin5_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "ISO-8859-5"
}
MacCyrillicModel = {
'charToOrderMap': macCyrillic_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "MacCyrillic"
}
Ibm866Model = {
'charToOrderMap': IBM866_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "IBM866"
}
Ibm855Model = {
'charToOrderMap': IBM855_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "IBM855"
}
# flake8: noqa
|
mit
|
yojota/volatility
|
volatility/plugins/linux/sk_buff_cache.py
|
58
|
2810
|
# Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: atcuno@gmail.com
@organization:
"""
import os
import volatility.debug as debug
import volatility.plugins.linux.common as linux_common
from volatility.plugins.linux.slab_info import linux_slabinfo
class linux_sk_buff_cache(linux_common.AbstractLinuxCommand):
"""Recovers packets from the sk_buff kmem_cache"""
def __init__(self, config, *args, **kwargs):
self.edir = None
linux_common.AbstractLinuxCommand.__init__(self, config, *args, **kwargs)
self._config.add_option('UNALLOCATED', short_option = 'u', default = False, help = 'Show unallocated', action = 'store_true')
self._config.add_option('DUMP-DIR', short_option = 'D', default = None, help = 'output directory for recovered packets', action = 'store', type = 'str')
def write_sk_buff(self, s):
pkt_len = s.len
# keep sane sized packets
if 0 < pkt_len < 0x6400000:
start = s.data
data = self.addr_space.zread(start, pkt_len)
fname = "{0:x}".format(s.obj_offset)
fd = open(os.path.join(self.edir, fname), "wb")
fd.write(data)
fd.close()
yield "Wrote {0:d} bytes to {1:s}".format(pkt_len, fname)
def walk_cache(self, cache_name):
cache = linux_slabinfo(self._config).get_kmem_cache(cache_name, self._config.UNALLOCATED, struct_name = "sk_buff")
if not cache:
return
for s in cache:
for msg in self.write_sk_buff(s):
yield msg
def calculate(self):
linux_common.set_plugin_members(self)
self.edir = self._config.DUMP_DIR
if not self.edir:
debug.error("No output directory given.")
for msg in self.walk_cache("skbuff_head_cache"):
yield msg
for msg in self.walk_cache("skbuff_fclone_cache"):
yield msg
def render_text(self, outfd, data):
for msg in data:
outfd.write("{0:s}\n".format(msg))
|
gpl-2.0
|
ptisserand/ansible
|
test/runner/lib/cloud/cs.py
|
40
|
8903
|
"""CloudStack plugin for integration tests."""
from __future__ import absolute_import, print_function
import json
import os
import re
import time
from lib.cloud import (
CloudProvider,
CloudEnvironment,
)
from lib.util import (
find_executable,
ApplicationError,
display,
SubprocessError,
is_shippable,
)
from lib.http import (
HttpClient,
HttpError,
urlparse,
)
from lib.docker_util import (
docker_run,
docker_rm,
docker_inspect,
docker_pull,
docker_network_inspect,
get_docker_container_id,
)
try:
# noinspection PyPep8Naming
import ConfigParser as configparser
except ImportError:
# noinspection PyUnresolvedReferences
import configparser
class CsCloudProvider(CloudProvider):
"""CloudStack cloud provider plugin. Sets up cloud resources before delegation."""
DOCKER_SIMULATOR_NAME = 'cloudstack-sim'
def __init__(self, args):
"""
:type args: TestConfig
"""
super(CsCloudProvider, self).__init__(args, config_extension='.ini')
# The simulator must be pinned to a specific version to guarantee CI passes with the version used.
self.image = 'ansible/ansible:cloudstack-simulator@sha256:885aedb7f34ce7114eaa383a2541ede93c4f8cb543c05edf90b694def67b1a6a'
self.container_name = ''
self.endpoint = ''
self.host = ''
self.port = 0
def filter(self, targets, exclude):
"""Filter out the cloud tests when the necessary config and resources are not available.
:type targets: tuple[TestTarget]
:type exclude: list[str]
"""
if os.path.isfile(self.config_static_path):
return
docker = find_executable('docker', required=False)
if docker:
return
skip = 'cloud/%s/' % self.platform
skipped = [target.name for target in targets if skip in target.aliases]
if skipped:
exclude.append(skip)
display.warning('Excluding tests marked "%s" which require the "docker" command or config (see "%s"): %s'
% (skip.rstrip('/'), self.config_template_path, ', '.join(skipped)))
def setup(self):
"""Setup the cloud resource before delegation and register a cleanup callback."""
super(CsCloudProvider, self).setup()
if self._use_static_config():
self._setup_static()
else:
self._setup_dynamic()
def get_remote_ssh_options(self):
"""Get any additional options needed when delegating tests to a remote instance via SSH.
:rtype: list[str]
"""
if self.managed:
return ['-R', '8888:localhost:8888']
return []
def get_docker_run_options(self):
"""Get any additional options needed when delegating tests to a docker container.
:rtype: list[str]
"""
if self.managed:
return ['--link', self.DOCKER_SIMULATOR_NAME]
return []
def cleanup(self):
"""Clean up the cloud resource and any temporary configuration files after tests complete."""
if self.container_name:
if is_shippable():
docker_rm(self.args, self.container_name)
elif not self.args.explain:
display.notice('Remember to run `docker rm -f %s` when finished testing.' % self.container_name)
super(CsCloudProvider, self).cleanup()
def _setup_static(self):
"""Configure CloudStack tests for use with static configuration."""
parser = configparser.RawConfigParser()
parser.read(self.config_static_path)
self.endpoint = parser.get('cloudstack', 'endpoint')
parts = urlparse(self.endpoint)
self.host = parts.hostname
if not self.host:
raise ApplicationError('Could not determine host from endpoint: %s' % self.endpoint)
if parts.port:
self.port = parts.port
elif parts.scheme == 'http':
self.port = 80
elif parts.scheme == 'https':
self.port = 443
else:
raise ApplicationError('Could not determine port from endpoint: %s' % self.endpoint)
display.info('Read cs host "%s" and port %d from config: %s' % (self.host, self.port, self.config_static_path), verbosity=1)
self._wait_for_service()
def _setup_dynamic(self):
"""Create a CloudStack simulator using docker."""
config = self._read_config_template()
self.container_name = self.DOCKER_SIMULATOR_NAME
results = docker_inspect(self.args, self.container_name)
if results and not results[0]['State']['Running']:
docker_rm(self.args, self.container_name)
results = []
if results:
display.info('Using the existing CloudStack simulator docker container.', verbosity=1)
else:
display.info('Starting a new CloudStack simulator docker container.', verbosity=1)
docker_pull(self.args, self.image)
docker_run(self.args, self.image, ['-d', '-p', '8888:8888', '--name', self.container_name])
if not self.args.explain:
display.notice('The CloudStack simulator will probably be ready in 5 - 10 minutes.')
container_id = get_docker_container_id()
if container_id:
display.info('Running in docker container: %s' % container_id, verbosity=1)
self.host = self._get_simulator_address()
display.info('Found CloudStack simulator container address: %s' % self.host, verbosity=1)
else:
self.host = 'localhost'
self.port = 8888
self.endpoint = 'http://%s:%d' % (self.host, self.port)
self._wait_for_service()
if self.args.explain:
values = dict(
HOST=self.host,
PORT=str(self.port),
)
else:
credentials = self._get_credentials()
if self.args.docker:
host = self.DOCKER_SIMULATOR_NAME
else:
host = self.host
values = dict(
HOST=host,
PORT=str(self.port),
KEY=credentials['apikey'],
SECRET=credentials['secretkey'],
)
config = self._populate_config_template(config, values)
self._write_config(config)
    def _get_simulator_address(self):
        """Return the simulator container's address on the docker bridge network."""
networks = docker_network_inspect(self.args, 'bridge')
try:
bridge = [network for network in networks if network['Name'] == 'bridge'][0]
containers = bridge['Containers']
container = [containers[container] for container in containers if containers[container]['Name'] == self.DOCKER_SIMULATOR_NAME][0]
return re.sub(r'/[0-9]+$', '', container['IPv4Address'])
        except Exception:
display.error('Failed to process the following docker network inspect output:\n%s' %
json.dumps(networks, indent=4, sort_keys=True))
raise
def _wait_for_service(self):
"""Wait for the CloudStack service endpoint to accept connections."""
if self.args.explain:
return
client = HttpClient(self.args, always=True)
endpoint = self.endpoint
for _ in range(1, 30):
display.info('Waiting for CloudStack service: %s' % endpoint, verbosity=1)
try:
client.get(endpoint)
return
except SubprocessError:
pass
time.sleep(30)
raise ApplicationError('Timeout waiting for CloudStack service.')
def _get_credentials(self):
"""Wait for the CloudStack simulator to return credentials.
:rtype: dict[str, str]
"""
client = HttpClient(self.args, always=True)
endpoint = '%s/admin.json' % self.endpoint
for _ in range(1, 30):
display.info('Waiting for CloudStack credentials: %s' % endpoint, verbosity=1)
response = client.get(endpoint)
if response.status_code == 200:
try:
return response.json()
except HttpError as ex:
display.error(ex)
time.sleep(30)
raise ApplicationError('Timeout waiting for CloudStack credentials.')
class CsCloudEnvironment(CloudEnvironment):
"""CloudStack cloud environment plugin. Updates integration test environment after delegation."""
def configure_environment(self, env, cmd):
"""
:type env: dict[str, str]
:type cmd: list[str]
"""
changes = dict(
CLOUDSTACK_CONFIG=self.config_path,
)
env.update(changes)
cmd.append('-e')
cmd.append('cs_resource_prefix=%s' % self.resource_prefix)
|
gpl-3.0
|
mcgoddard/widgetr
|
env/Lib/site-packages/jinja2/visitor.py
|
1401
|
3316
|
# -*- coding: utf-8 -*-
"""
jinja2.visitor
~~~~~~~~~~~~~~
This module implements a visitor for the nodes.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
from jinja2.nodes import Node
class NodeVisitor(object):
"""Walks the abstract syntax tree and call visitor functions for every
node found. The visitor functions may return values which will be
forwarded by the `visit` method.
Per default the visitor functions for the nodes are ``'visit_'`` +
class name of the node. So a `TryFinally` node visit function would
be `visit_TryFinally`. This behavior can be changed by overriding
the `get_visitor` function. If no visitor function exists for a node
(return value `None`) the `generic_visit` visitor is used instead.
"""
def get_visitor(self, node):
"""Return the visitor function for this node or `None` if no visitor
exists for this node. In that case the generic visit function is
used instead.
"""
method = 'visit_' + node.__class__.__name__
return getattr(self, method, None)
def visit(self, node, *args, **kwargs):
"""Visit a node."""
f = self.get_visitor(node)
if f is not None:
return f(node, *args, **kwargs)
return self.generic_visit(node, *args, **kwargs)
def generic_visit(self, node, *args, **kwargs):
"""Called if no explicit visitor function exists for a node."""
for node in node.iter_child_nodes():
self.visit(node, *args, **kwargs)
class NodeTransformer(NodeVisitor):
"""Walks the abstract syntax tree and allows modifications of nodes.
The `NodeTransformer` will walk the AST and use the return value of the
visitor functions to replace or remove the old node. If the return
value of the visitor function is `None` the node will be removed
from the previous location otherwise it's replaced with the return
value. The return value may be the original node in which case no
replacement takes place.
"""
def generic_visit(self, node, *args, **kwargs):
for field, old_value in node.iter_fields():
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, Node):
value = self.visit(value, *args, **kwargs)
if value is None:
continue
elif not isinstance(value, Node):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, Node):
new_node = self.visit(old_value, *args, **kwargs)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node
def visit_list(self, node, *args, **kwargs):
"""As transformers may return lists in some places this method
can be used to enforce a list as return value.
"""
rv = self.visit(node, *args, **kwargs)
if not isinstance(rv, list):
rv = [rv]
return rv
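# A minimal illustrative sketch (not part of the original module): counting
# `Name` nodes in a parsed template with a NodeVisitor subclass. Assumes a
# plain jinja2 Environment is available.
#
#   from jinja2 import Environment
#
#   class NameCounter(NodeVisitor):
#       def __init__(self):
#           self.count = 0
#
#       def visit_Name(self, node, *args, **kwargs):
#           self.count += 1
#           self.generic_visit(node, *args, **kwargs)
#
#   ast = Environment().parse('{{ user.name }} and {{ greeting }}')
#   counter = NameCounter()
#   counter.visit(ast)
#   # counter.count now holds the number of Name nodes found in the tree.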
|
mit
|
prajesh-ananthan/Tools
|
income_worksheet_creator/generate_annual_income_worksheet.py
|
1
|
3139
|
import calendar
import json
import os
from datetime import *
import xlsxwriter
"""generate_annual_income_worksheet.py: Automates my personal Income Statement Excel workbook"""
__author__ = "Prajesh Ananthan 2016"
def create_excel_sheet(year):
workbook = None
table_position = 'B3:D13'
merge_range = 'B2:D2'
column_header_title = 'MONTHLY CASHFLOW'
output_path = 'output/{}'
filename = getfilename()
success = False
[header_title, item_list] = get_data()
options = {'data': item_list, 'columns': header_title}
try:
workbook = xlsxwriter.Workbook(output_path.format(filename))
merge_format = workbook.add_format({
'bold': 2,
'border': 2,
'align': 'center',
'valign': 'vcenter',
'fg_color': 'yellow'})
monthlist = getListofMonths(year)
for month in monthlist:
worksheet = workbook.add_worksheet(name=month)
worksheet.set_column(1, 3, 20)
worksheet.merge_range(merge_range, column_header_title, merge_format)
worksheet.add_table(table_position, options)
success = True
print("INFO: {} created!".format(workbook.filename))
    except Exception as e:
        print(e, "ERROR: Unable to write to {}!".format(filename))
    finally:
        # workbook is None if Workbook() construction itself failed, so guard
        # the close() call and the filename access below.
        if workbook is not None:
            workbook.close()
    return [success, workbook.filename if workbook is not None else None]
def launchFileInWindows(unixpath):
win32path = os.path.normcase(unixpath)
try:
if os.path.exists(win32path):
print("INFO: Launching {}...".format(win32path))
os.system(win32path)
except Exception as e:
print(e)
def getListofMonths(year):
    date1 = datetime.strptime("{}-01-01".format(year), "%Y-%m-%d")
    # Stop at Jan 1 of the following year so exactly twelve months are generated.
    date2 = datetime.strptime("{}-01-01".format(year + 1), "%Y-%m-%d")
months_str = calendar.month_name
months = []
while date1 < date2:
month = date1.month
year = date1.year
month_str = months_str[month][0:3]
months.append("{0}-{1}".format(month_str, str(year)[-2:]))
next_month = month + 1 if month != 12 else 1
next_year = year + 1 if next_month == 1 else year
date1 = date1.replace(month=next_month, year=next_year)
return months
def getfilename():
prefix = "ANNUAL_CASHFLOW"
year = "2017"
extension = ".xlsx"
return "{}--{}{}".format(prefix, year, extension)
def load_json():
path = 'resources/data.json'
content = json.load(open(path))
return content
def get_data():
title_list = [
{'header': 'ITEM'},
{'header': 'COST'},
{'header': 'STATUS'}
]
content = load_json()
item_list = []
for data_type in content:
for item_name in content[data_type]:
cost = content[data_type][item_name]
current_item_list = [item_name, cost]
item_list.append(current_item_list)
return [title_list, item_list]
def main():
year = 2017
[success, filename] = create_excel_sheet(year)
    if success:
launchFileInWindows(filename)
if __name__ == '__main__':
main()
|
mit
|
w1ll1am23/home-assistant
|
homeassistant/components/script/__init__.py
|
3
|
12329
|
"""Support for scripts."""
from __future__ import annotations
import asyncio
import logging
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_MODE,
ATTR_NAME,
CONF_ALIAS,
CONF_DEFAULT,
CONF_DESCRIPTION,
CONF_ICON,
CONF_MODE,
CONF_NAME,
CONF_SELECTOR,
CONF_SEQUENCE,
CONF_VARIABLES,
SERVICE_RELOAD,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ON,
)
from homeassistant.core import HomeAssistant, callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import make_entity_service_schema
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.script import (
ATTR_CUR,
ATTR_MAX,
CONF_MAX,
CONF_MAX_EXCEEDED,
SCRIPT_MODE_SINGLE,
Script,
make_script_schema,
)
from homeassistant.helpers.selector import validate_selector
from homeassistant.helpers.service import async_set_service_schema
from homeassistant.helpers.trace import trace_get, trace_path
from homeassistant.loader import bind_hass
from .trace import trace_script
_LOGGER = logging.getLogger(__name__)
DOMAIN = "script"
ATTR_LAST_ACTION = "last_action"
ATTR_LAST_TRIGGERED = "last_triggered"
ATTR_VARIABLES = "variables"
CONF_ADVANCED = "advanced"
CONF_EXAMPLE = "example"
CONF_FIELDS = "fields"
CONF_REQUIRED = "required"
ENTITY_ID_FORMAT = DOMAIN + ".{}"
EVENT_SCRIPT_STARTED = "script_started"
SCRIPT_ENTRY_SCHEMA = make_script_schema(
{
vol.Optional(CONF_ALIAS): cv.string,
vol.Optional(CONF_ICON): cv.icon,
vol.Required(CONF_SEQUENCE): cv.SCRIPT_SCHEMA,
vol.Optional(CONF_DESCRIPTION, default=""): cv.string,
vol.Optional(CONF_VARIABLES): cv.SCRIPT_VARIABLES_SCHEMA,
vol.Optional(CONF_FIELDS, default={}): {
cv.string: {
vol.Optional(CONF_ADVANCED, default=False): cv.boolean,
vol.Optional(CONF_DEFAULT): cv.match_all,
vol.Optional(CONF_DESCRIPTION): cv.string,
vol.Optional(CONF_EXAMPLE): cv.string,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_REQUIRED, default=False): cv.boolean,
vol.Optional(CONF_SELECTOR): validate_selector,
}
},
},
SCRIPT_MODE_SINGLE,
)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: cv.schema_with_slug_keys(SCRIPT_ENTRY_SCHEMA)}, extra=vol.ALLOW_EXTRA
)
SCRIPT_SERVICE_SCHEMA = vol.Schema(dict)
SCRIPT_TURN_ONOFF_SCHEMA = make_entity_service_schema(
{vol.Optional(ATTR_VARIABLES): {str: cv.match_all}}
)
RELOAD_SERVICE_SCHEMA = vol.Schema({})
@bind_hass
def is_on(hass, entity_id):
"""Return if the script is on based on the statemachine."""
return hass.states.is_state(entity_id, STATE_ON)
@callback
def scripts_with_entity(hass: HomeAssistant, entity_id: str) -> list[str]:
"""Return all scripts that reference the entity."""
if DOMAIN not in hass.data:
return []
component = hass.data[DOMAIN]
return [
script_entity.entity_id
for script_entity in component.entities
if entity_id in script_entity.script.referenced_entities
]
@callback
def entities_in_script(hass: HomeAssistant, entity_id: str) -> list[str]:
"""Return all entities in script."""
if DOMAIN not in hass.data:
return []
component = hass.data[DOMAIN]
script_entity = component.get_entity(entity_id)
if script_entity is None:
return []
return list(script_entity.script.referenced_entities)
@callback
def scripts_with_device(hass: HomeAssistant, device_id: str) -> list[str]:
"""Return all scripts that reference the device."""
if DOMAIN not in hass.data:
return []
component = hass.data[DOMAIN]
return [
script_entity.entity_id
for script_entity in component.entities
if device_id in script_entity.script.referenced_devices
]
@callback
def devices_in_script(hass: HomeAssistant, entity_id: str) -> list[str]:
"""Return all devices in script."""
if DOMAIN not in hass.data:
return []
component = hass.data[DOMAIN]
script_entity = component.get_entity(entity_id)
if script_entity is None:
return []
return list(script_entity.script.referenced_devices)
@callback
def scripts_with_area(hass: HomeAssistant, area_id: str) -> list[str]:
"""Return all scripts that reference the area."""
if DOMAIN not in hass.data:
return []
component = hass.data[DOMAIN]
return [
script_entity.entity_id
for script_entity in component.entities
if area_id in script_entity.script.referenced_areas
]
@callback
def areas_in_script(hass: HomeAssistant, entity_id: str) -> list[str]:
"""Return all areas in a script."""
if DOMAIN not in hass.data:
return []
component = hass.data[DOMAIN]
script_entity = component.get_entity(entity_id)
if script_entity is None:
return []
return list(script_entity.script.referenced_areas)
async def async_setup(hass, config):
"""Load the scripts from the configuration."""
hass.data[DOMAIN] = component = EntityComponent(_LOGGER, DOMAIN, hass)
await _async_process_config(hass, config, component)
async def reload_service(service):
"""Call a service to reload scripts."""
conf = await component.async_prepare_reload()
if conf is None:
return
await _async_process_config(hass, conf, component)
async def turn_on_service(service):
"""Call a service to turn script on."""
variables = service.data.get(ATTR_VARIABLES)
for script_entity in await component.async_extract_from_service(service):
await script_entity.async_turn_on(
variables=variables, context=service.context, wait=False
)
async def turn_off_service(service):
"""Cancel a script."""
        # Stopping scripts is safe to do in parallel.
script_entities = await component.async_extract_from_service(service)
if not script_entities:
return
await asyncio.wait(
[
asyncio.create_task(script_entity.async_turn_off())
for script_entity in script_entities
]
)
async def toggle_service(service):
"""Toggle a script."""
for script_entity in await component.async_extract_from_service(service):
await script_entity.async_toggle(context=service.context, wait=False)
hass.services.async_register(
DOMAIN, SERVICE_RELOAD, reload_service, schema=RELOAD_SERVICE_SCHEMA
)
hass.services.async_register(
DOMAIN, SERVICE_TURN_ON, turn_on_service, schema=SCRIPT_TURN_ONOFF_SCHEMA
)
hass.services.async_register(
DOMAIN, SERVICE_TURN_OFF, turn_off_service, schema=SCRIPT_TURN_ONOFF_SCHEMA
)
hass.services.async_register(
DOMAIN, SERVICE_TOGGLE, toggle_service, schema=SCRIPT_TURN_ONOFF_SCHEMA
)
return True
async def _async_process_config(hass, config, component):
"""Process script configuration."""
async def service_handler(service):
"""Execute a service call to script.<script name>."""
entity_id = ENTITY_ID_FORMAT.format(service.service)
script_entity = component.get_entity(entity_id)
await script_entity.async_turn_on(
variables=service.data, context=service.context
)
script_entities = [
ScriptEntity(hass, object_id, cfg, cfg.raw_config)
for object_id, cfg in config.get(DOMAIN, {}).items()
]
await component.async_add_entities(script_entities)
# Register services for all entities that were created successfully.
for script_entity in script_entities:
object_id = script_entity.object_id
if component.get_entity(script_entity.entity_id) is None:
_LOGGER.error("Couldn't load script %s", object_id)
continue
cfg = config[DOMAIN][object_id]
hass.services.async_register(
DOMAIN, object_id, service_handler, schema=SCRIPT_SERVICE_SCHEMA
)
# Register the service description
service_desc = {
CONF_NAME: script_entity.name,
CONF_DESCRIPTION: cfg[CONF_DESCRIPTION],
CONF_FIELDS: cfg[CONF_FIELDS],
}
async_set_service_schema(hass, DOMAIN, object_id, service_desc)
class ScriptEntity(ToggleEntity):
"""Representation of a script entity."""
icon = None
def __init__(self, hass, object_id, cfg, raw_config):
"""Initialize the script."""
self.object_id = object_id
self.icon = cfg.get(CONF_ICON)
self.entity_id = ENTITY_ID_FORMAT.format(object_id)
self.script = Script(
hass,
cfg[CONF_SEQUENCE],
cfg.get(CONF_ALIAS, object_id),
DOMAIN,
running_description="script sequence",
change_listener=self.async_change_listener,
script_mode=cfg[CONF_MODE],
max_runs=cfg[CONF_MAX],
max_exceeded=cfg[CONF_MAX_EXCEEDED],
logger=logging.getLogger(f"{__name__}.{object_id}"),
variables=cfg.get(CONF_VARIABLES),
)
self._changed = asyncio.Event()
self._raw_config = raw_config
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return the name of the entity."""
return self.script.name
@property
def extra_state_attributes(self):
"""Return the state attributes."""
attrs = {
ATTR_LAST_TRIGGERED: self.script.last_triggered,
ATTR_MODE: self.script.script_mode,
ATTR_CUR: self.script.runs,
}
if self.script.supports_max:
attrs[ATTR_MAX] = self.script.max_runs
if self.script.last_action:
attrs[ATTR_LAST_ACTION] = self.script.last_action
return attrs
@property
def is_on(self):
"""Return true if script is on."""
return self.script.is_running
@callback
def async_change_listener(self):
"""Update state."""
self.async_write_ha_state()
self._changed.set()
async def async_turn_on(self, **kwargs):
"""Run the script.
Depending on the script's run mode, this may do nothing, restart the script or
fire an additional parallel run.
"""
variables = kwargs.get("variables")
context = kwargs.get("context")
wait = kwargs.get("wait", True)
self.async_set_context(context)
self.hass.bus.async_fire(
EVENT_SCRIPT_STARTED,
{ATTR_NAME: self.script.name, ATTR_ENTITY_ID: self.entity_id},
context=context,
)
coro = self._async_run(variables, context)
if wait:
await coro
return
# Caller does not want to wait for called script to finish so let script run in
# separate Task. However, wait for first state change so we can guarantee that
# it is written to the State Machine before we return.
self._changed.clear()
self.hass.async_create_task(coro)
await self._changed.wait()
async def _async_run(self, variables, context):
with trace_script(
self.hass, self.object_id, self._raw_config, context
) as script_trace:
# Prepare tracing the execution of the script's sequence
script_trace.set_trace(trace_get())
with trace_path("sequence"):
return await self.script.async_run(variables, context)
async def async_turn_off(self, **kwargs):
"""Stop running the script.
If multiple runs are in progress, all will be stopped.
"""
await self.script.async_stop()
async def async_will_remove_from_hass(self):
"""Stop script and remove service when it will be removed from Home Assistant."""
await self.script.async_stop()
# remove service
self.hass.services.async_remove(DOMAIN, self.object_id)
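# Illustrative configuration sketch (YAML, matching SCRIPT_ENTRY_SCHEMA above;
# the entity ids and service names are hypothetical):
#
#   script:
#     water_plants:
#       alias: "Water the plants"
#       mode: single
#       sequence:
#         - service: switch.turn_on
#           entity_id: switch.garden_pump
#         - delay: "00:05:00"
#         - service: switch.turn_off
#           entity_id: switch.garden_pump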
|
apache-2.0
|
jumpstarter-io/neutron
|
neutron/openstack/common/fixture/lockutils.py
|
34
|
1890
|
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
from neutron.openstack.common import lockutils
class LockFixture(fixtures.Fixture):
"""External locking fixture.
This fixture is basically an alternative to the synchronized decorator with
the external flag so that tearDowns and addCleanups will be included in
the lock context for locking between tests. The fixture is recommended to
be the first line in a test method, like so::
def test_method(self):
            self.useFixture(LockFixture('lock_name'))
...
or the first line in setUp if all the test methods in the class are
required to be serialized. Something like::
        class TestCase(testtools.TestCase):
def setUp(self):
                self.useFixture(LockFixture('lock_name'))
super(TestCase, self).setUp()
...
    This is because addCleanups are put on a LIFO queue that gets run after the
    test method exits (either by completing or raising an exception).
"""
def __init__(self, name, lock_file_prefix=None):
self.mgr = lockutils.lock(name, lock_file_prefix, True)
def setUp(self):
super(LockFixture, self).setUp()
self.addCleanup(self.mgr.__exit__, None, None, None)
self.mgr.__enter__()
|
apache-2.0
|
ogonzalez29/blog
|
wp-content/plugins/wp-leads-builder-any-crm/lib/vtwsclib/third-party/python/json.py
|
49
|
10236
|
import string
import types
## json.py implements a JSON (http://json.org) reader and writer.
## Copyright (C) 2005 Patrick D. Logan
## Contact mailto:patrickdlogan@stardecisions.com
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public
## License as published by the Free Software Foundation; either
## version 2.1 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public
## License along with this library; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
class _StringGenerator(object):
def __init__(self, string):
self.string = string
self.index = -1
def peek(self):
i = self.index + 1
if i < len(self.string):
return self.string[i]
else:
return None
def next(self):
self.index += 1
if self.index < len(self.string):
return self.string[self.index]
else:
raise StopIteration
def all(self):
return self.string
class WriteException(Exception):
pass
class ReadException(Exception):
pass
class JsonReader(object):
hex_digits = {'A': 10,'B': 11,'C': 12,'D': 13,'E': 14,'F':15}
escapes = {'t':'\t','n':'\n','f':'\f','r':'\r','b':'\b'}
def read(self, s):
self._generator = _StringGenerator(s)
result = self._read()
return result
def _read(self):
self._eatWhitespace()
peek = self._peek()
if peek is None:
raise ReadException, "Nothing to read: '%s'" % self._generator.all()
if peek == '{':
return self._readObject()
elif peek == '[':
return self._readArray()
elif peek == '"':
return self._readString()
elif peek == '-' or peek.isdigit():
return self._readNumber()
elif peek == 't':
return self._readTrue()
elif peek == 'f':
return self._readFalse()
elif peek == 'n':
return self._readNull()
elif peek == '/':
self._readComment()
return self._read()
else:
raise ReadException, "Input is not valid JSON: '%s'" % self._generator.all()
def _readTrue(self):
self._assertNext('t', "true")
self._assertNext('r', "true")
self._assertNext('u', "true")
self._assertNext('e', "true")
return True
def _readFalse(self):
self._assertNext('f', "false")
self._assertNext('a', "false")
self._assertNext('l', "false")
self._assertNext('s', "false")
self._assertNext('e', "false")
return False
def _readNull(self):
self._assertNext('n', "null")
self._assertNext('u', "null")
self._assertNext('l', "null")
self._assertNext('l', "null")
return None
def _assertNext(self, ch, target):
if self._next() != ch:
raise ReadException, "Trying to read %s: '%s'" % (target, self._generator.all())
def _readNumber(self):
isfloat = False
result = self._next()
peek = self._peek()
while peek is not None and (peek.isdigit() or peek == "."):
isfloat = isfloat or peek == "."
result = result + self._next()
peek = self._peek()
try:
if isfloat:
return float(result)
else:
return int(result)
except ValueError:
raise ReadException, "Not a valid JSON number: '%s'" % result
def _readString(self):
result = ""
assert self._next() == '"'
try:
while self._peek() != '"':
ch = self._next()
if ch == "\\":
ch = self._next()
if ch in 'brnft':
ch = self.escapes[ch]
elif ch == "u":
ch4096 = self._next()
ch256 = self._next()
ch16 = self._next()
ch1 = self._next()
n = 4096 * self._hexDigitToInt(ch4096)
n += 256 * self._hexDigitToInt(ch256)
n += 16 * self._hexDigitToInt(ch16)
n += self._hexDigitToInt(ch1)
ch = unichr(n)
elif ch not in '"/\\':
raise ReadException, "Not a valid escaped JSON character: '%s' in %s" % (ch, self._generator.all())
result = result + ch
except StopIteration:
raise ReadException, "Not a valid JSON string: '%s'" % self._generator.all()
assert self._next() == '"'
return result
def _hexDigitToInt(self, ch):
try:
result = self.hex_digits[ch.upper()]
except KeyError:
try:
result = int(ch)
except ValueError:
raise ReadException, "The character %s is not a hex digit." % ch
return result
def _readComment(self):
assert self._next() == "/"
second = self._next()
if second == "/":
self._readDoubleSolidusComment()
elif second == '*':
self._readCStyleComment()
else:
raise ReadException, "Not a valid JSON comment: %s" % self._generator.all()
def _readCStyleComment(self):
try:
done = False
while not done:
ch = self._next()
done = (ch == "*" and self._peek() == "/")
if not done and ch == "/" and self._peek() == "*":
raise ReadException, "Not a valid JSON comment: %s, '/*' cannot be embedded in the comment." % self._generator.all()
self._next()
except StopIteration:
raise ReadException, "Not a valid JSON comment: %s, expected */" % self._generator.all()
def _readDoubleSolidusComment(self):
try:
ch = self._next()
while ch != "\r" and ch != "\n":
ch = self._next()
except StopIteration:
pass
def _readArray(self):
result = []
assert self._next() == '['
done = self._peek() == ']'
while not done:
item = self._read()
result.append(item)
self._eatWhitespace()
done = self._peek() == ']'
if not done:
ch = self._next()
if ch != ",":
raise ReadException, "Not a valid JSON array: '%s' due to: '%s'" % (self._generator.all(), ch)
assert ']' == self._next()
return result
def _readObject(self):
result = {}
assert self._next() == '{'
done = self._peek() == '}'
while not done:
key = self._read()
if type(key) is not types.StringType:
raise ReadException, "Not a valid JSON object key (should be a string): %s" % key
self._eatWhitespace()
ch = self._next()
if ch != ":":
raise ReadException, "Not a valid JSON object: '%s' due to: '%s'" % (self._generator.all(), ch)
self._eatWhitespace()
val = self._read()
result[key] = val
self._eatWhitespace()
done = self._peek() == '}'
if not done:
ch = self._next()
if ch != ",":
raise ReadException, "Not a valid JSON array: '%s' due to: '%s'" % (self._generator.all(), ch)
assert self._next() == "}"
return result
def _eatWhitespace(self):
p = self._peek()
while p is not None and p in string.whitespace or p == '/':
if p == '/':
self._readComment()
else:
self._next()
p = self._peek()
def _peek(self):
return self._generator.peek()
def _next(self):
return self._generator.next()
class JsonWriter(object):
def _append(self, s):
self._results.append(s)
def write(self, obj, escaped_forward_slash=False):
self._escaped_forward_slash = escaped_forward_slash
self._results = []
self._write(obj)
return "".join(self._results)
def _write(self, obj):
ty = type(obj)
if ty is types.DictType:
n = len(obj)
self._append("{")
for k, v in obj.items():
self._write(k)
self._append(":")
self._write(v)
n = n - 1
if n > 0:
self._append(",")
self._append("}")
elif ty is types.ListType or ty is types.TupleType:
n = len(obj)
self._append("[")
for item in obj:
self._write(item)
n = n - 1
if n > 0:
self._append(",")
self._append("]")
elif ty is types.StringType or ty is types.UnicodeType:
self._append('"')
obj = obj.replace('\\', r'\\')
if self._escaped_forward_slash:
obj = obj.replace('/', r'\/')
obj = obj.replace('"', r'\"')
obj = obj.replace('\b', r'\b')
obj = obj.replace('\f', r'\f')
obj = obj.replace('\n', r'\n')
obj = obj.replace('\r', r'\r')
obj = obj.replace('\t', r'\t')
self._append(obj)
self._append('"')
elif ty is types.IntType or ty is types.LongType:
self._append(str(obj))
elif ty is types.FloatType:
self._append("%f" % obj)
elif obj is True:
self._append("true")
elif obj is False:
self._append("false")
elif obj is None:
self._append("null")
else:
raise WriteException, "Cannot write in JSON: %s" % repr(obj)
def write(obj, escaped_forward_slash=False):
return JsonWriter().write(obj, escaped_forward_slash)
def read(s):
return JsonReader().read(s)
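# Illustrative usage sketch (Python 2, matching the syntax used above):
#
#   >>> read('{"a": [1, 2.5, true, null]}')
#   {'a': [1, 2.5, True, None]}
#   >>> write({'a': [1, 2]})
#   '{"a":[1,2]}'
#
# Unlike stricter parsers, this reader also accepts // and /* ... */ comments
# embedded in the input (see _readComment above).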
|
gpl-2.0
|
davidhrbac/spacewalk
|
client/tools/rhncfg/actions/rhn-actions-control.py
|
7
|
4440
|
#!/usr/bin/python
#
# Copyright (c) 2008--2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import sys
import ModeControllerCreator
from optparse import Option, OptionParser
def main():
optionsTable = [
Option('--enable-deploy', action='store_true', help='Allow rhncfg-client to deploy files.', default=0),
Option('--enable-diff', action='store_true', help='Allow rhncfg-client to diff files.', default=0),
Option('--enable-upload', action='store_true', help='Allow rhncfg-client to upload files.', default=0),
Option('--enable-mtime-upload', action='store_true', help='Allow rhncfg-client to upload mtime.', default=0),
Option('--enable-run', action='store_true', help='Allow rhncfg-client the ability to execute remote scripts.', default=0),
Option('--enable-all', action='store_true', help='Allow rhncfg-client to do everything.', default=0),
Option('--disable-deploy', action='store_true', help='Disable deployment.', default=0),
Option('--disable-diff', action='store_true', help='Disable diff.', default=0),
Option('--disable-upload', action='store_true', help='Disable upload.', default=0),
Option('--disable-mtime-upload',action='store_true', help='Disable mtime upload.', default=0),
Option('--disable-run', action='store_true', help='Disable remote script execution.', default=0),
Option('--disable-all', action='store_true', help='Disable all options.', default=0),
Option('-f', '--force', action='store_true', help='Force the operation without confirmation', default=0),
Option('--report', action='store_true', help='Report the status of the mode settings (enabled or disabled)', default=0),
]
parser = OptionParser(option_list=optionsTable)
(options, args) = parser.parse_args()
creator = ModeControllerCreator.get_controller_creator()
controller = creator.create_controller()
controller.set_force(options.force)
runcreator = ModeControllerCreator.get_run_controller_creator()
runcontroller = runcreator.create_controller()
runcontroller.set_force(options.force)
if options.enable_deploy:
controller.on('deploy')
if options.enable_diff:
controller.on('diff')
if options.enable_upload:
controller.on('upload')
if options.enable_mtime_upload:
controller.on('mtime_upload')
if options.enable_all:
controller.all_on()
runcontroller.all_on()
if options.enable_run:
runcontroller.on('run')
runcontroller.off('all')
if options.disable_deploy:
controller.off('deploy')
if options.disable_diff:
controller.off('diff')
if options.disable_upload:
controller.off('upload')
if options.disable_mtime_upload:
controller.off('mtime_upload')
if options.disable_all:
controller.all_off()
runcontroller.all_off()
if options.disable_run:
runcontroller.off('run')
runcontroller.off('all')
if options.report:
mode_list = ['deploy', 'diff', 'upload', 'mtime_upload']
for m in mode_list:
rstring = "%s is %s"
status = "disabled"
if controller.is_on(m):
status = "enabled"
print rstring % (m, status)
status = "disabled"
if runcontroller.is_on('all'):
runcontroller.off('all')
runcontroller.on('run')
if runcontroller.is_on('run'):
status = "enabled"
print rstring % ('run', status)
if __name__ == "__main__":
try:
sys.exit(main() or 0)
except KeyboardInterrupt:
sys.stderr.write("user interrupted\n")
sys.exit(0)
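# Illustrative invocations (flags as defined in optionsTable above):
#
#   rhn-actions-control --enable-all           # allow every rhncfg-client mode
#   rhn-actions-control --disable-run --force  # forbid remote script execution
#   rhn-actions-control --report               # print enabled/disabled status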
|
gpl-2.0
|
akash1808/nova_test_latest
|
nova/cmd/compute.py
|
31
|
2399
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Starter script for Nova Compute."""
import sys
import traceback
from oslo_config import cfg
from oslo_log import log as logging
from oslo_reports import guru_meditation_report as gmr
from nova.conductor import rpcapi as conductor_rpcapi
from nova import config
import nova.db.api
from nova import exception
from nova.i18n import _LE
from nova import objects
from nova.objects import base as objects_base
from nova import service
from nova import utils
from nova import version
CONF = cfg.CONF
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
CONF.import_opt('use_local', 'nova.conductor.api', group='conductor')
def block_db_access():
class NoDB(object):
def __getattr__(self, attr):
return self
def __call__(self, *args, **kwargs):
stacktrace = "".join(traceback.format_stack())
LOG = logging.getLogger('nova.compute')
LOG.error(_LE('No db access allowed in nova-compute: %s'),
stacktrace)
raise exception.DBNotAllowed('nova-compute')
nova.db.api.IMPL = NoDB()
def main():
config.parse_args(sys.argv)
logging.setup(CONF, 'nova')
utils.monkey_patch()
objects.register_all()
gmr.TextGuruMeditation.setup_autorun(version)
if not CONF.conductor.use_local:
block_db_access()
objects_base.NovaObject.indirection_api = \
conductor_rpcapi.ConductorAPI()
server = service.Service.create(binary='nova-compute',
topic=CONF.compute_topic,
db_allowed=CONF.conductor.use_local)
service.serve(server)
service.wait()
|
apache-2.0
|
coder-han/hugula
|
Client/tools/site-packages/PIL/BdfFontFile.py
|
13
|
3337
|
#
# The Python Imaging Library
# $Id: BdfFontFile.py 2134 2004-10-06 08:55:20Z fredrik $
#
# bitmap distribution font (bdf) file parser
#
# history:
# 1996-05-16 fl created (as bdf2pil)
# 1997-08-25 fl converted to FontFile driver
# 2001-05-25 fl removed bogus __init__ call
# 2002-11-20 fl robustification (from Kevin Cazabon, Dmitry Vasiliev)
# 2003-04-22 fl more robustification (from Graham Dumpleton)
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1997-2003 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
import Image
import FontFile
import string
# --------------------------------------------------------------------
# parse X Bitmap Distribution Format (BDF)
# --------------------------------------------------------------------
bdf_slant = {
"R": "Roman",
"I": "Italic",
"O": "Oblique",
"RI": "Reverse Italic",
"RO": "Reverse Oblique",
"OT": "Other"
}
bdf_spacing = {
"P": "Proportional",
"M": "Monospaced",
"C": "Cell"
}
def bdf_char(f):
# skip to STARTCHAR
while 1:
s = f.readline()
if not s:
return None
if s[:9] == "STARTCHAR":
break
id = string.strip(s[9:])
# load symbol properties
props = {}
while 1:
s = f.readline()
if not s or s[:6] == "BITMAP":
break
i = string.find(s, " ")
props[s[:i]] = s[i+1:-1]
# load bitmap
bitmap = []
while 1:
s = f.readline()
if not s or s[:7] == "ENDCHAR":
break
bitmap.append(s[:-1])
bitmap = string.join(bitmap, "")
[x, y, l, d] = map(int, string.split(props["BBX"]))
[dx, dy] = map(int, string.split(props["DWIDTH"]))
bbox = (dx, dy), (l, -d-y, x+l, -d), (0, 0, x, y)
try:
im = Image.fromstring("1", (x, y), bitmap, "hex", "1")
except ValueError:
# deal with zero-width characters
im = Image.new("1", (x, y))
return id, int(props["ENCODING"]), bbox, im
##
# Font file plugin for the X11 BDF format.
class BdfFontFile(FontFile.FontFile):
def __init__(self, fp):
FontFile.FontFile.__init__(self)
s = fp.readline()
if s[:13] != "STARTFONT 2.1":
raise SyntaxError, "not a valid BDF file"
props = {}
comments = []
while 1:
s = fp.readline()
if not s or s[:13] == "ENDPROPERTIES":
break
i = string.find(s, " ")
props[s[:i]] = s[i+1:-1]
if s[:i] in ["COMMENT", "COPYRIGHT"]:
if string.find(s, "LogicalFontDescription") < 0:
comments.append(s[i+1:-1])
font = string.split(props["FONT"], "-")
font[4] = bdf_slant[string.upper(font[4])]
font[11] = bdf_spacing[string.upper(font[11])]
ascent = int(props["FONT_ASCENT"])
descent = int(props["FONT_DESCENT"])
fontname = string.join(font[1:], ";")
# print "#", fontname
# for i in comments:
# print "#", i
font = []
while 1:
c = bdf_char(fp)
if not c:
break
id, ch, (xy, dst, src), im = c
if ch >= 0 and ch < len(self.glyph):
self.glyph[ch] = xy, dst, src, im
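##
# Illustrative usage sketch (hypothetical font file name): converting a BDF
# font into PIL's raster font format via the inherited FontFile.save().
#
#   fp = open("courB12.bdf")
#   font = BdfFontFile(fp)
#   font.save("courB12")     # writes the .pil metrics file plus a bitmap file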
|
mit
|
petecummings/django
|
tests/flatpages_tests/test_views.py
|
290
|
6953
|
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.flatpages.models import FlatPage
from django.contrib.sites.models import Site
from django.test import TestCase, modify_settings, override_settings
from .settings import FLATPAGES_TEMPLATES
class TestDataMixin(object):
@classmethod
def setUpTestData(cls):
# don't use the manager because we want to ensure the site exists
# with pk=1, regardless of whether or not it already exists.
cls.site1 = Site(pk=1, domain='example.com', name='example.com')
cls.site1.save()
cls.fp1 = FlatPage.objects.create(
url='/flatpage/', title='A Flatpage', content="Isn't it flat!",
enable_comments=False, template_name='', registration_required=False
)
cls.fp2 = FlatPage.objects.create(
url='/location/flatpage/', title='A Nested Flatpage', content="Isn't it flat and deep!",
enable_comments=False, template_name='', registration_required=False
)
cls.fp3 = FlatPage.objects.create(
url='/sekrit/', title='Sekrit Flatpage', content="Isn't it sekrit!",
enable_comments=False, template_name='', registration_required=True
)
cls.fp4 = FlatPage.objects.create(
url='/location/sekrit/', title='Sekrit Nested Flatpage', content="Isn't it sekrit and deep!",
enable_comments=False, template_name='', registration_required=True
)
cls.fp1.sites.add(cls.site1)
cls.fp2.sites.add(cls.site1)
cls.fp3.sites.add(cls.site1)
cls.fp4.sites.add(cls.site1)
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.flatpages'})
@override_settings(
LOGIN_URL='/accounts/login/',
MIDDLEWARE_CLASSES=[
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# no 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware'
],
ROOT_URLCONF='flatpages_tests.urls',
TEMPLATES=FLATPAGES_TEMPLATES,
SITE_ID=1,
)
class FlatpageViewTests(TestDataMixin, TestCase):
def test_view_flatpage(self):
"A flatpage can be served through a view"
response = self.client.get('/flatpage_root/flatpage/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Isn't it flat!</p>")
def test_view_non_existent_flatpage(self):
"A non-existent flatpage raises 404 when served through a view"
response = self.client.get('/flatpage_root/no_such_flatpage/')
self.assertEqual(response.status_code, 404)
def test_view_authenticated_flatpage(self):
"A flatpage served through a view can require authentication"
response = self.client.get('/flatpage_root/sekrit/')
self.assertRedirects(response, '/accounts/login/?next=/flatpage_root/sekrit/')
User.objects.create_user('testuser', 'test@example.com', 's3krit')
self.client.login(username='testuser', password='s3krit')
response = self.client.get('/flatpage_root/sekrit/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Isn't it sekrit!</p>")
def test_fallback_flatpage(self):
"A fallback flatpage won't be served if the middleware is disabled"
response = self.client.get('/flatpage/')
self.assertEqual(response.status_code, 404)
def test_fallback_non_existent_flatpage(self):
"A non-existent flatpage won't be served if the fallback middleware is disabled"
response = self.client.get('/no_such_flatpage/')
self.assertEqual(response.status_code, 404)
def test_view_flatpage_special_chars(self):
"A flatpage with special chars in the URL can be served through a view"
fp = FlatPage.objects.create(
url="/some.very_special~chars-here/",
title="A very special page",
content="Isn't it special!",
enable_comments=False,
registration_required=False,
)
fp.sites.add(settings.SITE_ID)
response = self.client.get('/flatpage_root/some.very_special~chars-here/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Isn't it special!</p>")
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.flatpages'})
@override_settings(
APPEND_SLASH=True,
LOGIN_URL='/accounts/login/',
MIDDLEWARE_CLASSES=[
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# no 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware'
],
ROOT_URLCONF='flatpages_tests.urls',
TEMPLATES=FLATPAGES_TEMPLATES,
SITE_ID=1,
)
class FlatpageViewAppendSlashTests(TestDataMixin, TestCase):
def test_redirect_view_flatpage(self):
"A flatpage can be served through a view and should add a slash"
response = self.client.get('/flatpage_root/flatpage')
self.assertRedirects(response, '/flatpage_root/flatpage/', status_code=301)
def test_redirect_view_non_existent_flatpage(self):
"A non-existent flatpage raises 404 when served through a view and should not add a slash"
response = self.client.get('/flatpage_root/no_such_flatpage')
self.assertEqual(response.status_code, 404)
def test_redirect_fallback_flatpage(self):
"A fallback flatpage won't be served if the middleware is disabled and should not add a slash"
response = self.client.get('/flatpage')
self.assertEqual(response.status_code, 404)
def test_redirect_fallback_non_existent_flatpage(self):
"A non-existent flatpage won't be served if the fallback middleware is disabled and should not add a slash"
response = self.client.get('/no_such_flatpage')
self.assertEqual(response.status_code, 404)
def test_redirect_view_flatpage_special_chars(self):
"A flatpage with special chars in the URL can be served through a view and should add a slash"
fp = FlatPage.objects.create(
url="/some.very_special~chars-here/",
title="A very special page",
content="Isn't it special!",
enable_comments=False,
registration_required=False,
)
fp.sites.add(settings.SITE_ID)
response = self.client.get('/flatpage_root/some.very_special~chars-here')
self.assertRedirects(response, '/flatpage_root/some.very_special~chars-here/', status_code=301)
|
bsd-3-clause
|
takeshineshiro/django
|
tests/admin_custom_urls/tests.py
|
276
|
6381
|
from __future__ import unicode_literals
import datetime
from django.contrib.admin.utils import quote
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.template.response import TemplateResponse
from django.test import TestCase, override_settings
from .models import Action, Car, Person
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='admin_custom_urls.urls',)
class AdminCustomUrlsTest(TestCase):
"""
Remember that:
* The Action model has a CharField PK.
* The ModelAdmin for Action customizes the add_view URL, it's
'<app name>/<model name>/!add/'
"""
@classmethod
def setUpTestData(cls):
# password = "secret"
User.objects.create(
pk=100, username='super', first_name='Super', last_name='User', email='super@example.com',
password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158', is_active=True, is_superuser=True,
is_staff=True, last_login=datetime.datetime(2007, 5, 30, 13, 20, 10),
date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
Action.objects.create(name='delete', description='Remove things.')
Action.objects.create(name='rename', description='Gives things other names.')
Action.objects.create(name='add', description='Add things.')
Action.objects.create(name='path/to/file/', description="An action with '/' in its name.")
Action.objects.create(
name='path/to/html/document.html',
description='An action with a name similar to a HTML doc path.'
)
Action.objects.create(
name='javascript:alert(\'Hello world\');">Click here</a>',
description='An action with a name suspected of being a XSS attempt'
)
def setUp(self):
self.client.login(username='super', password='secret')
def test_basic_add_GET(self):
"""
Ensure GET on the add_view works.
"""
add_url = reverse('admin_custom_urls:admin_custom_urls_action_add')
self.assertTrue(add_url.endswith('/!add/'))
response = self.client.get(add_url)
self.assertIsInstance(response, TemplateResponse)
self.assertEqual(response.status_code, 200)
def test_add_with_GET_args(self):
"""
Ensure GET on the add_view plus specifying a field value in the query
string works.
"""
response = self.client.get(reverse('admin_custom_urls:admin_custom_urls_action_add'), {'name': 'My Action'})
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'value="My Action"')
def test_basic_add_POST(self):
"""
Ensure POST on add_view works.
"""
post_data = {
'_popup': '1',
"name": 'Action added through a popup',
"description": "Description of added action",
}
response = self.client.post(reverse('admin_custom_urls:admin_custom_urls_action_add'), post_data)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'dismissAddRelatedObjectPopup')
self.assertContains(response, 'Action added through a popup')
def test_admin_URLs_no_clash(self):
"""
Test that some admin URLs work correctly.
"""
# Should get the change_view for model instance with PK 'add', not show
# the add_view
url = reverse('admin_custom_urls:%s_action_change' % Action._meta.app_label,
args=(quote('add'),))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Change action')
# Should correctly get the change_view for the model instance with the
# funny-looking PK (the one with a 'path/to/html/document.html' value)
url = reverse('admin_custom_urls:%s_action_change' % Action._meta.app_label,
args=(quote("path/to/html/document.html"),))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Change action')
self.assertContains(response, 'value="path/to/html/document.html"')
def test_post_save_add_redirect(self):
"""
Ensures that ModelAdmin.response_post_save_add() controls the
redirection after the 'Save' button has been pressed when adding a
new object.
Refs 8001, 18310, 19505.
"""
post_data = {'name': 'John Doe'}
self.assertEqual(Person.objects.count(), 0)
response = self.client.post(
reverse('admin_custom_urls:admin_custom_urls_person_add'), post_data)
persons = Person.objects.all()
self.assertEqual(len(persons), 1)
self.assertRedirects(
response, reverse('admin_custom_urls:admin_custom_urls_person_history', args=[persons[0].pk]))
def test_post_save_change_redirect(self):
"""
Ensures that ModelAdmin.response_post_save_change() controls the
redirection after the 'Save' button has been pressed when editing an
existing object.
Refs 8001, 18310, 19505.
"""
Person.objects.create(name='John Doe')
self.assertEqual(Person.objects.count(), 1)
person = Person.objects.all()[0]
post_data = {'name': 'Jack Doe'}
response = self.client.post(
reverse('admin_custom_urls:admin_custom_urls_person_change', args=[person.pk]), post_data)
self.assertRedirects(
response, reverse('admin_custom_urls:admin_custom_urls_person_delete', args=[person.pk]))
def test_post_url_continue(self):
"""
Ensures that the ModelAdmin.response_add()'s parameter `post_url_continue`
controls the redirection after an object has been created.
"""
post_data = {'name': 'SuperFast', '_continue': '1'}
self.assertEqual(Car.objects.count(), 0)
response = self.client.post(
reverse('admin_custom_urls:admin_custom_urls_car_add'), post_data)
cars = Car.objects.all()
self.assertEqual(len(cars), 1)
self.assertRedirects(
response, reverse('admin_custom_urls:admin_custom_urls_car_history', args=[cars[0].pk]))
|
bsd-3-clause
|
openstack/nova-solver-scheduler
|
nova_solverscheduler/tests/scheduler/solvers/constraints/test_constraints.py
|
1
|
3368
|
# Copyright (c) 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for solver scheduler constraints.
"""
import mock
from nova import context
from nova import test
from nova_solverscheduler.scheduler.solvers import constraints
from nova_solverscheduler.tests.scheduler import solver_scheduler_fakes \
as fakes
class ConstraintTestBase(test.NoDBTestCase):
"""Base test case for constraints."""
def setUp(self):
super(ConstraintTestBase, self).setUp()
self.context = context.RequestContext('fake', 'fake')
constraint_handler = constraints.ConstraintHandler()
classes = constraint_handler.get_matching_classes(
['nova_solverscheduler.scheduler.solvers.constraints.'
'all_constraints'])
self.class_map = {}
for c in classes:
self.class_map[c.__name__] = c
class ConstraintsTestCase(ConstraintTestBase):
def test_all_constraints(self):
"""Test the existence of all constraint classes."""
self.assertIn('NoConstraint', self.class_map)
self.assertIn('ActiveHostsConstraint', self.class_map)
self.assertIn('DifferentHostConstraint', self.class_map)
def test_base_linear_constraints(self):
blc = constraints.BaseLinearConstraint()
variables, coefficients, constants, operators = (
blc.get_components(None, None, None))
self.assertEqual([], variables)
self.assertEqual([], coefficients)
self.assertEqual([], constants)
self.assertEqual([], operators)
class TestBaseFilterConstraint(ConstraintTestBase):
def setUp(self):
super(TestBaseFilterConstraint, self).setUp()
self.constraint_cls = constraints.BaseFilterConstraint
self._generate_fake_constraint_input()
def _generate_fake_constraint_input(self):
self.fake_filter_properties = {
'instance_uuids': ['fake_uuid_%s' % x for x in range(3)],
'num_instances': 3}
host1 = fakes.FakeSolverSchedulerHostState('host1', 'node1', {})
host2 = fakes.FakeSolverSchedulerHostState('host2', 'node1', {})
self.fake_hosts = [host1, host2]
@mock.patch('nova_solverscheduler.scheduler.solvers.constraints.'
'BaseFilterConstraint.host_filter_cls')
def test_get_constraint_matrix(self, mock_filter_cls):
expected_cons_mat = [
[True, True, True],
[False, False, False]]
mock_filter = mock_filter_cls.return_value
mock_filter.host_passes.side_effect = [True, False]
cons_mat = self.constraint_cls().get_constraint_matrix(
self.fake_hosts, self.fake_filter_properties)
self.assertEqual(expected_cons_mat, cons_mat)
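# Added note (not part of the original test file): the constraint matrix is
# indexed as [host][instance] -- one row per candidate host, one column per
# requested instance -- and a True cell means the wrapped host filter accepted
# that host. Here host1 passes for all three instances and host2 for none,
# matching the mocked host_passes side effects above.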
|
apache-2.0
|
trolldbois/python-haystack
|
haystack/allocators/win32/profiles/win7_32.py
|
1
|
36870
|
# -*- coding: utf-8 -*-
#
# TARGET arch is: ['-target', 'i386-win']
# WORD_SIZE is: 4
# POINTER_SIZE is: 4
# LONGDOUBLE_SIZE is: 12
#
import ctypes
c_int128 = ctypes.c_ubyte*16
c_uint128 = c_int128
void = None
if ctypes.sizeof(ctypes.c_longdouble) == 12:
c_long_double_t = ctypes.c_longdouble
else:
c_long_double_t = ctypes.c_ubyte*12
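# The i386 Windows target uses a 12-byte long double; when the host's ctypes
# long double differs in size, fall back to an opaque 12-byte buffer so that
# structure layouts keep the target's field offsets.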
# if the local word size is the same as the target's, keep the ctypes pointer function.
if ctypes.sizeof(ctypes.c_void_p) == 4:
POINTER_T = ctypes.POINTER
else:
# required to access _ctypes
import _ctypes
# Emulate a pointer class using the appropriate c_int32/c_int64 type.
# The new class should have:
# ['__module__', 'from_param', '_type_', '__dict__', '__weakref__', '__doc__']
# but each base type should map to a single cached class,
# so that if A == B, POINTER_T(A) == POINTER_T(B)
ctypes._pointer_t_type_cache = {}
def POINTER_T(pointee):
# a pointer should have the same length as LONG
fake_ptr_base_type = ctypes.c_uint32
# specific case for c_void_p
if pointee is None: # VOID pointer type. c_void_p.
pointee = type(None) # ctypes.c_void_p # ctypes.c_ulong
clsname = 'c_void'
else:
clsname = pointee.__name__
if clsname in ctypes._pointer_t_type_cache:
return ctypes._pointer_t_type_cache[clsname]
# make template
class _T(_ctypes._SimpleCData,):
_type_ = 'I'
_subtype_ = pointee
def _sub_addr_(self):
return self.value
def __repr__(self):
return '%s(%d)'%(clsname, self.value)
def contents(self):
raise TypeError('This is not a ctypes pointer.')
def __init__(self, **args):
raise TypeError('This is not a ctypes pointer. It is not instantiable.')
_class = type('LP_%d_%s'%(4, clsname), (_T,),{})
ctypes._pointer_t_type_cache[clsname] = _class
return _class
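# Illustrative note (added): thanks to the cache above, the emulated factory
# mirrors ctypes.POINTER semantics -- repeated calls for the same pointee
# return the identical class, and every emulated pointer is 4 bytes wide on
# this 32-bit target:
#   assert POINTER_T(ctypes.c_uint32) is POINTER_T(ctypes.c_uint32)
#   assert ctypes.sizeof(POINTER_T(ctypes.c_uint32)) == 4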
int8_t = ctypes.c_int8
int16_t = ctypes.c_int16
int32_t = ctypes.c_int32
int64_t = ctypes.c_int64
uint8_t = ctypes.c_uint8
uint16_t = ctypes.c_uint16
uint32_t = ctypes.c_uint32
uint64_t = ctypes.c_uint64
int_least8_t = ctypes.c_byte
int_least16_t = ctypes.c_int16
int_least32_t = ctypes.c_int32
int_least64_t = ctypes.c_int64
uint_least8_t = ctypes.c_ubyte
uint_least16_t = ctypes.c_uint16
uint_least32_t = ctypes.c_uint32
uint_least64_t = ctypes.c_uint64
int_fast8_t = ctypes.c_byte
int_fast16_t = ctypes.c_int32
int_fast32_t = ctypes.c_int32
int_fast64_t = ctypes.c_int64
uint_fast8_t = ctypes.c_ubyte
uint_fast16_t = ctypes.c_uint32
uint_fast32_t = ctypes.c_uint32
uint_fast64_t = ctypes.c_uint64
intptr_t = ctypes.c_int32
uintptr_t = ctypes.c_uint32
intmax_t = ctypes.c_int64
uintmax_t = ctypes.c_uint64
UINT8 = ctypes.c_ubyte
UCHAR = ctypes.c_ubyte
BOOL = ctypes.c_ubyte
CHAR = ctypes.c_byte
INT8 = ctypes.c_byte
WCHAR = ctypes.c_uint16
UINT16 = ctypes.c_uint16
USHORT = ctypes.c_uint16
SHORT = ctypes.c_int16
UINT32 = ctypes.c_uint32
ULONG = ctypes.c_uint32
LONG = ctypes.c_int32
UINT64 = ctypes.c_uint64
ULONGLONG = ctypes.c_uint64
LONGLONG = ctypes.c_int64
PPVOID64 = ctypes.c_uint64
PVOID64 = ctypes.c_uint64
PVOID32 = ctypes.c_uint32
PPVOID32 = ctypes.c_uint32
VOID = None
DOUBLE = ctypes.c_double
PUINT8 = POINTER_T(ctypes.c_ubyte)
PUCHAR = POINTER_T(ctypes.c_ubyte)
PBOOL = POINTER_T(ctypes.c_ubyte)
PCHAR = POINTER_T(ctypes.c_byte)
PINT8 = POINTER_T(ctypes.c_byte)
PUINT16 = POINTER_T(ctypes.c_uint16)
PUSHORT = POINTER_T(ctypes.c_uint16)
PSHORT = POINTER_T(ctypes.c_int16)
PUINT32 = POINTER_T(ctypes.c_uint32)
PULONG = POINTER_T(ctypes.c_uint32)
PLONG = POINTER_T(ctypes.c_int32)
PUINT64 = POINTER_T(ctypes.c_uint64)
PULONGLONG = POINTER_T(ctypes.c_uint64)
PLONGLONG = POINTER_T(ctypes.c_int64)
PVOID = POINTER_T(None)
PPVOID = POINTER_T(POINTER_T(None))
class struct__HEAP(ctypes.Structure):
pass
class struct__HEAP_ENTRY(ctypes.Structure):
pass
class union__HEAP_ENTRY_0(ctypes.Union):
pass
class struct__HEAP_ENTRY_0_4(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('Code1', ctypes.c_uint32),
('Code2', ctypes.c_uint16),
('Code3', ctypes.c_ubyte),
('Code4', ctypes.c_ubyte),
]
class struct__HEAP_ENTRY_0_3(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('InterceptorValue', ctypes.c_uint32),
('UnusedBytesLength', ctypes.c_uint16),
('EntryOffset', ctypes.c_ubyte),
('ExtendedBlockSignature', ctypes.c_ubyte),
]
class struct__HEAP_ENTRY_0_1(ctypes.Structure):
pass
class union__HEAP_ENTRY_0_1_0(ctypes.Union):
_pack_ = True # source:False
_fields_ = [
('SegmentOffset', ctypes.c_ubyte),
('LFHFlags', ctypes.c_ubyte),
]
struct__HEAP_ENTRY_0_1._pack_ = True # source:False
struct__HEAP_ENTRY_0_1._fields_ = [
('SubSegmentCode', POINTER_T(None)),
('PreviousSize', ctypes.c_uint16),
('_2', union__HEAP_ENTRY_0_1_0),
('UnusedBytes', ctypes.c_ubyte),
]
class struct__HEAP_ENTRY_0_2(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('FunctionIndex', ctypes.c_uint16),
('ContextValue', ctypes.c_uint16),
]
class struct__HEAP_ENTRY_0_5(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('AgregateCode', ctypes.c_uint64),
]
class struct__HEAP_ENTRY_0_0(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('Size', ctypes.c_uint16),
('Flags', ctypes.c_ubyte),
('SmallTagIndex', ctypes.c_ubyte),
]
union__HEAP_ENTRY_0._pack_ = True # source:False
union__HEAP_ENTRY_0._fields_ = [
('_0', struct__HEAP_ENTRY_0_0),
('_1', struct__HEAP_ENTRY_0_1),
('_2', struct__HEAP_ENTRY_0_2),
('_3', struct__HEAP_ENTRY_0_3),
('_4', struct__HEAP_ENTRY_0_4),
('_5', struct__HEAP_ENTRY_0_5),
]
struct__HEAP_ENTRY._pack_ = True # source:True
struct__HEAP_ENTRY._fields_ = [
('_0', union__HEAP_ENTRY_0),
]
HEAP_ENTRY = struct__HEAP_ENTRY
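# Added note: every variant in union__HEAP_ENTRY_0 overlays the same storage,
# so a _HEAP_ENTRY is a single 8-byte header on this 32-bit target that can be
# decoded as size/flags, as an LFH subsegment code, or as one aggregate code.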
class struct__HEAP_PSEUDO_TAG_ENTRY(ctypes.Structure):
_pack_ = True # source:True
_fields_ = [
('Allocs', ctypes.c_uint32),
('Frees', ctypes.c_uint32),
('Size', ctypes.c_uint32),
]
class struct__HEAP_TUNING_PARAMETERS(ctypes.Structure):
_pack_ = True # source:True
_fields_ = [
('CommittThresholdShift', ctypes.c_uint32),
('MaxPreCommittThreshold', ctypes.c_uint32),
]
HEAP_TUNING_PARAMETERS = struct__HEAP_TUNING_PARAMETERS
class struct__HEAP_TAG_ENTRY(ctypes.Structure):
_pack_ = True # source:True
_fields_ = [
('Allocs', ctypes.c_uint32),
('Frees', ctypes.c_uint32),
('Size', ctypes.c_uint32),
('TagIndex', ctypes.c_uint16),
('CreatorBackTraceIndex', ctypes.c_uint16),
('TagName', ctypes.c_uint16 * 24),
]
class struct__LIST_ENTRY(ctypes.Structure):
pass
struct__LIST_ENTRY._pack_ = True # source:True
struct__LIST_ENTRY._fields_ = [
('Flink', POINTER_T(struct__LIST_ENTRY)),
('Blink', POINTER_T(struct__LIST_ENTRY)),
]
LIST_ENTRY = struct__LIST_ENTRY
class struct__HEAP_LOCK(ctypes.Structure):
pass
class union__HEAP_LOCK_0(ctypes.Union):
pass
class struct__RTL_CRITICAL_SECTION(ctypes.Structure):
pass
class struct__RTL_CRITICAL_SECTION_DEBUG(ctypes.Structure):
_pack_ = True # source:True
_fields_ = [
('Type', ctypes.c_uint16),
('CreatorBackTraceIndex', ctypes.c_uint16),
('CriticalSection', POINTER_T(struct__RTL_CRITICAL_SECTION)),
('ProcessLocksList', LIST_ENTRY),
('EntryCount', ctypes.c_uint32),
('ContentionCount', ctypes.c_uint32),
('Flags', ctypes.c_uint32),
('CreatorBackTraceIndexHigh', ctypes.c_uint16),
('SpareUSHORT', ctypes.c_uint16),
]
struct__RTL_CRITICAL_SECTION._pack_ = True # source:True
struct__RTL_CRITICAL_SECTION._fields_ = [
('DebugInfo', POINTER_T(struct__RTL_CRITICAL_SECTION_DEBUG)),
('LockCount', ctypes.c_int32),
('RecursionCount', ctypes.c_int32),
('OwningThread', POINTER_T(None)),
('LockSemaphore', POINTER_T(None)),
('SpinCount', ctypes.c_uint32),
]
RTL_CRITICAL_SECTION = struct__RTL_CRITICAL_SECTION
union__HEAP_LOCK_0._pack_ = True # source:False
union__HEAP_LOCK_0._fields_ = [
('CriticalSection', RTL_CRITICAL_SECTION),
]
struct__HEAP_LOCK._pack_ = True # source:True
struct__HEAP_LOCK._fields_ = [
('Lock', union__HEAP_LOCK_0),
]
class struct__HEAP_COUNTERS(ctypes.Structure):
_pack_ = True # source:True
_fields_ = [
('TotalMemoryReserved', ctypes.c_uint32),
('TotalMemoryCommitted', ctypes.c_uint32),
('TotalMemoryLargeUCR', ctypes.c_uint32),
('TotalSizeInVirtualBlocks', ctypes.c_uint32),
('TotalSegments', ctypes.c_uint32),
('TotalUCRs', ctypes.c_uint32),
('CommittOps', ctypes.c_uint32),
('DeCommitOps', ctypes.c_uint32),
('LockAcquires', ctypes.c_uint32),
('LockCollisions', ctypes.c_uint32),
('CommitRate', ctypes.c_uint32),
('DecommittRate', ctypes.c_uint32),
('CommitFailures', ctypes.c_uint32),
('InBlockCommitFailures', ctypes.c_uint32),
('CompactHeapCalls', ctypes.c_uint32),
('CompactedUCRs', ctypes.c_uint32),
('AllocAndFreeOps', ctypes.c_uint32),
('InBlockDeccommits', ctypes.c_uint32),
('InBlockDeccomitSize', ctypes.c_uint32),
('HighWatermarkSize', ctypes.c_uint32),
('LastPolledSize', ctypes.c_uint32),
]
HEAP_COUNTERS = struct__HEAP_COUNTERS
struct__HEAP._pack_ = True # source:True
struct__HEAP._fields_ = [
('Entry', HEAP_ENTRY),
('SegmentSignature', ctypes.c_uint32),
('SegmentFlags', ctypes.c_uint32),
('SegmentListEntry', LIST_ENTRY),
('Heap', POINTER_T(struct__HEAP)),
('BaseAddress', POINTER_T(None)),
('NumberOfPages', ctypes.c_uint32),
('FirstEntry', POINTER_T(struct__HEAP_ENTRY)),
('LastValidEntry', POINTER_T(struct__HEAP_ENTRY)),
('NumberOfUnCommittedPages', ctypes.c_uint32),
('NumberOfUnCommittedRanges', ctypes.c_uint32),
('SegmentAllocatorBackTraceIndex', ctypes.c_uint16),
('Reserved', ctypes.c_uint16),
('UCRSegmentList', LIST_ENTRY),
('Flags', ctypes.c_uint32),
('ForceFlags', ctypes.c_uint32),
('CompatibilityFlags', ctypes.c_uint32),
('EncodeFlagMask', ctypes.c_uint32),
('Encoding', HEAP_ENTRY),
('PointerKey', ctypes.c_uint32),
('Interceptor', ctypes.c_uint32),
('VirtualMemoryThreshold', ctypes.c_uint32),
('Signature', ctypes.c_uint32),
('SegmentReserve', ctypes.c_uint32),
('SegmentCommit', ctypes.c_uint32),
('DeCommitFreeBlockThreshold', ctypes.c_uint32),
('DeCommitTotalFreeThreshold', ctypes.c_uint32),
('TotalFreeSize', ctypes.c_uint32),
('MaximumAllocationSize', ctypes.c_uint32),
('ProcessHeapsListIndex', ctypes.c_uint16),
('HeaderValidateLength', ctypes.c_uint16),
('HeaderValidateCopy', POINTER_T(None)),
('NextAvailableTagIndex', ctypes.c_uint16),
('MaximumTagIndex', ctypes.c_uint16),
('TagEntries', POINTER_T(struct__HEAP_TAG_ENTRY)),
('UCRList', LIST_ENTRY),
('AlignRound', ctypes.c_uint32),
('AlignMask', ctypes.c_uint32),
('VirtualAllocdBlocks', LIST_ENTRY),
('SegmentList', LIST_ENTRY),
('AllocatorBackTraceIndex', ctypes.c_uint16),
('gap_in_pdb_ofs_B2', ctypes.c_ubyte * 2),
('NonDedicatedListLength', ctypes.c_uint32),
('BlocksIndex', POINTER_T(None)),
('UCRIndex', POINTER_T(None)),
('PseudoTagEntries', POINTER_T(struct__HEAP_PSEUDO_TAG_ENTRY)),
('FreeLists', LIST_ENTRY),
('LockVariable', POINTER_T(struct__HEAP_LOCK)),
('CommitRoutine', POINTER_T(ctypes.CFUNCTYPE(ctypes.c_int32, POINTER_T(None), POINTER_T(POINTER_T(None)), POINTER_T(ctypes.c_uint32)))),
('FrontEndHeap', POINTER_T(None)),
('FrontHeapLockCount', ctypes.c_uint16),
('FrontEndHeapType', ctypes.c_ubyte),
('gap_in_pdb_ofs_DB', ctypes.c_ubyte * 1),
('Counters', HEAP_COUNTERS),
('TuningParameters', HEAP_TUNING_PARAMETERS),
]
HEAP = struct__HEAP
PHEAP = POINTER_T(struct__HEAP)
PPHEAP = POINTER_T(POINTER_T(struct__HEAP))
class struct__HEAP_LOCAL_DATA(ctypes.Structure):
pass
class union__SLIST_HEADER(ctypes.Union):
pass
class struct__SLIST_HEADER_0(ctypes.Structure):
pass
class struct__SINGLE_LIST_ENTRY(ctypes.Structure):
pass
struct__SINGLE_LIST_ENTRY._pack_ = True # source:True
struct__SINGLE_LIST_ENTRY._fields_ = [
('Next', POINTER_T(struct__SINGLE_LIST_ENTRY)),
]
SINGLE_LIST_ENTRY = struct__SINGLE_LIST_ENTRY
struct__SLIST_HEADER_0._pack_ = True # source:False
struct__SLIST_HEADER_0._fields_ = [
('Next', SINGLE_LIST_ENTRY),
('Depth', ctypes.c_uint16),
('Sequence', ctypes.c_uint16),
]
union__SLIST_HEADER._pack_ = True # source:True
union__SLIST_HEADER._fields_ = [
('Alignment', ctypes.c_uint64),
('_1', struct__SLIST_HEADER_0),
]
SLIST_HEADER = union__SLIST_HEADER
class struct__HEAP_LOCAL_SEGMENT_INFO(ctypes.Structure):
pass
class struct__HEAP_SUBSEGMENT(ctypes.Structure):
pass
class struct__INTERLOCK_SEQ(ctypes.Structure):
pass
class union__INTERLOCK_SEQ_0(ctypes.Union):
pass
class struct__INTERLOCK_SEQ_0_1(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('OffsetAndDepth', ctypes.c_uint32),
('Sequence', ctypes.c_uint32),
]
class struct__INTERLOCK_SEQ_0_0(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('Depth', ctypes.c_uint16),
('FreeEntryOffset', ctypes.c_uint16),
]
class struct__INTERLOCK_SEQ_0_2(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('Exchg', ctypes.c_int64),
]
union__INTERLOCK_SEQ_0._pack_ = True # source:False
union__INTERLOCK_SEQ_0._fields_ = [
('_0', struct__INTERLOCK_SEQ_0_0),
('_1', struct__INTERLOCK_SEQ_0_1),
('_2', struct__INTERLOCK_SEQ_0_2),
]
struct__INTERLOCK_SEQ._pack_ = True # source:True
struct__INTERLOCK_SEQ._fields_ = [
('_0', union__INTERLOCK_SEQ_0),
]
INTERLOCK_SEQ = struct__INTERLOCK_SEQ
class union__HEAP_SUBSEGMENT_0(ctypes.Union):
pass
class struct__HEAP_SUBSEGMENT_0_0(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('BlockSize', ctypes.c_uint16),
('Flags', ctypes.c_uint16),
('BlockCount', ctypes.c_uint16),
('SizeIndex', ctypes.c_ubyte),
('AffinityIndex', ctypes.c_ubyte),
]
union__HEAP_SUBSEGMENT_0._pack_ = True # source:False
union__HEAP_SUBSEGMENT_0._fields_ = [
('_0', struct__HEAP_SUBSEGMENT_0_0),
('Alignment', ctypes.c_uint32 * 2),
]
class struct__HEAP_USERDATA_HEADER(ctypes.Structure):
pass
class union__HEAP_USERDATA_HEADER_0(ctypes.Union):
pass
class struct__HEAP_USERDATA_HEADER_0_0(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('SubSegment', POINTER_T(struct__HEAP_SUBSEGMENT)),
('Reserved', POINTER_T(None)),
('SizeIndex', ctypes.c_uint32),
('Signature', ctypes.c_uint32),
]
union__HEAP_USERDATA_HEADER_0._pack_ = True # source:False
union__HEAP_USERDATA_HEADER_0._fields_ = [
('SFreeListEntry', SINGLE_LIST_ENTRY),
('_1', struct__HEAP_USERDATA_HEADER_0_0),
]
struct__HEAP_USERDATA_HEADER._pack_ = True # source:True
struct__HEAP_USERDATA_HEADER._fields_ = [
('_0', union__HEAP_USERDATA_HEADER_0),
]
struct__HEAP_SUBSEGMENT._pack_ = True # source:True
struct__HEAP_SUBSEGMENT._fields_ = [
('LocalInfo', POINTER_T(struct__HEAP_LOCAL_SEGMENT_INFO)),
('UserBlocks', POINTER_T(struct__HEAP_USERDATA_HEADER)),
('AggregateExchg', INTERLOCK_SEQ),
('_3', union__HEAP_SUBSEGMENT_0),
('SFreeListEntry', SINGLE_LIST_ENTRY),
('Lock', ctypes.c_uint32),
]
class union__HEAP_BUCKET_COUNTERS(ctypes.Union):
pass
class struct__HEAP_BUCKET_COUNTERS_0(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('TotalBlocks', ctypes.c_uint32),
('SubSegmentCounts', ctypes.c_uint32),
]
class struct__HEAP_BUCKET_COUNTERS_1(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('Aggregate64', ctypes.c_int64),
]
union__HEAP_BUCKET_COUNTERS._pack_ = True # source:True
union__HEAP_BUCKET_COUNTERS._fields_ = [
('_0', struct__HEAP_BUCKET_COUNTERS_0),
('_1', struct__HEAP_BUCKET_COUNTERS_1),
]
HEAP_BUCKET_COUNTERS = union__HEAP_BUCKET_COUNTERS
struct__HEAP_LOCAL_SEGMENT_INFO._pack_ = True # source:True
struct__HEAP_LOCAL_SEGMENT_INFO._fields_ = [
('Hint', POINTER_T(struct__HEAP_SUBSEGMENT)),
('ActiveSubsegment', POINTER_T(struct__HEAP_SUBSEGMENT)),
('CachedItems', POINTER_T(struct__HEAP_SUBSEGMENT) * 16),
('SListHeader', SLIST_HEADER),
('Counters', HEAP_BUCKET_COUNTERS),
('LocalData', POINTER_T(struct__HEAP_LOCAL_DATA)),
('LastOpSequence', ctypes.c_uint32),
('BucketIndex', ctypes.c_uint16),
('LastUsed', ctypes.c_uint16),
('gap_in_pdb_ofs_64', ctypes.c_ubyte * 4),
]
class struct__LFH_BLOCK_ZONE(ctypes.Structure):
_pack_ = True # source:True
_fields_ = [
('ListEntry', LIST_ENTRY),
('FreePointer', POINTER_T(None)),
('Limit', POINTER_T(None)),
]
class struct__LFH_HEAP(ctypes.Structure):
pass
class union__HEAP_BUCKET_RUN_INFO(ctypes.Union):
pass
class struct__HEAP_BUCKET_RUN_INFO_1(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('Aggregate64', ctypes.c_int64),
]
class struct__HEAP_BUCKET_RUN_INFO_0(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('Bucket', ctypes.c_uint32),
('RunLength', ctypes.c_uint32),
]
union__HEAP_BUCKET_RUN_INFO._pack_ = True # source:True
union__HEAP_BUCKET_RUN_INFO._fields_ = [
('_0', struct__HEAP_BUCKET_RUN_INFO_0),
('_1', struct__HEAP_BUCKET_RUN_INFO_1),
]
HEAP_BUCKET_RUN_INFO = union__HEAP_BUCKET_RUN_INFO
class struct__HEAP_BUCKET(ctypes.Structure):
pass
class struct__HEAP_BUCKET_0(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('UseAffinity', ctypes.c_uint8, 1),
('DebugFlags', ctypes.c_uint8, 2),
('PADDING_0', ctypes.c_uint8, 5),
]
struct__HEAP_BUCKET._pack_ = True # source:True
struct__HEAP_BUCKET._fields_ = [
('BlockUnits', ctypes.c_uint16),
('SizeIndex', ctypes.c_ubyte),
('_2', struct__HEAP_BUCKET_0),
]
class struct__USER_MEMORY_CACHE_ENTRY(ctypes.Structure):
_pack_ = True # source:True
_fields_ = [
('UserBlocks', SLIST_HEADER),
('AvailableBlocks', ctypes.c_uint32),
('gap_in_pdb_ofs_C', ctypes.c_ubyte * 4),
]
struct__LFH_HEAP._pack_ = True # source:True
struct__LFH_HEAP._fields_ = [
('Lock', RTL_CRITICAL_SECTION),
('SubSegmentZones', LIST_ENTRY),
('ZoneBlockSize', ctypes.c_uint32),
('Heap', POINTER_T(None)),
('SegmentChange', ctypes.c_uint32),
('SegmentCreate', ctypes.c_uint32),
('SegmentInsertInFree', ctypes.c_uint32),
('SegmentDelete', ctypes.c_uint32),
('CacheAllocs', ctypes.c_uint32),
('CacheFrees', ctypes.c_uint32),
('SizeInCache', ctypes.c_uint32),
('gap_in_pdb_ofs_44', ctypes.c_ubyte * 4),
('RunInfo', HEAP_BUCKET_RUN_INFO),
('UserBlockCache', struct__USER_MEMORY_CACHE_ENTRY * 12),
('Buckets', struct__HEAP_BUCKET * 128),
('LocalData', struct__HEAP_LOCAL_DATA * 1),
]
struct__HEAP_LOCAL_DATA._pack_ = True # source:True
struct__HEAP_LOCAL_DATA._fields_ = [
('DeletedSubSegments', SLIST_HEADER),
('CrtZone', POINTER_T(struct__LFH_BLOCK_ZONE)),
('LowFragHeap', POINTER_T(struct__LFH_HEAP)),
('Sequence', ctypes.c_uint32),
('gap_in_pdb_ofs_14', ctypes.c_ubyte * 4),
('SegmentInfo', struct__HEAP_LOCAL_SEGMENT_INFO * 128),
]
HEAP_LOCAL_DATA = struct__HEAP_LOCAL_DATA
PHEAP_LOCAL_DATA = POINTER_T(struct__HEAP_LOCAL_DATA)
PPHEAP_LOCAL_DATA = POINTER_T(POINTER_T(struct__HEAP_LOCAL_DATA))
HEAP_LOCAL_SEGMENT_INFO = struct__HEAP_LOCAL_SEGMENT_INFO
PHEAP_LOCAL_SEGMENT_INFO = POINTER_T(struct__HEAP_LOCAL_SEGMENT_INFO)
PPHEAP_LOCAL_SEGMENT_INFO = POINTER_T(POINTER_T(struct__HEAP_LOCAL_SEGMENT_INFO))
HEAP_LOCK = struct__HEAP_LOCK
PHEAP_LOCK = POINTER_T(struct__HEAP_LOCK)
PPHEAP_LOCK = POINTER_T(POINTER_T(struct__HEAP_LOCK))
HEAP_SUBSEGMENT = struct__HEAP_SUBSEGMENT
PHEAP_SUBSEGMENT = POINTER_T(struct__HEAP_SUBSEGMENT)
PPHEAP_SUBSEGMENT = POINTER_T(POINTER_T(struct__HEAP_SUBSEGMENT))
HEAP_USERDATA_HEADER = struct__HEAP_USERDATA_HEADER
PPHEAP_USERDATA_HEADER = POINTER_T(POINTER_T(struct__HEAP_USERDATA_HEADER))
PHEAP_USERDATA_HEADER = POINTER_T(struct__HEAP_USERDATA_HEADER)
LFH_HEAP = struct__LFH_HEAP
PPLFH_HEAP = POINTER_T(POINTER_T(struct__LFH_HEAP))
PLFH_HEAP = POINTER_T(struct__LFH_HEAP)
PPRTL_CRITICAL_SECTION = POINTER_T(POINTER_T(struct__RTL_CRITICAL_SECTION))
PRTL_CRITICAL_SECTION = POINTER_T(struct__RTL_CRITICAL_SECTION)
RTL_CRITICAL_SECTION_DEBUG = struct__RTL_CRITICAL_SECTION_DEBUG
PRTL_CRITICAL_SECTION_DEBUG = POINTER_T(struct__RTL_CRITICAL_SECTION_DEBUG)
PPRTL_CRITICAL_SECTION_DEBUG = POINTER_T(POINTER_T(struct__RTL_CRITICAL_SECTION_DEBUG))
PPLIST_ENTRY = POINTER_T(POINTER_T(struct__LIST_ENTRY))
PLIST_ENTRY = POINTER_T(struct__LIST_ENTRY)
PSINGLE_LIST_ENTRY = POINTER_T(struct__SINGLE_LIST_ENTRY)
PPSINGLE_LIST_ENTRY = POINTER_T(POINTER_T(struct__SINGLE_LIST_ENTRY))
HEAP_TAG_ENTRY = struct__HEAP_TAG_ENTRY
PHEAP_TAG_ENTRY = POINTER_T(struct__HEAP_TAG_ENTRY)
PPHEAP_TAG_ENTRY = POINTER_T(POINTER_T(struct__HEAP_TAG_ENTRY))
PPHEAP_BUCKET_COUNTERS = POINTER_T(POINTER_T(union__HEAP_BUCKET_COUNTERS))
PHEAP_BUCKET_COUNTERS = POINTER_T(union__HEAP_BUCKET_COUNTERS)
PPHEAP_BUCKET_RUN_INFO = POINTER_T(POINTER_T(union__HEAP_BUCKET_RUN_INFO))
PHEAP_BUCKET_RUN_INFO = POINTER_T(union__HEAP_BUCKET_RUN_INFO)
PPINTERLOCK_SEQ = POINTER_T(POINTER_T(struct__INTERLOCK_SEQ))
PINTERLOCK_SEQ = POINTER_T(struct__INTERLOCK_SEQ)
PPHEAP_BUCKET = POINTER_T(POINTER_T(struct__HEAP_BUCKET))
HEAP_BUCKET = struct__HEAP_BUCKET
PHEAP_BUCKET = POINTER_T(struct__HEAP_BUCKET)
PPHEAP_COUNTERS = POINTER_T(POINTER_T(struct__HEAP_COUNTERS))
PHEAP_COUNTERS = POINTER_T(struct__HEAP_COUNTERS)
PHEAP_PSEUDO_TAG_ENTRY = POINTER_T(struct__HEAP_PSEUDO_TAG_ENTRY)
PPHEAP_PSEUDO_TAG_ENTRY = POINTER_T(POINTER_T(struct__HEAP_PSEUDO_TAG_ENTRY))
HEAP_PSEUDO_TAG_ENTRY = struct__HEAP_PSEUDO_TAG_ENTRY
PPHEAP_ENTRY = POINTER_T(POINTER_T(struct__HEAP_ENTRY))
PHEAP_ENTRY = POINTER_T(struct__HEAP_ENTRY)
PHEAP_TUNING_PARAMETERS = POINTER_T(struct__HEAP_TUNING_PARAMETERS)
PPHEAP_TUNING_PARAMETERS = POINTER_T(POINTER_T(struct__HEAP_TUNING_PARAMETERS))
LFH_BLOCK_ZONE = struct__LFH_BLOCK_ZONE
PLFH_BLOCK_ZONE = POINTER_T(struct__LFH_BLOCK_ZONE)
PPLFH_BLOCK_ZONE = POINTER_T(POINTER_T(struct__LFH_BLOCK_ZONE))
PSLIST_HEADER = POINTER_T(union__SLIST_HEADER)
PPSLIST_HEADER = POINTER_T(POINTER_T(union__SLIST_HEADER))
PPUSER_MEMORY_CACHE_ENTRY = POINTER_T(POINTER_T(struct__USER_MEMORY_CACHE_ENTRY))
PUSER_MEMORY_CACHE_ENTRY = POINTER_T(struct__USER_MEMORY_CACHE_ENTRY)
USER_MEMORY_CACHE_ENTRY = struct__USER_MEMORY_CACHE_ENTRY
class struct__HEAP_SEGMENT(ctypes.Structure):
_pack_ = True # source:True
_fields_ = [
('Entry', HEAP_ENTRY),
('SegmentSignature', ctypes.c_uint32),
('SegmentFlags', ctypes.c_uint32),
('SegmentListEntry', LIST_ENTRY),
('Heap', POINTER_T(struct__HEAP)),
('BaseAddress', POINTER_T(None)),
('NumberOfPages', ctypes.c_uint32),
('FirstEntry', POINTER_T(struct__HEAP_ENTRY)),
('LastValidEntry', POINTER_T(struct__HEAP_ENTRY)),
('NumberOfUnCommittedPages', ctypes.c_uint32),
('NumberOfUnCommittedRanges', ctypes.c_uint32),
('SegmentAllocatorBackTraceIndex', ctypes.c_uint16),
('Reserved', ctypes.c_uint16),
('UCRSegmentList', LIST_ENTRY),
]
HEAP_SEGMENT = struct__HEAP_SEGMENT
PHEAP_SEGMENT = POINTER_T(struct__HEAP_SEGMENT)
PPHEAP_SEGMENT = POINTER_T(POINTER_T(struct__HEAP_SEGMENT))
class struct_c__SA_LIST_ENTRY(ctypes.Structure):
_pack_ = True # source:True
_fields_ = [
('PADDING_0', ctypes.c_ubyte),
]
class struct_c__SA_SINGLE_LIST_ENTRY(ctypes.Structure):
_pack_ = True # source:True
_fields_ = [
('PADDING_0', ctypes.c_ubyte),
]
class struct_c__SA_HEAP_ENTRY(ctypes.Structure):
_pack_ = True # source:True
_fields_ = [
('PADDING_0', ctypes.c_ubyte),
]
class struct_c__SA_HEAP_COUNTERS(ctypes.Structure):
_pack_ = True # source:True
_fields_ = [
('PADDING_0', ctypes.c_ubyte),
]
class struct_c__SA_HEAP_TAG_ENTRY(ctypes.Structure):
_pack_ = True # source:True
_fields_ = [
('PADDING_0', ctypes.c_ubyte),
]
class struct_c__SA_INTERLOCK_SEQ(ctypes.Structure):
_pack_ = True # source:True
_fields_ = [
('PADDING_0', ctypes.c_ubyte),
]
class struct__HEAP_ENTRY_EXTRA(ctypes.Structure):
pass
class union__HEAP_ENTRY_EXTRA_0(ctypes.Union):
pass
class struct__HEAP_ENTRY_EXTRA_0_0(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('AllocatorBackTraceIndex', ctypes.c_uint16),
('TagIndex', ctypes.c_uint16),
('Settable', ctypes.c_uint32),
]
class struct__HEAP_ENTRY_EXTRA_0_1(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('ZeroInit', ctypes.c_uint64),
]
union__HEAP_ENTRY_EXTRA_0._pack_ = True # source:False
union__HEAP_ENTRY_EXTRA_0._fields_ = [
('_0', struct__HEAP_ENTRY_EXTRA_0_0),
('_1', struct__HEAP_ENTRY_EXTRA_0_1),
]
struct__HEAP_ENTRY_EXTRA._pack_ = True # source:True
struct__HEAP_ENTRY_EXTRA._fields_ = [
('_0', union__HEAP_ENTRY_EXTRA_0),
]
HEAP_ENTRY_EXTRA = struct__HEAP_ENTRY_EXTRA
PPHEAP_ENTRY_EXTRA = POINTER_T(POINTER_T(struct__HEAP_ENTRY_EXTRA))
PHEAP_ENTRY_EXTRA = POINTER_T(struct__HEAP_ENTRY_EXTRA)
class struct_c__SA_HEAP_PSEUDO_TAG_ENTRY(ctypes.Structure):
_pack_ = True # source:True
_fields_ = [
('PADDING_0', ctypes.c_ubyte),
]
class struct_c__SA_HEAP_TUNING_PARAMETERS(ctypes.Structure):
_pack_ = True # source:True
_fields_ = [
('PADDING_0', ctypes.c_ubyte),
]
class struct__HEAP_LIST_LOOKUP(ctypes.Structure):
pass
struct__HEAP_LIST_LOOKUP._pack_ = True # source:True
struct__HEAP_LIST_LOOKUP._fields_ = [
('ExtendedLookup', POINTER_T(struct__HEAP_LIST_LOOKUP)),
('ArraySize', ctypes.c_uint32),
('ExtraItem', ctypes.c_uint32),
('ItemCount', ctypes.c_uint32),
('OutOfRangeItems', ctypes.c_uint32),
('BaseIndex', ctypes.c_uint32),
('ListHead', POINTER_T(struct__LIST_ENTRY)),
('ListsInUseUlong', POINTER_T(ctypes.c_uint32)),
('ListHints', POINTER_T(POINTER_T(struct__LIST_ENTRY))),
]
HEAP_LIST_LOOKUP = struct__HEAP_LIST_LOOKUP
PHEAP_LIST_LOOKUP = POINTER_T(struct__HEAP_LIST_LOOKUP)
PPHEAP_LIST_LOOKUP = POINTER_T(POINTER_T(struct__HEAP_LIST_LOOKUP))
class union_c__UA_SLIST_HEADER(ctypes.Union):
_pack_ = True # source:True
_fields_ = [
('PADDING_0', ctypes.c_ubyte),
]
class struct__HEAP_UCR_DESCRIPTOR(ctypes.Structure):
_pack_ = True # source:True
_fields_ = [
('ListEntry', LIST_ENTRY),
('SegmentEntry', LIST_ENTRY),
('Address', POINTER_T(None)),
('Size', ctypes.c_uint32),
]
PHEAP_UCR_DESCRIPTOR = POINTER_T(struct__HEAP_UCR_DESCRIPTOR)
HEAP_UCR_DESCRIPTOR = struct__HEAP_UCR_DESCRIPTOR
PPHEAP_UCR_DESCRIPTOR = POINTER_T(POINTER_T(struct__HEAP_UCR_DESCRIPTOR))
class struct__HEAP_LOOKASIDE(ctypes.Structure):
_pack_ = True # source:True
_fields_ = [
('ListHead', SLIST_HEADER),
('Depth', ctypes.c_uint16),
('MaximumDepth', ctypes.c_uint16),
('TotalAllocates', ctypes.c_uint32),
('AllocateMisses', ctypes.c_uint32),
('TotalFrees', ctypes.c_uint32),
('FreeMisses', ctypes.c_uint32),
('LastTotalAllocates', ctypes.c_uint32),
('LastAllocateMisses', ctypes.c_uint32),
('Counters', ctypes.c_uint32 * 2),
('gap_in_pdb_ofs_2C', ctypes.c_ubyte * 4),
]
HEAP_LOOKASIDE = struct__HEAP_LOOKASIDE
PPHEAP_LOOKASIDE = POINTER_T(POINTER_T(struct__HEAP_LOOKASIDE))
PHEAP_LOOKASIDE = POINTER_T(struct__HEAP_LOOKASIDE)
class struct__HEAP_FREE_ENTRY(ctypes.Structure):
pass
class union__HEAP_FREE_ENTRY_0(ctypes.Union):
pass
class struct__HEAP_FREE_ENTRY_0_1(ctypes.Structure):
pass
class union__HEAP_FREE_ENTRY_0_1_0(ctypes.Union):
_pack_ = True # source:False
_fields_ = [
('SegmentOffset', ctypes.c_ubyte),
('LFHFlags', ctypes.c_ubyte),
]
struct__HEAP_FREE_ENTRY_0_1._pack_ = True # source:False
struct__HEAP_FREE_ENTRY_0_1._fields_ = [
('SubSegmentCode', POINTER_T(None)),
('PreviousSize', ctypes.c_uint16),
('_2', union__HEAP_FREE_ENTRY_0_1_0),
('UnusedBytes', ctypes.c_ubyte),
]
class struct__HEAP_FREE_ENTRY_0_5(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('AgregateCode', ctypes.c_uint64),
('FreeList', LIST_ENTRY),
]
class struct__HEAP_FREE_ENTRY_0_2(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('FunctionIndex', ctypes.c_uint16),
('ContextValue', ctypes.c_uint16),
]
class struct__HEAP_FREE_ENTRY_0_0(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('Size', ctypes.c_uint16),
('Flags', ctypes.c_ubyte),
('SmallTagIndex', ctypes.c_ubyte),
]
class struct__HEAP_FREE_ENTRY_0_4(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('Code1', ctypes.c_uint32),
('Code2', ctypes.c_uint16),
('Code3', ctypes.c_ubyte),
('Code4', ctypes.c_ubyte),
]
class struct__HEAP_FREE_ENTRY_0_3(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('InterceptorValue', ctypes.c_uint32),
('UnusedBytesLength', ctypes.c_uint16),
('EntryOffset', ctypes.c_ubyte),
('ExtendedBlockSignature', ctypes.c_ubyte),
]
union__HEAP_FREE_ENTRY_0._pack_ = True # source:False
union__HEAP_FREE_ENTRY_0._fields_ = [
('_0', struct__HEAP_FREE_ENTRY_0_0),
('_1', struct__HEAP_FREE_ENTRY_0_1),
('_2', struct__HEAP_FREE_ENTRY_0_2),
('_3', struct__HEAP_FREE_ENTRY_0_3),
('_4', struct__HEAP_FREE_ENTRY_0_4),
('_5', struct__HEAP_FREE_ENTRY_0_5),
]
struct__HEAP_FREE_ENTRY._pack_ = True # source:True
struct__HEAP_FREE_ENTRY._fields_ = [
('_0', union__HEAP_FREE_ENTRY_0),
]
HEAP_FREE_ENTRY = struct__HEAP_FREE_ENTRY
PPHEAP_FREE_ENTRY = POINTER_T(POINTER_T(struct__HEAP_FREE_ENTRY))
PHEAP_FREE_ENTRY = POINTER_T(struct__HEAP_FREE_ENTRY)
class struct__HEAP_VIRTUAL_ALLOC_ENTRY(ctypes.Structure):
_pack_ = True # source:True
_fields_ = [
('Entry', LIST_ENTRY),
('ExtraStuff', HEAP_ENTRY_EXTRA),
('CommitSize', ctypes.c_uint32),
('ReserveSize', ctypes.c_uint32),
('BusyBlock', HEAP_ENTRY),
]
HEAP_VIRTUAL_ALLOC_ENTRY = struct__HEAP_VIRTUAL_ALLOC_ENTRY
PPHEAP_VIRTUAL_ALLOC_ENTRY = POINTER_T(POINTER_T(struct__HEAP_VIRTUAL_ALLOC_ENTRY))
PHEAP_VIRTUAL_ALLOC_ENTRY = POINTER_T(struct__HEAP_VIRTUAL_ALLOC_ENTRY)
class struct_c__SA_RTL_CRITICAL_SECTION(ctypes.Structure):
_pack_ = True # source:True
_fields_ = [
('PADDING_0', ctypes.c_ubyte),
]
class struct_c__SA_HEAP(ctypes.Structure):
_pack_ = True # source:True
_fields_ = [
('PADDING_0', ctypes.c_ubyte),
]
class struct_c__SA_HEAP_LOCK(ctypes.Structure):
_pack_ = True # source:True
_fields_ = [
('PADDING_0', ctypes.c_ubyte),
]
class struct_c__SA_HEAP_USERDATA_HEADER(ctypes.Structure):
_pack_ = True # source:True
_fields_ = [
('PADDING_0', ctypes.c_ubyte),
]
class struct_c__SA_HEAP_SUBSEGMENT(ctypes.Structure):
_pack_ = True # source:True
_fields_ = [
('PADDING_0', ctypes.c_ubyte),
]
class struct_c__SA_RTL_CRITICAL_SECTION_DEBUG(ctypes.Structure):
_pack_ = True # source:True
_fields_ = [
('PADDING_0', ctypes.c_ubyte),
]
__all__ = \
['struct__HEAP_SUBSEGMENT_0_0', 'struct__INTERLOCK_SEQ_0_0',
'PPHEAP_VIRTUAL_ALLOC_ENTRY', 'HEAP_VIRTUAL_ALLOC_ENTRY', 'VOID',
'HEAP_FREE_ENTRY', 'PUINT32', 'struct__HEAP_USERDATA_HEADER',
'int_fast32_t', 'union_c__UA_SLIST_HEADER', 'PPHEAP_SEGMENT',
'struct__HEAP_FREE_ENTRY_0_2', 'struct__HEAP_FREE_ENTRY_0_3',
'INTERLOCK_SEQ', 'PPSLIST_HEADER', 'struct__HEAP_FREE_ENTRY_0_4',
'struct__HEAP_FREE_ENTRY_0_5', 'PHEAP_PSEUDO_TAG_ENTRY',
'uint8_t', 'union__INTERLOCK_SEQ_0', 'PPVOID',
'struct_c__SA_RTL_CRITICAL_SECTION_DEBUG', 'LFH_BLOCK_ZONE',
'PVOID32', 'struct_c__SA_HEAP_TAG_ENTRY', 'uint_least16_t',
'struct__HEAP_ENTRY_EXTRA', 'UINT16', 'PHEAP_BUCKET_RUN_INFO',
'union__HEAP_ENTRY_0_1_0', 'PUSHORT', 'PUCHAR',
'struct__HEAP_ENTRY_0_2', 'intptr_t', 'struct__HEAP_ENTRY_0_3',
'HEAP_BUCKET_RUN_INFO', 'int_fast8_t', 'PHEAP_BUCKET_COUNTERS',
'HEAP_ENTRY', 'PPHEAP_USERDATA_HEADER', 'RTL_CRITICAL_SECTION',
'union__SLIST_HEADER', 'PPHEAP_COUNTERS',
'union__HEAP_BUCKET_COUNTERS', 'PPHEAP_TAG_ENTRY',
'struct_c__SA_HEAP_PSEUDO_TAG_ENTRY', 'struct__HEAP_SUBSEGMENT',
'DOUBLE', 'PPRTL_CRITICAL_SECTION_DEBUG', 'INT8', 'uint_fast16_t',
'PPHEAP_TUNING_PARAMETERS', 'struct__LFH_BLOCK_ZONE',
'PPHEAP_LOCAL_DATA', 'struct__HEAP_ENTRY', 'PINTERLOCK_SEQ',
'SLIST_HEADER', 'PLONG', 'LONGLONG', 'PHEAP_SEGMENT', 'SHORT',
'struct__LFH_HEAP', 'struct__SLIST_HEADER_0', 'PINT8',
'struct__HEAP_BUCKET_RUN_INFO_0',
'struct__HEAP_BUCKET_RUN_INFO_1', 'intmax_t', 'int16_t',
'PPHEAP_LIST_LOOKUP', 'struct__HEAP_FREE_ENTRY_0_0',
'int_fast64_t', 'HEAP_TAG_ENTRY', 'HEAP_BUCKET',
'struct__HEAP_FREE_ENTRY_0_1', 'struct_c__SA_LIST_ENTRY', 'ULONG',
'struct__HEAP', 'int_least8_t', 'union__HEAP_FREE_ENTRY_0_1_0',
'HEAP_USERDATA_HEADER', 'HEAP_LOOKASIDE', 'struct__INTERLOCK_SEQ',
'struct__HEAP_LOCK', 'struct__SINGLE_LIST_ENTRY', 'PPHEAP',
'int_least16_t', 'UINT32', 'uint_least8_t', 'HEAP_LOCAL_DATA',
'struct__INTERLOCK_SEQ_0_1', 'PBOOL', 'struct__INTERLOCK_SEQ_0_2',
'UCHAR', 'struct_c__SA_HEAP_LOCK', 'PHEAP_TUNING_PARAMETERS',
'PPVOID32', 'PVOID64', 'union__HEAP_SUBSEGMENT_0',
'struct__LIST_ENTRY', 'PPHEAP_LOOKASIDE', 'uint64_t',
'PHEAP_LOCK', 'PHEAP_TAG_ENTRY', 'HEAP_LOCAL_SEGMENT_INFO',
'struct__HEAP_LOCAL_SEGMENT_INFO', 'HEAP_SEGMENT',
'union__HEAP_BUCKET_RUN_INFO', 'HEAP_COUNTERS',
'union__HEAP_FREE_ENTRY_0', 'PULONG',
'struct__HEAP_BUCKET_COUNTERS_1',
'struct__HEAP_BUCKET_COUNTERS_0', 'struct__HEAP_LOCAL_DATA',
'WCHAR', 'union__HEAP_ENTRY_EXTRA_0', 'uint16_t', 'uint_fast8_t',
'struct__RTL_CRITICAL_SECTION', 'PPHEAP_BUCKET',
'struct__HEAP_ENTRY_0_0', 'struct__HEAP_ENTRY_0_1', 'int32_t',
'uint_least64_t', 'struct__HEAP_ENTRY_0_4',
'struct__HEAP_ENTRY_0_5', 'CHAR', 'LONG',
'struct_c__SA_HEAP_COUNTERS', 'PULONGLONG', 'PLFH_BLOCK_ZONE',
'struct__HEAP_LIST_LOOKUP', 'PPHEAP_BUCKET_COUNTERS',
'PSINGLE_LIST_ENTRY', 'struct__HEAP_BUCKET', 'PSLIST_HEADER',
'struct_c__SA_RTL_CRITICAL_SECTION',
'struct__HEAP_UCR_DESCRIPTOR', 'struct__HEAP_BUCKET_0',
'PHEAP_LOCAL_DATA', 'PUINT64', 'struct_c__SA_INTERLOCK_SEQ',
'struct__HEAP_LOOKASIDE', 'PRTL_CRITICAL_SECTION_DEBUG',
'uint_least32_t', 'int_least64_t', 'struct__HEAP_SEGMENT',
'struct__HEAP_VIRTUAL_ALLOC_ENTRY', 'PPSINGLE_LIST_ENTRY',
'uintptr_t', 'PRTL_CRITICAL_SECTION', 'PPHEAP_LOCAL_SEGMENT_INFO',
'PPHEAP_FREE_ENTRY', 'RTL_CRITICAL_SECTION_DEBUG', 'PLFH_HEAP',
'PHEAP_LOCAL_SEGMENT_INFO', 'struct__HEAP_ENTRY_EXTRA_0_1',
'UINT8', 'int8_t', 'PPHEAP_PSEUDO_TAG_ENTRY', 'PLONGLONG',
'PPUSER_MEMORY_CACHE_ENTRY', 'PUSER_MEMORY_CACHE_ENTRY',
'PPLFH_BLOCK_ZONE', 'PHEAP_FREE_ENTRY', 'PPHEAP_LOCK',
'struct__HEAP_TUNING_PARAMETERS', 'PHEAP_LIST_LOOKUP',
'HEAP_TUNING_PARAMETERS', 'UINT64', 'PPHEAP_SUBSEGMENT',
'LFH_HEAP', 'PHEAP_BUCKET', 'USHORT', 'HEAP_ENTRY_EXTRA',
'union__HEAP_ENTRY_0', 'HEAP_PSEUDO_TAG_ENTRY', 'BOOL',
'HEAP_LOCK', 'PPINTERLOCK_SEQ', 'LIST_ENTRY',
'USER_MEMORY_CACHE_ENTRY', 'union__HEAP_USERDATA_HEADER_0',
'PUINT16', 'PHEAP_UCR_DESCRIPTOR', 'PHEAP', 'PPVOID64',
'PLIST_ENTRY', 'struct__HEAP_COUNTERS',
'struct_c__SA_HEAP_SUBSEGMENT', 'uintmax_t', 'uint_fast32_t',
'PHEAP_SUBSEGMENT', 'struct__HEAP_USERDATA_HEADER_0_0', 'int64_t',
'int_fast16_t', 'HEAP_UCR_DESCRIPTOR', 'PHEAP_USERDATA_HEADER',
'PHEAP_LOOKASIDE', 'PHEAP_ENTRY_EXTRA',
'struct_c__SA_HEAP_TUNING_PARAMETERS',
'struct_c__SA_SINGLE_LIST_ENTRY',
'struct__USER_MEMORY_CACHE_ENTRY',
'struct_c__SA_HEAP_USERDATA_HEADER', 'PPLIST_ENTRY',
'struct__RTL_CRITICAL_SECTION_DEBUG', 'HEAP_SUBSEGMENT',
'int_least32_t', 'PCHAR', 'struct_c__SA_HEAP_ENTRY', 'ULONGLONG',
'struct_c__SA_HEAP', 'PPHEAP_ENTRY_EXTRA',
'struct__HEAP_TAG_ENTRY', 'PSHORT', 'struct__HEAP_FREE_ENTRY',
'HEAP', 'PHEAP_VIRTUAL_ALLOC_ENTRY', 'PPLFH_HEAP',
'union__HEAP_LOCK_0', 'PUINT8', 'PHEAP_COUNTERS',
'SINGLE_LIST_ENTRY', 'struct__HEAP_PSEUDO_TAG_ENTRY',
'PPHEAP_BUCKET_RUN_INFO', 'uint_fast64_t', 'PHEAP_ENTRY',
'struct__HEAP_ENTRY_EXTRA_0_0', 'PPHEAP_ENTRY',
'PPRTL_CRITICAL_SECTION', 'HEAP_LIST_LOOKUP', 'uint32_t',
'PPHEAP_UCR_DESCRIPTOR', 'PVOID', 'HEAP_BUCKET_COUNTERS']
|
gpl-3.0
|
fkolacek/FIT-VUT
|
bp-revok/python/lib/python2.7/site-packages/Werkzeug-0.9.6-py2.7.egg/werkzeug/debug/repr.py
|
280
|
9350
|
# -*- coding: utf-8 -*-
"""
werkzeug.debug.repr
~~~~~~~~~~~~~~~~~~~
This module implements object representations for debugging purposes.
Unlike the default repr these reprs expose a lot more information and
produce HTML instead of ASCII.
Together with the CSS and JavaScript files of the debugger this gives
a colorful and more compact output.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD.
"""
import sys
import re
import codecs
from traceback import format_exception_only
try:
from collections import deque
except ImportError: # pragma: no cover
deque = None
from werkzeug.utils import escape
from werkzeug._compat import iteritems, PY2, text_type, integer_types, \
string_types
missing = object()
_paragraph_re = re.compile(r'(?:\r\n|\r|\n){2,}')
RegexType = type(_paragraph_re)
HELP_HTML = '''\
<div class=box>
<h3>%(title)s</h3>
<pre class=help>%(text)s</pre>
</div>\
'''
OBJECT_DUMP_HTML = '''\
<div class=box>
<h3>%(title)s</h3>
%(repr)s
<table>%(items)s</table>
</div>\
'''
def debug_repr(obj):
"""Creates a debug repr of an object as HTML unicode string."""
return DebugReprGenerator().repr(obj)
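# Usage sketch (illustrative, not part of the module): the return value is a
# unicode HTML fragment ready to embed in the debugger's console, e.g.
#   debug_repr([1, 'two'])
#   # -> u'[<span class="number">1</span>, <span class="string">...</span>]'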
def dump(obj=missing):
"""Print the object details to stdout._write (for the interactive
console of the web debugger.
"""
gen = DebugReprGenerator()
if obj is missing:
rv = gen.dump_locals(sys._getframe(1).f_locals)
else:
rv = gen.dump_object(obj)
sys.stdout._write(rv)
class _Helper(object):
"""Displays an HTML version of the normal help, for the interactive
debugger only because it requires a patched sys.stdout.
"""
def __repr__(self):
return 'Type help(object) for help about object.'
def __call__(self, topic=None):
if topic is None:
sys.stdout._write('<span class=help>%s</span>' % repr(self))
return
import pydoc
pydoc.help(topic)
rv = sys.stdout.reset()
if isinstance(rv, bytes):
rv = rv.decode('utf-8', 'ignore')
paragraphs = _paragraph_re.split(rv)
if len(paragraphs) > 1:
title = paragraphs[0]
text = '\n\n'.join(paragraphs[1:])
else: # pragma: no cover
title = 'Help'
text = paragraphs[0]
sys.stdout._write(HELP_HTML % {'title': title, 'text': text})
helper = _Helper()
def _add_subclass_info(inner, obj, base):
if isinstance(base, tuple):
for base in base:
if type(obj) is base:
return inner
elif type(obj) is base:
return inner
module = ''
if obj.__class__.__module__ not in ('__builtin__', 'exceptions'):
module = '<span class="module">%s.</span>' % obj.__class__.__module__
return '%s%s(%s)' % (module, obj.__class__.__name__, inner)
class DebugReprGenerator(object):
def __init__(self):
self._stack = []
def _sequence_repr_maker(left, right, base=object(), limit=8):
def proxy(self, obj, recursive):
if recursive:
return _add_subclass_info(left + '...' + right, obj, base)
buf = [left]
have_extended_section = False
for idx, item in enumerate(obj):
if idx:
buf.append(', ')
if idx == limit:
buf.append('<span class="extended">')
have_extended_section = True
buf.append(self.repr(item))
if have_extended_section:
buf.append('</span>')
buf.append(right)
return _add_subclass_info(u''.join(buf), obj, base)
return proxy
list_repr = _sequence_repr_maker('[', ']', list)
tuple_repr = _sequence_repr_maker('(', ')', tuple)
set_repr = _sequence_repr_maker('set([', '])', set)
frozenset_repr = _sequence_repr_maker('frozenset([', '])', frozenset)
if deque is not None:
deque_repr = _sequence_repr_maker('<span class="module">collections.'
'</span>deque([', '])', deque)
del _sequence_repr_maker
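# Clarifying note (added): each *_repr bound above is a closure over its own
# delimiters and base type, so e.g. list_repr renders items past ``limit``
# (8 by default) inside a <span class="extended"> that the debugger's
# frontend can present more compactly.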
def regex_repr(self, obj):
pattern = repr(obj.pattern)
if PY2:
pattern = pattern.decode('string-escape', 'ignore')
else:
pattern = codecs.decode(pattern, 'unicode-escape', 'ignore')
if pattern[:1] == 'u':
pattern = 'ur' + pattern[1:]
else:
pattern = 'r' + pattern
return u're.compile(<span class="string regex">%s</span>)' % pattern
def string_repr(self, obj, limit=70):
buf = ['<span class="string">']
escaped = escape(obj)
a = repr(escaped[:limit])
b = repr(escaped[limit:])
if isinstance(obj, text_type) and PY2:
buf.append('u')
a = a[1:]
b = b[1:]
if b != "''":
buf.extend((a[:-1], '<span class="extended">', b[1:], '</span>'))
else:
buf.append(a)
buf.append('</span>')
return _add_subclass_info(u''.join(buf), obj, (bytes, text_type))
def dict_repr(self, d, recursive, limit=5):
if recursive:
return _add_subclass_info(u'{...}', d, dict)
buf = ['{']
have_extended_section = False
for idx, (key, value) in enumerate(iteritems(d)):
if idx:
buf.append(', ')
if idx == limit - 1:
buf.append('<span class="extended">')
have_extended_section = True
buf.append('<span class="pair"><span class="key">%s</span>: '
'<span class="value">%s</span></span>' %
(self.repr(key), self.repr(value)))
if have_extended_section:
buf.append('</span>')
buf.append('}')
return _add_subclass_info(u''.join(buf), d, dict)
def object_repr(self, obj):
r = repr(obj)
if PY2:
r = r.decode('utf-8', 'replace')
return u'<span class="object">%s</span>' % escape(r)
def dispatch_repr(self, obj, recursive):
if obj is helper:
return u'<span class="help">%r</span>' % helper
if isinstance(obj, (integer_types, float, complex)):
return u'<span class="number">%r</span>' % obj
if isinstance(obj, string_types):
return self.string_repr(obj)
if isinstance(obj, RegexType):
return self.regex_repr(obj)
if isinstance(obj, list):
return self.list_repr(obj, recursive)
if isinstance(obj, tuple):
return self.tuple_repr(obj, recursive)
if isinstance(obj, set):
return self.set_repr(obj, recursive)
if isinstance(obj, frozenset):
return self.frozenset_repr(obj, recursive)
if isinstance(obj, dict):
return self.dict_repr(obj, recursive)
if deque is not None and isinstance(obj, deque):
return self.deque_repr(obj, recursive)
return self.object_repr(obj)
def fallback_repr(self):
try:
info = ''.join(format_exception_only(*sys.exc_info()[:2]))
except Exception: # pragma: no cover
info = '?'
if PY2:
info = info.decode('utf-8', 'ignore')
return u'<span class="brokenrepr"><broken repr (%s)>' \
u'</span>' % escape(info.strip())
def repr(self, obj):
recursive = False
for item in self._stack:
if item is obj:
recursive = True
break
self._stack.append(obj)
try:
try:
return self.dispatch_repr(obj, recursive)
except Exception:
return self.fallback_repr()
finally:
self._stack.pop()
def dump_object(self, obj):
repr = items = None
if isinstance(obj, dict):
title = 'Contents of'
items = []
for key, value in iteritems(obj):
if not isinstance(key, string_types):
items = None
break
items.append((key, self.repr(value)))
if items is None:
items = []
repr = self.repr(obj)
for key in dir(obj):
try:
items.append((key, self.repr(getattr(obj, key))))
except Exception:
pass
title = 'Details for'
title += ' ' + object.__repr__(obj)[1:-1]
return self.render_object_dump(items, title, repr)
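# Added note: dump_object renders a dict with purely string keys as a plain
# key/value table ("Contents of ..."); anything else falls back to a
# dir()-based attribute dump ("Details for ..."), silently skipping
# attributes that cannot be read.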
def dump_locals(self, d):
items = [(key, self.repr(value)) for key, value in d.items()]
return self.render_object_dump(items, 'Local variables in frame')
def render_object_dump(self, items, title, repr=None):
html_items = []
for key, value in items:
html_items.append('<tr><th>%s<td><pre class=repr>%s</pre>' %
(escape(key), value))
if not html_items:
html_items.append('<tr><td><em>Nothing</em>')
return OBJECT_DUMP_HTML % {
'title': escape(title),
'repr': repr and '<pre class=repr>%s</pre>' % repr or '',
'items': '\n'.join(html_items)
}
|
apache-2.0
|
laurent-george/bokeh
|
examples/glyphs/trail.py
|
33
|
4656
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from math import sin, cos, atan2, sqrt, radians
import numpy as np
import scipy.ndimage as im
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.resources import INLINE
from bokeh.browserlib import view
from bokeh.models.glyphs import Line, Patches
from bokeh.models.widgets import VBox
from bokeh.models import (
Plot, GMapPlot, GMapOptions,
DataRange1d, ColumnDataSource,
LinearAxis, Grid,
PanTool, WheelZoomTool, ResetTool)
from bokeh.sampledata.mtb import obiszow_mtb_xcm
def haversin(theta):
return sin(0.5*theta)**2
def distance(p1, p2):
"""Distance between (lat1, lon1) and (lat2, lon2). """
R = 6371
lat1, lon1 = p1
lat2, lon2 = p2
phi1 = radians(lat1)
phi2 = radians(lat2)
delta_lat = radians(lat2 - lat1)
delta_lon = radians(lon2 - lon1)
a = haversin(delta_lat) + cos(phi1)*cos(phi2)*haversin(delta_lon)
return 2*R*atan2(sqrt(a), sqrt(1-a))
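# Quick sanity check (added, illustrative): with R = 6371 km this returns the
# great-circle distance in kilometres, e.g. one degree of longitude along the
# equator comes out to roughly 111.2 km:
#   round(distance((0.0, 0.0), (0.0, 1.0)), 1)  # -> 111.2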
def prep_data(dataset):
df = dataset.copy()
latlon = list(zip(df.lat, df.lon))
dist = np.array([ distance(latlon[i+1], latlon[i]) for i in range(len(latlon) - 1) ])
df["dist"] = np.concatenate(([0], np.cumsum(dist)))
slope = np.abs(100*np.diff(df.alt)/(1000*dist))
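# slope is the grade in percent: np.diff(df.alt) is in metres while dist is
# in kilometres, hence the factor of 1000 in the denominator.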
slope[np.where( slope < 4) ] = 0 # "green"
slope[np.where((slope >= 4) & (slope < 6))] = 1 # "yellow"
slope[np.where((slope >= 6) & (slope < 10))] = 2 # "pink"
slope[np.where((slope >= 10) & (slope < 15))] = 3 # "orange"
slope[np.where( slope >= 15 )] = 4 # "red"
slope = im.median_filter(slope, 6)
colors = np.empty_like(slope, dtype=object)
colors[np.where(slope == 0)] = "green"
colors[np.where(slope == 1)] = "yellow"
colors[np.where(slope == 2)] = "pink"
colors[np.where(slope == 3)] = "orange"
colors[np.where(slope == 4)] = "red"
df["colors"] = list(colors) + [None] # NOTE: add [None] just make pandas happy
return df
title = "Obiszów MTB XCM"
def trail_map(data):
lon = (min(data.lon) + max(data.lon))/2
lat = (min(data.lat) + max(data.lat))/2
map_options = GMapOptions(lng=lon, lat=lat, zoom=13)
plot = GMapPlot(title="%s - Trail Map" % title, map_options=map_options, plot_width=800, plot_height=800)
xaxis = LinearAxis()
plot.add_layout(xaxis, 'below')
yaxis = LinearAxis()
plot.add_layout(yaxis, 'left')
xgrid = Grid(plot=plot, dimension=0, ticker=xaxis.ticker, grid_line_dash="dashed", grid_line_color="gray")
ygrid = Grid(plot=plot, dimension=1, ticker=yaxis.ticker, grid_line_dash="dashed", grid_line_color="gray")
plot.renderers.extend([xgrid, ygrid])
plot.add_tools(PanTool(), WheelZoomTool(), ResetTool())
line_source = ColumnDataSource(dict(x=data.lon, y=data.lat, dist=data.dist))
line = Line(x="x", y="y", line_color="blue", line_width=2)
plot.add_glyph(line_source, line)
plot.x_range = DataRange1d()
plot.y_range = DataRange1d()
return plot
def altitude_profile(data):
plot = Plot(title="%s - Altitude Profile" % title, plot_width=800, plot_height=400)
xaxis = LinearAxis(axis_label="Distance (km)")
plot.add_layout(xaxis, 'below')
yaxis = LinearAxis(axis_label="Altitude (m)")
plot.add_layout(yaxis, 'left')
xgrid = Grid(plot=plot, dimension=0, ticker=xaxis.ticker)
ygrid = Grid(plot=plot, dimension=1, ticker=yaxis.ticker)
plot.renderers.extend([xgrid, ygrid])
plot.add_tools(PanTool(), WheelZoomTool(), ResetTool())
X, Y = data.dist, data.alt
y0 = min(Y)
patches_source = ColumnDataSource(dict(
xs = [ [X[i], X[i+1], X[i+1], X[i]] for i in range(len(X[:-1])) ],
ys = [ [y0, y0, Y[i+1], Y[i]] for i in range(len(Y[:-1])) ],
color = data.colors[:-1]
))
patches = Patches(xs="xs", ys="ys", fill_color="color", line_color="color")
plot.add_glyph(patches_source, patches)
line_source = ColumnDataSource(dict(
x = data.dist,
y = data.alt,
))
line = Line(x='x', y='y', line_color="black", line_width=1)
plot.add_glyph(line_source, line)
plot.x_range = DataRange1d()
plot.y_range = DataRange1d()
return plot
data = prep_data(obiszow_mtb_xcm)
trail = trail_map(data)
altitude = altitude_profile(data)
layout = VBox(children=[altitude, trail])
doc = Document()
doc.add(layout)
if __name__ == "__main__":
filename = "trail.html"
with open(filename, "w") as f:
f.write(file_html(doc, INLINE, "Trail map and altitude profile"))
print("Wrote %s" % filename)
view(filename)
|
bsd-3-clause
|
avtoritet/httpie
|
tests/test_cli.py
|
46
|
11737
|
"""CLI argument parsing related tests."""
import json
# noinspection PyCompatibility
import argparse
import pytest
from requests.exceptions import InvalidSchema
from httpie import input
from httpie.input import KeyValue, KeyValueArgType, DataDict
from httpie import ExitStatus
from httpie.cli import parser
from utils import TestEnvironment, http, HTTP_OK
from fixtures import (
FILE_PATH_ARG, JSON_FILE_PATH_ARG,
JSON_FILE_CONTENT, FILE_CONTENT, FILE_PATH
)
class TestItemParsing:
key_value = KeyValueArgType(*input.SEP_GROUP_ALL_ITEMS)
def test_invalid_items(self):
items = ['no-separator']
for item in items:
pytest.raises(argparse.ArgumentTypeError, self.key_value, item)
def test_escape_separator(self):
items = input.parse_items([
# headers
self.key_value(r'foo\:bar:baz'),
self.key_value(r'jack\@jill:hill'),
# data
self.key_value(r'baz\=bar=foo'),
# files
self.key_value(r'bar\@baz@%s' % FILE_PATH_ARG),
])
# `requests.structures.CaseInsensitiveDict` => `dict`
headers = dict(items.headers._store.values())
assert headers == {
'foo:bar': 'baz',
'jack@jill': 'hill',
}
assert items.data == {'baz=bar': 'foo'}
assert 'bar@baz' in items.files
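# Added note: in httpie's item syntax ':' introduces a header, '=' a data
# field, '==' a query parameter and '@' a file upload; a backslash before a
# separator (as above) makes the character part of the key instead.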
@pytest.mark.parametrize(('string', 'key', 'sep', 'value'), [
('path=c:\windows', 'path', '=', 'c:\windows'),
('path=c:\windows\\', 'path', '=', 'c:\windows\\'),
('path\==c:\windows', 'path=', '=', 'c:\windows'),
])
def test_backslash_before_non_special_character_does_not_escape(
self, string, key, sep, value):
expected = KeyValue(orig=string, key=key, sep=sep, value=value)
actual = self.key_value(string)
assert actual == expected
def test_escape_longsep(self):
items = input.parse_items([
self.key_value(r'bob\:==foo'),
])
assert items.params == {'bob:': 'foo'}
def test_valid_items(self):
items = input.parse_items([
self.key_value('string=value'),
self.key_value('header:value'),
self.key_value('list:=["a", 1, {}, false]'),
self.key_value('obj:={"a": "b"}'),
self.key_value('eh:'),
self.key_value('ed='),
self.key_value('bool:=true'),
self.key_value('file@' + FILE_PATH_ARG),
self.key_value('query==value'),
self.key_value('string-embed=@' + FILE_PATH_ARG),
self.key_value('raw-json-embed:=@' + JSON_FILE_PATH_ARG),
])
# Parsed headers
# `requests.structures.CaseInsensitiveDict` => `dict`
headers = dict(items.headers._store.values())
assert headers == {'header': 'value', 'eh': ''}
# Parsed data
raw_json_embed = items.data.pop('raw-json-embed')
assert raw_json_embed == json.loads(JSON_FILE_CONTENT)
items.data['string-embed'] = items.data['string-embed'].strip()
assert dict(items.data) == {
"ed": "",
"string": "value",
"bool": True,
"list": ["a", 1, {}, False],
"obj": {"a": "b"},
"string-embed": FILE_CONTENT,
}
# Parsed query string parameters
assert items.params == {'query': 'value'}
# Parsed file fields
assert 'file' in items.files
assert (items.files['file'][1].read().strip().decode('utf8')
== FILE_CONTENT)
def test_multiple_file_fields_with_same_field_name(self):
items = input.parse_items([
self.key_value('file_field@' + FILE_PATH_ARG),
self.key_value('file_field@' + FILE_PATH_ARG),
])
assert len(items.files['file_field']) == 2
def test_multiple_text_fields_with_same_field_name(self):
items = input.parse_items(
[self.key_value('text_field=a'),
self.key_value('text_field=b')],
data_class=DataDict
)
assert items.data['text_field'] == ['a', 'b']
assert list(items.data.items()) == [
('text_field', 'a'),
('text_field', 'b'),
]
class TestQuerystring:
def test_query_string_params_in_url(self, httpbin):
r = http('--print=Hhb', 'GET', httpbin.url + '/get?a=1&b=2')
path = '/get?a=1&b=2'
url = httpbin.url + path
assert HTTP_OK in r
assert 'GET %s HTTP/1.1' % path in r
assert '"url": "%s"' % url in r
def test_query_string_params_items(self, httpbin):
r = http('--print=Hhb', 'GET', httpbin.url + '/get', 'a==1')
path = '/get?a=1'
url = httpbin.url + path
assert HTTP_OK in r
assert 'GET %s HTTP/1.1' % path in r
assert '"url": "%s"' % url in r
def test_query_string_params_in_url_and_items_with_duplicates(self,
httpbin):
r = http('--print=Hhb', 'GET',
httpbin.url + '/get?a=1&a=1', 'a==1', 'a==1')
path = '/get?a=1&a=1&a=1&a=1'
url = httpbin.url + path
assert HTTP_OK in r
assert 'GET %s HTTP/1.1' % path in r
assert '"url": "%s"' % url in r
class TestURLshorthand:
def test_expand_localhost_shorthand(self):
args = parser.parse_args(args=[':'], env=TestEnvironment())
assert args.url == 'http://localhost'
def test_expand_localhost_shorthand_with_slash(self):
args = parser.parse_args(args=[':/'], env=TestEnvironment())
assert args.url == 'http://localhost/'
def test_expand_localhost_shorthand_with_port(self):
args = parser.parse_args(args=[':3000'], env=TestEnvironment())
assert args.url == 'http://localhost:3000'
def test_expand_localhost_shorthand_with_path(self):
args = parser.parse_args(args=[':/path'], env=TestEnvironment())
assert args.url == 'http://localhost/path'
def test_expand_localhost_shorthand_with_port_and_slash(self):
args = parser.parse_args(args=[':3000/'], env=TestEnvironment())
assert args.url == 'http://localhost:3000/'
def test_expand_localhost_shorthand_with_port_and_path(self):
args = parser.parse_args(args=[':3000/path'], env=TestEnvironment())
assert args.url == 'http://localhost:3000/path'
def test_dont_expand_shorthand_ipv6_as_shorthand(self):
args = parser.parse_args(args=['::1'], env=TestEnvironment())
assert args.url == 'http://::1'
def test_dont_expand_longer_ipv6_as_shorthand(self):
args = parser.parse_args(
args=['::ffff:c000:0280'],
env=TestEnvironment()
)
assert args.url == 'http://::ffff:c000:0280'
def test_dont_expand_full_ipv6_as_shorthand(self):
args = parser.parse_args(
args=['0000:0000:0000:0000:0000:0000:0000:0001'],
env=TestEnvironment()
)
assert args.url == 'http://0000:0000:0000:0000:0000:0000:0000:0001'
class TestArgumentParser:
def setup_method(self, method):
self.parser = input.Parser()
def test_guess_when_method_set_and_valid(self):
self.parser.args = argparse.Namespace()
self.parser.args.method = 'GET'
self.parser.args.url = 'http://example.com/'
self.parser.args.items = []
self.parser.args.ignore_stdin = False
self.parser.env = TestEnvironment()
self.parser._guess_method()
assert self.parser.args.method == 'GET'
assert self.parser.args.url == 'http://example.com/'
assert self.parser.args.items == []
def test_guess_when_method_not_set(self):
self.parser.args = argparse.Namespace()
self.parser.args.method = None
self.parser.args.url = 'http://example.com/'
self.parser.args.items = []
self.parser.args.ignore_stdin = False
self.parser.env = TestEnvironment()
self.parser._guess_method()
assert self.parser.args.method == 'GET'
assert self.parser.args.url == 'http://example.com/'
assert self.parser.args.items == []
def test_guess_when_method_set_but_invalid_and_data_field(self):
self.parser.args = argparse.Namespace()
self.parser.args.method = 'http://example.com/'
self.parser.args.url = 'data=field'
self.parser.args.items = []
self.parser.args.ignore_stdin = False
self.parser.env = TestEnvironment()
self.parser._guess_method()
assert self.parser.args.method == 'POST'
assert self.parser.args.url == 'http://example.com/'
assert self.parser.args.items == [
KeyValue(key='data',
value='field',
sep='=',
orig='data=field')
]
def test_guess_when_method_set_but_invalid_and_header_field(self):
self.parser.args = argparse.Namespace()
self.parser.args.method = 'http://example.com/'
self.parser.args.url = 'test:header'
self.parser.args.items = []
self.parser.args.ignore_stdin = False
self.parser.env = TestEnvironment()
self.parser._guess_method()
assert self.parser.args.method == 'GET'
assert self.parser.args.url == 'http://example.com/'
        assert self.parser.args.items == [
KeyValue(key='test',
value='header',
sep=':',
orig='test:header')
]
def test_guess_when_method_set_but_invalid_and_item_exists(self):
self.parser.args = argparse.Namespace()
self.parser.args.method = 'http://example.com/'
self.parser.args.url = 'new_item=a'
self.parser.args.items = [
KeyValue(
key='old_item', value='b', sep='=', orig='old_item=b')
]
self.parser.args.ignore_stdin = False
self.parser.env = TestEnvironment()
self.parser._guess_method()
        assert self.parser.args.items == [
KeyValue(key='new_item', value='a', sep='=', orig='new_item=a'),
KeyValue(
key='old_item', value='b', sep='=', orig='old_item=b'),
]
class TestNoOptions:
def test_valid_no_options(self, httpbin):
r = http('--verbose', '--no-verbose', 'GET', httpbin.url + '/get')
assert 'GET /get HTTP/1.1' not in r
def test_invalid_no_options(self, httpbin):
r = http('--no-war', 'GET', httpbin.url + '/get',
error_exit_ok=True)
assert r.exit_status == 1
assert 'unrecognized arguments: --no-war' in r.stderr
assert 'GET /get HTTP/1.1' not in r
class TestIgnoreStdin:
def test_ignore_stdin(self, httpbin):
with open(FILE_PATH) as f:
env = TestEnvironment(stdin=f, stdin_isatty=False)
r = http('--ignore-stdin', '--verbose', httpbin.url + '/get',
env=env)
assert HTTP_OK in r
assert 'GET /get HTTP' in r, "Don't default to POST."
assert FILE_CONTENT not in r, "Don't send stdin data."
def test_ignore_stdin_cannot_prompt_password(self, httpbin):
r = http('--ignore-stdin', '--auth=no-password', httpbin.url + '/get',
error_exit_ok=True)
assert r.exit_status == ExitStatus.ERROR
assert 'because --ignore-stdin' in r.stderr
class TestSchemes:
def test_custom_scheme(self):
# InvalidSchema is expected because HTTPie
# shouldn't touch a formally valid scheme.
with pytest.raises(InvalidSchema):
http('foo+bar-BAZ.123://bah')
|
bsd-3-clause
|
ryanfitch/Python3_Koans_Solutions
|
python3/koans/about_generators.py
|
95
|
4406
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Written in place of AboutBlocks in the Ruby Koans
#
# Note: Both blocks and generators use a yield keyword, but they behave
# quite differently
#
from runner.koan import *
class AboutGenerators(Koan):
def test_generating_values_on_the_fly(self):
result = list()
bacon_generator = (n + ' bacon' for n in ['crunchy','veggie','danish'])
for bacon in bacon_generator:
result.append(bacon)
self.assertEqual(__, result)
def test_generators_are_different_to_list_comprehensions(self):
num_list = [x*2 for x in range(1,3)]
num_generator = (x*2 for x in range(1,3))
self.assertEqual(2, num_list[0])
# A generator has to be iterated through.
with self.assertRaises(___): num = num_generator[0]
self.assertEqual(__, list(num_generator)[0])
    # Both list comprehensions and generators can be iterated through. However,
    # a generator runs lazily in a single pass: its values are generated on
    # the fly instead of stored.
#
# Generators are more memory friendly, but less versatile
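    # A small aside (illustrative, not part of the koan): the memory
    # difference is easy to see with sys.getsizeof, since a list stores every
    # element while a generator only keeps its current iteration state:
    #
    #   import sys
    #   sys.getsizeof([x * 2 for x in range(10000)])  # tens of kilobytes
    #   sys.getsizeof(x * 2 for x in range(10000))    # ~100 bytes, size-independent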
def test_generator_expressions_are_a_one_shot_deal(self):
dynamite = ('Boom!' for n in range(3))
attempt1 = list(dynamite)
attempt2 = list(dynamite)
self.assertEqual(__, list(attempt1))
self.assertEqual(__, list(attempt2))
# ------------------------------------------------------------------
def simple_generator_method(self):
yield 'peanut'
yield 'butter'
yield 'and'
yield 'jelly'
def test_generator_method_will_yield_values_during_iteration(self):
result = list()
for item in self.simple_generator_method():
result.append(item)
self.assertEqual(__, result)
def test_coroutines_can_take_arguments(self):
result = self.simple_generator_method()
self.assertEqual(__, next(result))
self.assertEqual(__, next(result))
result.close()
# ------------------------------------------------------------------
def square_me(self, seq):
for x in seq:
yield x * x
def test_generator_method_with_parameter(self):
result = self.square_me(range(2,5))
self.assertEqual(__, list(result))
# ------------------------------------------------------------------
def sum_it(self, seq):
value = 0
for num in seq:
# The local state of 'value' will be retained between iterations
value += num
yield value
def test_generator_keeps_track_of_local_variables(self):
result = self.sum_it(range(2,5))
self.assertEqual(__, list(result))
# ------------------------------------------------------------------
def generator_with_coroutine(self):
result = yield
yield result
def test_generators_can_take_coroutines(self):
generator = self.generator_with_coroutine()
# THINK ABOUT IT:
# Why is this line necessary?
#
# Hint: Read the "Specification: Sending Values into Generators"
# section of http://www.python.org/dev/peps/pep-0342/
next(generator)
self.assertEqual(__, generator.send(1 + 2))
def test_before_sending_a_value_to_a_generator_next_must_be_called(self):
generator = self.generator_with_coroutine()
try:
generator.send(1+2)
except TypeError as ex:
ex2 = ex
self.assertRegexpMatches(ex2.args[0], __)
# ------------------------------------------------------------------
def yield_tester(self):
value = yield
if value:
yield value
else:
yield 'no value'
def test_generators_can_see_if_they_have_been_called_with_a_value(self):
generator = self.yield_tester()
next(generator)
self.assertEqual('with value', generator.send('with value'))
generator2 = self.yield_tester()
next(generator2)
self.assertEqual(__, next(generator2))
def test_send_none_is_equivalent_to_next(self):
generator = self.yield_tester()
next(generator)
# 'next(generator)' is exactly equivalent to 'generator.send(None)'
self.assertEqual(__, generator.send(None))
|
mit
|
teltek/edx-platform
|
openedx/core/djangoapps/contentserver/middleware.py
|
4
|
15575
|
"""
Middleware to serve assets.
"""
import logging
import datetime
log = logging.getLogger(__name__)
try:
import newrelic.agent
except ImportError:
newrelic = None # pylint: disable=invalid-name
from django.http import (
HttpResponse, HttpResponseNotModified, HttpResponseForbidden,
HttpResponseBadRequest, HttpResponseNotFound, HttpResponsePermanentRedirect)
from six import text_type
from student.models import CourseEnrollment
from xmodule.assetstore.assetmgr import AssetManager
from xmodule.contentstore.content import StaticContent, XASSET_LOCATION_TAG
from xmodule.modulestore import InvalidLocationError
from opaque_keys import InvalidKeyError
from opaque_keys.edx.locator import AssetLocator
from openedx.core.djangoapps.header_control import force_header_for_response
from .caching import get_cached_content, set_cached_content
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.exceptions import NotFoundError
from .models import CourseAssetCacheTtlConfig, CdnUserAgentsConfig
# TODO: As soon as we have a reasonable way to serialize/deserialize AssetKeys, we need
# to change this file so that instead of using course_id_partial, we're just using asset keys
HTTP_DATE_FORMAT = "%a, %d %b %Y %H:%M:%S GMT"
class StaticContentServer(object):
"""
Serves course assets to end users. Colloquially referred to as "contentserver."
"""
def is_asset_request(self, request):
"""Determines whether the given request is an asset request"""
return (
request.path.startswith('/' + XASSET_LOCATION_TAG + '/')
or
request.path.startswith('/' + AssetLocator.CANONICAL_NAMESPACE)
or
StaticContent.is_versioned_asset_path(request.path)
)
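    # Illustrative paths that match (assuming the standard edX conventions
    # where XASSET_LOCATION_TAG is 'c4x' and the canonical asset namespace is
    # 'asset-v1'):
    #   /c4x/edX/DemoX/asset/logo.png
    #   /asset-v1:edX+DemoX+2020+type@asset+block@logo.png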
# pylint: disable=too-many-statements
def process_request(self, request):
"""Process the given request"""
asset_path = request.path
if self.is_asset_request(request):
# Make sure we can convert this request into a location.
if AssetLocator.CANONICAL_NAMESPACE in asset_path:
asset_path = asset_path.replace('block/', 'block@', 1)
# If this is a versioned request, pull out the digest and chop off the prefix.
requested_digest = None
if StaticContent.is_versioned_asset_path(asset_path):
requested_digest, asset_path = StaticContent.parse_versioned_asset_path(asset_path)
# Make sure we have a valid location value for this asset.
try:
loc = StaticContent.get_location_from_path(asset_path)
except (InvalidLocationError, InvalidKeyError):
return HttpResponseBadRequest()
# Attempt to load the asset to make sure it exists, and grab the asset digest
# if we're able to load it.
actual_digest = None
try:
content = self.load_asset_from_location(loc)
actual_digest = getattr(content, "content_digest", None)
except (ItemNotFoundError, NotFoundError):
return HttpResponseNotFound()
# If this was a versioned asset, and the digest doesn't match, redirect
# them to the actual version.
if requested_digest is not None and actual_digest is not None and (actual_digest != requested_digest):
actual_asset_path = StaticContent.add_version_to_asset_path(asset_path, actual_digest)
return HttpResponsePermanentRedirect(actual_asset_path)
# Set the basics for this request. Make sure that the course key for this
# asset has a run, which old-style courses do not. Otherwise, this will
# explode when the key is serialized to be sent to NR.
safe_course_key = loc.course_key
if safe_course_key.run is None:
safe_course_key = safe_course_key.replace(run='only')
if newrelic:
newrelic.agent.add_custom_parameter('course_id', safe_course_key)
newrelic.agent.add_custom_parameter('org', loc.org)
newrelic.agent.add_custom_parameter('contentserver.path', loc.path)
# Figure out if this is a CDN using us as the origin.
is_from_cdn = StaticContentServer.is_cdn_request(request)
newrelic.agent.add_custom_parameter('contentserver.from_cdn', is_from_cdn)
# Check if this content is locked or not.
locked = self.is_content_locked(content)
newrelic.agent.add_custom_parameter('contentserver.locked', locked)
# Check that user has access to the content.
if not self.is_user_authorized(request, content, loc):
return HttpResponseForbidden('Unauthorized')
# Figure out if the client sent us a conditional request, and let them know
# if this asset has changed since then.
last_modified_at_str = content.last_modified_at.strftime(HTTP_DATE_FORMAT)
if 'HTTP_IF_MODIFIED_SINCE' in request.META:
if_modified_since = request.META['HTTP_IF_MODIFIED_SINCE']
if if_modified_since == last_modified_at_str:
return HttpResponseNotModified()
# *** File streaming within a byte range ***
# If a Range is provided, parse Range attribute of the request
# Add Content-Range in the response if Range is structurally correct
# Request -> Range attribute structure: "Range: bytes=first-[last]"
# Response -> Content-Range attribute structure: "Content-Range: bytes first-last/totalLength"
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35
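            # Illustrative example: "Range: bytes=0-499" against a 1234-byte
            # asset produces a 206 response carrying bytes 0..499, with
            # "Content-Range: bytes 0-499/1234" and "Content-Length: 500".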
response = None
if request.META.get('HTTP_RANGE'):
# If we have a StaticContent, get a StaticContentStream. Can't manipulate the bytes otherwise.
if isinstance(content, StaticContent):
content = AssetManager.find(loc, as_stream=True)
header_value = request.META['HTTP_RANGE']
try:
unit, ranges = parse_range_header(header_value, content.length)
except ValueError as exception:
# If the header field is syntactically invalid it should be ignored.
log.exception(
u"%s in Range header: %s for content: %s", text_type(exception), header_value, unicode(loc)
)
else:
if unit != 'bytes':
# Only accept ranges in bytes
log.warning(u"Unknown unit in Range header: %s for content: %s", header_value, text_type(loc))
elif len(ranges) > 1:
# According to Http/1.1 spec content for multiple ranges should be sent as a multipart message.
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.16
# But we send back the full content.
log.warning(
u"More than 1 ranges in Range header: %s for content: %s", header_value, text_type(loc)
)
else:
first, last = ranges[0]
if 0 <= first <= last < content.length:
# If the byte range is satisfiable
response = HttpResponse(content.stream_data_in_range(first, last))
response['Content-Range'] = 'bytes {first}-{last}/{length}'.format(
first=first, last=last, length=content.length
)
response['Content-Length'] = str(last - first + 1)
response.status_code = 206 # Partial Content
if newrelic:
newrelic.agent.add_custom_parameter('contentserver.ranged', True)
else:
log.warning(
u"Cannot satisfy ranges in Range header: %s for content: %s",
header_value, text_type(loc)
)
return HttpResponse(status=416) # Requested Range Not Satisfiable
# If Range header is absent or syntactically invalid return a full content response.
if response is None:
response = HttpResponse(content.stream_data())
response['Content-Length'] = content.length
if newrelic:
newrelic.agent.add_custom_parameter('contentserver.content_len', content.length)
newrelic.agent.add_custom_parameter('contentserver.content_type', content.content_type)
# "Accept-Ranges: bytes" tells the user that only "bytes" ranges are allowed
response['Accept-Ranges'] = 'bytes'
response['Content-Type'] = content.content_type
response['X-Frame-Options'] = 'ALLOW'
# Set any caching headers, and do any response cleanup needed. Based on how much
# middleware we have in place, there's no easy way to use the built-in Django
# utilities and properly sanitize and modify a response to ensure that it is as
# cacheable as possible, which is why we do it ourselves.
self.set_caching_headers(content, response)
return response
def set_caching_headers(self, content, response):
"""
Sets caching headers based on whether or not the asset is locked.
"""
is_locked = getattr(content, "locked", False)
# We want to signal to the end user's browser, and to any intermediate proxies/caches,
# whether or not this asset is cacheable. If we have a TTL configured, we inform the
# caller, for unlocked assets, how long they are allowed to cache it. Since locked
# assets should be restricted to enrolled students, we simply send headers that
# indicate there should be no caching whatsoever.
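        # Illustrative outcome, assuming a configured TTL of 3600 seconds: an
        # unlocked asset is sent with "Cache-Control: public, max-age=3600,
        # s-maxage=3600" plus a matching Expires stamp, while a locked asset
        # gets "Cache-Control: private, no-cache, no-store".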
cache_ttl = CourseAssetCacheTtlConfig.get_cache_ttl()
if cache_ttl > 0 and not is_locked:
if newrelic:
newrelic.agent.add_custom_parameter('contentserver.cacheable', True)
response['Expires'] = StaticContentServer.get_expiration_value(datetime.datetime.utcnow(), cache_ttl)
response['Cache-Control'] = "public, max-age={ttl}, s-maxage={ttl}".format(ttl=cache_ttl)
elif is_locked:
if newrelic:
newrelic.agent.add_custom_parameter('contentserver.cacheable', False)
response['Cache-Control'] = "private, no-cache, no-store"
response['Last-Modified'] = content.last_modified_at.strftime(HTTP_DATE_FORMAT)
# Force the Vary header to only vary responses on Origin, so that XHR and browser requests get cached
# separately and don't screw over one another. i.e. a browser request that doesn't send Origin, and
# caches a version of the response without CORS headers, in turn breaking XHR requests.
force_header_for_response(response, 'Vary', 'Origin')
@staticmethod
def is_cdn_request(request):
"""
Attempts to determine whether or not the given request is coming from a CDN.
Currently, this is a static check because edx.org only uses CloudFront, but may
be expanded in the future.
"""
cdn_user_agents = CdnUserAgentsConfig.get_cdn_user_agents()
user_agent = request.META.get('HTTP_USER_AGENT', '')
if user_agent in cdn_user_agents:
# This is a CDN request.
return True
return False
@staticmethod
def get_expiration_value(now, cache_ttl):
"""Generates an RFC1123 datetime string based on a future offset."""
expire_dt = now + datetime.timedelta(seconds=cache_ttl)
return expire_dt.strftime(HTTP_DATE_FORMAT)
def is_content_locked(self, content):
"""
Determines whether or not the given content is locked.
"""
return bool(getattr(content, "locked", False))
def is_user_authorized(self, request, content, location):
"""
Determines whether or not the user for this request is authorized to view the given asset.
"""
if not self.is_content_locked(content):
return True
if not hasattr(request, "user") or not request.user.is_authenticated:
return False
if not request.user.is_staff:
deprecated = getattr(location, 'deprecated', False)
if deprecated and not CourseEnrollment.is_enrolled_by_partial(request.user, location.course_key):
return False
if not deprecated and not CourseEnrollment.is_enrolled(request.user, location.course_key):
return False
return True
def load_asset_from_location(self, location):
"""
Loads an asset based on its location, either retrieving it from a cache
or loading it directly from the contentstore.
"""
# See if we can load this item from cache.
content = get_cached_content(location)
if content is None:
# Not in cache, so just try and load it from the asset manager.
            # AssetManager.find raises ItemNotFoundError / NotFoundError for a
            # missing asset; let those propagate to the caller.
            content = AssetManager.find(location, as_stream=True)
# Now that we fetched it, let's go ahead and try to cache it. We cap this at 1MB
# because it's the default for memcached and also we don't want to do too much
# buffering in memory when we're serving an actual request.
if content.length is not None and content.length < 1048576:
content = content.copy_to_in_mem()
set_cached_content(content)
return content
def parse_range_header(header_value, content_length):
"""
Returns the unit and a list of (start, end) tuples of ranges.
Raises ValueError if header is syntactically invalid or does not contain a range.
See spec for details: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35
"""
unit = None
ranges = []
if '=' in header_value:
unit, byte_ranges_string = header_value.split('=')
# Parse the byte ranges.
for byte_range_string in byte_ranges_string.split(','):
byte_range_string = byte_range_string.strip()
# Case 0:
if '-' not in byte_range_string: # Invalid syntax of header value.
raise ValueError('Invalid syntax.')
# Case 1: -500
elif byte_range_string.startswith('-'):
first = max(0, (content_length + int(byte_range_string)))
last = content_length - 1
# Case 2: 500-
elif byte_range_string.endswith('-'):
first = int(byte_range_string[0:-1])
last = content_length - 1
# Case 3: 500-999
else:
first, last = byte_range_string.split('-')
first = int(first)
last = min(int(last), content_length - 1)
ranges.append((first, last))
if len(ranges) == 0:
raise ValueError('Invalid syntax')
return unit, ranges
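# A few illustrative calls (not part of the original module), assuming a
# 1234-byte asset:
#
#   parse_range_header('bytes=0-499', 1234)   # -> ('bytes', [(0, 499)])
#   parse_range_header('bytes=-500', 1234)    # -> ('bytes', [(734, 1233)])
#   parse_range_header('bytes=500-', 1234)    # -> ('bytes', [(500, 1233)])
#   parse_range_header('malformed', 1234)     # raises ValueError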
|
agpl-3.0
|
osaddon/cimi
|
cimi/cimi.py
|
1
|
6606
|
# Copyright (c) 2012 IBM
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cimi middleware.
"""
from nova.openstack.common import log as logging
from urllib import unquote
from webob import Request
from urlparse import urlparse
import json
import threading
from cimiapp.machine import (MachineCtrler,
MachineColCtrler)
from cimiapp.machineimage import (MachineImageCtrler,
MachineImageColCtrler)
from cimiapp.machineconfig import (MachineConfigCtrler,
MachineConfigColCtrler)
from cimiapp.network import (NetworkInterfaceCtrler,
NetworkInterfaceColCtrler)
from cimiapp.cloudentrypoint import CloudEntryPointCtrler
from cimiapp.address import (NetworkAddressCtrler,
NetworkAddressColCtrler)
from cimiapp.volume import VolumeColCtrler, VolumeCtrler
from cimiapp.machinevolume import (MachineVolumeCtrler,
MachineVolumeColCtrler)
from cimiapp.cimiutils import get_err_response
LOG = logging.getLogger(__name__)
LOCK = threading.Lock()
class CIMIMiddleware(object):
"""CIMI Middleware"""
CONTROLLERS = {'cloudentrypoint': CloudEntryPointCtrler,
'machine': MachineCtrler,
'machinecollection': MachineColCtrler,
'machineconfiguration': MachineConfigCtrler,
'machineconfigurationcollection': MachineConfigColCtrler,
'machineimage': MachineImageCtrler,
'machineimagecollection': MachineImageColCtrler,
'networkinterface': NetworkInterfaceCtrler,
'networkinterfacescollection': NetworkInterfaceColCtrler,
'machinenetworkinterfaceaddress': NetworkAddressCtrler,
'machinenetworkinterfaceaddressescollection':
NetworkAddressColCtrler,
'volumecollection': VolumeColCtrler,
'volume': VolumeCtrler,
'machinevolume': MachineVolumeCtrler,
'machinevolumecollection': MachineVolumeColCtrler}
def __init__(self, app, conf, *args, **kwargs):
self.app = app
self.conf = conf
self.request_prefix = self.conf.get('request_prefix')
self.prefix_length = len(self.request_prefix)
def _process_config(self, service_name):
endpoint = self.conf.get(service_name)
if endpoint:
parts = urlparse(endpoint)
self.conf.setdefault(service_name + '_host', parts.hostname)
self.conf.setdefault(service_name + '_port', parts.port)
self.conf.setdefault(service_name + '_scheme', parts.scheme)
def _process_config_header(self, env):
"""
        this method gets the catalog endpoints from the header if keystone
        is used.
"""
if not self.conf.get('CONFIG_DONE'):
LOG.info('processing header')
# critical section, acquire a lock
if LOCK.acquire():
self.conf.setdefault('CONFIG_DONE', True)
catalog_str = env.get('HTTP_X_SERVICE_CATALOG')
if catalog_str:
catalogs = json.loads(catalog_str)
for catalog in catalogs:
name = catalog['type'] + '_endpoint'
if not self.conf.get(name):
uri = catalog['endpoints'][0]['publicURL']
self.conf.setdefault(name, uri)
self._process_config('volume_endpoint')
self._process_config('compute_endpoint')
LOCK.release()
def get_controller(self, path):
"""Get the request controller according to the request path
        this method returns a response, a controller, a tenant id and the
        parsed path segments.
        if the path starts with cimiv1, then this is a CIMI request and the
        next segment in the path should indicate the controller. if the
        controller does not exist, then the response will indicate the error.
        if the controller is found, then the response will be None.
        if the path does not start with cimiv1, then this is not a CIMI
        request; the controller and response will be None. the request should be
forwarded to the next filter in the pipeline.
"""
parts = path.strip('/').split('/')
# each request should have /cimiv1/tenant_id/controller_key
# in its url pattern.
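        # Illustrative example (hypothetical ids), assuming the request prefix
        # has already been consumed as SCRIPT_NAME: for a PATH_INFO of
        # '/tenant123/machine/vm1', parts is ['tenant123', 'machine', 'vm1'],
        # the controller key is 'machine', and this returns
        # (None, MachineCtrler, 'tenant123', ['vm1']).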
if len(parts) >= 2:
controller_key = parts[1].lower()
controller = self.CONTROLLERS.get(controller_key)
return None, controller, parts[0], parts[2:]
else:
resp = get_err_response('BadRequest')
return resp, None, None, None
def __call__(self, env, start_response):
LOG.info(env)
if env.get('SCRIPT_NAME', '').startswith(self.request_prefix):
self._process_config_header(env)
path = unquote(env.get('PATH_INFO', ''))
response, controller, tenant_id, parts = self.get_controller(path)
if response:
return response(env, start_response)
elif controller:
req = Request(env)
ctrler = controller(self.conf, self.app, req,
tenant_id, *parts)
method = env.get('REQUEST_METHOD').upper()
if hasattr(ctrler, method) and not method.startswith('_'):
res = getattr(ctrler, method)(req, *parts)
return res(env, start_response)
else:
res = get_err_response('NotImplemented')
return res(env, start_response)
else:
res = get_err_response('NotImplemented')
return res(env, start_response)
else:
return self.app(env, start_response)
|
apache-2.0
|
stacywsmith/ansible
|
test/units/executor/test_playbook_executor.py
|
60
|
6304
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import MagicMock
from ansible.executor.playbook_executor import PlaybookExecutor
from ansible.playbook import Playbook
from ansible.template import Templar
from units.mock.loader import DictDataLoader
class TestPlaybookExecutor(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_get_serialized_batches(self):
fake_loader = DictDataLoader({
'no_serial.yml': '''
- hosts: all
gather_facts: no
tasks:
- debug: var=inventory_hostname
''',
'serial_int.yml': '''
- hosts: all
gather_facts: no
serial: 2
tasks:
- debug: var=inventory_hostname
''',
'serial_pct.yml': '''
- hosts: all
gather_facts: no
serial: 20%
tasks:
- debug: var=inventory_hostname
''',
'serial_list.yml': '''
- hosts: all
gather_facts: no
serial: [1, 2, 3]
tasks:
- debug: var=inventory_hostname
''',
'serial_list_mixed.yml': '''
- hosts: all
gather_facts: no
serial: [1, "20%", -1]
tasks:
- debug: var=inventory_hostname
''',
})
mock_inventory = MagicMock()
mock_var_manager = MagicMock()
# fake out options to use the syntax CLI switch, which will ensure
# the PlaybookExecutor doesn't create a TaskQueueManager
mock_options = MagicMock()
mock_options.syntax.value = True
templar = Templar(loader=fake_loader)
pbe = PlaybookExecutor(
playbooks=['no_serial.yml', 'serial_int.yml', 'serial_pct.yml', 'serial_list.yml', 'serial_list_mixed.yml'],
inventory=mock_inventory,
variable_manager=mock_var_manager,
loader=fake_loader,
options=mock_options,
passwords=[],
)
playbook = Playbook.load(pbe._playbooks[0], variable_manager=mock_var_manager, loader=fake_loader)
play = playbook.get_plays()[0]
play.post_validate(templar)
mock_inventory.get_hosts.return_value = ['host0','host1','host2','host3','host4','host5','host6','host7','host8','host9']
self.assertEqual(pbe._get_serialized_batches(play), [['host0','host1','host2','host3','host4','host5','host6','host7','host8','host9']])
playbook = Playbook.load(pbe._playbooks[1], variable_manager=mock_var_manager, loader=fake_loader)
play = playbook.get_plays()[0]
play.post_validate(templar)
mock_inventory.get_hosts.return_value = ['host0','host1','host2','host3','host4','host5','host6','host7','host8','host9']
self.assertEqual(pbe._get_serialized_batches(play), [['host0','host1'],['host2','host3'],['host4','host5'],['host6','host7'],['host8','host9']])
playbook = Playbook.load(pbe._playbooks[2], variable_manager=mock_var_manager, loader=fake_loader)
play = playbook.get_plays()[0]
play.post_validate(templar)
mock_inventory.get_hosts.return_value = ['host0','host1','host2','host3','host4','host5','host6','host7','host8','host9']
self.assertEqual(pbe._get_serialized_batches(play), [['host0','host1'],['host2','host3'],['host4','host5'],['host6','host7'],['host8','host9']])
playbook = Playbook.load(pbe._playbooks[3], variable_manager=mock_var_manager, loader=fake_loader)
play = playbook.get_plays()[0]
play.post_validate(templar)
mock_inventory.get_hosts.return_value = ['host0','host1','host2','host3','host4','host5','host6','host7','host8','host9']
self.assertEqual(pbe._get_serialized_batches(play), [['host0'],['host1','host2'],['host3','host4','host5'],['host6','host7','host8'],['host9']])
playbook = Playbook.load(pbe._playbooks[4], variable_manager=mock_var_manager, loader=fake_loader)
play = playbook.get_plays()[0]
play.post_validate(templar)
mock_inventory.get_hosts.return_value = ['host0','host1','host2','host3','host4','host5','host6','host7','host8','host9']
self.assertEqual(pbe._get_serialized_batches(play), [['host0'],['host1','host2'],['host3','host4','host5','host6','host7','host8','host9']])
# Test when serial percent is under 1.0
playbook = Playbook.load(pbe._playbooks[2], variable_manager=mock_var_manager, loader=fake_loader)
play = playbook.get_plays()[0]
play.post_validate(templar)
mock_inventory.get_hosts.return_value = ['host0','host1','host2']
self.assertEqual(pbe._get_serialized_batches(play), [['host0'],['host1'],['host2']])
# Test when there is a remainder for serial as a percent
playbook = Playbook.load(pbe._playbooks[2], variable_manager=mock_var_manager, loader=fake_loader)
play = playbook.get_plays()[0]
play.post_validate(templar)
mock_inventory.get_hosts.return_value = ['host0','host1','host2','host3','host4','host5','host6','host7','host8','host9','host10']
self.assertEqual(
pbe._get_serialized_batches(play),
[['host0','host1'],['host2','host3'],['host4','host5'],['host6','host7'],['host8','host9'],['host10']]
)
|
gpl-3.0
|
jddixon/fieldz
|
tests/test_reg.py
|
1
|
1974
|
#!/usr/bin/env python3
# test_reg.py
# import time
import unittest
# from rnglib import SimpleRNG
from wireops.enum import FieldTypes
from fieldz.reg import NodeReg
from fieldz.enum import CoreTypes
# TESTS --------------------------------------------------------------
class TestReg(unittest.TestCase):
def test_node_reg(self):
test_reg = NodeReg()
nbr_coretypes = len(CoreTypes)
self.assertEqual(nbr_coretypes, 6)
nbr_fieldtypes = len(FieldTypes)
self.assertEqual(nbr_fieldtypes, 18)
# DEBUG
print("test_node_reg: nbr_coretypes is %d" % nbr_coretypes)
print("test_node_reg: nbr_fieldtypes is %d" % nbr_fieldtypes)
        print(
            "test_node_reg: test_reg.next_reg_id is %d" %
            test_reg.next_reg_id)
# END
self.assertEqual(
len(FieldTypes) + nbr_coretypes,
test_reg.next_reg_id)
# verify that all fieldTypes are defined in the registry, each
# with the proper index (vBool through fBytes32 at FieldTypes.MAX_NDX)
for ndx, ftype in enumerate(FieldTypes):
name = test_reg[ndx].qual_name
# DEBUG
print('%2u %s' % (ndx, name))
# END
self.assertEqual(ftype.sym, name)
for ndx, coretype in enumerate(CoreTypes):
i = ndx + nbr_fieldtypes
name = test_reg[i].qual_name
# DEBUG
print('%2u %s' % (i, name))
# END
self.assertEqual(coretype.sym, name)
# XXX FIGURE THIS OUT
# self.assertEqual(test_reg.name2reg_id(name), i)
self.assertEqual(nbr_fieldtypes + nbr_coretypes, len(test_reg))
# print "DEBUG: len(test_reg) is %u" % len(test_reg)
# print "DEBUG: next_reg_id is %u" % test_reg.next_reg_id
self.assertEqual(len(test_reg), test_reg.next_reg_id)
if __name__ == '__main__':
unittest.main()
|
mit
|
bradparks/sleepy-puppy
|
sleepypuppy/admin/user/views.py
|
13
|
1706
|
# Copyright 2015 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask.ext.admin.contrib.sqla import ModelView
from flask.ext import login
from flask_wtf import Form
from wtforms import validators
from models import User
class UserView(ModelView):
"""
ModelView override of Flask Admin for Users.
"""
# CSRF protection
form_base_class = Form
# Ensure user is authenticated
def is_accessible(self):
return login.current_user.is_authenticated()
# Column tweaks
column_list = ('email', 'assessments')
column_labels = dict(email='Email Address', assessments='Assessments')
# Form tweaks and validations
form_args = dict(
email=dict(
            description='Enter email address to receive notifications when captures are received',
validators=[validators.required(), validators.email()]
),
assessments=dict(
            description='Subscribe to assessments to receive notifications',
validators=[validators.required()]
)
)
def __init__(self, session, **kwargs):
super(UserView, self).__init__(User, session, **kwargs)
|
apache-2.0
|
lipro/linux-tqs
|
arch/ia64/scripts/unwcheck.py
|
916
|
1718
|
#!/usr/bin/env python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
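# Example run (illustrative output):
#
#   READELF=readelf ./unwcheck.py vmlinux
#   No errors detected in 12345 functions.
#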
import os
import re
import sys
if len(sys.argv) != 2:
print "Usage: %s FILE" % sys.argv[0]
sys.exit(2)
readelf = os.getenv("READELF", "readelf")
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
rlen_pattern = re.compile(".*rlen=([0-9]+)")
def check_func (func, slots, rlen_sum):
if slots != rlen_sum:
global num_errors
num_errors += 1
if not func: func = "[%#x-%#x]" % (start, end)
print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
return
num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
m = start_pattern.match(line)
if m:
check_func(func, slots, rlen_sum)
func = m.group(1)
start = long(m.group(2), 16)
end = long(m.group(3), 16)
slots = 3 * (end - start) / 16
rlen_sum = 0L
num_funcs += 1
else:
m = rlen_pattern.match(line)
if m:
rlen_sum += long(m.group(1))
check_func(func, slots, rlen_sum)
if num_errors == 0:
print "No errors detected in %u functions." % num_funcs
else:
if num_errors > 1:
err="errors"
else:
err="error"
print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
sys.exit(1)
|
gpl-2.0
|
sosy-lab/benchexec
|
benchexec/test_cgroups.py
|
3
|
2072
|
# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import logging
import subprocess
import sys
import unittest
from benchexec import check_cgroups
sys.dont_write_bytecode = True # prevent creation of .pyc files
class TestCheckCgroups(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.longMessage = True
cls.maxDiff = None
logging.disable(logging.CRITICAL)
def execute_run_extern(self, *args, **kwargs):
try:
return subprocess.check_output(
args=["python3", "-m", "benchexec.check_cgroups"] + list(args),
stderr=subprocess.STDOUT,
universal_newlines=True,
**kwargs,
)
except subprocess.CalledProcessError as e:
if e.returncode != 1: # 1 is expected if cgroups are not available
print(e.output)
raise e
def test_extern_command(self):
self.execute_run_extern()
def test_simple(self):
try:
check_cgroups.main(["--no-thread"])
except SystemExit as e:
# expected if cgroups are not available
self.skipTest(e)
def test_threaded(self):
try:
check_cgroups.main([])
except SystemExit as e:
# expected if cgroups are not available
self.skipTest(e)
def test_thread_result_is_returned(self):
"""
Test that an error raised by check_cgroup_availability is correctly
re-raised in the main thread by replacing this function temporarily.
"""
tmp = check_cgroups.check_cgroup_availability
try:
check_cgroups.check_cgroup_availability = lambda wait: exit(1)
with self.assertRaises(SystemExit):
check_cgroups.main([])
finally:
check_cgroups.check_cgroup_availability = tmp
|
apache-2.0
|
mmagnus/rna-pdb-tools
|
rna_tools/tools/rna_calc_rmsd/lib/rmsd/calculate_rmsd.py
|
2
|
5276
|
#!/usr/bin/env python
"""
Calculate RMSD between two XYZ files
by: Jimmy Charnley Kromann <jimmy@charnley.dk> and Lars Andersen Bratholm <larsbratholm@gmail.com>
project: https://github.com/charnley/rmsd
license: https://github.com/charnley/rmsd/blob/master/LICENSE
"""
import numpy as np
import re
from rna_tools.tools.extra_functions.select_fragment import is_in_selection
def kabsch_rmsd(P, Q):
"""
Rotate matrix P unto Q and calculate the RMSD
"""
P = rotate(P, Q)
return rmsd(P, Q)
def rotate(P, Q):
"""
Rotate matrix P unto matrix Q using Kabsch algorithm
"""
U = kabsch(P, Q)
# Rotate P
P = np.dot(P, U)
return P
def kabsch(P, Q):
"""
The optimal rotation matrix U is calculated and then used to rotate matrix
P unto matrix Q so the minimum root-mean-square deviation (RMSD) can be
calculated.
Using the Kabsch algorithm with two sets of paired point P and Q,
centered around the center-of-mass.
Each vector set is represented as an NxD matrix, where D is the
the dimension of the space.
The algorithm works in three steps:
- a translation of P and Q
- the computation of a covariance matrix C
- computation of the optimal rotation matrix U
http://en.wikipedia.org/wiki/Kabsch_algorithm
Parameters:
P -- (N, number of points)x(D, dimension) matrix
Q -- (N, number of points)x(D, dimension) matrix
Returns:
U -- Rotation matrix
"""
# Computation of the covariance matrix
C = np.dot(np.transpose(P), Q)
# Computation of the optimal rotation matrix
# This can be done using singular value decomposition (SVD)
# Getting the sign of the det(V)*(W) to decide
# whether we need to correct our rotation matrix to ensure a
# right-handed coordinate system.
# And finally calculating the optimal rotation matrix U
# see http://en.wikipedia.org/wiki/Kabsch_algorithm
V, S, W = np.linalg.svd(C)
d = (np.linalg.det(V) * np.linalg.det(W)) < 0.0
if d:
S[-1] = -S[-1]
V[:, -1] = -V[:, -1]
# Create Rotation matrix U
U = np.dot(V, W)
return U
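# A minimal sanity check (illustrative, not part of the original module):
# when P and Q differ only by a pure rotation, aligning P onto Q should give
# an RMSD of ~0.
#
#   P = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
#   theta = np.pi / 2
#   R = np.array([[np.cos(theta), -np.sin(theta), 0.0],
#                 [np.sin(theta),  np.cos(theta), 0.0],
#                 [0.0,            0.0,           1.0]])
#   kabsch_rmsd(P, np.dot(P, R))  # -> ~0.0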
def centroid(X):
"""
Calculate the centroid from a vectorset X
"""
C = sum(X)/len(X)
return C
def rmsd(V, W):
"""
Calculate Root-mean-square deviation from two sets of vectors V and W.
"""
D = len(V[0])
N = len(V)
rmsd = 0.0
for v, w in zip(V, W):
rmsd += sum([(v[i]-w[i])**2.0 for i in range(D)])
return np.sqrt(rmsd/N)
def get_coordinates(filename, selection, ignore_selection, fmt, ignore_hydrogens):
"""Get coordinates from filename."""
return get_coordinates_pdb(filename, selection, ignore_selection, ignore_hydrogens)
def get_coordinates_pdb(filename, selection, ignore_selection, ignore_hydrogens):
"""
Get coordinates from the first chain in a pdb file
and return a vectorset with all the coordinates.
"""
    # PDB files tend to be a bit of a mess. The x, y and z coordinates
    # are supposed to be in columns 31-38, 39-46 and 47-54, but this is not
    # always the case. Because of this, the first three columns containing a
    # decimal could be used, with the fixed column indices above as a fallback.
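    # (Illustrative) for a well-formed ATOM record the coordinate fields land
    # in line[30:38], line[38:46] and line[46:54], which is exactly what the
    # slices below read; the chain id sits at line[21] and the residue number
    # at line[22:26].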
x_column = None
V = []
# Same with atoms and atom naming. The most robust way to do this is probably
# to assume that the atomtype is given in column 3.
atoms = []
resi_set = set()
with open(filename) as f:
lines = f.readlines()
for line in lines:
            # NOTE: don't stop at TER/END records. Some decoy sets keep atoms
            # after them (e.g. files with hundreds of models), and breaking
            # early produced atom-count mismatches such as:
            #   Error: # of atoms is not equal target (1f27_rpr.pdb):641
            #   vs model (struc/1f27_rnakbnm_decoy0001_amb_clx_rpr.pdb):408
            # if line.startswith("TER") or line.startswith("END"):
            #     break
if line.startswith("ATOM"):
curr_chain_id = line[21]
curr_resi = int(line[22:26])
curr_atom_name = line[12:16].strip()
if selection:
if curr_chain_id in selection:
if curr_resi in selection[curr_chain_id]:
                            # skip the atom if it falls in the ignore selection
resi_set.add(curr_chain_id + ':' + str(curr_resi))
x = line[30:38]
y = line[38:46]
z = line[46:54]
if ignore_selection:
if not is_in_selection(ignore_selection, curr_chain_id, curr_resi, curr_atom_name):
V.append(np.asarray([x,y,z],dtype=float))
else:
V.append(np.asarray([x,y,z],dtype=float))
else:
x = line[30:38]
y = line[38:46]
z = line[46:54]
V.append(np.asarray([x,y,z],dtype=float))
V = np.asarray(V)
#print filename, resi_set, len(resi_set)
return len(V), V
|
gpl-3.0
|
Kamik423/uni_plan
|
plan/plan/lib/python3.4/site-packages/pip/_vendor/pyparsing.py
|
417
|
224171
|
# module pyparsing.py
#
# Copyright (c) 2003-2016 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__doc__ = \
"""
pyparsing module - Classes and methods to define and execute parsing grammars
The pyparsing module is an alternative approach to creating and executing simple grammars,
vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you
don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
provides a library of classes that you use to construct the grammar directly in Python.
Here is a program to parse "Hello, World!" (or any greeting of the form
C{"<salutation>, <addressee>!"}), built up using L{Word}, L{Literal}, and L{And} elements
(L{'+'<ParserElement.__add__>} operator gives L{And} expressions, strings are auto-converted to
L{Literal} expressions)::
from pyparsing import Word, alphas
# define grammar of a greeting
greet = Word(alphas) + "," + Word(alphas) + "!"
hello = "Hello, World!"
print (hello, "->", greet.parseString(hello))
The program outputs the following::
Hello, World! -> ['Hello', ',', 'World', '!']
The Python representation of the grammar is quite readable, owing to the self-explanatory
class names, and the use of '+', '|' and '^' operators.
The L{ParseResults} object returned from L{ParserElement.parseString<ParserElement.parseString>} can be accessed as a nested list, a dictionary, or an
object with named attributes.
The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
- extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
"""
__version__ = "2.1.10"
__versionTime__ = "07 Oct 2016 01:31 UTC"
__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>"
import string
from weakref import ref as wkref
import copy
import sys
import warnings
import re
import sre_constants
import collections
import pprint
import traceback
import types
from datetime import datetime
try:
from _thread import RLock
except ImportError:
from threading import RLock
try:
from collections import OrderedDict as _OrderedDict
except ImportError:
try:
from ordereddict import OrderedDict as _OrderedDict
except ImportError:
_OrderedDict = None
#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
__all__ = [
'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter',
'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',
'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums',
'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno',
'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity',
'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr', 'withClass',
'CloseMatch', 'tokenMap', 'pyparsing_common',
]
system_version = tuple(sys.version_info)[:3]
PY_3 = system_version[0] == 3
if PY_3:
_MAX_INT = sys.maxsize
basestring = str
unichr = chr
_ustr = str
# build list of single arg builtins, that can be used as parse actions
singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max]
else:
_MAX_INT = sys.maxint
range = xrange
def _ustr(obj):
"""Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
then < returns the unicode object | encodes it with the default encoding | ... >.
"""
if isinstance(obj,unicode):
return obj
try:
# If this works, then _ustr(obj) has the same behaviour as str(obj), so
# it won't break any existing code.
return str(obj)
except UnicodeEncodeError:
# Else encode it
ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace')
xmlcharref = Regex('&#\d+;')
xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:])
return xmlcharref.transformString(ret)
# build list of single arg builtins, tolerant of Python version, that can be used as parse actions
singleArgBuiltins = []
import __builtin__
for fname in "sum len sorted reversed list tuple set any all min max".split():
try:
singleArgBuiltins.append(getattr(__builtin__,fname))
except AttributeError:
continue
_generatorType = type((y for y in range(1)))
def _xml_escape(data):
"""Escape &, <, >, ", ', etc. in a string of data."""
# ampersand must be replaced first
from_symbols = '&><"\''
to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split())
for from_,to_ in zip(from_symbols, to_symbols):
data = data.replace(from_, to_)
return data
class _Constants(object):
pass
alphas = string.ascii_uppercase + string.ascii_lowercase
nums = "0123456789"
hexnums = nums + "ABCDEFabcdef"
alphanums = alphas + nums
_bslash = chr(92)
printables = "".join(c for c in string.printable if c not in string.whitespace)
class ParseBaseException(Exception):
"""base exception class for all parsing runtime exceptions"""
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__( self, pstr, loc=0, msg=None, elem=None ):
self.loc = loc
if msg is None:
self.msg = pstr
self.pstr = ""
else:
self.msg = msg
self.pstr = pstr
self.parserElement = elem
self.args = (pstr, loc, msg)
@classmethod
def _from_exception(cls, pe):
"""
internal factory method to simplify creating one type of ParseException
from another - avoids having __init__ signature conflicts among subclasses
"""
return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)
def __getattr__( self, aname ):
"""supported attributes by name are:
- lineno - returns the line number of the exception text
- col - returns the column number of the exception text
- line - returns the line containing the exception text
"""
if( aname == "lineno" ):
return lineno( self.loc, self.pstr )
elif( aname in ("col", "column") ):
return col( self.loc, self.pstr )
elif( aname == "line" ):
return line( self.loc, self.pstr )
else:
raise AttributeError(aname)
def __str__( self ):
return "%s (at char %d), (line:%d, col:%d)" % \
( self.msg, self.loc, self.lineno, self.column )
def __repr__( self ):
return _ustr(self)
def markInputline( self, markerString = ">!<" ):
"""Extracts the exception line from the input string, and marks
the location of the exception with a special symbol.
"""
line_str = self.line
line_column = self.column - 1
if markerString:
line_str = "".join((line_str[:line_column],
markerString, line_str[line_column:]))
return line_str.strip()
def __dir__(self):
return "lineno col line".split() + dir(type(self))
class ParseException(ParseBaseException):
"""
Exception thrown when parse expressions don't match class;
supported attributes by name are:
- lineno - returns the line number of the exception text
- col - returns the column number of the exception text
- line - returns the line containing the exception text
Example::
try:
Word(nums).setName("integer").parseString("ABC")
except ParseException as pe:
print(pe)
print("column: {}".format(pe.col))
prints::
Expected integer (at char 0), (line:1, col:1)
column: 1
"""
pass
class ParseFatalException(ParseBaseException):
"""user-throwable exception thrown when inconsistent parse content
is found; stops all parsing immediately"""
pass
class ParseSyntaxException(ParseFatalException):
"""just like L{ParseFatalException}, but thrown internally when an
L{ErrorStop<And._ErrorStop>} ('-' operator) indicates that parsing is to stop
immediately because an unbacktrackable syntax error has been found"""
pass
#~ class ReparseException(ParseBaseException):
#~ """Experimental class - parse actions can raise this exception to cause
#~ pyparsing to reparse the input string:
#~ - with a modified input string, and/or
#~ - with a modified start location
#~ Set the values of the ReparseException in the constructor, and raise the
#~ exception in a parse action to cause pyparsing to use the new string/location.
#~ Setting the values as None causes no change to be made.
#~ """
#~ def __init_( self, newstring, restartLoc ):
#~ self.newParseText = newstring
#~ self.reparseLoc = restartLoc
class RecursiveGrammarException(Exception):
"""exception thrown by L{ParserElement.validate} if the grammar could be improperly recursive"""
def __init__( self, parseElementList ):
self.parseElementTrace = parseElementList
def __str__( self ):
return "RecursiveGrammarException: %s" % self.parseElementTrace
class _ParseResultsWithOffset(object):
def __init__(self,p1,p2):
self.tup = (p1,p2)
def __getitem__(self,i):
return self.tup[i]
def __repr__(self):
return repr(self.tup[0])
def setOffset(self,i):
self.tup = (self.tup[0],i)
class ParseResults(object):
"""
Structured parse results, to provide multiple means of access to the parsed data:
- as a list (C{len(results)})
- by list index (C{results[0], results[1]}, etc.)
- by attribute (C{results.<resultsName>} - see L{ParserElement.setResultsName})
Example::
integer = Word(nums)
date_str = (integer.setResultsName("year") + '/'
+ integer.setResultsName("month") + '/'
+ integer.setResultsName("day"))
# equivalent form:
# date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
# parseString returns a ParseResults object
result = date_str.parseString("1999/12/31")
def test(s, fn=repr):
print("%s -> %s" % (s, fn(eval(s))))
test("list(result)")
test("result[0]")
test("result['month']")
test("result.day")
test("'month' in result")
test("'minutes' in result")
test("result.dump()", str)
prints::
list(result) -> ['1999', '/', '12', '/', '31']
result[0] -> '1999'
result['month'] -> '12'
result.day -> '31'
'month' in result -> True
'minutes' in result -> False
result.dump() -> ['1999', '/', '12', '/', '31']
- day: 31
- month: 12
- year: 1999
"""
def __new__(cls, toklist=None, name=None, asList=True, modal=True ):
if isinstance(toklist, cls):
return toklist
retobj = object.__new__(cls)
retobj.__doinit = True
return retobj
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance ):
if self.__doinit:
self.__doinit = False
self.__name = None
self.__parent = None
self.__accumNames = {}
self.__asList = asList
self.__modal = modal
if toklist is None:
toklist = []
if isinstance(toklist, list):
self.__toklist = toklist[:]
elif isinstance(toklist, _generatorType):
self.__toklist = list(toklist)
else:
self.__toklist = [toklist]
self.__tokdict = dict()
if name is not None and name:
if not modal:
self.__accumNames[name] = 0
if isinstance(name,int):
name = _ustr(name) # will always return a str, but use _ustr for consistency
self.__name = name
if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None,'',[])):
if isinstance(toklist,basestring):
toklist = [ toklist ]
if asList:
if isinstance(toklist,ParseResults):
self[name] = _ParseResultsWithOffset(toklist.copy(),0)
else:
self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)
self[name].__name = name
else:
try:
self[name] = toklist[0]
except (KeyError,TypeError,IndexError):
self[name] = toklist
def __getitem__( self, i ):
if isinstance( i, (int,slice) ):
return self.__toklist[i]
else:
if i not in self.__accumNames:
return self.__tokdict[i][-1][0]
else:
return ParseResults([ v[0] for v in self.__tokdict[i] ])
def __setitem__( self, k, v, isinstance=isinstance ):
if isinstance(v,_ParseResultsWithOffset):
self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
sub = v[0]
elif isinstance(k,(int,slice)):
self.__toklist[k] = v
sub = v
else:
self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
sub = v
if isinstance(sub,ParseResults):
sub.__parent = wkref(self)
def __delitem__( self, i ):
if isinstance(i,(int,slice)):
mylen = len( self.__toklist )
del self.__toklist[i]
# convert int to slice
if isinstance(i, int):
if i < 0:
i += mylen
i = slice(i, i+1)
# get removed indices
removed = list(range(*i.indices(mylen)))
removed.reverse()
# fixup indices in token dictionary
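            # (illustrative) removing index 1 from a four-token list means a
            # named result stored at offset 3 must move to offset 2: each
            # stored position is decremented once per removed index below it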
for name,occurrences in self.__tokdict.items():
for j in removed:
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
else:
del self.__tokdict[i]
def __contains__( self, k ):
return k in self.__tokdict
def __len__( self ): return len( self.__toklist )
def __bool__(self): return ( not not self.__toklist )
__nonzero__ = __bool__
def __iter__( self ): return iter( self.__toklist )
def __reversed__( self ): return iter( self.__toklist[::-1] )
def _iterkeys( self ):
if hasattr(self.__tokdict, "iterkeys"):
return self.__tokdict.iterkeys()
else:
return iter(self.__tokdict)
def _itervalues( self ):
return (self[k] for k in self._iterkeys())
def _iteritems( self ):
return ((k, self[k]) for k in self._iterkeys())
if PY_3:
keys = _iterkeys
"""Returns an iterator of all named result keys (Python 3.x only)."""
values = _itervalues
"""Returns an iterator of all named result values (Python 3.x only)."""
items = _iteritems
"""Returns an iterator of all named result key-value tuples (Python 3.x only)."""
else:
iterkeys = _iterkeys
"""Returns an iterator of all named result keys (Python 2.x only)."""
itervalues = _itervalues
"""Returns an iterator of all named result values (Python 2.x only)."""
iteritems = _iteritems
"""Returns an iterator of all named result key-value tuples (Python 2.x only)."""
def keys( self ):
"""Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x)."""
return list(self.iterkeys())
def values( self ):
"""Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x)."""
return list(self.itervalues())
def items( self ):
"""Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x)."""
return list(self.iteritems())
def haskeys( self ):
"""Since keys() returns an iterator, this method is helpful in bypassing
code that looks for the existence of any defined results names."""
return bool(self.__tokdict)
def pop( self, *args, **kwargs):
"""
Removes and returns item at specified index (default=C{last}).
Supports both C{list} and C{dict} semantics for C{pop()}. If passed no
argument or an integer argument, it will use C{list} semantics
and pop tokens from the list of parsed tokens. If passed a
non-integer argument (most likely a string), it will use C{dict}
semantics and pop the corresponding value from any defined
results names. A second default return value argument is
supported, just as in C{dict.pop()}.
Example::
def remove_first(tokens):
tokens.pop(0)
print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321']
label = Word(alphas)
patt = label("LABEL") + OneOrMore(Word(nums))
print(patt.parseString("AAB 123 321").dump())
# Use pop() in a parse action to remove named result (note that corresponding value is not
# removed from list form of results)
def remove_LABEL(tokens):
tokens.pop("LABEL")
return tokens
patt.addParseAction(remove_LABEL)
print(patt.parseString("AAB 123 321").dump())
prints::
['AAB', '123', '321']
- LABEL: AAB
['AAB', '123', '321']
"""
if not args:
args = [-1]
for k,v in kwargs.items():
if k == 'default':
args = (args[0], v)
else:
raise TypeError("pop() got an unexpected keyword argument '%s'" % k)
if (isinstance(args[0], int) or
len(args) == 1 or
args[0] in self):
index = args[0]
ret = self[index]
del self[index]
return ret
else:
defaultvalue = args[1]
return defaultvalue
def get(self, key, defaultValue=None):
"""
Returns named result matching the given key, or if there is no
such name, then returns the given C{defaultValue} or C{None} if no
C{defaultValue} is specified.
Similar to C{dict.get()}.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parseString("1999/12/31")
print(result.get("year")) # -> '1999'
print(result.get("hour", "not specified")) # -> 'not specified'
print(result.get("hour")) # -> None
"""
if key in self:
return self[key]
else:
return defaultValue
def insert( self, index, insStr ):
"""
Inserts new element at location index in the list of parsed tokens.
Similar to C{list.insert()}.
Example::
print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
# use a parse action to insert the parse location in the front of the parsed results
def insert_locn(locn, tokens):
tokens.insert(0, locn)
print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321']
"""
self.__toklist.insert(index, insStr)
# fixup indices in token dictionary
for name,occurrences in self.__tokdict.items():
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))
def append( self, item ):
"""
Add single element to end of ParseResults list of elements.
Example::
print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
# use a parse action to compute the sum of the parsed integers, and add it to the end
def append_sum(tokens):
tokens.append(sum(map(int, tokens)))
print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444]
"""
self.__toklist.append(item)
def extend( self, itemseq ):
"""
Add sequence of elements to end of ParseResults list of elements.
Example::
patt = OneOrMore(Word(alphas))
# use a parse action to append the reverse of the matched strings, to make a palindrome
def make_palindrome(tokens):
tokens.extend(reversed([t[::-1] for t in tokens]))
return ''.join(tokens)
print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
"""
if isinstance(itemseq, ParseResults):
self += itemseq
else:
self.__toklist.extend(itemseq)
def clear( self ):
"""
Clear all elements and results names.
"""
del self.__toklist[:]
self.__tokdict.clear()
    def __getattr__( self, name ):
        try:
            return self[name]
        except KeyError:
            return ""
def __add__( self, other ):
ret = self.copy()
ret += other
return ret
def __iadd__( self, other ):
if other.__tokdict:
offset = len(self.__toklist)
addoffset = lambda a: offset if a<0 else a+offset
otheritems = other.__tokdict.items()
otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
for (k,vlist) in otheritems for v in vlist]
for k,v in otherdictitems:
self[k] = v
if isinstance(v[0],ParseResults):
v[0].__parent = wkref(self)
self.__toklist += other.__toklist
self.__accumNames.update( other.__accumNames )
return self
def __radd__(self, other):
if isinstance(other,int) and other == 0:
# useful for merging many ParseResults using sum() builtin
return self.copy()
else:
# this may raise a TypeError - so be it
return other + self
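    # A minimal usage sketch (illustrative): because 0 + ParseResults returns a
    # copy, a list of ParseResults can be merged with the sum() builtin:
    #   chunks = [Word(alphas).parseString(s) for s in ("ab", "cd", "ef")]
    #   merged = sum(chunks)   # -> (['ab', 'cd', 'ef'], {})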
def __repr__( self ):
return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )
def __str__( self ):
return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']'
def _asStringList( self, sep='' ):
out = []
for item in self.__toklist:
if out and sep:
out.append(sep)
if isinstance( item, ParseResults ):
out += item._asStringList()
else:
out.append( _ustr(item) )
return out
def asList( self ):
"""
Returns the parse results as a nested list of matching tokens, all converted to strings.
Example::
patt = OneOrMore(Word(alphas))
result = patt.parseString("sldkj lsdkj sldkj")
# even though the result prints in string-like form, it is actually a pyparsing ParseResults
print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
# Use asList() to create an actual list
result_list = result.asList()
print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
"""
return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist]
def asDict( self ):
"""
Returns the named parse results as a nested dictionary.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parseString('12/31/1999')
print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
result_dict = result.asDict()
print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}
# even though a ParseResults supports dict-like access, sometime you just need to have a dict
import json
print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}
"""
if PY_3:
item_fn = self.items
else:
item_fn = self.iteritems
def toItem(obj):
if isinstance(obj, ParseResults):
if obj.haskeys():
return obj.asDict()
else:
return [toItem(v) for v in obj]
else:
return obj
return dict((k,toItem(v)) for k,v in item_fn())
def copy( self ):
"""
Returns a new copy of a C{ParseResults} object.
"""
ret = ParseResults( self.__toklist )
ret.__tokdict = self.__tokdict.copy()
ret.__parent = self.__parent
ret.__accumNames.update( self.__accumNames )
ret.__name = self.__name
return ret
def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
"""
(Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.
"""
nl = "\n"
out = []
namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items()
for v in vlist)
nextLevelIndent = indent + " "
# collapse out indents if formatting is not desired
if not formatted:
indent = ""
nextLevelIndent = ""
nl = ""
selfTag = None
if doctag is not None:
selfTag = doctag
else:
if self.__name:
selfTag = self.__name
if not selfTag:
if namedItemsOnly:
return ""
else:
selfTag = "ITEM"
out += [ nl, indent, "<", selfTag, ">" ]
for i,res in enumerate(self.__toklist):
if isinstance(res,ParseResults):
if i in namedItems:
out += [ res.asXML(namedItems[i],
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted)]
else:
out += [ res.asXML(None,
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted)]
else:
# individual token, see if there is a name for it
resTag = None
if i in namedItems:
resTag = namedItems[i]
if not resTag:
if namedItemsOnly:
continue
else:
resTag = "ITEM"
xmlBodyText = _xml_escape(_ustr(res))
out += [ nl, nextLevelIndent, "<", resTag, ">",
xmlBodyText,
"</", resTag, ">" ]
out += [ nl, indent, "</", selfTag, ">" ]
return "".join(out)
def __lookup(self,sub):
for k,vlist in self.__tokdict.items():
for v,loc in vlist:
if sub is v:
return k
return None
def getName(self):
"""
Returns the results name for this token expression. Useful when several
different expressions might match at a particular location.
Example::
integer = Word(nums)
ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
house_number_expr = Suppress('#') + Word(nums, alphanums)
user_data = (Group(house_number_expr)("house_number")
| Group(ssn_expr)("ssn")
| Group(integer)("age"))
user_info = OneOrMore(user_data)
result = user_info.parseString("22 111-22-3333 #221B")
for item in result:
print(item.getName(), ':', item[0])
prints::
age : 22
ssn : 111-22-3333
house_number : 221B
"""
if self.__name:
return self.__name
elif self.__parent:
par = self.__parent()
if par:
return par.__lookup(self)
else:
return None
elif (len(self) == 1 and
len(self.__tokdict) == 1 and
next(iter(self.__tokdict.values()))[0][1] in (0,-1)):
return next(iter(self.__tokdict.keys()))
else:
return None
def dump(self, indent='', depth=0, full=True):
"""
Diagnostic method for listing out the contents of a C{ParseResults}.
Accepts an optional C{indent} argument so that this string can be embedded
in a nested display of other data.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parseString('12/31/1999')
print(result.dump())
prints::
['12', '/', '31', '/', '1999']
- day: 1999
- month: 31
- year: 12
"""
out = []
NL = '\n'
out.append( indent+_ustr(self.asList()) )
if full:
if self.haskeys():
items = sorted((str(k), v) for k,v in self.items())
for k,v in items:
if out:
out.append(NL)
out.append( "%s%s- %s: " % (indent,(' '*depth), k) )
if isinstance(v,ParseResults):
if v:
out.append( v.dump(indent,depth+1) )
else:
out.append(_ustr(v))
else:
out.append(repr(v))
elif any(isinstance(vv,ParseResults) for vv in self):
v = self
for i,vv in enumerate(v):
if isinstance(vv,ParseResults):
out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),vv.dump(indent,depth+1) ))
else:
out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),_ustr(vv)))
return "".join(out)
def pprint(self, *args, **kwargs):
"""
Pretty-printer for parsed results as a list, using the C{pprint} module.
Accepts additional positional or keyword args as defined for the
C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint})
Example::
ident = Word(alphas, alphanums)
num = Word(nums)
func = Forward()
term = ident | num | Group('(' + func + ')')
func <<= ident + Group(Optional(delimitedList(term)))
result = func.parseString("fna a,b,(fnb c,d,200),100")
result.pprint(width=40)
prints::
['fna',
['a',
'b',
['(', 'fnb', ['c', 'd', '200'], ')'],
'100']]
"""
pprint.pprint(self.asList(), *args, **kwargs)
# add support for pickle protocol
def __getstate__(self):
return ( self.__toklist,
( self.__tokdict.copy(),
self.__parent is not None and self.__parent() or None,
self.__accumNames,
self.__name ) )
def __setstate__(self,state):
self.__toklist = state[0]
(self.__tokdict,
par,
inAccumNames,
self.__name) = state[1]
self.__accumNames = {}
self.__accumNames.update(inAccumNames)
if par is not None:
self.__parent = wkref(par)
else:
self.__parent = None
def __getnewargs__(self):
return self.__toklist, self.__name, self.__asList, self.__modal
def __dir__(self):
return (dir(type(self)) + list(self.keys()))
collections.MutableMapping.register(ParseResults)
def col (loc,strg):
"""Returns current column within a string, counting newlines as line separators.
The first column is number 1.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
s = strg
return 1 if 0<loc<len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc)
def lineno(loc,strg):
"""Returns current line number within a string, counting newlines as line separators.
The first line is number 1.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
return strg.count("\n",0,loc) + 1
def line( loc, strg ):
"""Returns the line of text containing loc within a string, counting newlines as line separators.
"""
lastCR = strg.rfind("\n", 0, loc)
nextCR = strg.find("\n", loc)
if nextCR >= 0:
return strg[lastCR+1:nextCR]
else:
return strg[lastCR+1:]
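# A minimal usage sketch (illustrative): the three helpers above give a
# consistent (line, column, text) view of a location within a parsed string:
#   s = "abc\ndef"
#   loc = s.index("e")                          # -> 5
#   lineno(loc, s), col(loc, s), line(loc, s)   # -> (2, 2, 'def')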
def _defaultStartDebugAction( instring, loc, expr ):
print (("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )))
def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
print ("Matched " + _ustr(expr) + " -> " + str(toks.asList()))
def _defaultExceptionDebugAction( instring, loc, expr, exc ):
print ("Exception raised:" + _ustr(exc))
def nullDebugAction(*args):
"""'Do-nothing' debug action, to suppress debugging output during parsing."""
pass
# Only works on Python 3.x - nonlocal is toxic to Python 2 installs
#~ 'decorator to trim function calls to match the arity of the target'
#~ def _trim_arity(func, maxargs=3):
#~ if func in singleArgBuiltins:
#~ return lambda s,l,t: func(t)
#~ limit = 0
#~ foundArity = False
#~ def wrapper(*args):
#~ nonlocal limit,foundArity
#~ while 1:
#~ try:
#~ ret = func(*args[limit:])
#~ foundArity = True
#~ return ret
#~ except TypeError:
#~ if limit == maxargs or foundArity:
#~ raise
#~ limit += 1
#~ continue
#~ return wrapper
# this version is Python 2.x-3.x cross-compatible
# decorator to trim function calls to match the arity of the target
def _trim_arity(func, maxargs=2):
if func in singleArgBuiltins:
return lambda s,l,t: func(t)
limit = [0]
foundArity = [False]
# traceback return data structure changed in Py3.5 - normalize back to plain tuples
if system_version[:2] >= (3,5):
def extract_stack(limit=0):
# special handling for Python 3.5.0 - extra deep call stack by 1
offset = -3 if system_version == (3,5,0) else -2
frame_summary = traceback.extract_stack(limit=-offset+limit-1)[offset]
return [(frame_summary.filename, frame_summary.lineno)]
def extract_tb(tb, limit=0):
frames = traceback.extract_tb(tb, limit=limit)
frame_summary = frames[-1]
return [(frame_summary.filename, frame_summary.lineno)]
else:
extract_stack = traceback.extract_stack
extract_tb = traceback.extract_tb
# synthesize what would be returned by traceback.extract_stack at the call to
# user's parse action 'func', so that we don't incur call penalty at parse time
LINE_DIFF = 6
# IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND
# THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
this_line = extract_stack(limit=2)[-1]
pa_call_line_synth = (this_line[0], this_line[1]+LINE_DIFF)
def wrapper(*args):
while 1:
try:
ret = func(*args[limit[0]:])
foundArity[0] = True
return ret
except TypeError:
# re-raise TypeErrors if they did not come from our arity testing
if foundArity[0]:
raise
else:
try:
tb = sys.exc_info()[-1]
if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth:
raise
finally:
del tb
if limit[0] <= maxargs:
limit[0] += 1
continue
raise
# copy func name to wrapper for sensible debug output
func_name = "<parse action>"
try:
func_name = getattr(func, '__name__',
getattr(func, '__class__').__name__)
except Exception:
func_name = str(func)
wrapper.__name__ = func_name
return wrapper
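# A minimal usage sketch (illustrative): _trim_arity is what lets parse actions
# declare only the arguments they need; each of these is an acceptable callback
# for setParseAction/addParseAction:
#   def fn0(): ...                  # called as fn0()
#   def fn1(toks): ...              # called as fn1(toks)
#   def fn2(loc, toks): ...         # called as fn2(loc, toks)
#   def fn3(s, loc, toks): ...      # called as fn3(s, loc, toks)
# The wrapper first tries the full (s, loc, toks) call, then drops leading
# arguments one at a time, caching the first arity that does not raise
# TypeError.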
class ParserElement(object):
"""Abstract base level parser element class."""
DEFAULT_WHITE_CHARS = " \n\t\r"
verbose_stacktrace = False
@staticmethod
def setDefaultWhitespaceChars( chars ):
r"""
Overrides the default whitespace chars
Example::
# default whitespace chars are space, <TAB> and newline
OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl']
# change to just treat newline as significant
ParserElement.setDefaultWhitespaceChars(" \t")
OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def']
"""
ParserElement.DEFAULT_WHITE_CHARS = chars
@staticmethod
def inlineLiteralsUsing(cls):
"""
Set class to be used for inclusion of string literals into a parser.
Example::
# default literal class used is Literal
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31']
# change to Suppress
ParserElement.inlineLiteralsUsing(Suppress)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
date_str.parseString("1999/12/31") # -> ['1999', '12', '31']
"""
ParserElement._literalStringClass = cls
def __init__( self, savelist=False ):
self.parseAction = list()
self.failAction = None
#~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall
self.strRepr = None
self.resultsName = None
self.saveAsList = savelist
self.skipWhitespace = True
self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
self.copyDefaultWhiteChars = True
self.mayReturnEmpty = False # used when checking for left-recursion
self.keepTabs = False
self.ignoreExprs = list()
self.debug = False
self.streamlined = False
self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
self.errmsg = ""
self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
self.debugActions = ( None, None, None ) #custom debug actions
self.re = None
self.callPreparse = True # used to avoid redundant calls to preParse
self.callDuringTry = False
def copy( self ):
"""
Make a copy of this C{ParserElement}. Useful for defining different parse actions
for the same parsing pattern, using copies of the original parse element.
Example::
integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K")
integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))
prints::
[5120, 100, 655360, 268435456]
Equivalent form of C{expr.copy()} is just C{expr()}::
integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
"""
cpy = copy.copy( self )
cpy.parseAction = self.parseAction[:]
cpy.ignoreExprs = self.ignoreExprs[:]
if self.copyDefaultWhiteChars:
cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
return cpy
def setName( self, name ):
"""
Define name for this expression, makes debugging and exception messages clearer.
Example::
Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)
Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1)
"""
self.name = name
self.errmsg = "Expected " + self.name
if hasattr(self,"exception"):
self.exception.msg = self.errmsg
return self
def setResultsName( self, name, listAllMatches=False ):
"""
Define name for referencing matching tokens as a nested attribute
of the returned parse results.
NOTE: this returns a *copy* of the original C{ParserElement} object;
this is so that the client can define a basic element, such as an
integer, and reference it in multiple places with different names.
You can also set results names using the abbreviated syntax,
C{expr("name")} in place of C{expr.setResultsName("name")} -
see L{I{__call__}<__call__>}.
Example::
date_str = (integer.setResultsName("year") + '/'
+ integer.setResultsName("month") + '/'
+ integer.setResultsName("day"))
# equivalent form:
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
"""
newself = self.copy()
if name.endswith("*"):
name = name[:-1]
listAllMatches=True
newself.resultsName = name
newself.modalResults = not listAllMatches
return newself
def setBreak(self,breakFlag = True):
"""Method to invoke the Python pdb debugger when this element is
about to be parsed. Set C{breakFlag} to True to enable, False to
disable.
"""
if breakFlag:
_parseMethod = self._parse
def breaker(instring, loc, doActions=True, callPreParse=True):
import pdb
pdb.set_trace()
return _parseMethod( instring, loc, doActions, callPreParse )
breaker._originalParseMethod = _parseMethod
self._parse = breaker
else:
if hasattr(self._parse,"_originalParseMethod"):
self._parse = self._parse._originalParseMethod
return self
def setParseAction( self, *fns, **kwargs ):
"""
Define action to perform when successfully matching parse element definition.
Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},
C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:
- s = the original string being parsed (see note below)
- loc = the location of the matching substring
- toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object
If the functions in fns modify the tokens, they can return them as the return
value from fn, and the modified list of tokens will replace the original.
Otherwise, fn does not need to return any value.
Optional keyword arguments:
- callDuringTry = (default=C{False}) indicate if parse action should be run during lookaheads and alternate testing
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{parseString}<parseString>} for more information
on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
Example::
integer = Word(nums)
date_str = integer + '/' + integer + '/' + integer
date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31']
# use parse action to convert to ints at parse time
integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
date_str = integer + '/' + integer + '/' + integer
# note that integer fields are now ints, not strings
date_str.parseString("1999/12/31") # -> [1999, '/', 12, '/', 31]
"""
self.parseAction = list(map(_trim_arity, list(fns)))
self.callDuringTry = kwargs.get("callDuringTry", False)
return self
def addParseAction( self, *fns, **kwargs ):
"""
Add parse action to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}.
See examples in L{I{copy}<copy>}.
"""
self.parseAction += list(map(_trim_arity, list(fns)))
self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
return self
def addCondition(self, *fns, **kwargs):
"""Add a boolean predicate function to expression's list of parse actions. See
L{I{setParseAction}<setParseAction>} for function call signatures. Unlike C{setParseAction},
functions passed to C{addCondition} need to return boolean success/fail of the condition.
Optional keyword arguments:
- message = define a custom message to be used in the raised exception
- fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException
Example::
integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
year_int = integer.copy()
year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
date_str = year_int + '/' + integer + '/' + integer
result = date_str.parseString("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)
"""
msg = kwargs.get("message", "failed user-defined condition")
exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException
for fn in fns:
def pa(s,l,t):
if not bool(_trim_arity(fn)(s,l,t)):
raise exc_type(s,l,msg)
self.parseAction.append(pa)
self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
return self
def setFailAction( self, fn ):
"""Define action to perform if parsing fails at this expression.
        Fail action fn is a callable function that takes the arguments
C{fn(s,loc,expr,err)} where:
- s = string being parsed
- loc = location where expression match was attempted and failed
- expr = the parse expression that failed
- err = the exception thrown
The function returns no value. It may throw C{L{ParseFatalException}}
if it is desired to stop parsing immediately."""
self.failAction = fn
return self
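    # A minimal usage sketch (illustrative): a fail action that reports parse
    # failures, using the (s, loc, expr, err) signature described above:
    #   def report_failure(s, loc, expr, err):
    #       print("failed to match %s at loc %d: %s" % (expr, loc, err))
    #   expr = Literal("hello").setFailAction(report_failure)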
def _skipIgnorables( self, instring, loc ):
exprsFound = True
while exprsFound:
exprsFound = False
for e in self.ignoreExprs:
try:
while 1:
loc,dummy = e._parse( instring, loc )
exprsFound = True
except ParseException:
pass
return loc
def preParse( self, instring, loc ):
if self.ignoreExprs:
loc = self._skipIgnorables( instring, loc )
if self.skipWhitespace:
wt = self.whiteChars
instrlen = len(instring)
while loc < instrlen and instring[loc] in wt:
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
return loc, []
def postParse( self, instring, loc, tokenlist ):
return tokenlist
#~ @profile
def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
debugging = ( self.debug ) #and doActions )
if debugging or self.failAction:
#~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
if (self.debugActions[0] ):
self.debugActions[0]( instring, loc, self )
if callPreParse and self.callPreparse:
preloc = self.preParse( instring, loc )
else:
preloc = loc
tokensStart = preloc
try:
try:
loc,tokens = self.parseImpl( instring, preloc, doActions )
except IndexError:
raise ParseException( instring, len(instring), self.errmsg, self )
except ParseBaseException as err:
#~ print ("Exception raised:", err)
if self.debugActions[2]:
self.debugActions[2]( instring, tokensStart, self, err )
if self.failAction:
self.failAction( instring, tokensStart, self, err )
raise
else:
if callPreParse and self.callPreparse:
preloc = self.preParse( instring, loc )
else:
preloc = loc
tokensStart = preloc
if self.mayIndexError or loc >= len(instring):
try:
loc,tokens = self.parseImpl( instring, preloc, doActions )
except IndexError:
raise ParseException( instring, len(instring), self.errmsg, self )
else:
loc,tokens = self.parseImpl( instring, preloc, doActions )
tokens = self.postParse( instring, loc, tokens )
retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
if self.parseAction and (doActions or self.callDuringTry):
if debugging:
try:
for fn in self.parseAction:
tokens = fn( instring, tokensStart, retTokens )
if tokens is not None:
retTokens = ParseResults( tokens,
self.resultsName,
asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
modal=self.modalResults )
except ParseBaseException as err:
#~ print "Exception raised in user parse action:", err
if (self.debugActions[2] ):
self.debugActions[2]( instring, tokensStart, self, err )
raise
else:
for fn in self.parseAction:
tokens = fn( instring, tokensStart, retTokens )
if tokens is not None:
retTokens = ParseResults( tokens,
self.resultsName,
asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
modal=self.modalResults )
if debugging:
#~ print ("Matched",self,"->",retTokens.asList())
if (self.debugActions[1] ):
self.debugActions[1]( instring, tokensStart, loc, self, retTokens )
return loc, retTokens
def tryParse( self, instring, loc ):
try:
return self._parse( instring, loc, doActions=False )[0]
except ParseFatalException:
raise ParseException( instring, loc, self.errmsg, self)
def canParseNext(self, instring, loc):
try:
self.tryParse(instring, loc)
except (ParseException, IndexError):
return False
else:
return True
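    # A minimal usage sketch (illustrative): canParseNext is a non-consuming
    # lookahead test, reporting whether a match would succeed at loc:
    #   expr = Word(nums)
    #   expr.canParseNext("abc 123", 0)   # -> False
    #   expr.canParseNext("abc 123", 4)   # -> True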
class _UnboundedCache(object):
def __init__(self):
cache = {}
self.not_in_cache = not_in_cache = object()
def get(self, key):
return cache.get(key, not_in_cache)
def set(self, key, value):
cache[key] = value
def clear(self):
cache.clear()
self.get = types.MethodType(get, self)
self.set = types.MethodType(set, self)
self.clear = types.MethodType(clear, self)
if _OrderedDict is not None:
class _FifoCache(object):
def __init__(self, size):
self.not_in_cache = not_in_cache = object()
cache = _OrderedDict()
def get(self, key):
return cache.get(key, not_in_cache)
def set(self, key, value):
cache[key] = value
if len(cache) > size:
cache.popitem(False)
def clear(self):
cache.clear()
self.get = types.MethodType(get, self)
self.set = types.MethodType(set, self)
self.clear = types.MethodType(clear, self)
else:
class _FifoCache(object):
def __init__(self, size):
self.not_in_cache = not_in_cache = object()
cache = {}
key_fifo = collections.deque([], size)
def get(self, key):
return cache.get(key, not_in_cache)
def set(self, key, value):
cache[key] = value
if len(cache) > size:
cache.pop(key_fifo.popleft(), None)
key_fifo.append(key)
def clear(self):
cache.clear()
key_fifo.clear()
self.get = types.MethodType(get, self)
self.set = types.MethodType(set, self)
self.clear = types.MethodType(clear, self)
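    # A minimal usage sketch (illustrative): both cache variants expose the same
    # get/set/clear surface, evicting oldest entries first once `size` is
    # exceeded:
    #   cache = ParserElement._FifoCache(2)
    #   cache.set("a", 1); cache.set("b", 2); cache.set("c", 3)
    #   cache.get("a") is cache.not_in_cache   # -> True, "a" was evicted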
# argument cache for optimizing repeated calls when backtracking through recursive expressions
    packrat_cache = {} # this is set later by enablePackrat(); this is here so that resetCache() doesn't fail
packrat_cache_lock = RLock()
packrat_cache_stats = [0, 0]
# this method gets repeatedly called during backtracking with the same arguments -
# we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
HIT, MISS = 0, 1
lookup = (self, instring, loc, callPreParse, doActions)
with ParserElement.packrat_cache_lock:
cache = ParserElement.packrat_cache
value = cache.get(lookup)
if value is cache.not_in_cache:
ParserElement.packrat_cache_stats[MISS] += 1
try:
value = self._parseNoCache(instring, loc, doActions, callPreParse)
except ParseBaseException as pe:
# cache a copy of the exception, without the traceback
cache.set(lookup, pe.__class__(*pe.args))
raise
else:
cache.set(lookup, (value[0], value[1].copy()))
return value
else:
ParserElement.packrat_cache_stats[HIT] += 1
if isinstance(value, Exception):
raise value
return (value[0], value[1].copy())
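    # A minimal usage sketch (illustrative): once packrat parsing is enabled,
    # cache effectiveness can be checked via the hit/miss counters:
    #   ParserElement.enablePackrat()
    #   grammar.parseString(data)   # 'grammar' and 'data' are placeholders
    #   hits, misses = ParserElement.packrat_cache_stats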
_parse = _parseNoCache
@staticmethod
def resetCache():
ParserElement.packrat_cache.clear()
ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats)
_packratEnabled = False
@staticmethod
def enablePackrat(cache_size_limit=128):
"""Enables "packrat" parsing, which adds memoizing to the parsing logic.
Repeated parse attempts at the same string location (which happens
often in many complex grammars) can immediately return a cached value,
        instead of re-executing parsing/validating code. Memoizing is done for
both valid results and parsing exceptions.
Parameters:
- cache_size_limit - (default=C{128}) - if an integer value is provided
will limit the size of the packrat cache; if None is passed, then
the cache size will be unbounded; if 0 is passed, the cache will
be effectively disabled.
This speedup may break existing programs that use parse actions that
have side-effects. For this reason, packrat parsing is disabled when
you first import pyparsing. To activate the packrat feature, your
program must call the class method C{ParserElement.enablePackrat()}. If
your program uses C{psyco} to "compile as you go", you must call
C{enablePackrat} before calling C{psyco.full()}. If you do not do this,
Python will crash. For best results, call C{enablePackrat()} immediately
after importing pyparsing.
Example::
import pyparsing
pyparsing.ParserElement.enablePackrat()
"""
if not ParserElement._packratEnabled:
ParserElement._packratEnabled = True
if cache_size_limit is None:
ParserElement.packrat_cache = ParserElement._UnboundedCache()
else:
ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit)
ParserElement._parse = ParserElement._parseCache
def parseString( self, instring, parseAll=False ):
"""
Execute the parse expression with the given string.
This is the main interface to the client code, once the complete
expression has been built.
If you want the grammar to require that the entire input string be
successfully parsed, then set C{parseAll} to True (equivalent to ending
the grammar with C{L{StringEnd()}}).
Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
in order to report proper column numbers in parse actions.
If the input string contains tabs and
the grammar uses parse actions that use the C{loc} argument to index into the
string being parsed, you can ensure you have a consistent view of the input
string by:
- calling C{parseWithTabs} on your grammar before calling C{parseString}
(see L{I{parseWithTabs}<parseWithTabs>})
- define your parse action using the full C{(s,loc,toks)} signature, and
reference the input string using the parse action's C{s} argument
         - explicitly expand the tabs in your input string before calling
C{parseString}
Example::
Word('a').parseString('aaaaabaaa') # -> ['aaaaa']
Word('a').parseString('aaaaabaaa', parseAll=True) # -> Exception: Expected end of text
"""
ParserElement.resetCache()
if not self.streamlined:
self.streamline()
#~ self.saveAsList = True
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = instring.expandtabs()
try:
loc, tokens = self._parse( instring, 0 )
if parseAll:
loc = self.preParse( instring, loc )
se = Empty() + StringEnd()
se._parse( instring, loc )
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
else:
return tokens
def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ):
"""
Scan the input string for expression matches. Each match will return the
matching tokens, start location, and end location. May be called with optional
C{maxMatches} argument, to clip scanning after 'n' matches are found. If
C{overlap} is specified, then overlapping matches will be reported.
Note that the start and end locations are reported relative to the string
being parsed. See L{I{parseString}<parseString>} for more information on parsing
strings with embedded tabs.
Example::
source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
print(source)
for tokens,start,end in Word(alphas).scanString(source):
print(' '*start + '^'*(end-start))
print(' '*start + tokens[0])
prints::
sldjf123lsdjjkf345sldkjf879lkjsfd987
^^^^^
sldjf
^^^^^^^
lsdjjkf
^^^^^^
sldkjf
^^^^^^
lkjsfd
"""
if not self.streamlined:
self.streamline()
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = _ustr(instring).expandtabs()
instrlen = len(instring)
loc = 0
preparseFn = self.preParse
parseFn = self._parse
ParserElement.resetCache()
matches = 0
try:
while loc <= instrlen and matches < maxMatches:
try:
preloc = preparseFn( instring, loc )
nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
except ParseException:
loc = preloc+1
else:
if nextLoc > loc:
matches += 1
yield tokens, preloc, nextLoc
if overlap:
nextloc = preparseFn( instring, loc )
if nextloc > loc:
                                loc = nextloc
else:
loc += 1
else:
loc = nextLoc
else:
loc = preloc+1
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def transformString( self, instring ):
"""
Extension to C{L{scanString}}, to modify matching text with modified tokens that may
be returned from a parse action. To use C{transformString}, define a grammar and
attach a parse action to it that modifies the returned token list.
Invoking C{transformString()} on a target string will then scan for matches,
and replace the matched text patterns according to the logic in the parse
action. C{transformString()} returns the resulting transformed string.
Example::
wd = Word(alphas)
wd.setParseAction(lambda toks: toks[0].title())
print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))
Prints::
Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
"""
out = []
lastE = 0
# force preservation of <TAB>s, to minimize unwanted transformation of string, and to
# keep string locs straight between transformString and scanString
self.keepTabs = True
try:
for t,s,e in self.scanString( instring ):
out.append( instring[lastE:s] )
if t:
if isinstance(t,ParseResults):
out += t.asList()
elif isinstance(t,list):
out += t
else:
out.append(t)
lastE = e
out.append(instring[lastE:])
out = [o for o in out if o]
return "".join(map(_ustr,_flatten(out)))
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def searchString( self, instring, maxMatches=_MAX_INT ):
"""
Another extension to C{L{scanString}}, simplifying the access to the tokens found
to match the given parse expression. May be called with optional
C{maxMatches} argument, to clip searching after 'n' matches are found.
Example::
# a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
cap_word = Word(alphas.upper(), alphas.lower())
print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))
prints::
['More', 'Iron', 'Lead', 'Gold', 'I']
"""
try:
return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False):
"""
Generator method to split a string using the given expression as a separator.
        May be called with optional C{maxsplit} argument, to limit the number of splits;
        and the optional C{includeSeparators} argument (default=C{False}), to indicate
        whether the separating matched text should be included in the split results.
Example::
punc = oneOf(list(".,;:/-!?"))
print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
prints::
['This', ' this', '', ' this sentence', ' is badly punctuated', '']
"""
splits = 0
last = 0
for t,s,e in self.scanString(instring, maxMatches=maxsplit):
yield instring[last:s]
if includeSeparators:
yield t[0]
last = e
yield instring[last:]
def __add__(self, other ):
"""
Implementation of + operator - returns C{L{And}}. Adding strings to a ParserElement
converts them to L{Literal}s by default.
Example::
greet = Word(alphas) + "," + Word(alphas) + "!"
hello = "Hello, World!"
print (hello, "->", greet.parseString(hello))
Prints::
Hello, World! -> ['Hello', ',', 'World', '!']
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return And( [ self, other ] )
def __radd__(self, other ):
"""
Implementation of + operator when left operand is not a C{L{ParserElement}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other + self
def __sub__(self, other):
"""
Implementation of - operator, returns C{L{And}} with error stop
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return And( [ self, And._ErrorStop(), other ] )
def __rsub__(self, other ):
"""
Implementation of - operator when left operand is not a C{L{ParserElement}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other - self
def __mul__(self,other):
"""
Implementation of * operator, allows use of C{expr * 3} in place of
        C{expr + expr + expr}. Expressions may also be multiplied by a 2-integer
tuple, similar to C{{min,max}} multipliers in regular expressions. Tuples
may also include C{None} as in:
- C{expr*(n,None)} or C{expr*(n,)} is equivalent
to C{expr*n + L{ZeroOrMore}(expr)}
(read as "at least n instances of C{expr}")
- C{expr*(None,n)} is equivalent to C{expr*(0,n)}
(read as "0 to n instances of C{expr}")
- C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)}
- C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)}
Note that C{expr*(None,n)} does not raise an exception if
more than n exprs exist in the input stream; that is,
C{expr*(None,n)} does not enforce a maximum number of expr
occurrences. If this behavior is desired, then write
C{expr*(None,n) + ~expr}
"""
if isinstance(other,int):
minElements, optElements = other,0
elif isinstance(other,tuple):
other = (other + (None, None))[:2]
if other[0] is None:
other = (0, other[1])
if isinstance(other[0],int) and other[1] is None:
if other[0] == 0:
return ZeroOrMore(self)
if other[0] == 1:
return OneOrMore(self)
else:
return self*other[0] + ZeroOrMore(self)
elif isinstance(other[0],int) and isinstance(other[1],int):
minElements, optElements = other
optElements -= minElements
else:
                raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects" % (type(other[0]), type(other[1])))
else:
            raise TypeError("cannot multiply 'ParserElement' and '%s' objects" % type(other))
if minElements < 0:
raise ValueError("cannot multiply ParserElement by negative value")
if optElements < 0:
raise ValueError("second tuple value must be greater or equal to first tuple value")
if minElements == optElements == 0:
raise ValueError("cannot multiply ParserElement by 0 or (0,0)")
if (optElements):
def makeOptionalList(n):
if n>1:
return Optional(self + makeOptionalList(n-1))
else:
return Optional(self)
if minElements:
if minElements == 1:
ret = self + makeOptionalList(optElements)
else:
ret = And([self]*minElements) + makeOptionalList(optElements)
else:
ret = makeOptionalList(optElements)
else:
if minElements == 1:
ret = self
else:
ret = And([self]*minElements)
return ret
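    # A minimal usage sketch (illustrative) of the tuple semantics documented
    # above:
    #   Word(nums) * 3           # exactly 3 integers
    #   Word(nums) * (2, 4)      # 2 to 4 integers
    #   Word(nums) * (1, None)   # same as OneOrMore(Word(nums))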
def __rmul__(self, other):
return self.__mul__(other)
def __or__(self, other ):
"""
Implementation of | operator - returns C{L{MatchFirst}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return MatchFirst( [ self, other ] )
def __ror__(self, other ):
"""
Implementation of | operator when left operand is not a C{L{ParserElement}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other | self
def __xor__(self, other ):
"""
Implementation of ^ operator - returns C{L{Or}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Or( [ self, other ] )
def __rxor__(self, other ):
"""
Implementation of ^ operator when left operand is not a C{L{ParserElement}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other ^ self
def __and__(self, other ):
"""
Implementation of & operator - returns C{L{Each}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Each( [ self, other ] )
def __rand__(self, other ):
"""
Implementation of & operator when left operand is not a C{L{ParserElement}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other & self
def __invert__( self ):
"""
Implementation of ~ operator - returns C{L{NotAny}}
"""
return NotAny( self )
def __call__(self, name=None):
"""
Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}.
If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be
passed as C{True}.
If C{name} is omitted, same as calling C{L{copy}}.
Example::
# these are equivalent
userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")
"""
if name is not None:
return self.setResultsName(name)
else:
return self.copy()
def suppress( self ):
"""
Suppresses the output of this C{ParserElement}; useful to keep punctuation from
cluttering up returned output.
"""
return Suppress( self )
def leaveWhitespace( self ):
"""
Disables the skipping of whitespace before matching the characters in the
C{ParserElement}'s defined pattern. This is normally only used internally by
the pyparsing module, but may be needed in some whitespace-sensitive grammars.
"""
self.skipWhitespace = False
return self
def setWhitespaceChars( self, chars ):
"""
Overrides the default whitespace chars
"""
self.skipWhitespace = True
self.whiteChars = chars
self.copyDefaultWhiteChars = False
return self
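    # A minimal usage sketch (illustrative): a per-expression whitespace
    # override that skips spaces and tabs but treats newlines as significant:
    #   word = Word(alphas).setWhitespaceChars(" \t")
    #   OneOrMore(word).parseString("abc def\nghi")   # -> ['abc', 'def']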
def parseWithTabs( self ):
"""
Overrides default behavior to expand C{<TAB>}s to spaces before parsing the input string.
Must be called before C{parseString} when the input grammar contains elements that
match C{<TAB>} characters.
"""
self.keepTabs = True
return self
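    # A minimal usage sketch (illustrative): keep literal tabs in the input so
    # the grammar can match them explicitly:
    #   expr = Word(alphas) + White("\t") + Word(alphas)
    #   expr.parseWithTabs().parseString("key\tvalue")   # -> ['key', '\t', 'value']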
def ignore( self, other ):
"""
Define expression to be ignored (e.g., comments) while doing pattern
matching; may be called repeatedly, to define multiple comment or other
ignorable patterns.
Example::
patt = OneOrMore(Word(alphas))
patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj']
patt.ignore(cStyleComment)
patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']
"""
if isinstance(other, basestring):
other = Suppress(other)
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
self.ignoreExprs.append(other)
else:
self.ignoreExprs.append( Suppress( other.copy() ) )
return self
def setDebugActions( self, startAction, successAction, exceptionAction ):
"""
Enable display of debugging messages while doing pattern matching.
"""
self.debugActions = (startAction or _defaultStartDebugAction,
successAction or _defaultSuccessDebugAction,
exceptionAction or _defaultExceptionDebugAction)
self.debug = True
return self
def setDebug( self, flag=True ):
"""
Enable display of debugging messages while doing pattern matching.
Set C{flag} to True to enable, False to disable.
Example::
wd = Word(alphas).setName("alphaword")
integer = Word(nums).setName("numword")
term = wd | integer
# turn on debugging for wd
wd.setDebug()
OneOrMore(term).parseString("abc 123 xyz 890")
prints::
Match alphaword at loc 0(1,1)
Matched alphaword -> ['abc']
Match alphaword at loc 3(1,4)
Exception raised:Expected alphaword (at char 4), (line:1, col:5)
Match alphaword at loc 7(1,8)
Matched alphaword -> ['xyz']
Match alphaword at loc 11(1,12)
Exception raised:Expected alphaword (at char 12), (line:1, col:13)
Match alphaword at loc 15(1,16)
Exception raised:Expected alphaword (at char 15), (line:1, col:16)
The output shown is that produced by the default debug actions - custom debug actions can be
specified using L{setDebugActions}. Prior to attempting
to match the C{wd} expression, the debugging message C{"Match <exprname> at loc <n>(<line>,<col>)"}
is shown. Then if the parse succeeds, a C{"Matched"} message is shown, or an C{"Exception raised"}
message is shown. Also note the use of L{setName} to assign a human-readable name to the expression,
which makes debugging and exception messages easier to understand - for instance, the default
name created for the C{Word} expression without calling C{setName} is C{"W:(ABCD...)"}.
"""
if flag:
self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )
else:
self.debug = False
return self
def __str__( self ):
return self.name
def __repr__( self ):
return _ustr(self)
def streamline( self ):
self.streamlined = True
self.strRepr = None
return self
def checkRecursion( self, parseElementList ):
pass
def validate( self, validateTrace=[] ):
"""
Check defined expressions for valid structure, check for infinite recursive definitions.
"""
self.checkRecursion( [] )
def parseFile( self, file_or_filename, parseAll=False ):
"""
Execute the parse expression on the given file or filename.
If a filename is specified (instead of a file object),
the entire file is opened, read, and closed before parsing.
"""
try:
file_contents = file_or_filename.read()
except AttributeError:
with open(file_or_filename, "r") as f:
file_contents = f.read()
try:
return self.parseString(file_contents, parseAll)
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
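    # A minimal usage sketch (illustrative; "data.txt" is a placeholder): both
    # call styles are equivalent - the filename form opens and closes the file
    # itself:
    #   grammar.parseFile("data.txt")
    #   with open("data.txt") as f:
    #       grammar.parseFile(f)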
def __eq__(self,other):
if isinstance(other, ParserElement):
return self is other or vars(self) == vars(other)
elif isinstance(other, basestring):
return self.matches(other)
else:
return super(ParserElement,self)==other
def __ne__(self,other):
return not (self == other)
def __hash__(self):
return hash(id(self))
def __req__(self,other):
return self == other
def __rne__(self,other):
return not (self == other)
def matches(self, testString, parseAll=True):
"""
Method for quick testing of a parser against a test string. Good for simple
        inline microtests of sub-expressions while building up a larger parser.
Parameters:
- testString - to test against this expression for a match
- parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
Example::
expr = Word(nums)
assert expr.matches("100")
"""
try:
self.parseString(_ustr(testString), parseAll=parseAll)
return True
except ParseBaseException:
return False
def runTests(self, tests, parseAll=True, comment='#', fullDump=True, printResults=True, failureTests=False):
"""
Execute the parse expression on a series of test strings, showing each
test, the parsed results or where the parse failed. Quick and easy way to
run a parse expression against a list of sample strings.
Parameters:
- tests - a list of separate test strings, or a multiline string of test strings
- parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
- comment - (default=C{'#'}) - expression for indicating embedded comments in the test
string; pass None to disable comment filtering
- fullDump - (default=C{True}) - dump results as list followed by results names in nested outline;
if False, only dump nested list
- printResults - (default=C{True}) prints test output to stdout
- failureTests - (default=C{False}) indicates if these tests are expected to fail parsing
Returns: a (success, results) tuple, where success indicates that all tests succeeded
(or failed if C{failureTests} is True), and the results contain a list of lines of each
test's output
Example::
number_expr = pyparsing_common.number.copy()
result = number_expr.runTests('''
# unsigned integer
100
# negative integer
-100
# float with scientific notation
6.02e23
# integer with scientific notation
1e-12
''')
print("Success" if result[0] else "Failed!")
result = number_expr.runTests('''
# stray character
100Z
# missing leading digit before '.'
-.100
# too many '.'
3.14.159
''', failureTests=True)
print("Success" if result[0] else "Failed!")
prints::
# unsigned integer
100
[100]
# negative integer
-100
[-100]
# float with scientific notation
6.02e23
[6.02e+23]
# integer with scientific notation
1e-12
[1e-12]
Success
# stray character
100Z
^
FAIL: Expected end of text (at char 3), (line:1, col:4)
# missing leading digit before '.'
-.100
^
FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)
# too many '.'
3.14.159
^
FAIL: Expected end of text (at char 4), (line:1, col:5)
Success
Each test string must be on a single line. If you want to test a string that spans multiple
lines, create a test like this::
            expr.runTests(r"this is a test\\n of strings that spans \\n 3 lines")
(Note that this is a raw string literal, you must include the leading 'r'.)
"""
if isinstance(tests, basestring):
tests = list(map(str.strip, tests.rstrip().splitlines()))
if isinstance(comment, basestring):
comment = Literal(comment)
allResults = []
comments = []
success = True
for t in tests:
            if (comment is not None and comment.matches(t, False)) or (comments and not t):
comments.append(t)
continue
if not t:
continue
out = ['\n'.join(comments), t]
comments = []
try:
t = t.replace(r'\n','\n')
result = self.parseString(t, parseAll=parseAll)
out.append(result.dump(full=fullDump))
success = success and not failureTests
except ParseBaseException as pe:
fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""
if '\n' in t:
out.append(line(pe.loc, t))
out.append(' '*(col(pe.loc,t)-1) + '^' + fatal)
else:
out.append(' '*pe.loc + '^' + fatal)
out.append("FAIL: " + str(pe))
success = success and failureTests
result = pe
except Exception as exc:
out.append("FAIL-EXCEPTION: " + str(exc))
success = success and failureTests
result = exc
if printResults:
if fullDump:
out.append('')
print('\n'.join(out))
allResults.append((t, result))
return success, allResults
class Token(ParserElement):
"""
Abstract C{ParserElement} subclass, for defining atomic matching patterns.
"""
def __init__( self ):
super(Token,self).__init__( savelist=False )
class Empty(Token):
"""
An empty token, will always match.
"""
def __init__( self ):
super(Empty,self).__init__()
self.name = "Empty"
self.mayReturnEmpty = True
self.mayIndexError = False
class NoMatch(Token):
"""
A token that will never match.
"""
def __init__( self ):
super(NoMatch,self).__init__()
self.name = "NoMatch"
self.mayReturnEmpty = True
self.mayIndexError = False
self.errmsg = "Unmatchable token"
def parseImpl( self, instring, loc, doActions=True ):
raise ParseException(instring, loc, self.errmsg, self)
class Literal(Token):
"""
Token to exactly match a specified string.
Example::
Literal('blah').parseString('blah') # -> ['blah']
Literal('blah').parseString('blahfooblah') # -> ['blah']
Literal('blah').parseString('bla') # -> Exception: Expected "blah"
For case-insensitive matching, use L{CaselessLiteral}.
For keyword matching (force word break before and after the matched string),
use L{Keyword} or L{CaselessKeyword}.
"""
def __init__( self, matchString ):
super(Literal,self).__init__()
self.match = matchString
self.matchLen = len(matchString)
try:
self.firstMatchChar = matchString[0]
except IndexError:
warnings.warn("null string passed to Literal; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.__class__ = Empty
self.name = '"%s"' % _ustr(self.match)
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = False
self.mayIndexError = False
# Performance tuning: this routine gets called a *lot*
# if this is a single character match string and the first character matches,
# short-circuit as quickly as possible, and avoid calling startswith
#~ @profile
def parseImpl( self, instring, loc, doActions=True ):
if (instring[loc] == self.firstMatchChar and
(self.matchLen==1 or instring.startswith(self.match,loc)) ):
return loc+self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
_L = Literal
ParserElement._literalStringClass = Literal
class Keyword(Token):
"""
Token to exactly match a specified string as a keyword, that is, it must be
immediately followed by a non-keyword character. Compare with C{L{Literal}}:
- C{Literal("if")} will match the leading C{'if'} in C{'ifAndOnlyIf'}.
- C{Keyword("if")} will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'}
Accepts two optional constructor arguments in addition to the keyword string:
- C{identChars} is a string of characters that would be valid identifier characters,
defaulting to all alphanumerics + "_" and "$"
- C{caseless} allows case-insensitive matching, default is C{False}.
Example::
Keyword("start").parseString("start") # -> ['start']
Keyword("start").parseString("starting") # -> Exception
For case-insensitive matching, use L{CaselessKeyword}.
"""
DEFAULT_KEYWORD_CHARS = alphanums+"_$"
def __init__( self, matchString, identChars=None, caseless=False ):
super(Keyword,self).__init__()
if identChars is None:
identChars = Keyword.DEFAULT_KEYWORD_CHARS
self.match = matchString
self.matchLen = len(matchString)
try:
self.firstMatchChar = matchString[0]
except IndexError:
warnings.warn("null string passed to Keyword; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.name = '"%s"' % self.match
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = False
self.mayIndexError = False
self.caseless = caseless
if caseless:
self.caselessmatch = matchString.upper()
identChars = identChars.upper()
self.identChars = set(identChars)
def parseImpl( self, instring, loc, doActions=True ):
if self.caseless:
if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
(loc == 0 or instring[loc-1].upper() not in self.identChars) ):
return loc+self.matchLen, self.match
else:
if (instring[loc] == self.firstMatchChar and
(self.matchLen==1 or instring.startswith(self.match,loc)) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
(loc == 0 or instring[loc-1] not in self.identChars) ):
return loc+self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
def copy(self):
c = super(Keyword,self).copy()
c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
return c
@staticmethod
def setDefaultKeywordChars( chars ):
"""Overrides the default Keyword chars
"""
Keyword.DEFAULT_KEYWORD_CHARS = chars
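# Illustrative usage sketch (not part of the original module): narrowing the
# default keyword characters changes where keyword boundaries are recognized.
# Note this mutates class-level state, so it affects every Keyword
# constructed afterwards.
#
#   Keyword.setDefaultKeywordChars(alphanums)   # drop '_' and '$'
#   Keyword("if").parseString("if_x")   # now matches the leading 'if', since
#                                       # '_' no longer extends a keyword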
class CaselessLiteral(Literal):
"""
Token to match a specified string, ignoring case of letters.
Note: the matched results will always be in the case of the given
match string, NOT the case of the input text.
Example::
OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD']
(Contrast with example for L{CaselessKeyword}.)
"""
def __init__( self, matchString ):
super(CaselessLiteral,self).__init__( matchString.upper() )
# Preserve the defining literal.
self.returnString = matchString
self.name = "'%s'" % self.returnString
self.errmsg = "Expected " + self.name
def parseImpl( self, instring, loc, doActions=True ):
if instring[ loc:loc+self.matchLen ].upper() == self.match:
return loc+self.matchLen, self.returnString
raise ParseException(instring, loc, self.errmsg, self)
class CaselessKeyword(Keyword):
"""
Caseless version of L{Keyword}.
Example::
OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD']
(Contrast with example for L{CaselessLiteral}.)
"""
def __init__( self, matchString, identChars=None ):
super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )
    def parseImpl( self, instring, loc, doActions=True ):
        # check the preceding character as well, so that a caseless keyword is
        # not matched in the middle of a longer identifier (e.g. 'cmd' in 'xcmd')
        if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
             (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
             (loc == 0 or instring[loc-1].upper() not in self.identChars) ):
            return loc+self.matchLen, self.match
        raise ParseException(instring, loc, self.errmsg, self)
class CloseMatch(Token):
"""
A variation on L{Literal} which matches "close" matches, that is,
strings with at most 'n' mismatching characters. C{CloseMatch} takes parameters:
- C{match_string} - string to be matched
- C{maxMismatches} - (C{default=1}) maximum number of mismatches allowed to count as a match
The results from a successful parse will contain the matched text from the input string and the following named results:
- C{mismatches} - a list of the positions within the match_string where mismatches were found
- C{original} - the original match_string used to compare against the input string
If C{mismatches} is an empty list, then the match was an exact match.
Example::
patt = CloseMatch("ATCATCGAATGGA")
patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)
# exact match
patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})
# close match allowing up to 2 mismatches
patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2)
patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
"""
def __init__(self, match_string, maxMismatches=1):
super(CloseMatch,self).__init__()
self.name = match_string
self.match_string = match_string
self.maxMismatches = maxMismatches
self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches)
self.mayIndexError = False
self.mayReturnEmpty = False
def parseImpl( self, instring, loc, doActions=True ):
start = loc
instrlen = len(instring)
maxloc = start + len(self.match_string)
if maxloc <= instrlen:
match_string = self.match_string
match_stringloc = 0
mismatches = []
maxMismatches = self.maxMismatches
for match_stringloc,s_m in enumerate(zip(instring[loc:maxloc], self.match_string)):
src,mat = s_m
if src != mat:
mismatches.append(match_stringloc)
if len(mismatches) > maxMismatches:
break
else:
loc = match_stringloc + 1
results = ParseResults([instring[start:loc]])
results['original'] = self.match_string
results['mismatches'] = mismatches
return loc, results
raise ParseException(instring, loc, self.errmsg, self)
class Word(Token):
"""
Token for matching words composed of allowed character sets.
Defined with string containing all allowed initial characters,
an optional string containing allowed body characters (if omitted,
defaults to the initial character set), and an optional minimum,
maximum, and/or exact length. The default value for C{min} is 1 (a
minimum value < 1 is not valid); the default values for C{max} and C{exact}
are 0, meaning no maximum or exact length restriction. An optional
C{excludeChars} parameter can list characters that might be found in
the input C{bodyChars} string; useful to define a word of all printables
except for one or two characters, for instance.
L{srange} is useful for defining custom character set strings for defining
C{Word} expressions, using range notation from regular expression character sets.
A common mistake is to use C{Word} to match a specific literal string, as in
C{Word("Address")}. Remember that C{Word} uses the string argument to define
I{sets} of matchable characters. This expression would match "Add", "AAA",
"dAred", or any other word made up of the characters 'A', 'd', 'r', 'e', and 's'.
To match an exact literal string, use L{Literal} or L{Keyword}.
pyparsing includes helper strings for building Words:
- L{alphas}
- L{nums}
- L{alphanums}
- L{hexnums}
- L{alphas8bit} (alphabetic characters in ASCII range 128-255 - accented, tilded, umlauted, etc.)
- L{punc8bit} (non-alphabetic characters in ASCII range 128-255 - currency, symbols, superscripts, diacriticals, etc.)
- L{printables} (any non-whitespace character)
Example::
# a word composed of digits
integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))
# a word with a leading capital, and zero or more lowercase
capital_word = Word(alphas.upper(), alphas.lower())
# hostnames are alphanumeric, with leading alpha, and '-'
hostname = Word(alphas, alphanums+'-')
# roman numeral (not a strict parser, accepts invalid mix of characters)
roman = Word("IVXLCDM")
# any string of non-whitespace characters, except for ','
csv_value = Word(printables, excludeChars=",")
"""
def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ):
super(Word,self).__init__()
if excludeChars:
initChars = ''.join(c for c in initChars if c not in excludeChars)
if bodyChars:
bodyChars = ''.join(c for c in bodyChars if c not in excludeChars)
self.initCharsOrig = initChars
self.initChars = set(initChars)
if bodyChars :
self.bodyCharsOrig = bodyChars
self.bodyChars = set(bodyChars)
else:
self.bodyCharsOrig = initChars
self.bodyChars = set(initChars)
self.maxSpecified = max > 0
if min < 1:
raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.asKeyword = asKeyword
if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
if self.bodyCharsOrig == self.initCharsOrig:
self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
elif len(self.initCharsOrig) == 1:
self.reString = "%s[%s]*" % \
(re.escape(self.initCharsOrig),
_escapeRegexRangeChars(self.bodyCharsOrig),)
else:
self.reString = "[%s][%s]*" % \
(_escapeRegexRangeChars(self.initCharsOrig),
_escapeRegexRangeChars(self.bodyCharsOrig),)
if self.asKeyword:
self.reString = r"\b"+self.reString+r"\b"
try:
self.re = re.compile( self.reString )
except Exception:
self.re = None
def parseImpl( self, instring, loc, doActions=True ):
if self.re:
result = self.re.match(instring,loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
return loc, result.group()
        if instring[loc] not in self.initChars:
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
instrlen = len(instring)
bodychars = self.bodyChars
maxloc = start + self.maxLen
maxloc = min( maxloc, instrlen )
while loc < maxloc and instring[loc] in bodychars:
loc += 1
throwException = False
if loc - start < self.minLen:
throwException = True
if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
throwException = True
if self.asKeyword:
if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
throwException = True
if throwException:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
def __str__( self ):
try:
return super(Word,self).__str__()
except Exception:
pass
if self.strRepr is None:
def charsAsStr(s):
if len(s)>4:
return s[:4]+"..."
else:
return s
if ( self.initCharsOrig != self.bodyCharsOrig ):
self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
else:
self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
return self.strRepr
class Regex(Token):
"""
Token for matching strings that match a given regular expression.
Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module.
If the given regex contains named groups (defined using C{(?P<name>...)}), these will be preserved as
named parse results.
Example::
realnum = Regex(r"[+-]?\d+\.\d*")
date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
# ref: http://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
"""
compiledREtype = type(re.compile("[A-Z]"))
def __init__( self, pattern, flags=0):
"""The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags."""
super(Regex,self).__init__()
if isinstance(pattern, basestring):
if not pattern:
warnings.warn("null string passed to Regex; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.pattern = pattern
self.flags = flags
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
except sre_constants.error:
warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
SyntaxWarning, stacklevel=2)
raise
elif isinstance(pattern, Regex.compiledREtype):
self.re = pattern
self.pattern = \
self.reString = str(pattern)
self.flags = flags
else:
raise ValueError("Regex may only be constructed with a string or a compiled RE object")
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
result = self.re.match(instring,loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
d = result.groupdict()
ret = ParseResults(result.group())
if d:
for k in d:
ret[k] = d[k]
return loc,ret
def __str__( self ):
try:
return super(Regex,self).__str__()
except Exception:
pass
if self.strRepr is None:
self.strRepr = "Re:(%s)" % repr(self.pattern)
return self.strRepr
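# Illustrative usage sketch (not part of the original module): named groups in
# the pattern are copied into the ParseResults (see parseImpl above), so they
# can be read back by name:
#
#   date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
#   result = date.parseString("1999-12-31")
#   result['year'], result['month'], result['day']   # -> '1999', '12', '31'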
class QuotedString(Token):
r"""
Token for matching strings that are delimited by quoting characters.
Defined with the following parameters:
- quoteChar - string of one or more characters defining the quote delimiting string
- escChar - character to escape quotes, typically backslash (default=C{None})
- escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=C{None})
- multiline - boolean indicating whether quotes can span multiple lines (default=C{False})
- unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True})
- endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar)
- convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) to actual whitespace (default=C{True})
Example::
qs = QuotedString('"')
print(qs.searchString('lsjdf "This is the quote" sldjf'))
complex_qs = QuotedString('{{', endQuoteChar='}}')
print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf'))
sql_qs = QuotedString('"', escQuote='""')
print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
prints::
[['This is the quote']]
[['This is the "quote"']]
[['This is the quote with "embedded" quotes']]
"""
def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True):
super(QuotedString,self).__init__()
        # remove white space from quote chars - won't work anyway
quoteChar = quoteChar.strip()
if not quoteChar:
warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
raise SyntaxError()
if endQuoteChar is None:
endQuoteChar = quoteChar
else:
endQuoteChar = endQuoteChar.strip()
if not endQuoteChar:
warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
raise SyntaxError()
self.quoteChar = quoteChar
self.quoteCharLen = len(quoteChar)
self.firstQuoteChar = quoteChar[0]
self.endQuoteChar = endQuoteChar
self.endQuoteCharLen = len(endQuoteChar)
self.escChar = escChar
self.escQuote = escQuote
self.unquoteResults = unquoteResults
self.convertWhitespaceEscapes = convertWhitespaceEscapes
if multiline:
self.flags = re.MULTILINE | re.DOTALL
self.pattern = r'%s(?:[^%s%s]' % \
( re.escape(self.quoteChar),
_escapeRegexRangeChars(self.endQuoteChar[0]),
(escChar is not None and _escapeRegexRangeChars(escChar) or '') )
else:
self.flags = 0
self.pattern = r'%s(?:[^%s\n\r%s]' % \
( re.escape(self.quoteChar),
_escapeRegexRangeChars(self.endQuoteChar[0]),
(escChar is not None and _escapeRegexRangeChars(escChar) or '') )
if len(self.endQuoteChar) > 1:
self.pattern += (
'|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
_escapeRegexRangeChars(self.endQuoteChar[i]))
for i in range(len(self.endQuoteChar)-1,0,-1)) + ')'
)
if escQuote:
self.pattern += (r'|(?:%s)' % re.escape(escQuote))
if escChar:
self.pattern += (r'|(?:%s.)' % re.escape(escChar))
self.escCharReplacePattern = re.escape(self.escChar)+"(.)"
self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
except sre_constants.error:
warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
SyntaxWarning, stacklevel=2)
raise
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
        result = self.re.match(instring, loc) if instring[loc] == self.firstQuoteChar else None
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
ret = result.group()
if self.unquoteResults:
# strip off quotes
ret = ret[self.quoteCharLen:-self.endQuoteCharLen]
if isinstance(ret,basestring):
# replace escaped whitespace
if '\\' in ret and self.convertWhitespaceEscapes:
ws_map = {
r'\t' : '\t',
r'\n' : '\n',
r'\f' : '\f',
r'\r' : '\r',
}
for wslit,wschar in ws_map.items():
ret = ret.replace(wslit, wschar)
# replace escaped characters
if self.escChar:
                    ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)
# replace escaped quotes
if self.escQuote:
ret = ret.replace(self.escQuote, self.endQuoteChar)
return loc, ret
def __str__( self ):
try:
return super(QuotedString,self).__str__()
except Exception:
pass
if self.strRepr is None:
self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
return self.strRepr
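# Illustrative usage sketch (not part of the original module): escChar handles
# backslash-escaped quotes, which are unescaped again when unquoteResults is
# left at its default of True:
#
#   py_str = QuotedString('"', escChar='\\')
#   py_str.parseString(r'"a \"quoted\" word"')   # -> ['a "quoted" word']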
class CharsNotIn(Token):
"""
Token for matching words composed of characters I{not} in a given set (will
include whitespace in matched characters if not listed in the provided exclusion set - see example).
Defined with string containing all disallowed characters, and an optional
minimum, maximum, and/or exact length. The default value for C{min} is 1 (a
minimum value < 1 is not valid); the default values for C{max} and C{exact}
are 0, meaning no maximum or exact length restriction.
Example::
# define a comma-separated-value as anything that is not a ','
csv_value = CharsNotIn(',')
print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213"))
prints::
['dkls', 'lsdkjf', 's12 34', '@!#', '213']
"""
def __init__( self, notChars, min=1, max=0, exact=0 ):
super(CharsNotIn,self).__init__()
self.skipWhitespace = False
self.notChars = notChars
if min < 1:
raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted")
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = ( self.minLen == 0 )
self.mayIndexError = False
def parseImpl( self, instring, loc, doActions=True ):
if instring[loc] in self.notChars:
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
notchars = self.notChars
maxlen = min( start+self.maxLen, len(instring) )
while loc < maxlen and \
(instring[loc] not in notchars):
loc += 1
if loc - start < self.minLen:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
def __str__( self ):
try:
return super(CharsNotIn, self).__str__()
except Exception:
pass
if self.strRepr is None:
if len(self.notChars) > 4:
self.strRepr = "!W:(%s...)" % self.notChars[:4]
else:
self.strRepr = "!W:(%s)" % self.notChars
return self.strRepr
class White(Token):
"""
Special matching class for matching whitespace. Normally, whitespace is ignored
by pyparsing grammars. This class is included when some whitespace structures
are significant. Define with a string containing the whitespace characters to be
matched; default is C{" \\t\\r\\n"}. Also takes optional C{min}, C{max}, and C{exact} arguments,
as defined for the C{L{Word}} class.
"""
whiteStrs = {
" " : "<SPC>",
"\t": "<TAB>",
"\n": "<LF>",
"\r": "<CR>",
"\f": "<FF>",
}
def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
super(White,self).__init__()
self.matchWhite = ws
self.setWhitespaceChars( "".join(c for c in self.whiteChars if c not in self.matchWhite) )
#~ self.leaveWhitespace()
self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite))
self.mayReturnEmpty = True
self.errmsg = "Expected " + self.name
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
def parseImpl( self, instring, loc, doActions=True ):
        if instring[loc] not in self.matchWhite:
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
maxloc = start + self.maxLen
maxloc = min( maxloc, len(instring) )
while loc < maxloc and instring[loc] in self.matchWhite:
loc += 1
if loc - start < self.minLen:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
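# Illustrative usage sketch (not part of the original module): White makes
# otherwise-skipped whitespace significant, e.g. to demand a literal tab
# between two fields (note that parseString expands tabs unless
# parseWithTabs() is called on the expression):
#
#   field = Word(printables)
#   row = field + White("\t", exact=1).suppress() + field
#   row.parseWithTabs().parseString("key\tvalue")   # -> ['key', 'value']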
class _PositionToken(Token):
def __init__( self ):
super(_PositionToken,self).__init__()
self.name=self.__class__.__name__
self.mayReturnEmpty = True
self.mayIndexError = False
class GoToColumn(_PositionToken):
"""
Token to advance to a specific column of input text; useful for tabular report scraping.
"""
def __init__( self, colno ):
super(GoToColumn,self).__init__()
self.col = colno
def preParse( self, instring, loc ):
if col(loc,instring) != self.col:
instrlen = len(instring)
if self.ignoreExprs:
loc = self._skipIgnorables( instring, loc )
while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
thiscol = col( loc, instring )
if thiscol > self.col:
raise ParseException( instring, loc, "Text not in expected column", self )
newloc = loc + self.col - thiscol
ret = instring[ loc: newloc ]
return newloc, ret
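# Illustrative usage sketch (not part of the original module): GoToColumn jumps
# to a fixed input column, which suits fixed-width, column-aligned reports;
# suppressing it keeps the skipped padding out of the results:
#
#   row = Word(alphas)("name") + GoToColumn(10).suppress() + Word(nums)("qty")
#   row.parseString("widget   42")   # -> name: 'widget', qty: '42'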
class LineStart(_PositionToken):
"""
Matches if current position is at the beginning of a line within the parse string
Example::
test = '''\
AAA this line
AAA and this line
AAA but not this one
B AAA and definitely not this one
'''
for t in (LineStart() + 'AAA' + restOfLine).searchString(test):
print(t)
Prints::
['AAA', ' this line']
['AAA', ' and this line']
"""
def __init__( self ):
super(LineStart,self).__init__()
self.errmsg = "Expected start of line"
def parseImpl( self, instring, loc, doActions=True ):
if col(loc, instring) == 1:
return loc, []
raise ParseException(instring, loc, self.errmsg, self)
class LineEnd(_PositionToken):
"""
Matches if current position is at the end of a line within the parse string
"""
def __init__( self ):
super(LineEnd,self).__init__()
self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
self.errmsg = "Expected end of line"
def parseImpl( self, instring, loc, doActions=True ):
if loc<len(instring):
if instring[loc] == "\n":
return loc+1, "\n"
else:
raise ParseException(instring, loc, self.errmsg, self)
elif loc == len(instring):
return loc+1, []
else:
raise ParseException(instring, loc, self.errmsg, self)
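# Illustrative usage sketch (not part of the original module): LineEnd anchors
# a match at the end of a line, e.g. to insist that nothing follows a value:
#
#   kv = Word(alphas) + Suppress('=') + Word(nums) + LineEnd().suppress()
#   kv.parseString("answer = 42\n")   # -> ['answer', '42']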
class StringStart(_PositionToken):
"""
Matches if current position is at the beginning of the parse string
"""
def __init__( self ):
super(StringStart,self).__init__()
self.errmsg = "Expected start of text"
def parseImpl( self, instring, loc, doActions=True ):
if loc != 0:
# see if entire string up to here is just whitespace and ignoreables
if loc != self.preParse( instring, 0 ):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class StringEnd(_PositionToken):
"""
Matches if current position is at the end of the parse string
"""
def __init__( self ):
super(StringEnd,self).__init__()
self.errmsg = "Expected end of text"
def parseImpl( self, instring, loc, doActions=True ):
if loc < len(instring):
raise ParseException(instring, loc, self.errmsg, self)
elif loc == len(instring):
return loc+1, []
elif loc > len(instring):
return loc, []
else:
raise ParseException(instring, loc, self.errmsg, self)
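# Illustrative usage sketch (not part of the original module): appending
# StringEnd rejects trailing unparsed text, much like passing parseAll=True
# to parseString:
#
#   expr = Word(nums) + StringEnd()
#   expr.parseString("123")       # -> ['123']
#   expr.parseString("123 abc")   # raises ParseException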
class WordStart(_PositionToken):
"""
Matches if the current position is at the beginning of a Word, and
is not preceded by any character in a given set of C{wordChars}
(default=C{printables}). To emulate the C{\b} behavior of regular expressions,
use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of
the string being parsed, or at the beginning of a line.
"""
def __init__(self, wordChars = printables):
super(WordStart,self).__init__()
self.wordChars = set(wordChars)
self.errmsg = "Not at the start of a word"
def parseImpl(self, instring, loc, doActions=True ):
if loc != 0:
if (instring[loc-1] in self.wordChars or
instring[loc] not in self.wordChars):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class WordEnd(_PositionToken):
"""
Matches if the current position is at the end of a Word, and
is not followed by any character in a given set of C{wordChars}
(default=C{printables}). To emulate the C{\b} behavior of regular expressions,
use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of
the string being parsed, or at the end of a line.
"""
def __init__(self, wordChars = printables):
super(WordEnd,self).__init__()
self.wordChars = set(wordChars)
self.skipWhitespace = False
self.errmsg = "Not at the end of a word"
def parseImpl(self, instring, loc, doActions=True ):
instrlen = len(instring)
if instrlen>0 and loc<instrlen:
if (instring[loc] in self.wordChars or
instring[loc-1] not in self.wordChars):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
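# Illustrative usage sketch (not part of the original module): WordStart and
# WordEnd built with alphanums emulate the regex \b word boundary, so embedded
# occurrences are not matched:
#
#   cat = WordStart(alphanums) + Literal("cat") + WordEnd(alphanums)
#   cat.searchString("cat concat catalog")   # -> [['cat']] (only the first one)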
class ParseExpression(ParserElement):
"""
Abstract subclass of ParserElement, for combining and post-processing parsed tokens.
"""
def __init__( self, exprs, savelist = False ):
super(ParseExpression,self).__init__(savelist)
if isinstance( exprs, _generatorType ):
exprs = list(exprs)
if isinstance( exprs, basestring ):
self.exprs = [ ParserElement._literalStringClass( exprs ) ]
elif isinstance( exprs, collections.Iterable ):
exprs = list(exprs)
# if sequence of strings provided, wrap with Literal
if all(isinstance(expr, basestring) for expr in exprs):
exprs = map(ParserElement._literalStringClass, exprs)
self.exprs = list(exprs)
else:
try:
self.exprs = list( exprs )
except TypeError:
self.exprs = [ exprs ]
self.callPreparse = False
def __getitem__( self, i ):
return self.exprs[i]
def append( self, other ):
self.exprs.append( other )
self.strRepr = None
return self
def leaveWhitespace( self ):
"""Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on
all contained expressions."""
self.skipWhitespace = False
self.exprs = [ e.copy() for e in self.exprs ]
for e in self.exprs:
e.leaveWhitespace()
return self
def ignore( self, other ):
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
super( ParseExpression, self).ignore( other )
for e in self.exprs:
e.ignore( self.ignoreExprs[-1] )
else:
super( ParseExpression, self).ignore( other )
for e in self.exprs:
e.ignore( self.ignoreExprs[-1] )
return self
def __str__( self ):
try:
return super(ParseExpression,self).__str__()
except Exception:
pass
if self.strRepr is None:
self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) )
return self.strRepr
def streamline( self ):
super(ParseExpression,self).streamline()
for e in self.exprs:
e.streamline()
# collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
# but only if there are no parse actions or resultsNames on the nested And's
# (likewise for Or's and MatchFirst's)
if ( len(self.exprs) == 2 ):
other = self.exprs[0]
if ( isinstance( other, self.__class__ ) and
not(other.parseAction) and
other.resultsName is None and
not other.debug ):
self.exprs = other.exprs[:] + [ self.exprs[1] ]
self.strRepr = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
other = self.exprs[-1]
if ( isinstance( other, self.__class__ ) and
not(other.parseAction) and
other.resultsName is None and
not other.debug ):
self.exprs = self.exprs[:-1] + other.exprs[:]
self.strRepr = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
self.errmsg = "Expected " + _ustr(self)
return self
def setResultsName( self, name, listAllMatches=False ):
ret = super(ParseExpression,self).setResultsName(name,listAllMatches)
return ret
def validate( self, validateTrace=[] ):
tmp = validateTrace[:]+[self]
for e in self.exprs:
e.validate(tmp)
self.checkRecursion( [] )
def copy(self):
ret = super(ParseExpression,self).copy()
ret.exprs = [e.copy() for e in self.exprs]
return ret
class And(ParseExpression):
"""
Requires all given C{ParseExpression}s to be found in the given order.
Expressions may be separated by whitespace.
May be constructed using the C{'+'} operator.
May also be constructed using the C{'-'} operator, which will suppress backtracking.
Example::
integer = Word(nums)
name_expr = OneOrMore(Word(alphas))
expr = And([integer("id"),name_expr("name"),integer("age")])
# more easily written as:
expr = integer("id") + name_expr("name") + integer("age")
"""
class _ErrorStop(Empty):
def __init__(self, *args, **kwargs):
super(And._ErrorStop,self).__init__(*args, **kwargs)
self.name = '-'
self.leaveWhitespace()
def __init__( self, exprs, savelist = True ):
super(And,self).__init__(exprs, savelist)
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
self.setWhitespaceChars( self.exprs[0].whiteChars )
self.skipWhitespace = self.exprs[0].skipWhitespace
self.callPreparse = True
def parseImpl( self, instring, loc, doActions=True ):
# pass False as last arg to _parse for first element, since we already
# pre-parsed the string as part of our And pre-parsing
loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False )
errorStop = False
for e in self.exprs[1:]:
if isinstance(e, And._ErrorStop):
errorStop = True
continue
if errorStop:
try:
loc, exprtokens = e._parse( instring, loc, doActions )
except ParseSyntaxException:
raise
except ParseBaseException as pe:
pe.__traceback__ = None
raise ParseSyntaxException._from_exception(pe)
except IndexError:
raise ParseSyntaxException(instring, len(instring), self.errmsg, self)
else:
loc, exprtokens = e._parse( instring, loc, doActions )
if exprtokens or exprtokens.haskeys():
resultlist += exprtokens
return loc, resultlist
def __iadd__(self, other ):
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
return self.append( other ) #And( [ self, other ] )
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
if not e.mayReturnEmpty:
break
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
class Or(ParseExpression):
"""
Requires that at least one C{ParseExpression} is found.
If two expressions match, the expression that matches the longest string will be used.
May be constructed using the C{'^'} operator.
Example::
# construct Or using '^' operator
number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums))
print(number.searchString("123 3.1416 789"))
prints::
[['123'], ['3.1416'], ['789']]
"""
def __init__( self, exprs, savelist = False ):
super(Or,self).__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
else:
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
maxExcLoc = -1
maxException = None
matches = []
for e in self.exprs:
try:
loc2 = e.tryParse( instring, loc )
except ParseException as err:
err.__traceback__ = None
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(instring,len(instring),e.errmsg,self)
maxExcLoc = len(instring)
else:
# save match among all matches, to retry longest to shortest
matches.append((loc2, e))
if matches:
matches.sort(key=lambda x: -x[0])
for _,e in matches:
try:
return e._parse( instring, loc, doActions )
except ParseException as err:
err.__traceback__ = None
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
if maxException is not None:
maxException.msg = self.errmsg
raise maxException
else:
raise ParseException(instring, loc, "no defined alternatives to match", self)
def __ixor__(self, other ):
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
return self.append( other ) #Or( [ self, other ] )
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
class MatchFirst(ParseExpression):
"""
Requires that at least one C{ParseExpression} is found.
If two expressions match, the first one listed is the one that will match.
May be constructed using the C{'|'} operator.
Example::
# construct MatchFirst using '|' operator
# watch the order of expressions to match
number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
print(number.searchString("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']]
# put more selective expression first
number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
print(number.searchString("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']]
"""
def __init__( self, exprs, savelist = False ):
super(MatchFirst,self).__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
else:
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
maxExcLoc = -1
maxException = None
for e in self.exprs:
try:
ret = e._parse( instring, loc, doActions )
return ret
except ParseException as err:
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(instring,len(instring),e.errmsg,self)
maxExcLoc = len(instring)
# only got here if no expression matched, raise exception for match that made it the furthest
else:
if maxException is not None:
maxException.msg = self.errmsg
raise maxException
else:
raise ParseException(instring, loc, "no defined alternatives to match", self)
def __ior__(self, other ):
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
return self.append( other ) #MatchFirst( [ self, other ] )
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
class Each(ParseExpression):
"""
Requires all given C{ParseExpression}s to be found, but in any order.
Expressions may be separated by whitespace.
May be constructed using the C{'&'} operator.
Example::
color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
integer = Word(nums)
shape_attr = "shape:" + shape_type("shape")
posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
color_attr = "color:" + color("color")
size_attr = "size:" + integer("size")
# use Each (using operator '&') to accept attributes in any order
# (shape and posn are required, color and size are optional)
shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr)
shape_spec.runTests('''
shape: SQUARE color: BLACK posn: 100, 120
shape: CIRCLE size: 50 color: BLUE posn: 50,80
color:GREEN size:20 shape:TRIANGLE posn:20,40
'''
)
prints::
shape: SQUARE color: BLACK posn: 100, 120
['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
- color: BLACK
- posn: ['100', ',', '120']
- x: 100
- y: 120
- shape: SQUARE
shape: CIRCLE size: 50 color: BLUE posn: 50,80
['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
- color: BLUE
- posn: ['50', ',', '80']
- x: 50
- y: 80
- shape: CIRCLE
- size: 50
color: GREEN size: 20 shape: TRIANGLE posn: 20,40
['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
- color: GREEN
- posn: ['20', ',', '40']
- x: 20
- y: 40
- shape: TRIANGLE
- size: 20
"""
def __init__( self, exprs, savelist = True ):
super(Each,self).__init__(exprs, savelist)
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
self.skipWhitespace = True
self.initExprGroups = True
def parseImpl( self, instring, loc, doActions=True ):
if self.initExprGroups:
self.opt1map = dict((id(e.expr),e) for e in self.exprs if isinstance(e,Optional))
opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ]
opt2 = [ e for e in self.exprs if e.mayReturnEmpty and not isinstance(e,Optional)]
self.optionals = opt1 + opt2
self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]
self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]
self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
self.required += self.multirequired
self.initExprGroups = False
tmpLoc = loc
tmpReqd = self.required[:]
tmpOpt = self.optionals[:]
matchOrder = []
keepMatching = True
while keepMatching:
tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
failed = []
for e in tmpExprs:
try:
tmpLoc = e.tryParse( instring, tmpLoc )
except ParseException:
failed.append(e)
else:
matchOrder.append(self.opt1map.get(id(e),e))
if e in tmpReqd:
tmpReqd.remove(e)
elif e in tmpOpt:
tmpOpt.remove(e)
if len(failed) == len(tmpExprs):
keepMatching = False
if tmpReqd:
missing = ", ".join(_ustr(e) for e in tmpReqd)
raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )
# add any unmatched Optionals, in case they have default values defined
matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt]
resultlist = []
for e in matchOrder:
loc,results = e._parse(instring,loc,doActions)
resultlist.append(results)
finalResults = sum(resultlist, ParseResults([]))
return loc, finalResults
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
class ParseElementEnhance(ParserElement):
"""
Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens.
"""
def __init__( self, expr, savelist=False ):
super(ParseElementEnhance,self).__init__(savelist)
if isinstance( expr, basestring ):
if issubclass(ParserElement._literalStringClass, Token):
expr = ParserElement._literalStringClass(expr)
else:
expr = ParserElement._literalStringClass(Literal(expr))
self.expr = expr
self.strRepr = None
if expr is not None:
self.mayIndexError = expr.mayIndexError
self.mayReturnEmpty = expr.mayReturnEmpty
self.setWhitespaceChars( expr.whiteChars )
self.skipWhitespace = expr.skipWhitespace
self.saveAsList = expr.saveAsList
self.callPreparse = expr.callPreparse
self.ignoreExprs.extend(expr.ignoreExprs)
def parseImpl( self, instring, loc, doActions=True ):
if self.expr is not None:
return self.expr._parse( instring, loc, doActions, callPreParse=False )
else:
raise ParseException("",loc,self.errmsg,self)
def leaveWhitespace( self ):
self.skipWhitespace = False
self.expr = self.expr.copy()
if self.expr is not None:
self.expr.leaveWhitespace()
return self
def ignore( self, other ):
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
super( ParseElementEnhance, self).ignore( other )
if self.expr is not None:
self.expr.ignore( self.ignoreExprs[-1] )
else:
super( ParseElementEnhance, self).ignore( other )
if self.expr is not None:
self.expr.ignore( self.ignoreExprs[-1] )
return self
def streamline( self ):
super(ParseElementEnhance,self).streamline()
if self.expr is not None:
self.expr.streamline()
return self
def checkRecursion( self, parseElementList ):
if self in parseElementList:
raise RecursiveGrammarException( parseElementList+[self] )
subRecCheckList = parseElementList[:] + [ self ]
if self.expr is not None:
self.expr.checkRecursion( subRecCheckList )
def validate( self, validateTrace=[] ):
tmp = validateTrace[:]+[self]
if self.expr is not None:
self.expr.validate(tmp)
self.checkRecursion( [] )
def __str__( self ):
try:
return super(ParseElementEnhance,self).__str__()
except Exception:
pass
if self.strRepr is None and self.expr is not None:
self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
return self.strRepr
class FollowedBy(ParseElementEnhance):
"""
Lookahead matching of the given parse expression. C{FollowedBy}
does I{not} advance the parsing position within the input string, it only
verifies that the specified parse expression matches at the current
position. C{FollowedBy} always returns a null token list.
Example::
# use FollowedBy to match a label only if it is followed by a ':'
data_word = Word(alphas)
label = data_word + FollowedBy(':')
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint()
prints::
[['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
"""
def __init__( self, expr ):
super(FollowedBy,self).__init__(expr)
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
self.expr.tryParse( instring, loc )
return loc, []
class NotAny(ParseElementEnhance):
"""
Lookahead to disallow matching with the given parse expression. C{NotAny}
does I{not} advance the parsing position within the input string, it only
verifies that the specified parse expression does I{not} match at the current
position. Also, C{NotAny} does I{not} skip over leading whitespace. C{NotAny}
always returns a null token list. May be constructed using the '~' operator.
    Example::
        AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split())
        # take care not to mistake keywords for identifiers
        ident = ~(AND | OR | NOT) + Word(alphas)
        boolean_term = Optional(NOT) + ident
        boolean_expr = boolean_term + ZeroOrMore((AND | OR) + boolean_term)
    """
def __init__( self, expr ):
super(NotAny,self).__init__(expr)
#~ self.leaveWhitespace()
self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
self.mayReturnEmpty = True
self.errmsg = "Found unwanted token, "+_ustr(self.expr)
def parseImpl( self, instring, loc, doActions=True ):
if self.expr.canParseNext(instring, loc):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "~{" + _ustr(self.expr) + "}"
return self.strRepr
class _MultipleMatch(ParseElementEnhance):
def __init__( self, expr, stopOn=None):
super(_MultipleMatch, self).__init__(expr)
self.saveAsList = True
ender = stopOn
if isinstance(ender, basestring):
ender = ParserElement._literalStringClass(ender)
self.not_ender = ~ender if ender is not None else None
def parseImpl( self, instring, loc, doActions=True ):
self_expr_parse = self.expr._parse
self_skip_ignorables = self._skipIgnorables
check_ender = self.not_ender is not None
if check_ender:
try_not_ender = self.not_ender.tryParse
# must be at least one (but first see if we are the stopOn sentinel;
# if so, fail)
if check_ender:
try_not_ender(instring, loc)
loc, tokens = self_expr_parse( instring, loc, doActions, callPreParse=False )
try:
            hasIgnoreExprs = bool(self.ignoreExprs)
while 1:
if check_ender:
try_not_ender(instring, loc)
if hasIgnoreExprs:
preloc = self_skip_ignorables( instring, loc )
else:
preloc = loc
loc, tmptokens = self_expr_parse( instring, preloc, doActions )
if tmptokens or tmptokens.haskeys():
tokens += tmptokens
except (ParseException,IndexError):
pass
return loc, tokens
class OneOrMore(_MultipleMatch):
"""
Repetition of one or more of the given expression.
Parameters:
- expr - expression that must match one or more times
- stopOn - (default=C{None}) - expression for a terminating sentinel
(only required if the sentinel would ordinarily match the repetition
expression)
Example::
data_word = Word(alphas)
label = data_word + FollowedBy(':')
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
text = "shape: SQUARE posn: upper left color: BLACK"
OneOrMore(attr_expr).parseString(text).pprint() # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]
# use stopOn attribute for OneOrMore to avoid reading label string as part of the data
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]
# could also be written as
(attr_expr * (1,)).parseString(text).pprint()
"""
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + _ustr(self.expr) + "}..."
return self.strRepr
class ZeroOrMore(_MultipleMatch):
"""
Optional repetition of zero or more of the given expression.
Parameters:
- expr - expression that must match zero or more times
- stopOn - (default=C{None}) - expression for a terminating sentinel
(only required if the sentinel would ordinarily match the repetition
expression)
Example: similar to L{OneOrMore}
"""
def __init__( self, expr, stopOn=None):
super(ZeroOrMore,self).__init__(expr, stopOn=stopOn)
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
try:
return super(ZeroOrMore, self).parseImpl(instring, loc, doActions)
except (ParseException,IndexError):
return loc, []
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "[" + _ustr(self.expr) + "]..."
return self.strRepr
class _NullToken(object):
def __bool__(self):
return False
__nonzero__ = __bool__
def __str__(self):
return ""
_optionalNotMatched = _NullToken()
class Optional(ParseElementEnhance):
"""
Optional matching of the given expression.
Parameters:
- expr - expression that must match zero or more times
- default (optional) - value to be returned if the optional expression is not found.
Example::
# US postal code can be a 5-digit zip, plus optional 4-digit qualifier
zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4)))
zip.runTests('''
# traditional ZIP code
12345
# ZIP+4 form
12101-0001
# invalid ZIP
98765-
''')
prints::
# traditional ZIP code
12345
['12345']
# ZIP+4 form
12101-0001
['12101-0001']
# invalid ZIP
98765-
^
FAIL: Expected end of text (at char 5), (line:1, col:6)
"""
def __init__( self, expr, default=_optionalNotMatched ):
super(Optional,self).__init__( expr, savelist=False )
self.saveAsList = self.expr.saveAsList
self.defaultValue = default
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
try:
loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
except (ParseException,IndexError):
if self.defaultValue is not _optionalNotMatched:
if self.expr.resultsName:
tokens = ParseResults([ self.defaultValue ])
tokens[self.expr.resultsName] = self.defaultValue
else:
tokens = [ self.defaultValue ]
else:
tokens = []
return loc, tokens
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "[" + _ustr(self.expr) + "]"
return self.strRepr
class SkipTo(ParseElementEnhance):
"""
Token for skipping over all undefined text until the matched expression is found.
Parameters:
- expr - target expression marking the end of the data to be skipped
- include - (default=C{False}) if True, the target expression is also parsed
(the skipped text and target expression are returned as a 2-element list).
- ignore - (default=C{None}) used to define grammars (typically quoted strings and
comments) that might contain false matches to the target expression
- failOn - (default=C{None}) define expressions that are not allowed to be
included in the skipped test; if found before the target expression is found,
the SkipTo is not a match
Example::
report = '''
Outstanding Issues Report - 1 Jan 2000
# | Severity | Description | Days Open
-----+----------+-------------------------------------------+-----------
101 | Critical | Intermittent system crash | 6
94 | Cosmetic | Spelling error on Login ('log|n') | 14
79 | Minor | System slow when running too many reports | 47
'''
integer = Word(nums)
SEP = Suppress('|')
# use SkipTo to simply match everything up until the next SEP
# - ignore quoted strings, so that a '|' character inside a quoted string does not match
# - parse action will call token.strip() for each matched token, i.e., the description body
string_data = SkipTo(SEP, ignore=quotedString)
string_data.setParseAction(tokenMap(str.strip))
ticket_expr = (integer("issue_num") + SEP
+ string_data("sev") + SEP
+ string_data("desc") + SEP
+ integer("days_open"))
for tkt in ticket_expr.searchString(report):
print tkt.dump()
prints::
['101', 'Critical', 'Intermittent system crash', '6']
- days_open: 6
- desc: Intermittent system crash
- issue_num: 101
- sev: Critical
['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
- days_open: 14
- desc: Spelling error on Login ('log|n')
- issue_num: 94
- sev: Cosmetic
['79', 'Minor', 'System slow when running too many reports', '47']
- days_open: 47
- desc: System slow when running too many reports
- issue_num: 79
- sev: Minor
"""
def __init__( self, other, include=False, ignore=None, failOn=None ):
super( SkipTo, self ).__init__( other )
self.ignoreExpr = ignore
self.mayReturnEmpty = True
self.mayIndexError = False
self.includeMatch = include
self.asList = False
if isinstance(failOn, basestring):
self.failOn = ParserElement._literalStringClass(failOn)
else:
self.failOn = failOn
self.errmsg = "No match found for "+_ustr(self.expr)
def parseImpl( self, instring, loc, doActions=True ):
startloc = loc
instrlen = len(instring)
expr = self.expr
expr_parse = self.expr._parse
self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None
self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None
tmploc = loc
while tmploc <= instrlen:
if self_failOn_canParseNext is not None:
# break if failOn expression matches
if self_failOn_canParseNext(instring, tmploc):
break
if self_ignoreExpr_tryParse is not None:
# advance past ignore expressions
while 1:
try:
tmploc = self_ignoreExpr_tryParse(instring, tmploc)
except ParseBaseException:
break
try:
expr_parse(instring, tmploc, doActions=False, callPreParse=False)
except (ParseException, IndexError):
# no match, advance loc in string
tmploc += 1
else:
# matched skipto expr, done
break
else:
# ran off the end of the input string without matching skipto expr, fail
raise ParseException(instring, loc, self.errmsg, self)
# build up return values
loc = tmploc
skiptext = instring[startloc:loc]
skipresult = ParseResults(skiptext)
if self.includeMatch:
loc, mat = expr_parse(instring,loc,doActions,callPreParse=False)
skipresult += mat
return loc, skipresult
class Forward(ParseElementEnhance):
"""
Forward declaration of an expression to be defined later -
used for recursive grammars, such as algebraic infix notation.
When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.
Note: take care when assigning to C{Forward} not to overlook precedence of operators.
Specifically, '|' has a lower precedence than '<<', so that::
fwdExpr << a | b | c
will actually be evaluated as::
(fwdExpr << a) | b | c
thereby leaving b and c out as parseable alternatives. It is recommended that you
explicitly group the values inserted into the C{Forward}::
fwdExpr << (a | b | c)
Converting to use the '<<=' operator instead will avoid this problem.
See L{ParseResults.pprint} for an example of a recursive parser created using
C{Forward}.
"""
def __init__( self, other=None ):
super(Forward,self).__init__( other, savelist=False )
def __lshift__( self, other ):
if isinstance( other, basestring ):
other = ParserElement._literalStringClass(other)
self.expr = other
self.strRepr = None
self.mayIndexError = self.expr.mayIndexError
self.mayReturnEmpty = self.expr.mayReturnEmpty
self.setWhitespaceChars( self.expr.whiteChars )
self.skipWhitespace = self.expr.skipWhitespace
self.saveAsList = self.expr.saveAsList
self.ignoreExprs.extend(self.expr.ignoreExprs)
return self
def __ilshift__(self, other):
return self << other
def leaveWhitespace( self ):
self.skipWhitespace = False
return self
def streamline( self ):
if not self.streamlined:
self.streamlined = True
if self.expr is not None:
self.expr.streamline()
return self
def validate( self, validateTrace=[] ):
if self not in validateTrace:
tmp = validateTrace[:]+[self]
if self.expr is not None:
self.expr.validate(tmp)
self.checkRecursion([])
def __str__( self ):
if hasattr(self,"name"):
return self.name
return self.__class__.__name__ + ": ..."
# stubbed out for now - creates awful memory and perf issues
self._revertClass = self.__class__
self.__class__ = _ForwardNoRecurse
try:
if self.expr is not None:
retString = _ustr(self.expr)
else:
retString = "None"
finally:
self.__class__ = self._revertClass
return self.__class__.__name__ + ": " + retString
def copy(self):
if self.expr is not None:
return super(Forward,self).copy()
else:
ret = Forward()
ret <<= self
return ret
class _ForwardNoRecurse(Forward):
def __str__( self ):
return "..."
class TokenConverter(ParseElementEnhance):
"""
Abstract subclass of C{ParseExpression}, for converting parsed results.
"""
def __init__( self, expr, savelist=False ):
super(TokenConverter,self).__init__( expr )#, savelist )
self.saveAsList = False
class Combine(TokenConverter):
"""
Converter to concatenate all matching tokens to a single string.
By default, the matching patterns must also be contiguous in the input string;
this can be disabled by specifying C{'adjacent=False'} in the constructor.
Example::
real = Word(nums) + '.' + Word(nums)
print(real.parseString('3.1416')) # -> ['3', '.', '1416']
# will also erroneously match the following
print(real.parseString('3. 1416')) # -> ['3', '.', '1416']
real = Combine(Word(nums) + '.' + Word(nums))
print(real.parseString('3.1416')) # -> ['3.1416']
# no match when there are internal spaces
print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...)
"""
def __init__( self, expr, joinString="", adjacent=True ):
super(Combine,self).__init__( expr )
# suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
if adjacent:
self.leaveWhitespace()
self.adjacent = adjacent
self.skipWhitespace = True
self.joinString = joinString
self.callPreparse = True
def ignore( self, other ):
if self.adjacent:
ParserElement.ignore(self, other)
else:
super( Combine, self).ignore( other )
return self
def postParse( self, instring, loc, tokenlist ):
retToks = tokenlist.copy()
del retToks[:]
retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
if self.resultsName and retToks.haskeys():
return [ retToks ]
else:
return retToks
class Group(TokenConverter):
"""
Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions.
Example::
ident = Word(alphas)
num = Word(nums)
term = ident | num
func = ident + Optional(delimitedList(term))
print(func.parseString("fn a,b,100")) # -> ['fn', 'a', 'b', '100']
func = ident + Group(Optional(delimitedList(term)))
print(func.parseString("fn a,b,100")) # -> ['fn', ['a', 'b', '100']]
"""
def __init__( self, expr ):
super(Group,self).__init__( expr )
self.saveAsList = True
def postParse( self, instring, loc, tokenlist ):
return [ tokenlist ]
class Dict(TokenConverter):
"""
Converter to return a repetitive expression as a list, but also as a dictionary.
Each element can also be referenced using the first token in the expression as its key.
    Useful for tabular report scraping when the first column can be used as an item key.
Example::
data_word = Word(alphas)
label = data_word + FollowedBy(':')
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
# print attributes as plain groups
print(OneOrMore(attr_expr).parseString(text).dump())
# instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
result = Dict(OneOrMore(Group(attr_expr))).parseString(text)
print(result.dump())
# access named fields as dict entries, or output as dict
print(result['shape'])
print(result.asDict())
prints::
['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
[['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
- color: light blue
- posn: upper left
- shape: SQUARE
- texture: burlap
SQUARE
{'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
See more examples at L{ParseResults} of accessing fields by results name.
"""
def __init__( self, expr ):
super(Dict,self).__init__( expr )
self.saveAsList = True
def postParse( self, instring, loc, tokenlist ):
for i,tok in enumerate(tokenlist):
if len(tok) == 0:
continue
ikey = tok[0]
if isinstance(ikey,int):
ikey = _ustr(tok[0]).strip()
if len(tok)==1:
tokenlist[ikey] = _ParseResultsWithOffset("",i)
elif len(tok)==2 and not isinstance(tok[1],ParseResults):
tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
else:
dictvalue = tok.copy() #ParseResults(i)
del dictvalue[0]
if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.haskeys()):
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
else:
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)
if self.resultsName:
return [ tokenlist ]
else:
return tokenlist
class Suppress(TokenConverter):
"""
Converter for ignoring the results of a parsed expression.
Example::
source = "a, b, c,d"
wd = Word(alphas)
wd_list1 = wd + ZeroOrMore(',' + wd)
print(wd_list1.parseString(source))
# often, delimiters that are useful during parsing are just in the
# way afterward - use Suppress to keep them out of the parsed output
wd_list2 = wd + ZeroOrMore(Suppress(',') + wd)
print(wd_list2.parseString(source))
prints::
['a', ',', 'b', ',', 'c', ',', 'd']
['a', 'b', 'c', 'd']
(See also L{delimitedList}.)
"""
def postParse( self, instring, loc, tokenlist ):
return []
def suppress( self ):
return self
class OnlyOnce(object):
"""
Wrapper for parse actions, to ensure they are only called once.
"""
def __init__(self, methodCall):
self.callable = _trim_arity(methodCall)
self.called = False
def __call__(self,s,l,t):
if not self.called:
results = self.callable(s,l,t)
self.called = True
return results
raise ParseException(s,l,"")
def reset(self):
self.called = False
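# Illustrative sketch (added comment, not part of the original source): OnlyOnce
# runs its wrapped action on the first match only; later matches raise ParseException
# inside the wrapper, and reset() re-arms it.
#
#   def report(s, l, t):
#       print("saw %r at %d" % (t[0], l))
#   wd = Word(alphas).setParseAction(OnlyOnce(report))
#   OneOrMore(wd).parseString("ab bc cd")  # action fires for 'ab' only; subsequent
#                                          # matches fail, so the result is ['ab']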
def traceParseAction(f):
"""
Decorator for debugging parse actions.
When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})".}
When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised.
Example::
wd = Word(alphas)
@traceParseAction
def remove_duplicate_chars(tokens):
            return ''.join(sorted(set(''.join(tokens))))
wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)
print(wds.parseString("slkdjs sld sldd sdlf sdljf"))
prints::
>>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
<<leaving remove_duplicate_chars (ret: 'dfjkls')
['dfjkls']
"""
f = _trim_arity(f)
def z(*paArgs):
thisFunc = f.__name__
s,l,t = paArgs[-3:]
if len(paArgs)>3:
thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
sys.stderr.write( ">>entering %s(line: '%s', %d, %r)\n" % (thisFunc,line(l,s),l,t) )
try:
ret = f(*paArgs)
except Exception as exc:
sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
raise
sys.stderr.write( "<<leaving %s (ret: %r)\n" % (thisFunc,ret) )
return ret
try:
z.__name__ = f.__name__
except AttributeError:
pass
return z
#
# global helpers
#
def delimitedList( expr, delim=",", combine=False ):
"""
Helper to define a delimited list of expressions - the delimiter defaults to ','.
    By default, the list elements and delimiters can have intervening whitespace and
    comments, but this can be overridden by passing C{combine=True} in the constructor.
If C{combine} is set to C{True}, the matching tokens are returned as a single token
string, with the delimiters included; otherwise, the matching tokens are returned
as a list of tokens, with the delimiters suppressed.
Example::
delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc']
delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
"""
dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..."
if combine:
return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName)
else:
return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName)
def countedArray( expr, intExpr=None ):
"""
Helper to define a counted list of expressions.
This helper defines a pattern of the form::
integer expr expr expr...
where the leading integer tells how many expr expressions follow.
    Matching returns the array of expr tokens as a list - the leading count token is suppressed.
If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value.
Example::
countedArray(Word(alphas)).parseString('2 ab cd ef') # -> ['ab', 'cd']
# in this parser, the leading integer value is given in binary,
# '10' indicating that 2 values are in the array
binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2))
countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef') # -> ['ab', 'cd']
"""
arrayExpr = Forward()
def countFieldParseAction(s,l,t):
n = t[0]
arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
return []
if intExpr is None:
intExpr = Word(nums).setParseAction(lambda t:int(t[0]))
else:
intExpr = intExpr.copy()
intExpr.setName("arrayLen")
intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
return ( intExpr + arrayExpr ).setName('(len) ' + _ustr(expr) + '...')
def _flatten(L):
ret = []
for i in L:
if isinstance(i,list):
ret.extend(_flatten(i))
else:
ret.append(i)
return ret
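# Illustrative note (added comment, not part of the original source):
#   _flatten([1, [2, [3, 4]], 5])  ->  [1, 2, 3, 4, 5]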
def matchPreviousLiteral(expr):
"""
Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks
for a 'repeat' of a previous expression. For example::
first = Word(nums)
second = matchPreviousLiteral(first)
matchExpr = first + ":" + second
will match C{"1:1"}, but not C{"1:2"}. Because this matches a
previous literal, will also match the leading C{"1:1"} in C{"1:10"}.
If this is not desired, use C{matchPreviousExpr}.
Do I{not} use with packrat parsing enabled.
"""
rep = Forward()
def copyTokenToRepeater(s,l,t):
if t:
if len(t) == 1:
rep << t[0]
else:
# flatten t tokens
tflat = _flatten(t.asList())
rep << And(Literal(tt) for tt in tflat)
else:
rep << Empty()
expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
rep.setName('(prev) ' + _ustr(expr))
return rep
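# Illustrative sketch (added comment, not part of the original source): the returned
# Forward re-matches whatever text the guarded expression produced on this parse.
#
#   first = Word(nums)
#   second = matchPreviousLiteral(first)
#   (first + ":" + second).parseString("12:12")  # -> ['12', ':', '12']
#   (first + ":" + second).parseString("12:34")  # raises ParseException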
def matchPreviousExpr(expr):
"""
Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks
for a 'repeat' of a previous expression. For example::
first = Word(nums)
second = matchPreviousExpr(first)
matchExpr = first + ":" + second
will match C{"1:1"}, but not C{"1:2"}. Because this matches by
expressions, will I{not} match the leading C{"1:1"} in C{"1:10"};
the expressions are evaluated first, and then compared, so
C{"1"} is compared with C{"10"}.
Do I{not} use with packrat parsing enabled.
"""
rep = Forward()
e2 = expr.copy()
rep <<= e2
def copyTokenToRepeater(s,l,t):
matchTokens = _flatten(t.asList())
def mustMatchTheseTokens(s,l,t):
theseTokens = _flatten(t.asList())
if theseTokens != matchTokens:
raise ParseException("",0,"")
rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
rep.setName('(prev) ' + _ustr(expr))
return rep
def _escapeRegexRangeChars(s):
#~ escape these chars: ^-]
for c in r"\^-]":
s = s.replace(c,_bslash+c)
s = s.replace("\n",r"\n")
s = s.replace("\t",r"\t")
return _ustr(s)
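# Illustrative note (added comment, not part of the original source):
#   _escapeRegexRangeChars("a-z]")  ->  "a\-z\]"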
def oneOf( strs, caseless=False, useRegex=True ):
"""
    Helper to quickly define a set of alternative Literals, making sure to do
    longest-first testing when there is a conflict, regardless of the input order,
    but returning a C{L{MatchFirst}} for best performance.
Parameters:
- strs - a string of space-delimited literals, or a collection of string literals
- caseless - (default=C{False}) - treat all literals as caseless
- useRegex - (default=C{True}) - as an optimization, will generate a Regex
object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or
if creating a C{Regex} raises an exception)
Example::
comp_oper = oneOf("< = > <= >= !=")
var = Word(alphas)
number = Word(nums)
term = var | number
comparison_expr = term + comp_oper + term
print(comparison_expr.searchString("B = 12 AA=23 B<=AA AA>12"))
prints::
[['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
"""
if caseless:
isequal = ( lambda a,b: a.upper() == b.upper() )
masks = ( lambda a,b: b.upper().startswith(a.upper()) )
parseElementClass = CaselessLiteral
else:
isequal = ( lambda a,b: a == b )
masks = ( lambda a,b: b.startswith(a) )
parseElementClass = Literal
symbols = []
if isinstance(strs,basestring):
symbols = strs.split()
elif isinstance(strs, collections.Iterable):
symbols = list(strs)
else:
warnings.warn("Invalid argument to oneOf, expected string or iterable",
SyntaxWarning, stacklevel=2)
if not symbols:
return NoMatch()
i = 0
while i < len(symbols)-1:
cur = symbols[i]
for j,other in enumerate(symbols[i+1:]):
if ( isequal(other, cur) ):
del symbols[i+j+1]
break
elif ( masks(cur, other) ):
del symbols[i+j+1]
symbols.insert(i,other)
cur = other
break
else:
i += 1
if not caseless and useRegex:
#~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
try:
if len(symbols)==len("".join(symbols)):
return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ).setName(' | '.join(symbols))
else:
return Regex( "|".join(re.escape(sym) for sym in symbols) ).setName(' | '.join(symbols))
except Exception:
warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
SyntaxWarning, stacklevel=2)
# last resort, just use MatchFirst
return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols))
def dictOf( key, value ):
"""
Helper to easily and clearly define a dictionary by specifying the respective patterns
for the key and value. Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens
in the proper order. The key pattern can include delimiting markers or punctuation,
as long as they are suppressed, thereby leaving the significant key text. The value
pattern can include named results, so that the C{Dict} results can include named token
fields.
Example::
text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
print(OneOrMore(attr_expr).parseString(text).dump())
attr_label = label
attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)
# similar to Dict, but simpler call format
result = dictOf(attr_label, attr_value).parseString(text)
print(result.dump())
print(result['shape'])
print(result.shape) # object attribute access works too
print(result.asDict())
prints::
[['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
- color: light blue
- posn: upper left
- shape: SQUARE
- texture: burlap
SQUARE
SQUARE
{'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
"""
return Dict( ZeroOrMore( Group ( key + value ) ) )
def originalTextFor(expr, asString=True):
"""
Helper to return the original, untokenized text for a given expression. Useful to
restore the parsed fields of an HTML start tag into the raw tag text itself, or to
revert separate tokens with intervening whitespace back to the original matching
    input text. By default, returns a string containing the original parsed text.
If the optional C{asString} argument is passed as C{False}, then the return value is a
C{L{ParseResults}} containing any results names that were originally matched, and a
single token containing the original matched text from the input string. So if
the expression passed to C{L{originalTextFor}} contains expressions with defined
results names, you must set C{asString} to C{False} if you want to preserve those
results name values.
Example::
src = "this is test <b> bold <i>text</i> </b> normal text "
for tag in ("b","i"):
opener,closer = makeHTMLTags(tag)
patt = originalTextFor(opener + SkipTo(closer) + closer)
print(patt.searchString(src)[0])
prints::
['<b> bold <i>text</i> </b>']
['<i>text</i>']
"""
locMarker = Empty().setParseAction(lambda s,loc,t: loc)
endlocMarker = locMarker.copy()
endlocMarker.callPreparse = False
matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
if asString:
extractText = lambda s,l,t: s[t._original_start:t._original_end]
else:
def extractText(s,l,t):
t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]]
matchExpr.setParseAction(extractText)
matchExpr.ignoreExprs = expr.ignoreExprs
return matchExpr
def ungroup(expr):
"""
Helper to undo pyparsing's default grouping of And expressions, even
if all but one are non-empty.
"""
return TokenConverter(expr).setParseAction(lambda t:t[0])
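# Illustrative sketch (added comment, not part of the original source): ungroup peels
# one level of grouping off the result.
#
#   num = Word(nums)
#   Group(num).parseString("42")           # -> [['42']]
#   ungroup(Group(num)).parseString("42")  # -> ['42']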
def locatedExpr(expr):
"""
Helper to decorate a returned token with its starting and ending locations in the input string.
This helper adds the following results names:
- locn_start = location where matched expression begins
- locn_end = location where matched expression ends
- value = the actual parsed results
    Be careful if the input text contains C{<TAB>} characters; you may want to call
    C{L{ParserElement.parseWithTabs}}.
Example::
wd = Word(alphas)
for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
print(match)
prints::
[[0, 'ljsdf', 5]]
[[8, 'lksdjjf', 15]]
[[18, 'lkkjj', 23]]
"""
locator = Empty().setParseAction(lambda s,l,t: l)
return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end"))
# convenience constants for positional expressions
empty = Empty().setName("empty")
lineStart = LineStart().setName("lineStart")
lineEnd = LineEnd().setName("lineEnd")
stringStart = StringStart().setName("stringStart")
stringEnd = StringEnd().setName("stringEnd")
_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16)))
_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8)))
_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | Word(printables, excludeChars=r'\]', exact=1) | Regex(r"\w", re.UNICODE)
_charRange = Group(_singleChar + Suppress("-") + _singleChar)
_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"
def srange(s):
r"""
Helper to easily define string ranges for use in Word construction. Borrows
syntax from regexp '[]' string range definitions::
srange("[0-9]") -> "0123456789"
srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
The input string must be enclosed in []'s, and the returned string is the expanded
character set joined into a single string.
The values enclosed in the []'s may be:
- a single character
- an escaped character with a leading backslash (such as C{\-} or C{\]})
- an escaped hex character with a leading C{'\x'} (C{\x21}, which is a C{'!'} character)
(C{\0x##} is also supported for backwards compatibility)
- an escaped octal character with a leading C{'\0'} (C{\041}, which is a C{'!'} character)
- a range of any of the above, separated by a dash (C{'a-z'}, etc.)
- any combination of the above (C{'aeiouy'}, C{'a-zA-Z0-9_$'}, etc.)
"""
_expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1))
try:
return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body)
except Exception:
return ""
def matchOnlyAtCol(n):
"""
Helper method for defining parse actions that require matching at a specific
column in the input text.
"""
def verifyCol(strg,locn,toks):
if col(locn,strg) != n:
raise ParseException(strg,locn,"matched token not at column %d" % n)
return verifyCol
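# Illustrative sketch (added comment, not part of the original source): require a
# token to begin at a given 1-based column.
#
#   wd = Word(alphas).setParseAction(matchOnlyAtCol(5))
#   wd.parseString("    data")  # -> ['data']  ('data' starts at column 5)
#   wd.parseString("  data")    # raises ParseException (starts at column 3)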
def replaceWith(replStr):
"""
Helper method for common parse actions that simply return a literal value. Especially
useful when used with C{L{transformString<ParserElement.transformString>}()}.
Example::
num = Word(nums).setParseAction(lambda toks: int(toks[0]))
na = oneOf("N/A NA").setParseAction(replaceWith(math.nan))
term = na | num
OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234]
"""
return lambda s,l,t: [replStr]
def removeQuotes(s,l,t):
"""
Helper parse action for removing quotation marks from parsed quoted strings.
Example::
# by default, quotation marks are included in parsed results
quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
# use removeQuotes to strip quotation marks from parsed results
quotedString.setParseAction(removeQuotes)
quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
"""
return t[0][1:-1]
def tokenMap(func, *args):
"""
    Helper to define a parse action by mapping a function to all elements of a ParseResults list. If any additional
    args are passed, they are forwarded to the given function as additional arguments after
    the token, as in C{hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))}, which will convert the
    parsed data to an integer using base 16.
    Example (compare the last example to the one in L{ParserElement.transformString})::
hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16))
hex_ints.runTests('''
00 11 22 aa FF 0a 0d 1a
''')
upperword = Word(alphas).setParseAction(tokenMap(str.upper))
OneOrMore(upperword).runTests('''
my kingdom for a horse
''')
wd = Word(alphas).setParseAction(tokenMap(str.title))
OneOrMore(wd).setParseAction(' '.join).runTests('''
now is the winter of our discontent made glorious summer by this sun of york
''')
prints::
00 11 22 aa FF 0a 0d 1a
[0, 17, 34, 170, 255, 10, 13, 26]
my kingdom for a horse
['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']
now is the winter of our discontent made glorious summer by this sun of york
['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
"""
def pa(s,l,t):
return [func(tokn, *args) for tokn in t]
try:
func_name = getattr(func, '__name__',
getattr(func, '__class__').__name__)
except Exception:
func_name = str(func)
pa.__name__ = func_name
return pa
upcaseTokens = tokenMap(lambda t: _ustr(t).upper())
"""(Deprecated) Helper parse action to convert tokens to upper case. Deprecated in favor of L{pyparsing_common.upcaseTokens}"""
downcaseTokens = tokenMap(lambda t: _ustr(t).lower())
"""(Deprecated) Helper parse action to convert tokens to lower case. Deprecated in favor of L{pyparsing_common.downcaseTokens}"""
def _makeTags(tagStr, xml):
"""Internal helper to construct opening and closing tag expressions, given a tag name"""
if isinstance(tagStr,basestring):
resname = tagStr
tagStr = Keyword(tagStr, caseless=not xml)
else:
resname = tagStr.name
tagAttrName = Word(alphas,alphanums+"_-:")
if (xml):
tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
openTag = Suppress("<") + tagStr("tag") + \
Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
else:
printablesLessRAbrack = "".join(c for c in printables if c not in ">")
tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
openTag = Suppress("<") + tagStr("tag") + \
Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
Optional( Suppress("=") + tagAttrValue ) ))) + \
Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
closeTag = Combine(_L("</") + tagStr + ">")
openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % resname)
closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % resname)
openTag.tag = resname
closeTag.tag = resname
return openTag, closeTag
def makeHTMLTags(tagStr):
"""
Helper to construct opening and closing tag expressions for HTML, given a tag name. Matches
tags in either upper or lower case, attributes with namespaces and with quoted or unquoted values.
Example::
text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
# makeHTMLTags returns pyparsing expressions for the opening and closing tags as a 2-tuple
a,a_end = makeHTMLTags("A")
link_expr = a + SkipTo(a_end)("link_text") + a_end
for link in link_expr.searchString(text):
# attributes in the <A> tag (like "href" shown here) are also accessible as named results
print(link.link_text, '->', link.href)
prints::
pyparsing -> http://pyparsing.wikispaces.com
"""
return _makeTags( tagStr, False )
def makeXMLTags(tagStr):
"""
Helper to construct opening and closing tag expressions for XML, given a tag name. Matches
tags only in the given upper/lower case.
Example: similar to L{makeHTMLTags}
"""
return _makeTags( tagStr, True )
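# Illustrative sketch (added comment, not part of the original source): unlike
# makeHTMLTags, the XML form matches case-sensitively and requires quoted attribute
# values.
#
#   body, body_end = makeXMLTags("body")
#   expr = body + SkipTo(body_end)("text") + body_end
#   expr.parseString('<body lang="en">hi</body>').text  # -> 'hi'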
def withAttribute(*args,**attrDict):
"""
Helper to create a validating parse action to be used with start tags created
with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag
with a required attribute value, to avoid false matches on common tags such as
C{<TD>} or C{<DIV>}.
Call C{withAttribute} with a series of attribute names and values. Specify the list
    of filter attribute names and values as:
- keyword arguments, as in C{(align="right")}, or
- as an explicit dict with C{**} operator, when an attribute name is also a Python
reserved word, as in C{**{"class":"Customer", "align":"right"}}
- a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") )
For attribute names with a namespace prefix, you must use the second form. Attribute
names are matched insensitive to upper/lower case.
If just testing for C{class} (with or without a namespace), use C{L{withClass}}.
To verify that the attribute exists, but without specifying a value, pass
C{withAttribute.ANY_VALUE} as the value.
Example::
html = '''
<div>
Some text
<div type="grid">1 4 0 1 0</div>
<div type="graph">1,3 2,3 1,1</div>
<div>this has no type</div>
</div>
'''
div,div_end = makeHTMLTags("div")
# only match div tag having a type attribute with value "grid"
div_grid = div().setParseAction(withAttribute(type="grid"))
grid_expr = div_grid + SkipTo(div | div_end)("body")
for grid_header in grid_expr.searchString(html):
print(grid_header.body)
# construct a match with any div tag having a type attribute, regardless of the value
div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE))
div_expr = div_any_type + SkipTo(div | div_end)("body")
for div_header in div_expr.searchString(html):
print(div_header.body)
prints::
1 4 0 1 0
1 4 0 1 0
1,3 2,3 1,1
"""
if args:
attrs = args[:]
else:
attrs = attrDict.items()
attrs = [(k,v) for k,v in attrs]
def pa(s,l,tokens):
for attrName,attrValue in attrs:
if attrName not in tokens:
raise ParseException(s,l,"no matching attribute " + attrName)
if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" %
(attrName, tokens[attrName], attrValue))
return pa
withAttribute.ANY_VALUE = object()
def withClass(classname, namespace=''):
"""
Simplified version of C{L{withAttribute}} when matching on a div class - made
difficult because C{class} is a reserved word in Python.
Example::
html = '''
<div>
Some text
<div class="grid">1 4 0 1 0</div>
<div class="graph">1,3 2,3 1,1</div>
<div>this <div> has no class</div>
</div>
'''
div,div_end = makeHTMLTags("div")
div_grid = div().setParseAction(withClass("grid"))
grid_expr = div_grid + SkipTo(div | div_end)("body")
for grid_header in grid_expr.searchString(html):
print(grid_header.body)
div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE))
div_expr = div_any_type + SkipTo(div | div_end)("body")
for div_header in div_expr.searchString(html):
print(div_header.body)
prints::
1 4 0 1 0
1 4 0 1 0
1,3 2,3 1,1
"""
classattr = "%s:class" % namespace if namespace else "class"
return withAttribute(**{classattr : classname})
opAssoc = _Constants()
opAssoc.LEFT = object()
opAssoc.RIGHT = object()
def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ):
"""
Helper method for constructing grammars of expressions made up of
operators working in a precedence hierarchy. Operators may be unary or
binary, left- or right-associative. Parse actions can also be attached
to operator expressions. The generated parser will also recognize the use
of parentheses to override operator precedences (see example below).
Note: if you define a deep operator list, you may see performance issues
when using infixNotation. See L{ParserElement.enablePackrat} for a
mechanism to potentially improve your parser performance.
Parameters:
    - baseExpr - expression representing the most basic element of the nested expression grammar
- opList - list of tuples, one for each operator precedence level in the
expression grammar; each tuple is of the form
(opExpr, numTerms, rightLeftAssoc, parseAction), where:
- opExpr is the pyparsing expression for the operator;
may also be a string, which will be converted to a Literal;
if numTerms is 3, opExpr is a tuple of two expressions, for the
two operators separating the 3 terms
- numTerms is the number of terms for this operator (must
be 1, 2, or 3)
- rightLeftAssoc is the indicator whether the operator is
right or left associative, using the pyparsing-defined
constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}.
- parseAction is the parse action to be associated with
expressions matching this operator expression (the
parse action tuple member may be omitted)
- lpar - expression for matching left-parentheses (default=C{Suppress('(')})
- rpar - expression for matching right-parentheses (default=C{Suppress(')')})
Example::
# simple example of four-function arithmetic with ints and variable names
integer = pyparsing_common.signed_integer
varname = pyparsing_common.identifier
arith_expr = infixNotation(integer | varname,
[
('-', 1, opAssoc.RIGHT),
(oneOf('* /'), 2, opAssoc.LEFT),
(oneOf('+ -'), 2, opAssoc.LEFT),
])
arith_expr.runTests('''
5+3*6
(5+3)*6
-2--11
''', fullDump=False)
prints::
5+3*6
[[5, '+', [3, '*', 6]]]
(5+3)*6
[[[5, '+', 3], '*', 6]]
-2--11
[[['-', 2], '-', ['-', 11]]]
"""
ret = Forward()
lastExpr = baseExpr | ( lpar + ret + rpar )
for i,operDef in enumerate(opList):
opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]
termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr
if arity == 3:
if opExpr is None or len(opExpr) != 2:
raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions")
opExpr1, opExpr2 = opExpr
thisExpr = Forward().setName(termName)
if rightLeftAssoc == opAssoc.LEFT:
if arity == 1:
matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) )
elif arity == 2:
if opExpr is not None:
matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) )
else:
matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) )
elif arity == 3:
matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \
Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr )
else:
raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
elif rightLeftAssoc == opAssoc.RIGHT:
if arity == 1:
# try to avoid LR with this extra test
if not isinstance(opExpr, Optional):
opExpr = Optional(opExpr)
matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )
elif arity == 2:
if opExpr is not None:
matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) )
else:
matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) )
elif arity == 3:
matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \
Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr )
else:
raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
else:
raise ValueError("operator must indicate right or left associativity")
if pa:
matchExpr.setParseAction( pa )
thisExpr <<= ( matchExpr.setName(termName) | lastExpr )
lastExpr = thisExpr
ret <<= lastExpr
return ret
operatorPrecedence = infixNotation
"""(Deprecated) Former name of C{L{infixNotation}}, will be dropped in a future release."""
dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"').setName("string enclosed in double quotes")
sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("string enclosed in single quotes")
quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"'|
Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("quotedString using single or double quotes")
unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal")
def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
"""
Helper method for defining nested lists enclosed in opening and closing
delimiters ("(" and ")" are the default).
Parameters:
- opener - opening character for a nested list (default=C{"("}); can also be a pyparsing expression
- closer - closing character for a nested list (default=C{")"}); can also be a pyparsing expression
- content - expression for items within the nested lists (default=C{None})
- ignoreExpr - expression for ignoring opening and closing delimiters (default=C{quotedString})
If an expression is not provided for the content argument, the nested
expression will capture all whitespace-delimited content between delimiters
as a list of separate values.
Use the C{ignoreExpr} argument to define expressions that may contain
opening or closing characters that should not be treated as opening
or closing characters for nesting, such as quotedString or a comment
expression. Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}.
The default is L{quotedString}, but if no expressions are to be ignored,
then pass C{None} for this argument.
Example::
data_type = oneOf("void int short long char float double")
decl_data_type = Combine(data_type + Optional(Word('*')))
ident = Word(alphas+'_', alphanums+'_')
number = pyparsing_common.number
arg = Group(decl_data_type + ident)
LPAR,RPAR = map(Suppress, "()")
code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment))
c_function = (decl_data_type("type")
+ ident("name")
+ LPAR + Optional(delimitedList(arg), [])("args") + RPAR
+ code_body("body"))
c_function.ignore(cStyleComment)
source_code = '''
int is_odd(int x) {
return (x%2);
}
int dec_to_hex(char hchar) {
if (hchar >= '0' && hchar <= '9') {
return (ord(hchar)-ord('0'));
} else {
return (10+ord(hchar)-ord('A'));
}
}
'''
for func in c_function.searchString(source_code):
print("%(name)s (%(type)s) args: %(args)s" % func)
prints::
is_odd (int) args: [['int', 'x']]
dec_to_hex (int) args: [['char', 'hchar']]
"""
if opener == closer:
raise ValueError("opening and closing strings cannot be the same")
if content is None:
if isinstance(opener,basestring) and isinstance(closer,basestring):
if len(opener) == 1 and len(closer)==1:
if ignoreExpr is not None:
content = (Combine(OneOrMore(~ignoreExpr +
CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
).setParseAction(lambda t:t[0].strip()))
else:
content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS
).setParseAction(lambda t:t[0].strip()))
else:
if ignoreExpr is not None:
content = (Combine(OneOrMore(~ignoreExpr +
~Literal(opener) + ~Literal(closer) +
CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
).setParseAction(lambda t:t[0].strip()))
else:
content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) +
CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
).setParseAction(lambda t:t[0].strip()))
else:
raise ValueError("opening and closing arguments must be strings if no content expression is given")
ret = Forward()
if ignoreExpr is not None:
ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
else:
ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) )
ret.setName('nested %s%s expression' % (opener,closer))
return ret
def indentedBlock(blockStatementExpr, indentStack, indent=True):
"""
Helper method for defining space-delimited indentation blocks, such as
those used to define block statements in Python source code.
Parameters:
- blockStatementExpr - expression defining syntax of statement that
is repeated within the indented block
- indentStack - list created by caller to manage indentation stack
(multiple statementWithIndentedBlock expressions within a single grammar
should share a common indentStack)
    - indent - boolean indicating whether block must be indented beyond the
        current level; set to False for block of left-most statements
(default=C{True})
A valid block must contain at least one C{blockStatement}.
Example::
data = '''
def A(z):
A1
B = 100
G = A2
A2
A3
B
def BB(a,b,c):
BB1
def BBA():
bba1
bba2
bba3
C
D
def spam(x,y):
def eggs(z):
pass
'''
indentStack = [1]
stmt = Forward()
identifier = Word(alphas, alphanums)
funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":")
func_body = indentedBlock(stmt, indentStack)
funcDef = Group( funcDecl + func_body )
rvalue = Forward()
funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")")
rvalue << (funcCall | identifier | Word(nums))
assignment = Group(identifier + "=" + rvalue)
stmt << ( funcDef | assignment | identifier )
module_body = OneOrMore(stmt)
parseTree = module_body.parseString(data)
parseTree.pprint()
prints::
[['def',
'A',
['(', 'z', ')'],
':',
[['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
'B',
['def',
'BB',
['(', 'a', 'b', 'c', ')'],
':',
[['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
'C',
'D',
['def',
'spam',
['(', 'x', 'y', ')'],
':',
[[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]]
"""
def checkPeerIndent(s,l,t):
if l >= len(s): return
curCol = col(l,s)
if curCol != indentStack[-1]:
if curCol > indentStack[-1]:
raise ParseFatalException(s,l,"illegal nesting")
raise ParseException(s,l,"not a peer entry")
def checkSubIndent(s,l,t):
curCol = col(l,s)
if curCol > indentStack[-1]:
indentStack.append( curCol )
else:
raise ParseException(s,l,"not a subentry")
def checkUnindent(s,l,t):
if l >= len(s): return
curCol = col(l,s)
if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]):
raise ParseException(s,l,"not an unindent")
indentStack.pop()
NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())
INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT')
PEER = Empty().setParseAction(checkPeerIndent).setName('')
UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT')
if indent:
smExpr = Group( Optional(NL) +
#~ FollowedBy(blockStatementExpr) +
INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT)
else:
smExpr = Group( Optional(NL) +
(OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) )
blockStatementExpr.ignore(_bslash + LineEnd())
return smExpr.setName('indented block')
alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:").setName('any tag'))
_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(),'><& "\''))
commonHTMLEntity = Regex('&(?P<entity>' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity")
def replaceHTMLEntity(t):
"""Helper parser action to replace common HTML entities with their special characters"""
return _htmlEntityMap.get(t.entity)
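# Illustrative sketch (added comment, not part of the original source): pair with
# transformString to decode entities in place.
#
#   commonHTMLEntity.setParseAction(replaceHTMLEntity)
#   commonHTMLEntity.transformString("x &lt; y &amp;&amp; y &gt; z")  # -> 'x < y && y > z'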
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment")
"Comment of the form C{/* ... */}"
htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment")
"Comment of the form C{<!-- ... -->}"
restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line")
dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment")
"Comment of the form C{// ... (to end of line)}"
cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/'| dblSlashComment).setName("C++ style comment")
"Comment of either form C{L{cStyleComment}} or C{L{dblSlashComment}}"
javaStyleComment = cppStyleComment
"Same as C{L{cppStyleComment}}"
pythonStyleComment = Regex(r"#.*").setName("Python style comment")
"Comment of the form C{# ... (to end of line)}"
_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') +
Optional( Word(" \t") +
~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem")
commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList")
"""(Deprecated) Predefined expression of 1 or more printable words or quoted strings, separated by commas.
This expression is deprecated in favor of L{pyparsing_common.comma_separated_list}."""
# some other useful expressions - using lower-case class name since we are really using this as a namespace
class pyparsing_common:
"""
Here are some common low-level expressions that may be useful in jump-starting parser development:
- numeric forms (L{integers<integer>}, L{reals<real>}, L{scientific notation<sci_real>})
- common L{programming identifiers<identifier>}
- network addresses (L{MAC<mac_address>}, L{IPv4<ipv4_address>}, L{IPv6<ipv6_address>})
- ISO8601 L{dates<iso8601_date>} and L{datetime<iso8601_datetime>}
- L{UUID<uuid>}
- L{comma-separated list<comma_separated_list>}
Parse actions:
- C{L{convertToInteger}}
- C{L{convertToFloat}}
- C{L{convertToDate}}
- C{L{convertToDatetime}}
- C{L{stripHTMLTags}}
- C{L{upcaseTokens}}
- C{L{downcaseTokens}}
Example::
pyparsing_common.number.runTests('''
# any int or real number, returned as the appropriate type
100
-100
+100
3.14159
6.02e23
1e-12
''')
pyparsing_common.fnumber.runTests('''
# any int or real number, returned as float
100
-100
+100
3.14159
6.02e23
1e-12
''')
pyparsing_common.hex_integer.runTests('''
# hex numbers
100
FF
''')
pyparsing_common.fraction.runTests('''
# fractions
1/2
-3/4
''')
pyparsing_common.mixed_integer.runTests('''
# mixed fractions
1
1/2
-3/4
1-3/4
''')
import uuid
pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
pyparsing_common.uuid.runTests('''
# uuid
12345678-1234-5678-1234-567812345678
''')
prints::
# any int or real number, returned as the appropriate type
100
[100]
-100
[-100]
+100
[100]
3.14159
[3.14159]
6.02e23
[6.02e+23]
1e-12
[1e-12]
# any int or real number, returned as float
100
[100.0]
-100
[-100.0]
+100
[100.0]
3.14159
[3.14159]
6.02e23
[6.02e+23]
1e-12
[1e-12]
# hex numbers
100
[256]
FF
[255]
# fractions
1/2
[0.5]
-3/4
[-0.75]
# mixed fractions
1
[1]
1/2
[0.5]
-3/4
[-0.75]
1-3/4
[1.75]
# uuid
12345678-1234-5678-1234-567812345678
[UUID('12345678-1234-5678-1234-567812345678')]
"""
convertToInteger = tokenMap(int)
"""
Parse action for converting parsed integers to Python int
"""
convertToFloat = tokenMap(float)
"""
Parse action for converting parsed numbers to Python float
"""
integer = Word(nums).setName("integer").setParseAction(convertToInteger)
"""expression that parses an unsigned integer, returns an int"""
hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int,16))
"""expression that parses a hexadecimal integer, returns an int"""
signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger)
"""expression that parses an integer with optional leading sign, returns an int"""
fraction = (signed_integer().setParseAction(convertToFloat) + '/' + signed_integer().setParseAction(convertToFloat)).setName("fraction")
"""fractional expression of an integer divided by an integer, returns a float"""
fraction.addParseAction(lambda t: t[0]/t[-1])
mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction")
"""mixed integer of the form 'integer - fraction', with optional leading integer, returns float"""
mixed_integer.addParseAction(sum)
real = Regex(r'[+-]?\d+\.\d*').setName("real number").setParseAction(convertToFloat)
"""expression that parses a floating point number and returns a float"""
sci_real = Regex(r'[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat)
"""expression that parses a floating point number with optional scientific notation and returns a float"""
# streamlining this expression makes the docs nicer-looking
number = (sci_real | real | signed_integer).streamline()
"""any numeric expression, returns the corresponding Python type"""
fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat)
"""any int or real number, returned as float"""
identifier = Word(alphas+'_', alphanums+'_').setName("identifier")
"""typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')"""
ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address")
"IPv4 address (C{0.0.0.0 - 255.255.255.255})"
_ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer")
_full_ipv6_address = (_ipv6_part + (':' + _ipv6_part)*7).setName("full IPv6 address")
_short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part)*(0,6)) + "::" + Optional(_ipv6_part + (':' + _ipv6_part)*(0,6))).setName("short IPv6 address")
_short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8)
_mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address")
ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address")
"IPv6 address (long, short, or mixed form)"
mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address")
"MAC address xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)"
@staticmethod
def convertToDate(fmt="%Y-%m-%d"):
"""
Helper to create a parse action for converting parsed date string to Python datetime.date
Params -
- fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"})
Example::
date_expr = pyparsing_common.iso8601_date.copy()
date_expr.setParseAction(pyparsing_common.convertToDate())
print(date_expr.parseString("1999-12-31"))
prints::
[datetime.date(1999, 12, 31)]
"""
def cvt_fn(s,l,t):
try:
return datetime.strptime(t[0], fmt).date()
except ValueError as ve:
raise ParseException(s, l, str(ve))
return cvt_fn
@staticmethod
def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"):
"""
Helper to create a parse action for converting parsed datetime string to Python datetime.datetime
Params -
- fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"})
Example::
dt_expr = pyparsing_common.iso8601_datetime.copy()
dt_expr.setParseAction(pyparsing_common.convertToDatetime())
print(dt_expr.parseString("1999-12-31T23:59:59.999"))
prints::
[datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
"""
def cvt_fn(s,l,t):
try:
return datetime.strptime(t[0], fmt)
except ValueError as ve:
raise ParseException(s, l, str(ve))
return cvt_fn
iso8601_date = Regex(r'(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?').setName("ISO8601 date")
"ISO8601 date (C{yyyy-mm-dd})"
iso8601_datetime = Regex(r'(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime")
"ISO8601 datetime (C{yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)}) - trailing seconds, milliseconds, and timezone optional; accepts separating C{'T'} or C{' '}"
uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID")
"UUID (C{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx})"
_html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress()
@staticmethod
def stripHTMLTags(s, l, tokens):
"""
Parse action to remove HTML tags from web page HTML source
Example::
# strip HTML links from normal text
text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
td,td_end = makeHTMLTags("TD")
table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end
print(table_text.parseString(text).body) # -> 'More info at the pyparsing wiki page'
"""
return pyparsing_common._html_stripper.transformString(tokens[0])
_commasepitem = Combine(OneOrMore(~Literal(",") + ~LineEnd() + Word(printables, excludeChars=',')
+ Optional( White(" \t") ) ) ).streamline().setName("commaItem")
comma_separated_list = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("comma separated list")
"""Predefined expression of 1 or more printable words or quoted strings, separated by commas."""
upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper()))
"""Parse action to convert tokens to upper case."""
downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower()))
"""Parse action to convert tokens to lower case."""
if __name__ == "__main__":
selectToken = CaselessLiteral("select")
fromToken = CaselessLiteral("from")
ident = Word(alphas, alphanums + "_$")
columnName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
columnNameList = Group(delimitedList(columnName)).setName("columns")
columnSpec = ('*' | columnNameList)
tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
tableNameList = Group(delimitedList(tableName)).setName("tables")
simpleSQL = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables")
# demo runTests method, including embedded comments in test string
simpleSQL.runTests("""
# '*' as column list and dotted table name
select * from SYS.XYZZY
# caseless match on "SELECT", and casts back to "select"
SELECT * from XYZZY, ABC
# list of column names, and mixed case SELECT keyword
Select AA,BB,CC from Sys.dual
# multiple tables
Select A, B, C from Sys.dual, Table2
# invalid SELECT keyword - should fail
Xelect A, B, C from Sys.dual
# incomplete command - should fail
Select
# invalid column name - should fail
Select ^^^ frox Sys.dual
""")
pyparsing_common.number.runTests("""
100
-100
+100
3.14159
6.02e23
1e-12
""")
# any int or real number, returned as float
pyparsing_common.fnumber.runTests("""
100
-100
+100
3.14159
6.02e23
1e-12
""")
pyparsing_common.hex_integer.runTests("""
100
FF
""")
import uuid
pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
pyparsing_common.uuid.runTests("""
12345678-1234-5678-1234-567812345678
""")
|
apache-2.0
|
morelab/appcomposer
|
alembic/versions/d5126053d47e_delete_english_history_records_over_50.py
|
3
|
3289
|
"""delete English history records over 50
Revision ID: d5126053d47e
Revises: bbe219b77366
Create Date: 2019-01-06 18:12:12.357726
"""
# revision identifiers, used by Alembic.
revision = 'd5126053d47e'
down_revision = 'bbe219b77366'
import sys
import time
from alembic import op
import sqlalchemy as sa
import sqlalchemy.sql as sql
metadata = sa.MetaData()
translation_urls = sa.Table('TranslationUrls', metadata,
sa.Column('id', sa.Integer()),
sa.Column('url', sa.Unicode(255)),
)
translation_bundles = sa.Table('TranslationBundles', metadata,
sa.Column('id', sa.Integer()),
sa.Column('language', sa.Unicode(20)),
sa.Column('target', sa.Unicode(20)),
sa.Column('translation_url_id', sa.Integer()),
)
message_history = sa.Table('TranslationMessageHistory', metadata,
sa.Column('id', sa.Integer()),
sa.Column('bundle_id', sa.Integer()),
sa.Column('key', sa.Unicode(255)),
sa.Column('datetime', sa.DateTime()),
)
def dbg(message):
print "[{}] {}".format(time.asctime(), message)
sys.stdout.flush()
def upgrade():
# Take only English bundles
all_bundles_stmt = sql.select([
translation_bundles.c.id,
translation_bundles.c.language,
translation_bundles.c.target,
translation_bundles.c.translation_url_id
], translation_bundles.c.language == 'en_ALL')
N = 50
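    # For each English bundle with more than 1000 total history rows, keep at most N
    # revisions per message key, deleting the oldest rows first in batches of at most
    # 200 per DELETE statement.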
for bundle_row in list(op.get_bind().execute(all_bundles_stmt)):
bundle_id = bundle_row[translation_bundles.c.id]
count_total = list(op.get_bind().execute(sql.select([sa.func.count(message_history.c.id)], message_history.c.bundle_id == bundle_id)))[0][0]
if count_total > 1000: # Only focus on the ones with more than 1000 revisions
dbg("Big bundle. Analyzing")
dbg(" - Data: {} {}".format(bundle_row, count_total))
keys = [ key for key, in op.get_bind().execute(sql.select([sa.func.distinct(message_history.c.key)], message_history.c.bundle_id == bundle_id)) ]
dbg(" - Keys: {}".format(len(keys)))
for key in keys:
records_found = list(op.get_bind().execute(sql.select([sa.func.count(message_history.c.id)], sa.and_(message_history.c.key == unicode(key), message_history.c.bundle_id == bundle_id))))[0][0]
if records_found > N:
dbg(" - Key {} has {} records".format(key, records_found))
while records_found > N:
cut_in = records_found - N
if cut_in > 200:
cut_in = 200
records_found = records_found - cut_in
ids_to_remove_stmt = sql.select([message_history.c.id, message_history.c.datetime], sa.and_(message_history.c.key == unicode(key), message_history.c.bundle_id == bundle_id)).order_by(sa.asc(message_history.c.datetime)).limit(cut_in)
ids_to_remove = [ id_to_remove for id_to_remove, record_datetime in op.get_bind().execute(ids_to_remove_stmt) ]
dbg(" - Deleting {} of them... after: {}".format(cut_in, records_found))
op.get_bind().execute(message_history.delete(message_history.c.id.in_(ids_to_remove)))
def downgrade():
pass
|
bsd-2-clause
|
kmonsoor/python-for-android
|
python3-alpha/python3-src/Lib/test/test_sundry.py
|
55
|
2275
|
"""Do a minimal test of all the modules that aren't otherwise tested."""
from test import support
import sys
import unittest
class TestUntestedModules(unittest.TestCase):
def test_at_least_import_untested_modules(self):
with support.check_warnings(quiet=True):
import bdb
import cgitb
import code
import distutils.bcppcompiler
import distutils.ccompiler
import distutils.cygwinccompiler
import distutils.emxccompiler
import distutils.filelist
if sys.platform.startswith('win'):
import distutils.msvccompiler
import distutils.text_file
import distutils.unixccompiler
import distutils.command.bdist_dumb
if sys.platform.startswith('win'):
import distutils.command.bdist_msi
import distutils.command.bdist
import distutils.command.bdist_rpm
import distutils.command.bdist_wininst
import distutils.command.build_clib
import distutils.command.build_ext
import distutils.command.build
import distutils.command.clean
import distutils.command.config
import distutils.command.install_data
import distutils.command.install_egg_info
import distutils.command.install_headers
import distutils.command.install_lib
import distutils.command.register
import distutils.command.sdist
import distutils.command.upload
import encodings
import formatter
import getpass
import html.entities
import imghdr
import keyword
import macurl2path
import mailcap
import nturl2path
import os2emxpath
import pstats
import py_compile
import sndhdr
import tabnanny
try:
import tty # not available on Windows
except ImportError:
if support.verbose:
print("skipping tty")
def test_main():
support.run_unittest(TestUntestedModules)
if __name__ == "__main__":
test_main()
|
apache-2.0
|
vipul-sharma20/oh-mainline
|
vendor/packages/twisted/twisted/python/test/test_deprecate.py
|
18
|
24646
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for Twisted's deprecation framework, L{twisted.python.deprecate}.
"""
import sys, types
import warnings
from os.path import normcase
from twisted.trial.unittest import TestCase
from twisted.python import deprecate
from twisted.python.deprecate import _appendToDocstring
from twisted.python.deprecate import _getDeprecationDocstring
from twisted.python.deprecate import deprecated, getDeprecationWarningString
from twisted.python.deprecate import _getDeprecationWarningString
from twisted.python.deprecate import DEPRECATION_WARNING_FORMAT
from twisted.python.reflect import fullyQualifiedName
from twisted.python.versions import Version
from twisted.python.filepath import FilePath
from twisted.python.test import deprecatedattributes
def dummyCallable():
"""
Do nothing.
This is used to test the deprecation decorators.
"""
def dummyReplacementMethod():
"""
Do nothing.
This is used to test the replacement parameter to L{deprecated}.
"""
class TestDeprecationWarnings(TestCase):
def test_getDeprecationWarningString(self):
"""
L{getDeprecationWarningString} returns a string that tells us that a
callable was deprecated at a certain released version of Twisted.
"""
version = Version('Twisted', 8, 0, 0)
self.assertEqual(
getDeprecationWarningString(self.test_getDeprecationWarningString,
version),
"twisted.python.test.test_deprecate.TestDeprecationWarnings."
"test_getDeprecationWarningString was deprecated in "
"Twisted 8.0.0")
def test_getDeprecationWarningStringWithFormat(self):
"""
L{getDeprecationWarningString} returns a string that tells us that a
callable was deprecated at a certain released version of Twisted, with
a message containing additional information about the deprecation.
"""
version = Version('Twisted', 8, 0, 0)
format = deprecate.DEPRECATION_WARNING_FORMAT + ': This is a message'
self.assertEquals(
getDeprecationWarningString(self.test_getDeprecationWarningString,
version, format),
'twisted.python.test.test_deprecate.TestDeprecationWarnings.'
'test_getDeprecationWarningString was deprecated in '
'Twisted 8.0.0: This is a message')
def test_deprecateEmitsWarning(self):
"""
Decorating a callable with L{deprecated} emits a warning.
"""
version = Version('Twisted', 8, 0, 0)
dummy = deprecated(version)(dummyCallable)
def addStackLevel():
dummy()
self.assertWarns(
DeprecationWarning,
getDeprecationWarningString(dummyCallable, version),
__file__,
addStackLevel)
def test_deprecatedPreservesName(self):
"""
The decorated function has the same name as the original.
"""
version = Version('Twisted', 8, 0, 0)
dummy = deprecated(version)(dummyCallable)
self.assertEqual(dummyCallable.__name__, dummy.__name__)
self.assertEqual(fullyQualifiedName(dummyCallable),
fullyQualifiedName(dummy))
def test_getDeprecationDocstring(self):
"""
L{_getDeprecationDocstring} returns a note about the deprecation to go
into a docstring.
"""
version = Version('Twisted', 8, 0, 0)
self.assertEqual(
"Deprecated in Twisted 8.0.0.",
_getDeprecationDocstring(version, ''))
def test_deprecatedUpdatesDocstring(self):
"""
The docstring of the deprecated function is appended with information
about the deprecation.
"""
version = Version('Twisted', 8, 0, 0)
dummy = deprecated(version)(dummyCallable)
_appendToDocstring(
dummyCallable,
_getDeprecationDocstring(version, ''))
self.assertEqual(dummyCallable.__doc__, dummy.__doc__)
def test_versionMetadata(self):
"""
Deprecating a function adds version information to the decorated
version of that function.
"""
version = Version('Twisted', 8, 0, 0)
dummy = deprecated(version)(dummyCallable)
self.assertEqual(version, dummy.deprecatedVersion)
def test_getDeprecationWarningStringReplacement(self):
"""
L{getDeprecationWarningString} takes an additional replacement parameter
that can be used to add information to the deprecation. If the
replacement parameter is a string, it will be interpolated directly into
the result.
"""
version = Version('Twisted', 8, 0, 0)
warningString = getDeprecationWarningString(
self.test_getDeprecationWarningString, version,
replacement="something.foobar")
self.assertEqual(
warningString,
"%s was deprecated in Twisted 8.0.0; please use something.foobar "
"instead" % (
fullyQualifiedName(self.test_getDeprecationWarningString),))
def test_getDeprecationWarningStringReplacementWithCallable(self):
"""
L{getDeprecationWarningString} takes an additional replacement parameter
that can be used to add information to the deprecation. If the
replacement parameter is a callable, its fully qualified name will be
interpolated into the result.
"""
version = Version('Twisted', 8, 0, 0)
warningString = getDeprecationWarningString(
self.test_getDeprecationWarningString, version,
replacement=dummyReplacementMethod)
self.assertEqual(
warningString,
"%s was deprecated in Twisted 8.0.0; please use "
"twisted.python.test.test_deprecate.dummyReplacementMethod "
"instead" % (
fullyQualifiedName(self.test_getDeprecationWarningString),))
def test_deprecatedReplacement(self):
"""
L{deprecated} takes an additional replacement parameter that can be used
to indicate the new, non-deprecated method developers should use. If
the replacement parameter is a string, it will be interpolated directly
into the warning message.
"""
version = Version('Twisted', 8, 0, 0)
dummy = deprecated(version, "something.foobar")(dummyCallable)
self.assertEquals(dummy.__doc__,
"\n"
" Do nothing.\n\n"
" This is used to test the deprecation decorators.\n\n"
" Deprecated in Twisted 8.0.0; please use "
"something.foobar"
" instead.\n"
" ")
def test_deprecatedReplacementWithCallable(self):
"""
L{deprecated} takes an additional replacement parameter that can be used
to indicate the new, non-deprecated method developers should use. If
the replacement parameter is a callable, its fully qualified name will
be interpolated into the warning message.
"""
version = Version('Twisted', 8, 0, 0)
decorator = deprecated(version, replacement=dummyReplacementMethod)
dummy = decorator(dummyCallable)
self.assertEquals(dummy.__doc__,
"\n"
" Do nothing.\n\n"
" This is used to test the deprecation decorators.\n\n"
" Deprecated in Twisted 8.0.0; please use "
"twisted.python.test.test_deprecate.dummyReplacementMethod"
" instead.\n"
" ")
class TestAppendToDocstring(TestCase):
"""
Test the _appendToDocstring function.
_appendToDocstring is used to add text to a docstring.
"""
def test_appendToEmptyDocstring(self):
"""
Appending to an empty docstring simply replaces the docstring.
"""
def noDocstring():
pass
_appendToDocstring(noDocstring, "Appended text.")
self.assertEqual("Appended text.", noDocstring.__doc__)
def test_appendToSingleLineDocstring(self):
"""
Appending to a single line docstring places the message on a new line,
with a blank line separating it from the rest of the docstring.
The docstring ends with a newline, conforming to Twisted and PEP 8
standards. Unfortunately, the indentation is incorrect, since the
existing docstring doesn't have enough info to help us indent
properly.
"""
def singleLineDocstring():
"""This doesn't comply with standards, but is here for a test."""
_appendToDocstring(singleLineDocstring, "Appended text.")
self.assertEqual(
["This doesn't comply with standards, but is here for a test.",
"",
"Appended text."],
singleLineDocstring.__doc__.splitlines())
self.assertTrue(singleLineDocstring.__doc__.endswith('\n'))
def test_appendToMultilineDocstring(self):
"""
Appending to a multi-line docstring places the message on a new line,
with a blank line separating it from the rest of the docstring.
Because we have multiple lines, we have enough information to do
indentation.
"""
def multiLineDocstring():
"""
This is a multi-line docstring.
"""
def expectedDocstring():
"""
This is a multi-line docstring.
Appended text.
"""
_appendToDocstring(multiLineDocstring, "Appended text.")
self.assertEqual(
expectedDocstring.__doc__, multiLineDocstring.__doc__)
class _MockDeprecatedAttribute(object):
"""
Mock of L{twisted.python.deprecate._DeprecatedAttribute}.
@ivar value: The value of the attribute.
"""
def __init__(self, value):
self.value = value
def get(self):
"""
Get a known value.
"""
return self.value
class ModuleProxyTests(TestCase):
"""
Tests for L{twisted.python.deprecate._ModuleProxy}, which proxies
access to module-level attributes, intercepting access to deprecated
attributes and passing through access to normal attributes.
"""
def _makeProxy(self, **attrs):
"""
Create a temporary module proxy object.
@param **attrs: Attributes to initialise on the temporary module object
@rtype: L{twisted.python.deprecate._ModuleProxy}
"""
mod = types.ModuleType('foo')
for key, value in attrs.iteritems():
setattr(mod, key, value)
return deprecate._ModuleProxy(mod)
def test_getattrPassthrough(self):
"""
Getting a normal attribute on a L{twisted.python.deprecate._ModuleProxy}
retrieves the underlying attribute's value, and raises C{AttributeError}
if a non-existent attribute is accessed.
"""
proxy = self._makeProxy(SOME_ATTRIBUTE='hello')
self.assertIdentical(proxy.SOME_ATTRIBUTE, 'hello')
self.assertRaises(AttributeError, getattr, proxy, 'DOES_NOT_EXIST')
def test_getattrIntercept(self):
"""
Getting an attribute marked as being deprecated on
L{twisted.python.deprecate._ModuleProxy} results in calling the
deprecated wrapper's C{get} method.
"""
proxy = self._makeProxy()
_deprecatedAttributes = object.__getattribute__(
proxy, '_deprecatedAttributes')
_deprecatedAttributes['foo'] = _MockDeprecatedAttribute(42)
self.assertEquals(proxy.foo, 42)
def test_privateAttributes(self):
"""
Private attributes of L{twisted.python.deprecate._ModuleProxy} are
inaccessible when regular attribute access is used.
"""
proxy = self._makeProxy()
self.assertRaises(AttributeError, getattr, proxy, '_module')
self.assertRaises(
AttributeError, getattr, proxy, '_deprecatedAttributes')
def test_setattr(self):
"""
Setting attributes on L{twisted.python.deprecate._ModuleProxy} proxies
them through to the wrapped module.
"""
proxy = self._makeProxy()
proxy._module = 1
self.assertNotEquals(object.__getattribute__(proxy, '_module'), 1)
self.assertEquals(proxy._module, 1)
def test_repr(self):
"""
L{twisted.python.deprecate._ModuleProxy.__repr__} produces a string
containing the proxy type and a representation of the wrapped module
object.
"""
proxy = self._makeProxy()
realModule = object.__getattribute__(proxy, '_module')
self.assertEquals(
repr(proxy), '<%s module=%r>' % (type(proxy).__name__, realModule))
class DeprecatedAttributeTests(TestCase):
"""
Tests for L{twisted.python.deprecate._DeprecatedAttribute} and
L{twisted.python.deprecate.deprecatedModuleAttribute}, which issue
warnings for deprecated module-level attributes.
"""
def setUp(self):
self.version = deprecatedattributes.version
self.message = deprecatedattributes.message
self._testModuleName = __name__ + '.foo'
def _getWarningString(self, attr):
"""
Create the warning string used by deprecated attributes.
"""
return _getDeprecationWarningString(
deprecatedattributes.__name__ + '.' + attr,
deprecatedattributes.version,
DEPRECATION_WARNING_FORMAT + ': ' + deprecatedattributes.message)
def test_deprecatedAttributeHelper(self):
"""
L{twisted.python.deprecate._DeprecatedAttribute} correctly sets its
__name__ to match that of the deprecated attribute and emits a warning
when the original attribute value is accessed.
"""
name = 'ANOTHER_DEPRECATED_ATTRIBUTE'
setattr(deprecatedattributes, name, 42)
attr = deprecate._DeprecatedAttribute(
deprecatedattributes, name, self.version, self.message)
self.assertEquals(attr.__name__, name)
# Since we're accessing the value getter directly, as opposed to via
# the module proxy, we need to match the warning's stack level.
def addStackLevel():
attr.get()
# Access the deprecated attribute.
addStackLevel()
warningsShown = self.flushWarnings([
self.test_deprecatedAttributeHelper])
self.assertIdentical(warningsShown[0]['category'], DeprecationWarning)
self.assertEquals(
warningsShown[0]['message'],
self._getWarningString(name))
self.assertEquals(len(warningsShown), 1)
def test_deprecatedAttribute(self):
"""
L{twisted.python.deprecate.deprecatedModuleAttribute} wraps a
module-level attribute in an object that emits a deprecation warning
when it is accessed the first time only, while leaving other unrelated
attributes alone.
"""
# Accessing non-deprecated attributes does not issue a warning.
deprecatedattributes.ANOTHER_ATTRIBUTE
warningsShown = self.flushWarnings([self.test_deprecatedAttribute])
self.assertEquals(len(warningsShown), 0)
name = 'DEPRECATED_ATTRIBUTE'
# Access the deprecated attribute. This uses getattr to avoid repeating
# the attribute name.
getattr(deprecatedattributes, name)
warningsShown = self.flushWarnings([self.test_deprecatedAttribute])
self.assertEquals(len(warningsShown), 1)
self.assertIdentical(warningsShown[0]['category'], DeprecationWarning)
self.assertEquals(
warningsShown[0]['message'],
self._getWarningString(name))
def test_wrappedModule(self):
"""
Deprecating an attribute in a module replaces and wraps that module
instance, in C{sys.modules}, with a
L{twisted.python.deprecate._ModuleProxy} instance but only if it hasn't
already been wrapped.
"""
sys.modules[self._testModuleName] = mod = types.ModuleType('foo')
self.addCleanup(sys.modules.pop, self._testModuleName)
setattr(mod, 'first', 1)
setattr(mod, 'second', 2)
deprecate.deprecatedModuleAttribute(
Version('Twisted', 8, 0, 0),
'message',
self._testModuleName,
'first')
proxy = sys.modules[self._testModuleName]
self.assertNotEqual(proxy, mod)
deprecate.deprecatedModuleAttribute(
Version('Twisted', 8, 0, 0),
'message',
self._testModuleName,
'second')
self.assertIdentical(proxy, sys.modules[self._testModuleName])
_packageInit = """\
from twisted.python.deprecate import deprecatedModuleAttribute
from twisted.python.versions import Version
deprecatedModuleAttribute(
Version('Package', 1, 2, 3), 'message', __name__, 'module')
"""
def test_deprecatedModule(self):
"""
If L{deprecatedModuleAttribute} is used to deprecate a module attribute
of a package, only one deprecation warning is emitted when the
deprecated module is imported.
"""
base = FilePath(self.mktemp())
base.makedirs()
package = base.child('package')
package.makedirs()
package.child('__init__.py').setContent(self._packageInit)
module = package.child('module.py').setContent('')
sys.path.insert(0, base.path)
self.addCleanup(sys.path.remove, base.path)
# import package.module
from package import module
# make sure it's the right module.
self.assertEquals(module.__file__.rsplit(".", 1)[0],
package.child('module.py').path.rsplit(".", 1)[0])
warningsShown = self.flushWarnings([self.test_deprecatedModule])
self.assertEquals(len(warningsShown), 1)
class WarnAboutFunctionTests(TestCase):
"""
Tests for L{twisted.python.deprecate.warnAboutFunction} which allows the
callers of a function to issue a C{DeprecationWarning} about that function.
"""
def setUp(self):
"""
Create a file that will have known line numbers when emitting warnings.
"""
self.package = FilePath(self.mktemp()).child('twisted_private_helper')
self.package.makedirs()
self.package.child('__init__.py').setContent('')
self.package.child('module.py').setContent('''
"A module string"
from twisted.python import deprecate
def testFunction():
"A doc string"
a = 1 + 2
return a
def callTestFunction():
b = testFunction()
if b == 3:
deprecate.warnAboutFunction(testFunction, "A Warning String")
''')
sys.path.insert(0, self.package.parent().path)
self.addCleanup(sys.path.remove, self.package.parent().path)
modules = sys.modules.copy()
self.addCleanup(
lambda: (sys.modules.clear(), sys.modules.update(modules)))
def test_warning(self):
"""
L{deprecate.warnAboutFunction} emits a warning whose file and line
number point to the beginning of the implementation of the function
passed to it.
"""
def aFunc():
pass
deprecate.warnAboutFunction(aFunc, 'A Warning Message')
warningsShown = self.flushWarnings()
filename = __file__
if filename.lower().endswith('.pyc'):
filename = filename[:-1]
self.assertSamePath(
FilePath(warningsShown[0]["filename"]), FilePath(filename))
self.assertEquals(warningsShown[0]["message"], "A Warning Message")
def test_warningLineNumber(self):
"""
L{deprecate.warnAboutFunction} emits a C{DeprecationWarning} with the
number of a line within the implementation of the function passed to it.
"""
from twisted_private_helper import module
module.callTestFunction()
warningsShown = self.flushWarnings()
self.assertSamePath(
FilePath(warningsShown[0]["filename"]),
self.package.sibling('twisted_private_helper').child('module.py'))
# Line number 9 is the last line in the testFunction in the helper
# module.
self.assertEquals(warningsShown[0]["lineno"], 9)
self.assertEquals(warningsShown[0]["message"], "A Warning String")
self.assertEquals(len(warningsShown), 1)
def assertSamePath(self, first, second):
"""
Assert that the two paths are the same, considering case normalization
appropriate for the current platform.
@type first: L{FilePath}
@type second: L{FilePath}
@raise C{self.failureType}: If the paths are not the same.
"""
self.assertTrue(
normcase(first.path) == normcase(second.path),
"%r != %r" % (first, second))
def test_renamedFile(self):
"""
Even if the implementation of a deprecated function is moved around on
the filesystem, the line number in the warning emitted by
L{deprecate.warnAboutFunction} points to a line in the implementation of
the deprecated function.
"""
from twisted_private_helper import module
# Clean up the state resulting from that import; we're not going to use
# this module, so it should go away.
del sys.modules['twisted_private_helper']
del sys.modules[module.__name__]
# Rename the source directory
self.package.moveTo(self.package.sibling('twisted_renamed_helper'))
# Import the newly renamed version
from twisted_renamed_helper import module
self.addCleanup(sys.modules.pop, 'twisted_renamed_helper')
self.addCleanup(sys.modules.pop, module.__name__)
module.callTestFunction()
warningsShown = self.flushWarnings()
warnedPath = FilePath(warningsShown[0]["filename"])
expectedPath = self.package.sibling(
'twisted_renamed_helper').child('module.py')
self.assertSamePath(warnedPath, expectedPath)
self.assertEquals(warningsShown[0]["lineno"], 9)
self.assertEquals(warningsShown[0]["message"], "A Warning String")
self.assertEquals(len(warningsShown), 1)
def test_filteredWarning(self):
"""
L{deprecate.warnAboutFunction} emits a warning that will be filtered if
L{warnings.filterwarnings} is called with the module name of the
deprecated function.
"""
# Clean up anything *else* that might spuriously filter out the warning,
# such as the "always" simplefilter set up by unittest._collectWarnings.
# We'll also rely on trial to restore the original filters afterwards.
del warnings.filters[:]
warnings.filterwarnings(
action="ignore", module="twisted_private_helper")
from twisted_private_helper import module
module.callTestFunction()
warningsShown = self.flushWarnings()
self.assertEquals(len(warningsShown), 0)
def test_filteredOnceWarning(self):
"""
L{deprecate.warnAboutFunction} emits a warning that will be filtered
once if L{warnings.filterwarnings} is called with the module name of the
deprecated function and an action of module.
"""
# Clean up anything *else* that might spuriously filter out the warning,
# such as the "always" simplefilter set up by unittest._collectWarnings.
# We'll also rely on trial to restore the original filters afterwards.
del warnings.filters[:]
warnings.filterwarnings(
action="module", module="twisted_private_helper")
from twisted_private_helper import module
module.callTestFunction()
module.callTestFunction()
warningsShown = self.flushWarnings()
self.assertEquals(len(warningsShown), 1)
message = warningsShown[0]['message']
category = warningsShown[0]['category']
filename = warningsShown[0]['filename']
lineno = warningsShown[0]['lineno']
msg = warnings.formatwarning(message, category, filename, lineno)
self.assertTrue(
msg.endswith("module.py:9: DeprecationWarning: A Warning String\n"
" return a\n"),
"Unexpected warning string: %r" % (msg,))
|
agpl-3.0
|
theguardian/headphones
|
lib/gntp/cli.py
|
122
|
4143
|
# Copyright: 2013 Paul Traylor
# These sources are released under the terms of the MIT license: see LICENSE
import logging
import os
import sys
from optparse import OptionParser, OptionGroup
from gntp.notifier import GrowlNotifier
from gntp.shim import RawConfigParser
from gntp.version import __version__
DEFAULT_CONFIG = os.path.expanduser('~/.gntp')
config = RawConfigParser({
'hostname': 'localhost',
'password': None,
'port': 23053,
})
config.read([DEFAULT_CONFIG])
if not config.has_section('gntp'):
config.add_section('gntp')
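# Example ~/.gntp configuration file read above (illustrative; the keys
# mirror the defaults passed to RawConfigParser):
#
#     [gntp]
#     hostname = localhost
#     password = secret
#     port = 23053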
class ClientParser(OptionParser):
def __init__(self):
OptionParser.__init__(self, version="%%prog %s" % __version__)
group = OptionGroup(self, "Network Options")
group.add_option("-H", "--host",
dest="host", default=config.get('gntp', 'hostname'),
help="Specify a hostname to which to send a remote notification. [%default]")
group.add_option("--port",
dest="port", default=config.getint('gntp', 'port'), type="int",
help="port to listen on [%default]")
group.add_option("-P", "--password",
dest='password', default=config.get('gntp', 'password'),
help="Network password")
self.add_option_group(group)
group = OptionGroup(self, "Notification Options")
group.add_option("-n", "--name",
dest="app", default='Python GNTP Test Client',
help="Set the name of the application [%default]")
group.add_option("-s", "--sticky",
dest='sticky', default=False, action="store_true",
help="Make the notification sticky [%default]")
group.add_option("--image",
dest="icon", default=None,
help="Icon for notification (URL or /path/to/file)")
group.add_option("-m", "--message",
dest="message", default=None,
help="Sets the message instead of using stdin")
group.add_option("-p", "--priority",
dest="priority", default=0, type="int",
help="-2 to 2 [%default]")
group.add_option("-d", "--identifier",
dest="identifier",
help="Identifier for coalescing")
group.add_option("-t", "--title",
dest="title", default=None,
help="Set the title of the notification [%default]")
group.add_option("-N", "--notification",
dest="name", default='Notification',
help="Set the notification name [%default]")
group.add_option("--callback",
dest="callback",
help="URL callback")
self.add_option_group(group)
# Extra Options
self.add_option('-v', '--verbose',
dest='verbose', default=0, action='count',
help="Verbosity levels")
def parse_args(self, args=None, values=None):
values, args = OptionParser.parse_args(self, args, values)
if values.message is None:
print('Enter a message followed by Ctrl-D')
try:
message = sys.stdin.read()
except KeyboardInterrupt:
exit()
else:
message = values.message
if values.title is None:
values.title = ' '.join(args)
# If we still have an empty title, use the
# first bit of the message as the title
if values.title == '':
values.title = message[:20]
values.verbose = logging.WARNING - values.verbose * 10
return values, message
def main():
(options, message) = ClientParser().parse_args()
logging.basicConfig(level=options.verbose)
if not os.path.exists(DEFAULT_CONFIG):
logging.info('No config read found at %s', DEFAULT_CONFIG)
growl = GrowlNotifier(
applicationName=options.app,
notifications=[options.name],
defaultNotifications=[options.name],
hostname=options.host,
password=options.password,
port=options.port,
)
result = growl.register()
if result is not True:
exit(result)
# This would likely be better placed within the growl notifier
# class but until I make _checkIcon smarter this is "easier"
if options.icon is not None and not options.icon.startswith('http'):
logging.info('Loading image %s', options.icon)
f = open(options.icon, 'rb')  # read image data in binary mode
options.icon = f.read()
f.close()
result = growl.notify(
noteType=options.name,
title=options.title,
description=message,
icon=options.icon,
sticky=options.sticky,
priority=options.priority,
callback=options.callback,
identifier=options.identifier,
)
if result is not True:
exit(result)
if __name__ == "__main__":
main()
|
gpl-3.0
|
tilenkranjc/cellprofiler-plugins
|
MeasurementRWC.py
|
1
|
20417
|
'''<b>Measure RWC</b> measures the Rank Weighted Correlation (RWC) between intensities in different images (e.g.,
different color channels) on a pixel-by-pixel basis, within identified
objects or across an entire image
<hr>
Given two or more images, this module calculates the RWC between the
pixel intensities. The RWC can be measured for entire
images, or within each individual object.
RWC was developed by Dr. Vasanth R. Singan. Please cite the following paper:
Singan VR, Jones TR, Curran KM, Simpson JC. Dual channel rank-based intensity
weighting for quantitative co-localization of microscopy images.
BMC Bioinformatics. 2011;12:407.
<h4>Available measurements</h4>
<ul>
<li><i>RWC I over J:</i> The RWC of image I over image J. </li>
<li><i>RWC J over I:</i> The RWC of image J over image I. </li>
</ul>
RWCs will be calculated between all pairs of images that are selected in
the module, as well as between selected objects. For example, if RWCs
are to be measured for a set of red, green, and blue images containing identified nuclei,
measurements will be made between the following:
<ul>
<li>The blue and green, red and green, and red and blue images. </li>
<li>The nuclei in each of the above image pairs.</li>
</ul>
'''
# CellProfiler is distributed under the GNU General Public License.
# See the accompanying file LICENSE for details.
#
# Developed by the Broad Institute
# Copyright 2003-2010
#
# Please see the AUTHORS file for credits.
#
# Website: http://www.cellprofiler.org
__version__="$Revision$"
import numpy as np
from scipy.linalg import lstsq
import scipy.ndimage as scind
import cellprofiler.cpmodule as cpm
import cellprofiler.objects as cpo
import cellprofiler.settings as cps
import cellprofiler.measurements as cpmeas
from cellprofiler.cpmath.cpmorphology import fixup_scipy_ndimage_result as fix
M_IMAGES = "Across entire image"
M_OBJECTS = "Within objects"
M_IMAGES_AND_OBJECTS = "Both"
'''Feature name format for the correlation measurement'''
F_CORRELATION_FORMAT = "Correlation_RWC_%s_%s"
'''Feature name format for the slope measurement'''
F_SLOPE_FORMAT = "Correlation_Slope_%s_%s"
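# The per-pixel RWC computation used in run_image_pair_images() and
# run_image_pair_objects() below, restated as a self-contained sketch.
# Illustrative only: _rwc_sketch is not part of the module's API, and the
# inline code in the methods remains the authoritative implementation.
def _rwc_sketch(fi, si, t=0.15):
    '''Return (RWC I over J, RWC J over I) for two 1-D intensity arrays.'''
    # Rank pixels so the brightest pixel in each channel gets rank 1;
    # np.unique's return_inverse assigns equal intensities equal ranks.
    arank = np.unique(fi, return_inverse=True)[1]
    brank = np.unique(si, return_inverse=True)[1]
    arank = arank.max() + 1 - arank.astype(float)
    brank = brank.max() + 1 - brank.astype(float)
    # Weight each pixel by how well its ranks agree across the channels.
    rn = max(arank.max(), brank.max())
    w = (rn - np.absolute(arank - brank)) / rn
    # Keep pixels above the fractional intensity threshold in each channel.
    above_a = fi > t * fi.max()
    above_b = si > t * si.max()
    rwc1 = np.sum(fi * above_a * above_b * w) / np.sum(fi * above_a)
    rwc2 = np.sum(si * above_a * above_b * w) / np.sum(si * above_b)
    return rwc1, rwc2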
class MeasureRWC(cpm.CPModule):
module_name = 'MeasureRWC'
category = 'Measurement'
variable_revision_number = 1
def create_settings(self):
'''Create the initial settings for the module'''
self.manual_threshold = cps.Float("Enter threshold:", .15)
self.image_groups = []
self.add_image(can_delete = False)
self.spacer_1 = cps.Divider()
self.add_image(can_delete = False)
self.image_count = cps.HiddenCount(self.image_groups)
self.add_image_button = cps.DoSomething("", 'Add another image', self.add_image)
self.spacer_2 = cps.Divider()
self.images_or_objects = cps.Choice('Select where to measure correlation',
[M_IMAGES, M_OBJECTS, M_IMAGES_AND_OBJECTS],
doc = '''
Do you want to measure the RWC over the whole image,
within objects, or both?
Both methods measure RWC on a pixel-by-pixel basis.
Selecting <i>Objects</i> will measure RWC only in those pixels previously
identified as an object (you will be asked to specify which object). Selecting
<i>Images</i> will measure RWC across all pixels in the images.
<i>Images and objects</i> will calculate both measurements.''')
self.object_groups = []
self.add_object(can_delete = False)
self.object_count = cps.HiddenCount(self.object_groups)
self.spacer_2 = cps.Divider(line=True)
self.add_object_button = cps.DoSomething("", 'Add another object', self.add_object)
def add_image(self, can_delete = True):
'''Add an image to the image_groups collection
can_delete - set this to False to keep from showing the "remove"
button for images that must be present.
'''
group = cps.SettingsGroup()
if can_delete:
group.append("divider", cps.Divider(line=False))
group.append("image_name", cps.ImageNameSubscriber('Select an image to measure','None',
doc = '''What is the name of an image to be measured?'''))
if len(self.image_groups) == 0: # Insert space between 1st two images for aesthetics
group.append("extra_divider", cps.Divider(line=False))
if can_delete:
group.append("remover", cps.RemoveSettingButton("","Remove this image", self.image_groups, group))
self.image_groups.append(group)
def add_object(self, can_delete = True):
'''Add an object to the object_groups collection'''
group = cps.SettingsGroup()
if can_delete:
group.append("divider", cps.Divider(line=False))
group.append("object_name", cps.ObjectNameSubscriber('Select an object to measure','None',
doc = '''What is the name of objects to be measured?'''))
if can_delete:
group.append("remover", cps.RemoveSettingButton('', 'Remove this object', self.object_groups, group))
self.object_groups.append(group)
def settings(self):
'''Return the settings to be saved in the pipeline'''
result = [self.image_count, self.object_count]
result += [image_group.image_name for image_group in self.image_groups]
result += [self.images_or_objects]
result += [object_group.object_name for object_group in self.object_groups]
return result
def prepare_settings(self, setting_values):
'''Make sure there are the right number of image and object slots for the incoming settings'''
image_count = int(setting_values[0])
object_count = int(setting_values[1])
if image_count < 2:
raise ValueError("The MeasureRWC module must have at least two input images. %d found in pipeline file"%image_count)
del self.image_groups[image_count:]
while len(self.image_groups) < image_count:
self.add_image()
del self.object_groups[object_count:]
while len(self.object_groups) < object_count:
self.add_object()
def visible_settings(self):
result = []
for image_group in self.image_groups:
result += image_group.visible_settings()
result += [self.add_image_button, self.spacer_2, self.images_or_objects, self.manual_threshold]
if self.wants_objects():
for object_group in self.object_groups:
result += object_group.visible_settings()
result += [self.add_object_button]
return result
def get_image_pairs(self):
'''Yield all combinations of pairs of images to correlate
Yields the pairs of images in a canonical order.
'''
for i in range(self.image_count.value-1):
for j in range(i+1, self.image_count.value):
yield (self.image_groups[i].image_name.value,
self.image_groups[j].image_name.value)
def wants_images(self):
'''True if the user wants to measure correlation on whole images'''
return self.images_or_objects in (M_IMAGES, M_IMAGES_AND_OBJECTS)
def wants_objects(self):
'''True if the user wants to measure per-object correlations'''
return self.images_or_objects in (M_OBJECTS, M_IMAGES_AND_OBJECTS)
def run(self, workspace):
'''Calculate measurements on an image set'''
statistics = [["First image","Second image","Objects","Measurement","Value"]]
for first_image_name, second_image_name in self.get_image_pairs():
if self.wants_images():
statistics += self.run_image_pair_images(workspace,
first_image_name,
second_image_name)
if self.wants_objects():
for object_name in [group.object_name.value for group in self.object_groups]:
statistics += self.run_image_pair_objects(workspace,
first_image_name,
second_image_name,
object_name)
if workspace.frame is not None:
figure = workspace.create_or_find_figure(title="MeasureRWC, image cycle #%d"%(
workspace.measurements.image_set_number),subplots=(1,1))
figure.subplot_table(0,0,statistics,(0.2,0.2,0.2,0.2,0.2))
def run_image_pair_images(self, workspace, first_image_name,
second_image_name):
'''Calculate the correlation between the pixels of two images'''
first_image = workspace.image_set.get_image(first_image_name,
must_be_grayscale=True)
second_image = workspace.image_set.get_image(second_image_name,
must_be_grayscale=True)
first_pixel_data = first_image.pixel_data
first_mask = first_image.mask
first_pixel_count = np.product(first_pixel_data.shape)
second_pixel_data = second_image.pixel_data
second_mask = second_image.mask
second_pixel_count = np.product(second_pixel_data.shape)
#
# Crop the larger image similarly to the smaller one
#
if first_pixel_count < second_pixel_count:
second_pixel_data = first_image.crop_image_similarly(second_pixel_data)
second_mask = first_image.crop_image_similarly(second_mask)
elif second_pixel_count < first_pixel_count:
first_pixel_data = second_image.crop_image_similarly(first_pixel_data)
first_mask = second_image.crop_image_similarly(first_mask)
mask = (first_mask & second_mask &
(~ np.isnan(first_pixel_data)) &
(~ np.isnan(second_pixel_data)))
if np.any(mask):
#
# Compute the rank weighted correlation between the two channels'
# masked pixels (see the module docstring for the method).
#
fi = first_pixel_data[mask]
si = second_pixel_data[mask]
# Do the ranking
au,arank=np.unique(fi,return_inverse=True)
bu,brank=np.unique(si,return_inverse=True)
# Reverse ranking
amax=np.max(arank)+1
bmax=np.max(brank)+1
arank = -(arank.astype(float)-amax)
brank = -(brank.astype(float)-bmax)
# Measure absolute difference in ranks
d=np.absolute(arank-brank)
# Get the maximal ranking
rn=np.max(np.hstack((arank,brank)))
# Calculate weights matrix
w=(rn-d)/rn
# Thresholding and RWC calculations
t = self.manual_threshold.value
ta=t*np.max(fi)
tb=t*np.max(si)
a1=np.array(fi, copy=True)
b1=np.array(si, copy=True)
a1[fi<=ta]=0
asum=np.sum(a1)
a1[si<=tb]=0
rwc1=np.sum(a1.flatten()*w)/asum
b1[si<=tb]=0
bsum=np.sum(b1)
b1[fi<=ta]=0
rwc2=np.sum(b1.flatten()*w)/bsum
else:
rwc1 = np.NaN
rwc2 = np.NaN
#
# Add the measurements
#
rwc1_measurement = F_CORRELATION_FORMAT%(first_image_name,
second_image_name)
rwc2_measurement = F_CORRELATION_FORMAT%(second_image_name,
first_image_name)
workspace.measurements.add_image_measurement(rwc1_measurement, rwc1)
workspace.measurements.add_image_measurement(rwc2_measurement, rwc2)
return [[first_image_name, second_image_name, "-", "Correlation","%.2f"%rwc1],
[second_image_name, first_image_name, "-", "Correlation","%.2f"%rwc2]]
def run_image_pair_objects(self, workspace, first_image_name,
second_image_name, object_name):
'''Calculate per-object correlations between intensities in two images'''
first_image = workspace.image_set.get_image(first_image_name,
must_be_grayscale=True)
second_image = workspace.image_set.get_image(second_image_name,
must_be_grayscale=True)
objects = workspace.object_set.get_objects(object_name)
#
# Crop both images to the size of the labels matrix
#
labels = objects.segmented
try:
first_pixels = objects.crop_image_similarly(first_image.pixel_data)
first_mask = objects.crop_image_similarly(first_image.mask)
except ValueError:
first_pixels, m1 = cpo.size_similarly(labels, first_image.pixel_data)
first_mask, m1 = cpo.size_similarly(labels, first_image.mask)
first_mask[~m1] = False
try:
second_pixels = objects.crop_image_similarly(second_image.pixel_data)
second_mask = objects.crop_image_similarly(second_image.mask)
except ValueError:
second_pixels, m1 = cpo.size_similarly(labels, second_image.pixel_data)
second_mask, m1 = cpo.size_similarly(labels, second_image.mask)
second_mask[~m1] = False
mask = ((labels > 0) & first_mask & second_mask)
first_pixels = first_pixels[mask]
second_pixels = second_pixels[mask]
labels = labels[mask]
if len(labels)==0:
n_objects = 0
else:
n_objects = np.max(labels)
if n_objects == 0:
rwc1 = np.zeros((0,))
rwc2 = np.zeros((0,))
else:
object_labels = np.unique(labels)
rwc1 = np.zeros(np.shape(object_labels))
rwc2 = np.zeros(np.shape(object_labels))
for index, oindex in enumerate(object_labels):
fi = first_pixels[labels==oindex]
si = second_pixels[labels==oindex]
# Do the ranking
au,arank=np.unique(fi,return_inverse=True)
bu,brank=np.unique(si,return_inverse=True)
# Reverse ranking
amax=np.max(arank)+1
bmax=np.max(brank)+1
arank = -(arank.astype(float)-amax)
brank = -(brank.astype(float)-bmax)
# Measure absolute difference in ranks
d=np.absolute(arank-brank)
# Get the maximal ranking
rn=np.max(np.hstack((arank,brank)))
# Calculate weights matrix
w=(rn-d)/rn
# Thresholding and RWC calculations
t = self.manual_threshold.value
ta=t*np.max(fi)
tb=t*np.max(si)
a1=np.array(fi, copy=True)
b1=np.array(si, copy=True)
a1[fi<=ta]=0
asum=np.sum(a1)
a1[si<=tb]=0
rwc1_temp=np.sum(a1.flatten()*w)/asum
b1[si<=tb]=0
bsum=np.sum(b1)
b1[fi<=ta]=0
rwc2_temp=np.sum(b1.flatten()*w)/bsum
# Store the per-object RWC values, indexing by position so that
# non-contiguous label numbers cannot overflow the result arrays.
rwc1[index] = rwc1_temp
rwc2[index] = rwc2_temp
rwc1_measurement = ("Correlation_RWC_%s_%s" %
(first_image_name, second_image_name))
rwc2_measurement = ("Correlation_RWC_%s_%s" %
(second_image_name, first_image_name))
workspace.measurements.add_measurement(object_name, rwc1_measurement, rwc1)
workspace.measurements.add_measurement(object_name, rwc2_measurement, rwc2)
if n_objects == 0:
return [[first_image_name, second_image_name, object_name,
"Mean correlation","-"],
[first_image_name, second_image_name, object_name,
"Median correlation","-"],
[first_image_name, second_image_name, object_name,
"Min correlation","-"],
[first_image_name, second_image_name, object_name,
"Max correlation","-"]]
else:
return [[first_image_name, second_image_name, object_name,
"Mean correlation","%.2f"%np.mean(rwc1)],
[first_image_name, second_image_name, object_name,
"Median correlation","%.2f"%np.median(rwc1)],
[first_image_name, second_image_name, object_name,
"Min correlation","%.2f"%np.min(rwc1)],
[first_image_name, second_image_name, object_name,
"Max correlation","%.2f"%np.max(rwc1)]]
def get_measurement_columns(self, pipeline):
'''Return column definitions for all measurements made by this module'''
columns = []
for first_image, second_image in self.get_image_pairs():
if self.wants_images():
columns += [(cpmeas.IMAGE,
F_CORRELATION_FORMAT%(first_image, second_image),
cpmeas.COLTYPE_FLOAT),
(cpmeas.IMAGE,
F_CORRELATION_FORMAT%(second_image, first_image),
cpmeas.COLTYPE_FLOAT)]
if self.wants_objects():
for i in range(self.object_count.value):
object_name = self.object_groups[i].object_name.value
columns += [(object_name,
F_CORRELATION_FORMAT %
(first_image, second_image),
cpmeas.COLTYPE_FLOAT),
(object_name,
F_CORRELATION_FORMAT %
(second_image, first_image),
cpmeas.COLTYPE_FLOAT)]
return columns
def get_categories(self, pipeline, object_name):
'''Return the categories supported by this module for the given object
object_name - name of the measured object or cpmeas.IMAGE
'''
if ((object_name == cpmeas.IMAGE and self.wants_images()) or
((object_name != cpmeas.IMAGE) and self.wants_objects() and
(object_name in [x.object_name.value for x in self.object_groups]))):
return ["Correlation"]
return []
def get_measurements(self, pipeline, object_name, category):
if self.get_categories(pipeline, object_name) == [category]:
return ["RWC"]
return []
def get_measurement_images(self, pipeline, object_name, category,
measurement):
'''Return the joined pairs of images measured'''
if measurement in self.get_measurements(pipeline, object_name, category):
result = []
for x in self.get_image_pairs():
    # += with a string would extend the list character by character;
    # append the joined pair names instead.
    result.append("%s_%s" % (x[0], x[1]))
    result.append("%s_%s" % (x[1], x[0]))
return result
return []
|
gpl-2.0
|
orlandoacevedo/MCGPU
|
gtest/test/gtest_help_test.py
|
2968
|
5856
|
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the --help flag of Google C++ Testing Framework.
SYNOPSIS
gtest_help_test.py --build_dir=BUILD/DIR
# where BUILD/DIR contains the built gtest_help_test_ file.
gtest_help_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import gtest_test_utils
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
IS_WINDOWS = os.name == 'nt'
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_help_test_')
FLAG_PREFIX = '--gtest_'
DEATH_TEST_STYLE_FLAG = FLAG_PREFIX + 'death_test_style'
STREAM_RESULT_TO_FLAG = FLAG_PREFIX + 'stream_result_to'
UNKNOWN_FLAG = FLAG_PREFIX + 'unknown_flag_for_testing'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
INCORRECT_FLAG_VARIANTS = [re.sub('^--', '-', LIST_TESTS_FLAG),
re.sub('^--', '/', LIST_TESTS_FLAG),
re.sub('_', '-', LIST_TESTS_FLAG)]
INTERNAL_FLAG_FOR_TESTING = FLAG_PREFIX + 'internal_flag_for_testing'
SUPPORTS_DEATH_TESTS = "DeathTest" in gtest_test_utils.Subprocess(
[PROGRAM_PATH, LIST_TESTS_FLAG]).output
# The help message must match this regex.
HELP_REGEX = re.compile(
FLAG_PREFIX + r'list_tests.*' +
FLAG_PREFIX + r'filter=.*' +
FLAG_PREFIX + r'also_run_disabled_tests.*' +
FLAG_PREFIX + r'repeat=.*' +
FLAG_PREFIX + r'shuffle.*' +
FLAG_PREFIX + r'random_seed=.*' +
FLAG_PREFIX + r'color=.*' +
FLAG_PREFIX + r'print_time.*' +
FLAG_PREFIX + r'output=.*' +
FLAG_PREFIX + r'break_on_failure.*' +
FLAG_PREFIX + r'throw_on_failure.*' +
FLAG_PREFIX + r'catch_exceptions=0.*',
re.DOTALL)
def RunWithFlag(flag):
"""Runs gtest_help_test_ with the given flag.
Args:
flag: the command-line flag to pass to gtest_help_test_, or None.
Returns:
the exit code and the text output as a tuple.
"""
if flag is None:
command = [PROGRAM_PATH]
else:
command = [PROGRAM_PATH, flag]
child = gtest_test_utils.Subprocess(command)
return child.exit_code, child.output
class GTestHelpTest(gtest_test_utils.TestCase):
"""Tests the --help flag and its equivalent forms."""
def TestHelpFlag(self, flag):
"""Verifies correct behavior when help flag is specified.
The right message must be printed and the tests must be
skipped when the given flag is specified.
Args:
flag: A flag to pass to the binary or None.
"""
exit_code, output = RunWithFlag(flag)
self.assertEquals(0, exit_code)
self.assert_(HELP_REGEX.search(output), output)
if IS_LINUX:
self.assert_(STREAM_RESULT_TO_FLAG in output, output)
else:
self.assert_(STREAM_RESULT_TO_FLAG not in output, output)
if SUPPORTS_DEATH_TESTS and not IS_WINDOWS:
self.assert_(DEATH_TEST_STYLE_FLAG in output, output)
else:
self.assert_(DEATH_TEST_STYLE_FLAG not in output, output)
def TestNonHelpFlag(self, flag):
"""Verifies that when no help flag is specified, the tests are run
and the help message is not printed.
Args:
flag: A flag to pass to the binary or None.
"""
exit_code, output = RunWithFlag(flag)
self.assert_(exit_code != 0)
self.assert_(not HELP_REGEX.search(output), output)
def testPrintsHelpWithFullFlag(self):
self.TestHelpFlag('--help')
def testPrintsHelpWithShortFlag(self):
self.TestHelpFlag('-h')
def testPrintsHelpWithQuestionFlag(self):
self.TestHelpFlag('-?')
def testPrintsHelpWithWindowsStyleQuestionFlag(self):
self.TestHelpFlag('/?')
def testPrintsHelpWithUnrecognizedGoogleTestFlag(self):
self.TestHelpFlag(UNKNOWN_FLAG)
def testPrintsHelpWithIncorrectFlagStyle(self):
for incorrect_flag in INCORRECT_FLAG_VARIANTS:
self.TestHelpFlag(incorrect_flag)
def testRunsTestsWithoutHelpFlag(self):
"""Verifies that when no help flag is specified, the tests are run
and the help message is not printed."""
self.TestNonHelpFlag(None)
def testRunsTestsWithGtestInternalFlag(self):
"""Verifies that the tests are run and no help message is printed when
a flag starting with Google Test prefix and 'internal_' is supplied."""
self.TestNonHelpFlag(INTERNAL_FLAG_FOR_TESTING)
if __name__ == '__main__':
gtest_test_utils.Main()
|
gpl-3.0
|
mythnc/gbf-bot
|
gbf_bot/slime_blasting.py
|
1
|
1115
|
import logging
import random
import time
import pyautogui
from .constants import slime_blasting_config as config
from .components import Button, Mouse
logger = logging.getLogger(__name__)
start = Button("start.png", config["start"])
character1 = Button("character.png", config["character1"])
skill = Button("skill.png", config["skill"])
back = Button("back.png", config["back"])
def activate():
logger.info("slime blasting start")
pyautogui.PAUSE = 1.2
# click twice for window choice
logger.info("click start")
start.double_click()
# AP will be checked before next step
# make sure AP is enough
time.sleep(5 + random.random() * 0.5)
logger.info("click character")
character1.double_click()
time.sleep(random.random() * 0.25)
logger.info("click skill")
skill.click()
time.sleep(random.random() * 0.25)
logger.info("click back")
back.click()
time.sleep(1 + random.random() * 0.25)
# battle result
logger.info("click back again")
Mouse.click_again()
time.sleep(random.random() * 0.25)
logger.info("slime blasting end")
|
mit
|
birocorneliu/youtube-dl
|
youtube_dl/extractor/kickstarter.py
|
111
|
2654
|
# encoding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class KickStarterIE(InfoExtractor):
_VALID_URL = r'https?://www\.kickstarter\.com/projects/(?P<id>[^/]*)/.*'
_TESTS = [{
'url': 'https://www.kickstarter.com/projects/1404461844/intersection-the-story-of-josh-grant?ref=home_location',
'md5': 'c81addca81327ffa66c642b5d8b08cab',
'info_dict': {
'id': '1404461844',
'ext': 'mp4',
'title': 'Intersection: The Story of Josh Grant by Kyle Cowling',
'description': (
'A unique motocross documentary that examines the '
'life and mind of one of sports most elite athletes: Josh Grant.'
),
},
}, {
'note': 'Embedded video (not using the native kickstarter video service)',
'url': 'https://www.kickstarter.com/projects/597507018/pebble-e-paper-watch-for-iphone-and-android/posts/659178',
'info_dict': {
'id': '78704821',
'ext': 'mp4',
'uploader_id': 'pebble',
'uploader': 'Pebble Technology',
'title': 'Pebble iOS Notifications',
}
}, {
'url': 'https://www.kickstarter.com/projects/1420158244/power-drive-2000/widget/video.html',
'info_dict': {
'id': '1420158244',
'ext': 'mp4',
'title': 'Power Drive 2000',
},
'expected_warnings': ['OpenGraph description'],
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(
r'<title>\s*(.*?)(?:\s*— Kickstarter)?\s*</title>',
webpage, 'title')
video_url = self._search_regex(
r'data-video-url="(.*?)"',
webpage, 'video URL', default=None)
if video_url is None: # No native kickstarter, look for embedded videos
return {
'_type': 'url_transparent',
'ie_key': 'Generic',
'url': url,
'title': title,
}
thumbnail = self._og_search_thumbnail(webpage, default=None)
if thumbnail is None:
thumbnail = self._html_search_regex(
r'<img[^>]+class="[^"]+\s*poster\s*[^"]+"[^>]+src="([^"]+)"',
webpage, 'thumbnail image', fatal=False)
return {
'id': video_id,
'url': video_url,
'title': title,
'description': self._og_search_description(webpage),
'thumbnail': thumbnail,
}
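# Illustrative usage sketch (assumes youtube-dl's public YoutubeDL API;
# not part of this extractor):
#
#     from youtube_dl import YoutubeDL
#     with YoutubeDL({'quiet': True}) as ydl:
#         info = ydl.extract_info(
#             'https://www.kickstarter.com/projects/1404461844/'
#             'intersection-the-story-of-josh-grant', download=False)
#         print(info['title'])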
|
unlicense
|
xchenum/quantum
|
quantum/tests/unit/database_stubs.py
|
7
|
6968
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011, Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Rohit Agarwalla, Cisco Systems, Inc.
"""
stubs.py provides interface methods for
the database test cases
"""
import logging
from quantum.db import api as db
LOG = logging.getLogger('quantum.tests.database_stubs')
class QuantumDB(object):
"""Class consisting of methods to call Quantum db methods"""
def get_all_networks(self, tenant_id):
"""Get all networks"""
nets = []
try:
for net in db.network_list(tenant_id):
LOG.debug("Getting network: %s", net.uuid)
net_dict = {}
net_dict["tenant_id"] = net.tenant_id
net_dict["id"] = str(net.uuid)
net_dict["name"] = net.name
nets.append(net_dict)
except Exception, exc:
LOG.error("Failed to get all networks: %s", str(exc))
return nets
def get_network(self, network_id):
"""Get a network"""
# Use a separate list so the loop variable does not shadow it.
nets = []
try:
    for net in db.network_get(network_id):
        LOG.debug("Getting network: %s", net.uuid)
        net_dict = {}
        net_dict["tenant_id"] = net.tenant_id
        net_dict["id"] = str(net.uuid)
        net_dict["name"] = net.name
        nets.append(net_dict)
except Exception, exc:
    LOG.error("Failed to get network: %s", str(exc))
return nets
def create_network(self, tenant_id, net_name):
"""Create a network"""
net_dict = {}
try:
res = db.network_create(tenant_id, net_name)
LOG.debug("Created network: %s", res.uuid)
net_dict["tenant_id"] = res.tenant_id
net_dict["id"] = str(res.uuid)
net_dict["name"] = res.name
return net_dict
except Exception, exc:
LOG.error("Failed to create network: %s", str(exc))
def delete_network(self, net_id):
"""Delete a network"""
try:
net = db.network_destroy(net_id)
LOG.debug("Deleted network: %s", net.uuid)
net_dict = {}
net_dict["id"] = str(net.uuid)
return net_dict
except Exception, exc:
LOG.error("Failed to delete network: %s", str(exc))
def update_network(self, tenant_id, net_id, param_data):
"""Rename a network"""
try:
print param_data
net = db.network_update(net_id, tenant_id, **param_data)
LOG.debug("Updated network: %s", net.uuid)
net_dict = {}
net_dict["id"] = str(net.uuid)
net_dict["name"] = net.name
return net_dict
except Exception, exc:
LOG.error("Failed to update network: %s", str(exc))
def get_all_ports(self, net_id):
"""Get all ports"""
ports = []
try:
for port in db.port_list(net_id):
LOG.debug("Getting port: %s", port.uuid)
port_dict = {}
port_dict["id"] = str(port.uuid)
port_dict["net-id"] = str(port.network_id)
port_dict["attachment"] = port.interface_id
port_dict["state"] = port.state
ports.append(port_dict)
return ports
except Exception, exc:
LOG.error("Failed to get all ports: %s", str(exc))
def get_port(self, net_id, port_id):
"""Get a port"""
port_list = []
port = db.port_get(port_id, net_id)
try:
LOG.debug("Getting port: %s", port.uuid)
port_dict = {}
port_dict["id"] = str(port.uuid)
port_dict["net-id"] = str(port.network_id)
port_dict["attachment"] = port.interface_id
port_dict["state"] = port.state
port_list.append(port_dict)
return port_list
except Exception, exc:
LOG.error("Failed to get port: %s", str(exc))
def create_port(self, net_id):
"""Add a port"""
port_dict = {}
try:
port = db.port_create(net_id)
LOG.debug("Creating port %s", port.uuid)
port_dict["id"] = str(port.uuid)
port_dict["net-id"] = str(port.network_id)
port_dict["attachment"] = port.interface_id
port_dict["state"] = port.state
return port_dict
except Exception, exc:
LOG.error("Failed to create port: %s", str(exc))
def delete_port(self, net_id, port_id):
"""Delete a port"""
try:
port = db.port_destroy(port_id, net_id)
LOG.debug("Deleted port %s", port.uuid)
port_dict = {}
port_dict["id"] = str(port.uuid)
return port_dict
except Exception, exc:
LOG.error("Failed to delete port: %s", str(exc))
def update_port(self, net_id, port_id, **kwargs):
"""Update a port"""
try:
port = db.port_update(port_id, net_id, **kwargs)
LOG.debug("Updated port %s", port.uuid)
port_dict = {}
port_dict["id"] = str(port.uuid)
port_dict["net-id"] = str(port.network_id)
port_dict["attachment"] = port.interface_id
port_dict["state"] = port.state
return port_dict
except Exception, exc:
LOG.error("Failed to update port state: %s", str(exc))
def plug_interface(self, net_id, port_id, int_id):
"""Plug interface to a port"""
try:
port = db.port_set_attachment(port_id, net_id, int_id)
LOG.debug("Attached interface to port %s", port.uuid)
port_dict = {}
port_dict["id"] = str(port.uuid)
port_dict["net-id"] = str(port.network_id)
port_dict["attachment"] = port.interface_id
port_dict["state"] = port.state
return port_dict
except Exception, exc:
LOG.error("Failed to plug interface: %s", str(exc))
def unplug_interface(self, net_id, port_id):
"""Unplug interface to a port"""
try:
db.port_unset_attachment(port_id, net_id)
LOG.debug("Detached interface from port %s", port_id)
except Exception, exc:
LOG.error("Failed to unplug interface: %s", str(exc))
|
apache-2.0
|
jmacmahon/invenio
|
modules/websearch/lib/websearch_external_collections_templates.py
|
3
|
7238
|
# -*- coding: utf-8 -*-
# This file is part of Invenio.
# Copyright (C) 2006, 2007, 2008, 2010, 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Template for the external collections search."""
__revision__ = "$Id$"
import cgi
from invenio.config import CFG_SITE_LANG
from invenio.messages import gettext_set_language
from invenio.urlutils import create_html_link
class Template:
"""Template class for the external collection search. To be loaded with template.load()"""
def __init__(self):
pass
def external_collection_seealso_box(self, lang, links,
prolog_start='<table class="externalcollectionsbox"><tr><th colspan="2" class="externalcollectionsboxheader">',
prolog_end='</th></tr><tr><td class="externalcollectionsboxbody">',
column_separator='</td><td class="externalcollectionsboxbody">',
link_separator= '<br />', epilog='</td></tr></table>'):
"""Creates the box that proposes links to other useful search engines like Google.
lang: string - The language to display in
links: list of string - List of links to display in the box
prolog_start, prolog_end, column_separator, link_separator, epilog: strings -
default HTML code for the specified position in the box"""
_ = gettext_set_language(lang)
out = ""
if links:
out += '<a name="externalcollectionsbox"></a>'
out += prolog_start
out += _("Haven't found what you were looking for? Try your search on other servers:")
out += prolog_end
nb_out_links_in_one_column = len(links)/2 + len(links) % 2
out += link_separator.join(links[:nb_out_links_in_one_column])
out += column_separator
out += link_separator.join(links[nb_out_links_in_one_column:])
out += epilog
return out
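# Illustrative call sketch (the link string below is made up):
#
#     box = Template().external_collection_seealso_box(
#         'en', ['<a href="http://www.example.org/search">Example engine</a>'])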
def external_collection_overview(self, lang=CFG_SITE_LANG, engine_list=()):
"""Prints results overview box with links to particular collections below.
lang: The language to display
engine_list: The external engines to be used"""
if len(engine_list) < 1:
return ""
_ = gettext_set_language(lang)
out = """
<table class="externalcollectionsresultsbox">
<thead>
<tr>
<th class="externalcollectionsresultsboxheader"><strong>%s</strong></th>
</tr>
</thead>
<tbody>
<tr>
<td class="externalcollectionsresultsboxbody"> """ % _("External collections results overview:")
for engine in engine_list:
internal_name = get_link_name(engine.name)
name = _(engine.name)
out += """<strong><a href="#%(internal_name)s">%(name)s</a></strong><br />""" % locals()
out += """
</td>
</tr>
</tbody>
</table>
"""
return out
def print_info_line(req,
html_external_engine_name_box,
html_external_engine_nb_results_box,
html_external_engine_nb_seconds_box):
"""Print on req an information line about results of an external collection search."""
req.write('<table class="externalcollectionsresultsbox"><tr>')
req.write('<td class="externalcollectionsresultsboxheader">')
req.write('<big><strong>' + \
html_external_engine_name_box + \
'</strong></big>')
req.write(' ')
req.write(html_external_engine_nb_results_box)
req.write('</td><td class="externalcollectionsresultsboxheader" width="20%" align="right">')
req.write('<small>' + \
html_external_engine_nb_seconds_box + \
'</small>')
req.write('</td></tr></table><br />')
def print_timeout(req, lang, engine, name, url):
"""Print info line for timeout."""
_ = gettext_set_language(lang)
req.write('<a name="%s"></a>' % get_link_name(engine.name))
print_info_line(req,
create_html_link(url, {}, name, {}, False, False),
'',
_('Search timed out.'))
message = _("The external search engine has not responded in time. You can check its results here:")
req.write(message + ' ' + create_html_link(url, {}, name, {}, False, False) + '<br />')
def get_link_name(name):
"""Return a hash string for the string name."""
return hex(abs(name.__hash__()))
def print_results(req, lang, pagegetter, infos, current_time, print_search_info=True, print_body=True):
"""Print results of a given search engine.
current_time is actually the duration of the request, expressed in seconds.
"""
_ = gettext_set_language(lang)
url = infos[0]
engine = infos[1]
internal_name = get_link_name(engine.name)
name = _(engine.name)
base_url = engine.base_url
results = engine.parser.parse_and_get_results(pagegetter.data)
html_tit = make_url(name, base_url)
if print_search_info:
num = format_number(engine.parser.parse_num_results())
if num:
if num == '0':
html_num = _('No results found.')
html_sec = ''
else:
html_num = '<strong>' + \
make_url(_('%s results found') % num, url) + \
'</strong>'
html_sec = '(' + _('%s seconds') % ('%2.2f' % current_time) + ')'
else:
html_num = _('No results found.')
html_sec = ''
req.write('<a name="%(internal_name)s"></a>' % locals())
print_info_line(req,
html_tit,
html_num,
html_sec)
if print_body:
for result in results:
req.write(result.html + '<br />')
if not results:
req.write(_('No results found.') + '<br />')
def make_url(name, url):
if url:
return '<a href="' + cgi.escape(url) + '">' + name + '</a>'
else:
return name
def format_number(num, separator=','):
"""Format a number by separating thousands with a separator (by default a comma)
>>> format_number(10)
'10'
>>> format_number(10000)
'10,000'
>>> format_number(' 000213212424249 ', '.')
'213.212.424.249'
"""
result = ""
try:
num = int(num)
except:
return None
if num == 0:
return '0'
while num > 0:
part = num % 1000
num = num / 1000
result = "%03d" % part + separator + result
return result.strip('0').strip(separator)
|
gpl-2.0
|
volatilityfoundation/volatility
|
volatility/plugins/linux/threads.py
|
4
|
3427
|
# Volatility
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
@author: Edwin Smulders
@license: GNU General Public License 2.0 or later
@contact: mail@edwinsmulders.eu
"""
import volatility.plugins.linux.pslist as linux_pslist
from volatility.renderers.basic import Address
from volatility.renderers import TreeGrid
class linux_threads(linux_pslist.linux_pslist):
""" Prints threads of processes """
def unified_output(self, data):
return TreeGrid([("Offset",Address),
("NameProc",str),
("TGID",int),
("ThreadPid",str),
("ThreadName", str),
("thread_offset",Address),
("Addr_limit",Address),
("uid_cred",int),
("gid_cred",int),
("euid_cred",int)
],
self.generator(data))
def generator(self, data):
for task in data:
euidcred = task.euid
uidcred = task.uid
gidcred = task.gid
for thread in task.threads():
addr_limit = self.get_addr_limit(thread)
yield(0,[Address(task.obj_offset),
str(task.comm),
int(task.tgid),
str(thread.pid),
str(thread.comm),
Address(thread.obj_offset),
Address(addr_limit),
int(uidcred),
int(gidcred),
int(euidcred)
])
def get_addr_limit(self, thread, addrvar_offset=8):
"""
Here we read the addr_limit variable of a thread by reading at the offset of the thread plus
the offset of the addr_limit variable inside the thread_info
:param thread: thread from which we want the information
:param addrvar_offset: offset of the addr_limit var in the thread_info
:return: the addr_limit
"""
addr_space = thread.get_process_address_space()
offset = thread.obj_offset + addrvar_offset
# Compare against the class name; comparing the class object itself
# to a string is always False.
if addr_space.__class__.__name__ == "LinuxAMD64PagedMemory":
return addr_space.read_long_long_phys(offset)
else:
return addr_space.read_long_phys(offset)
def render_text(self, outfd, data):
for task in data:
outfd.write("\nProcess Name: {}\nProcess ID: {}\n".format(task.comm, task.tgid))
self.table_header(outfd, [('Thread PID', '13'), ('Thread Name', '16')])
for thread in task.threads():
self.table_row(outfd, str(thread.pid), thread.comm)
|
gpl-2.0
|
mccheung/kbengine
|
kbe/src/lib/python/Lib/test/test_print.py
|
96
|
4258
|
import unittest
from io import StringIO
from test import support
NotDefined = object()
# A dispatch table for all 8 combinations of providing
# sep, end, and file.
# I use this machinery so that I'm not just passing default
# values to print, I'm either passing or not passing in the
# arguments.
dispatch = {
(False, False, False):
lambda args, sep, end, file: print(*args),
(False, False, True):
lambda args, sep, end, file: print(file=file, *args),
(False, True, False):
lambda args, sep, end, file: print(end=end, *args),
(False, True, True):
lambda args, sep, end, file: print(end=end, file=file, *args),
(True, False, False):
lambda args, sep, end, file: print(sep=sep, *args),
(True, False, True):
lambda args, sep, end, file: print(sep=sep, file=file, *args),
(True, True, False):
lambda args, sep, end, file: print(sep=sep, end=end, *args),
(True, True, True):
lambda args, sep, end, file: print(sep=sep, end=end, file=file, *args),
}
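# For example, dispatch[(True, False, False)](('a', 'b'), '-', None, None)
# calls print('a', 'b', sep='-') and writes "a-b\n" to stdout.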
# Class used to test __str__ and print
class ClassWith__str__:
def __init__(self, x):
self.x = x
def __str__(self):
return self.x
class TestPrint(unittest.TestCase):
"""Test correct operation of the print function."""
def check(self, expected, args,
sep=NotDefined, end=NotDefined, file=NotDefined):
# Capture sys.stdout in a StringIO. Call print with args,
# and with sep, end, and file, if they're defined. Result
# must match expected.
# Look up the actual function to call, based on if sep, end,
# and file are defined.
fn = dispatch[(sep is not NotDefined,
end is not NotDefined,
file is not NotDefined)]
with support.captured_stdout() as t:
fn(args, sep, end, file)
self.assertEqual(t.getvalue(), expected)
def test_print(self):
def x(expected, args, sep=NotDefined, end=NotDefined):
# Run the test 2 ways: not using file, and using
# file directed to a StringIO.
self.check(expected, args, sep=sep, end=end)
# When writing to a file, stdout is expected to be empty
o = StringIO()
self.check('', args, sep=sep, end=end, file=o)
# And o will contain the expected output
self.assertEqual(o.getvalue(), expected)
x('\n', ())
x('a\n', ('a',))
x('None\n', (None,))
x('1 2\n', (1, 2))
x('1 2\n', (1, ' ', 2))
x('1*2\n', (1, 2), sep='*')
x('1 s', (1, 's'), end='')
x('a\nb\n', ('a', 'b'), sep='\n')
x('1.01', (1.0, 1), sep='', end='')
x('1*a*1.3+', (1, 'a', 1.3), sep='*', end='+')
x('a\n\nb\n', ('a\n', 'b'), sep='\n')
x('\0+ +\0\n', ('\0', ' ', '\0'), sep='+')
x('a\n b\n', ('a\n', 'b'))
x('a\n b\n', ('a\n', 'b'), sep=None)
x('a\n b\n', ('a\n', 'b'), end=None)
x('a\n b\n', ('a\n', 'b'), sep=None, end=None)
x('*\n', (ClassWith__str__('*'),))
x('abc 1\n', (ClassWith__str__('abc'), 1))
# errors
self.assertRaises(TypeError, print, '', sep=3)
self.assertRaises(TypeError, print, '', end=3)
self.assertRaises(AttributeError, print, '', file='')
def test_print_flush(self):
# operation of the flush flag
class filelike:
def __init__(self):
self.written = ''
self.flushed = 0
def write(self, str):
self.written += str
def flush(self):
self.flushed += 1
f = filelike()
print(1, file=f, end='', flush=True)
print(2, file=f, end='', flush=True)
print(3, file=f, flush=False)
self.assertEqual(f.written, '123\n')
self.assertEqual(f.flushed, 2)
# ensure exceptions from flush are passed through
class noflush:
def write(self, str):
pass
def flush(self):
raise RuntimeError
self.assertRaises(RuntimeError, print, 1, file=noflush(), flush=True)
if __name__ == "__main__":
unittest.main()
|
lgpl-3.0
|
attilahorvath/phantomjs
|
src/qt/qtbase/src/3rdparty/freetype/src/tools/docmaker/formatter.py
|
515
|
4962
|
# Formatter (c) 2002, 2004, 2007, 2008 David Turner <david@freetype.org>
#
from sources import *
from content import *
from utils import *
# This is the base Formatter class. Its purpose is to convert
# a content processor's data into specific documents (i.e., table of
# contents, global index, and individual API reference indices).
#
# You need to sub-class it to output anything sensible. For example,
# the file tohtml.py contains the definition of the HtmlFormatter sub-class
# used to output -- you guessed it -- HTML.
#
class Formatter:
def __init__( self, processor ):
self.processor = processor
self.identifiers = {}
self.chapters = processor.chapters
self.sections = processor.sections.values()
self.block_index = []
        # store all blocks in the identifiers dictionary
self.blocks = []
for section in self.sections:
for block in section.blocks.values():
self.add_identifier( block.name, block )
# add enumeration values to the index, since this is useful
for markup in block.markups:
if markup.tag == 'values':
for field in markup.fields:
self.add_identifier( field.name, block )
self.block_index = self.identifiers.keys()
self.block_index.sort( index_sort )
def add_identifier( self, name, block ):
        if name in self.identifiers:
# duplicate name!
sys.stderr.write( \
"WARNING: duplicate definition for '" + name + "' in " + \
block.location() + ", previous definition in " + \
self.identifiers[name].location() + "\n" )
else:
self.identifiers[name] = block
#
# Formatting the table of contents
#
def toc_enter( self ):
pass
def toc_chapter_enter( self, chapter ):
pass
def toc_section_enter( self, section ):
pass
def toc_section_exit( self, section ):
pass
def toc_chapter_exit( self, chapter ):
pass
def toc_index( self, index_filename ):
pass
def toc_exit( self ):
pass
def toc_dump( self, toc_filename = None, index_filename = None ):
output = None
if toc_filename:
output = open_output( toc_filename )
self.toc_enter()
for chap in self.processor.chapters:
self.toc_chapter_enter( chap )
for section in chap.sections:
self.toc_section_enter( section )
self.toc_section_exit( section )
self.toc_chapter_exit( chap )
self.toc_index( index_filename )
self.toc_exit()
if output:
close_output( output )
#
# Formatting the index
#
def index_enter( self ):
pass
def index_name_enter( self, name ):
pass
def index_name_exit( self, name ):
pass
def index_exit( self ):
pass
def index_dump( self, index_filename = None ):
output = None
if index_filename:
output = open_output( index_filename )
self.index_enter()
for name in self.block_index:
self.index_name_enter( name )
self.index_name_exit( name )
self.index_exit()
if output:
close_output( output )
#
# Formatting a section
#
def section_enter( self, section ):
pass
def block_enter( self, block ):
pass
def markup_enter( self, markup, block = None ):
pass
def field_enter( self, field, markup = None, block = None ):
pass
def field_exit( self, field, markup = None, block = None ):
pass
def markup_exit( self, markup, block = None ):
pass
def block_exit( self, block ):
pass
def section_exit( self, section ):
pass
def section_dump( self, section, section_filename = None ):
output = None
if section_filename:
output = open_output( section_filename )
self.section_enter( section )
for name in section.block_names:
block = self.identifiers[name]
self.block_enter( block )
for markup in block.markups[1:]: # always ignore first markup!
self.markup_enter( markup, block )
for field in markup.fields:
self.field_enter( field, markup, block )
self.field_exit( field, markup, block )
self.markup_exit( markup, block )
self.block_exit( block )
self.section_exit( section )
if output:
close_output( output )
def section_dump_all( self ):
for section in self.sections:
self.section_dump( section )
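# Illustrative sketch (not part of the original FreeType sources): a minimal
# sub-class relying on the callbacks above to list block names while dumping
# sections. The class name is hypothetical; `sys` is assumed to be available
# through the star imports above, as it already is in add_identifier().
class ListingFormatter( Formatter ):
    def block_enter( self, block ):
        sys.stderr.write( block.name + "\n" )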
# eof
|
bsd-3-clause
|
indhub/mxnet
|
tools/caffe_translator/scripts/convert_caffe_model.py
|
30
|
5271
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
"""Script to convert Caffe .modelfile to MXNet .params file"""
from __future__ import print_function
import argparse
import mxnet as mx
import caffe
from caffe.proto import caffe_pb2
class CaffeModelConverter(object):
"""Converts Caffe .modelfile to MXNet .params file"""
def __init__(self):
self.dict_param = {}
self.layers = None
def add_param(self, param_name, layer_index, blob_index):
"""Add a param to the .params file"""
blobs = self.layers[layer_index].blobs
self.dict_param[param_name] = mx.nd.array(caffe.io.blobproto_to_array(blobs[blob_index]))
def add_arg_param(self, param_name, layer_index, blob_index):
"""Add an arg param to .params file. Example: weights of a fully connected layer."""
self.add_param('arg:%s' % param_name, layer_index, blob_index)
def add_aux_param(self, param_name, layer_index, blob_index):
"""Add an aux param to .params file. Example: moving_mean in BatchNorm layer """
self.add_param('aux:%s' % param_name, layer_index, blob_index)
def add_optional_arg_param(self, param_name, layer_index, blob_index):
"""Add an arg param. If there is no such param in .caffemodel fie, silently ignore it."""
blobs = self.layers[layer_index].blobs
if blob_index < len(blobs):
self.add_arg_param(param_name, layer_index, blob_index)
def convert(self, caffemodel_path, outmodel_path):
"""Convert a Caffe .caffemodel file to MXNet .params file"""
net_param = caffe_pb2.NetParameter()
with open(caffemodel_path, 'rb') as caffe_model_file:
net_param.ParseFromString(caffe_model_file.read())
layers = net_param.layer
self.layers = layers
for idx, layer in enumerate(layers):
layer_name = str(layer.name)
if layer.blobs:
# If this is a layer that has only weight and bias as parameter
if layer.type == 'Convolution' or layer.type == 'InnerProduct' \
or layer.type == 'Deconvolution':
# Add weight and bias to the dictionary
self.add_arg_param('%s_weight' % layer_name, layer_index=idx, blob_index=0)
self.add_optional_arg_param('%s_bias' % layer_name, layer_index=idx,
blob_index=1)
elif layer.type == 'BatchNorm':
gamma_param_name = '%s_gamma' % layer_name
beta_param_name = '%s_beta' % layer_name
next_layer = layers[idx + 1]
if next_layer.type == 'Scale':
# If next layer is scale layer, get gamma and beta from there
self.add_arg_param(gamma_param_name, layer_index=idx+1, blob_index=0)
self.add_arg_param(beta_param_name, layer_index=idx+1, blob_index=1)
mean_param_name = '%s_moving_mean' % layer_name
var_param_name = '%s_moving_var' % layer_name
self.add_aux_param(mean_param_name, layer_index=idx, blob_index=0)
self.add_aux_param(var_param_name, layer_index=idx, blob_index=1)
elif layer.type == 'Scale':
prev_layer = layers[idx - 1]
if prev_layer.type == 'BatchNorm':
continue
else:
# Use the naming convention used by CaffeOp
self.add_arg_param('%s_0_weight' % layer_name, layer_index=idx,
blob_index=0)
self.add_optional_arg_param('%s_1_bias' % layer_name,
layer_index=idx, blob_index=1)
mx.nd.save(outmodel_path, self.dict_param)
def main():
"""Read .caffemodel path and .params path as input from command line
and use CaffeModelConverter to do the conversion"""
parser = argparse.ArgumentParser(description='.caffemodel to MXNet .params converter.')
parser.add_argument('caffemodel', help='Path to the .caffemodel file to convert.')
parser.add_argument('output_file_name', help='Name of the output .params file.')
args = parser.parse_args()
converter = CaffeModelConverter()
converter.convert(args.caffemodel, args.output_file_name)
if __name__ == '__main__':
main()
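# Example invocation (file names are hypothetical):
#   python convert_caffe_model.py bvlc_googlenet.caffemodel googlenet.params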
|
apache-2.0
|
thegooglecodearchive/healpy
|
healpy/fitsfunc.py
|
2
|
14186
|
#
# This file is part of Healpy.
#
# Healpy is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Healpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Healpy; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# For more information about Healpy, see http://code.google.com/p/healpy
#
"""Provides input and output functions for Healpix maps, alm, and cl.
"""
import pyfits as pyf
import numpy as npy
import pixelfunc
from sphtfunc import Alm
import warnings
from _healpy_pixel_lib import UNSEEN
from exceptions import NotImplementedError
class HealpixFitsWarning(Warning):
pass
def read_cl(filename,dtype=npy.float32,h=False):
"""Reads Cl from an healpix file, as IDL fits2cl.
Input:
- filename: the fits file name
Return:
- cl: the cl array, currently TT only
"""
hdulist=pyf.open(filename)
return hdulist[1].data.field(0)
def write_cl(filename,cl,dtype=npy.float32):
"""Writes Cl into an healpix file, as IDL cl2fits.
Input:
- filename: the fits file name
- cl: the cl array to write to file, currently TT only
"""
# check the dtype and convert it
fitsformat = getformat(dtype)
if isinstance(cl, list):
        raise NotImplementedError('Currently supports temperature-only cls')
else: # we write only one TT
cols = [pyf.Column(name='TEMPERATURE',
format='%s'%fitsformat,
array=cl)]
coldefs=pyf.ColDefs(cols)
tbhdu = pyf.new_table(coldefs)
# add needed keywords
tbhdu.header.update('CREATOR','healpy')
tbhdu.writeto(filename,clobber=True)
def write_map(filename,m,nest=False,dtype=npy.float32,fits_IDL=True):
"""Writes an healpix map into an healpix file.
Input:
- filename: the fits file name
- m: the map to write. Possibly a sequence of 3 maps of same size.
They will be considered as I, Q, U maps
- nest=False: ordering scheme
    - fits_IDL=True reshapes columns into rows of 1024 elements; otherwise all
      the data go into one column
"""
if not hasattr(m, '__len__'):
raise TypeError('The map must be a sequence')
# check the dtype and convert it
fitsformat = getformat(dtype)
#print 'format to use: "%s"'%fitsformat
if hasattr(m[0], '__len__'):
# we should have three maps
if len(m) != 3 or len(m[1]) != len(m[0]) or len(m[2]) != len(m[0]):
raise ValueError("You should give 3 maps of same size "
"for polarisation...")
nside = pixelfunc.npix2nside(len(m[0]))
if nside < 0:
            raise ValueError('Invalid healpix map: wrong number of pixels')
cols=[]
colnames=['I_STOKES','Q_STOKES','U_STOKES']
for cn,mm in zip(colnames,m):
if len(mm) > 1024 and fits_IDL:
# I need an ndarray, for reshape:
mm2 = npy.asarray(mm)
cols.append(pyf.Column(name=cn,
format='1024%s'%fitsformat,
array=mm2.reshape(mm2.size/1024,1024)))
else:
cols.append(pyf.Column(name=cn,
format='%s'%fitsformat,
array=mm))
else: # we write only one map
nside = pixelfunc.npix2nside(len(m))
if nside < 0:
            raise ValueError('Invalid healpix map: wrong number of pixels')
if m.size > 1024 and fits_IDL:
cols = [pyf.Column(name='I_STOKES',
format='1024%s'%fitsformat,
array=m.reshape(m.size/1024,1024))]
else:
cols = [pyf.Column(name='I_STOKES',
format='%s'%fitsformat,
array=m)]
coldefs=pyf.ColDefs(cols)
tbhdu = pyf.new_table(coldefs)
# add needed keywords
tbhdu.header.update('PIXTYPE','HEALPIX','HEALPIX pixelisation')
if nest: ordering = 'NESTED'
else: ordering = 'RING'
tbhdu.header.update('ORDERING',ordering,
'Pixel ordering scheme, either RING or NESTED')
tbhdu.header.update('EXTNAME','xtension',
'name of this binary table extension')
tbhdu.header.update('NSIDE',nside,'Resolution parameter of HEALPIX')
tbhdu.header.update('FIRSTPIX', 0, 'First pixel # (0 based)')
tbhdu.header.update('LASTPIX',pixelfunc.nside2npix(nside)-1,
'Last pixel # (0 based)')
tbhdu.header.update('INDXSCHM','IMPLICIT',
'Indexing: IMPLICIT or EXPLICIT')
tbhdu.writeto(filename,clobber=True)
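# Illustrative usage of write_map (file names are hypothetical):
#   m = npy.zeros(pixelfunc.nside2npix(32))
#   write_map('single_map.fits', m)        # a single temperature map
#   write_map('iqu_map.fits', [m, m, m])   # I, Q, U maps of the same size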
def read_map(filename,field=0,dtype=npy.float64,nest=False,hdu=1,h=False,
verbose=False):
"""Read an healpix map from a fits file.
Input:
- filename: the fits file name
Parameters:
- field: the column to read Default: 0
by convention 0 is temperature, 1 is Q, 2 is U
field can be a tuple to read multiple columns (0,1,2)
- dtype: force the conversion to some type. Default: npy.float64
- nest=False: if True return the map in NEST ordering, otherwise in RING ordering;
use fits keyword ORDERING to decide whether conversion is needed or not
if None, no conversion is performed
- hdu=1: the header number to look at (start at 0)
- h=False: if True, return also the header
- verbose=False: if True, print a number of diagnostic messages
Return:
- an array, a tuple of array, possibly with the header at the end if h
is True
"""
hdulist=pyf.open(filename)
#print hdulist[1].header
    nside = hdulist[hdu].header.get('NSIDE')
    if nside is None:
        warnings.warn("No NSIDE in the header file : will use length of array",
                      HealpixFitsWarning)
    else:
        nside = int(nside)
if verbose: print 'NSIDE = %d'%nside
if not pixelfunc.isnsideok(nside):
raise ValueError('Wrong nside parameter.')
ordering = hdulist[hdu].header.get('ORDERING','UNDEF').strip()
if ordering == 'UNDEF':
ordering = (nest and 'NESTED' or 'RING')
warnings.warn("No ORDERING keyword in header file : "
"assume %s"%ordering)
if verbose: print 'ORDERING = %s in fits file'%ordering
sz=pixelfunc.nside2npix(nside)
if not hasattr(field, '__len__'):
field = (field,)
ret = []
for ff in field:
m=hdulist[hdu].data.field(ff).astype(dtype).ravel()
        if not pixelfunc.isnpixok(m.size) or (sz>0 and sz != m.size):
            if verbose: print 'nside=%d, sz=%d, m.size=%d'%(nside,sz,m.size)
            raise ValueError('Wrong nside parameter.')
        if nest is not None: # no conversion with None
if nest and ordering == 'RING':
idx = pixelfunc.nest2ring(nside,npy.arange(m.size,dtype=npy.int32))
m = m[idx]
if verbose: print 'Ordering converted to NEST'
elif (not nest) and ordering == 'NESTED':
idx = pixelfunc.ring2nest(nside,npy.arange(m.size,dtype=npy.int32))
m = m[idx]
if verbose: print 'Ordering converted to RING'
try:
m[pixelfunc.mask_bad(m)] = UNSEEN
        except OverflowError:
pass
ret.append(m)
if len(ret) == 1:
if h:
return ret[0],hdulist[hdu].header.items()
else:
return ret[0]
else:
if h:
ret.append(hdulist[hdu].header.items())
return tuple(ret)
else:
return tuple(ret)
def write_alm(filename,alms,out_dtype=None,lmax=-1,mmax=-1,mmax_in=-1):
"""
Write alms to a fits file. In the fits file the alms are written
with explicit index scheme, index = l*l + l + m +1, possibly out of order.
By default write_alm makes a table with the same precision as the alms.
If specified, the lmax and mmax parameters truncate the input data to
include only alms for which l <= lmax and m <= mmax.
Input:
- filename: the filename of the output fits file
- alms: a complex array holding the alms.
Parameters:
- lmax: maximum l in the output file
- mmax: maximum m in the output file
- out_dtype: data type in the output file (must be a numpy dtype)
- mmax_in: maximum m in the input array
"""
l2max = Alm.getlmax(len(alms),mmax=mmax_in)
if (lmax != -1 and lmax > l2max):
raise ValueError("Too big lmax in parameter")
elif lmax == -1:
lmax = l2max
if mmax_in == -1:
mmax_in = l2max
if mmax == -1:
mmax = lmax
if mmax > mmax_in:
mmax = mmax_in
    if out_dtype is None:
out_dtype = alms.real.dtype
l,m = Alm.getlm(lmax)
idx = npy.where((l <= lmax)*(m <= mmax))
l = l[idx]
m = m[idx]
idx_in_original = Alm.getidx(l2max, l=l, m=m)
index = l**2 + l + m + 1
out_data = npy.empty(len(index),\
dtype=[('index','i'),\
('real',out_dtype),('imag',out_dtype)])
out_data['index'] = index
out_data['real'] = alms.real[idx_in_original]
out_data['imag'] = alms.imag[idx_in_original]
cindex = pyf.Column(name="index", format=getformat(npy.int32), unit="l*l+l+m+1", array=out_data['index'])
creal = pyf.Column(name="real", format=getformat(out_dtype), unit="unknown", array=out_data['real'])
cimag = pyf.Column(name="imag", format=getformat(out_dtype), unit="unknown", array=out_data['imag'])
coldefs=pyf.ColDefs([cindex,creal,cimag])
tbhdu = pyf.new_table(coldefs)
tbhdu.writeto(filename,clobber=True)
def read_alm(filename,hdu=1,return_mmax=False):
"""Read alm from a fits file. In the fits file, the alm are written
with explicit index scheme, index = l**2+l+m+1, while healpix cxx
uses index = m*(2*lmax+1-m)/2+l. The conversion is done in this
function.
Input:
- filename: the name of the fits file to read
Parameters:
- hdu: the header to read. Start at 0. Default: hdu=1
- return_mmax: If true, both the alms and mmax is returned in a tuple. Default: return_mmax=False
Return:
- alms: if return_mmax=False
- alms,mmax: if return_mmax=True
"""
idx, almr, almi = mrdfits(filename,hdu=hdu)
l = npy.floor(npy.sqrt(idx-1)).astype(long)
m = idx - l**2 - l - 1
if (m<0).any():
raise ValueError('Negative m value encountered !')
lmax = l.max()
mmax = m.max()
alm = almr*(0+0j)
i = Alm.getidx(lmax,l,m)
alm.real[i] = almr
alm.imag[i] = almi
if return_mmax:
return alm, mmax
else:
return alm
## Generic functions to read and write column of data in fits file
def mrdfits(filename,hdu=1):
"""Read a table in a fits file.
Input:
- filename: the name of the fits file to read
Parameters:
- hdu: the header to read. Start at 0. Default: hdu=1
Return:
- a list of column data in the given header
"""
hdulist=pyf.open(filename)
if hdu>=len(hdulist):
raise ValueError('Available hdu in [0-%d]'%len(hdulist))
hdu=hdulist[hdu]
val=[]
for i in range(len(hdu.columns)):
val.append(hdu.data.field(i))
hdulist.close()
del hdulist
return val
def mwrfits(filename,data,hdu=1,colnames=None,keys=None):
"""Write columns to a fits file in a table extension.
Input:
- filename: the fits file name
- data: a list of 1D arrays to write in the table
Parameters:
- hdu: header where to write the data. Default: 1
- colnames: the column names
- keys: a dictionary with keywords to write in the header
"""
# Check the inputs
if colnames is not None:
if len(colnames) != len(data):
raise ValueError("colnames and data must the same length")
else:
colnames = ['']*len(data)
cols=[]
for line in xrange(len(data)):
cols.append(pyf.Column(name=colnames[line],
format=getformat(data[line]),
array=data[line]))
coldefs=pyf.ColDefs(cols)
tbhdu = pyf.new_table(coldefs)
if type(keys) is dict:
for k,v in keys.items():
tbhdu.header.update(k,v)
# write the file
tbhdu.writeto(filename,clobber=True)
def getformat(t):
"""Get the format string of type t.
"""
conv = {
npy.dtype(npy.bool): 'L',
npy.dtype(npy.uint8): 'B',
npy.dtype(npy.int16): 'I',
npy.dtype(npy.int32): 'J',
npy.dtype(npy.int64): 'K',
npy.dtype(npy.float32): 'E',
npy.dtype(npy.float64): 'D',
npy.dtype(npy.complex64): 'C',
npy.dtype(npy.complex128): 'M'
}
try:
if t in conv:
return conv[t]
except:
pass
try:
if npy.dtype(t) in conv:
return conv[npy.dtype(t)]
except:
pass
try:
if npy.dtype(type(t)) in conv:
return conv[npy.dtype(type(t))]
except:
pass
try:
if npy.dtype(type(t[0])) in conv:
return conv[npy.dtype(type(t[0]))]
except:
pass
try:
if t is str:
return 'A'
except:
pass
try:
if type(t) is str:
return 'A%d'%(len(t))
except:
pass
try:
if type(t[0]) is str:
l=max(len(s) for s in t)
return 'A%d'%(l)
except:
pass
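# Quick illustration of getformat, following the conversion table above:
#   getformat(npy.float64)  -> 'D'
#   getformat(npy.int32)    -> 'J'
#   getformat('hello')      -> 'A5'   # plain strings map to 'A<len>'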
|
gpl-2.0
|
orcacoin-project/orcacoin
|
share/qt/make_spinner.py
|
4415
|
1035
|
#!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning .mng animation from a .png
# Requires imagemagick 6.7+
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen
SRC='img/reload_scaled.png'
DST='../../src/qt/res/movies/update_spinner.mng'
TMPDIR='/tmp'
TMPNAME='tmp-%03i.png'
NUMFRAMES=35
FRAMERATE=10.0
CONVERT='convert'
CLOCKWISE=True
DSIZE=(16,16)
im_src = Image.open(SRC)
if CLOCKWISE:
im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)
def frame_to_filename(frame):
return path.join(TMPDIR, TMPNAME % frame)
frame_files = []
for frame in xrange(NUMFRAMES):
rotation = (frame + 0.5) / NUMFRAMES * 360.0
if CLOCKWISE:
rotation = -rotation
im_new = im_src.rotate(rotation, Image.BICUBIC)
im_new.thumbnail(DSIZE, Image.ANTIALIAS)
outfile = frame_to_filename(frame)
im_new.save(outfile, 'png')
frame_files.append(outfile)
p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST])
p.communicate()
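# Illustrative invocation: run from share/qt/ so the relative SRC/DST paths
# above resolve; requires PIL and ImageMagick's `convert` on the PATH:
#   python make_spinner.py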
|
mit
|
webmasterraj/GaSiProMo
|
flask/lib/python2.7/site-packages/sqlalchemy/dialects/mssql/adodbapi.py
|
80
|
2493
|
# mssql/adodbapi.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mssql+adodbapi
:name: adodbapi
:dbapi: adodbapi
:connectstring: mssql+adodbapi://<username>:<password>@<dsnname>
:url: http://adodbapi.sourceforge.net/
.. note::
    The adodbapi dialect is not implemented in SQLAlchemy versions 0.6 and
    above at this time.
"""
import datetime
from sqlalchemy import types as sqltypes, util
from sqlalchemy.dialects.mssql.base import MSDateTime, MSDialect
import sys
class MSDateTime_adodbapi(MSDateTime):
def result_processor(self, dialect, coltype):
def process(value):
# adodbapi will return datetimes with empty time
# values as datetime.date() objects.
# Promote them back to full datetime.datetime()
if type(value) is datetime.date:
return datetime.datetime(value.year, value.month, value.day)
return value
return process
class MSDialect_adodbapi(MSDialect):
supports_sane_rowcount = True
supports_sane_multi_rowcount = True
supports_unicode = sys.maxunicode == 65535
supports_unicode_statements = True
driver = 'adodbapi'
@classmethod
def import_dbapi(cls):
import adodbapi as module
return module
colspecs = util.update_copy(
MSDialect.colspecs,
{
sqltypes.DateTime: MSDateTime_adodbapi
}
)
def create_connect_args(self, url):
keys = url.query
connectors = ["Provider=SQLOLEDB"]
if 'port' in keys:
connectors.append("Data Source=%s, %s" %
(keys.get("host"), keys.get("port")))
else:
connectors.append("Data Source=%s" % keys.get("host"))
connectors.append("Initial Catalog=%s" % keys.get("database"))
user = keys.get("user")
if user:
connectors.append("User Id=%s" % user)
connectors.append("Password=%s" % keys.get("password", ""))
else:
connectors.append("Integrated Security=SSPI")
return [[";".join(connectors)], {}]
def is_disconnect(self, e, connection, cursor):
return isinstance(e, self.dbapi.adodbapi.DatabaseError) and \
"'connection failure'" in str(e)
dialect = MSDialect_adodbapi
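# Illustrative connection (hypothetical credentials), matching the
# connectstring format documented above; note that, as stated there, the
# dialect is not usable with SQLAlchemy 0.6 and above:
#   from sqlalchemy import create_engine
#   engine = create_engine("mssql+adodbapi://scott:tiger@mydsn")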
|
gpl-2.0
|
ezrover/pjsip-2.3
|
tests/pjsua/mod_call.py
|
22
|
5288
|
# $Id$
import time
import imp
import sys
import inc_const as const
from inc_cfg import *
# Load configuration
cfg_file = imp.load_source("cfg_file", ARGS[1])
# Check media flow between ua1 and ua2
def check_media(ua1, ua2):
ua1.send("#")
ua1.expect("#")
ua1.send("1122")
ua2.expect(const.RX_DTMF + "1")
ua2.expect(const.RX_DTMF + "1")
ua2.expect(const.RX_DTMF + "2")
ua2.expect(const.RX_DTMF + "2")
# Test body function
def test_func(t):
callee = t.process[0]
caller = t.process[1]
	# if have_reg then wait a couple of seconds for PUBLISH
	# to complete (just in case PUBLISH is used)
if callee.inst_param.have_reg:
time.sleep(1)
if caller.inst_param.have_reg:
time.sleep(1)
# Caller making call
caller.send("m")
caller.send(t.inst_params[0].uri)
caller.expect(const.STATE_CALLING)
# Callee waits for call and answers with 180/Ringing
time.sleep(0.2)
callee.expect(const.EVENT_INCOMING_CALL)
callee.send("a")
callee.send("180")
callee.expect("SIP/2.0 180")
caller.expect("SIP/2.0 180")
# Synchronize stdout
caller.sync_stdout()
callee.sync_stdout()
# Callee answers with 200/OK
callee.send("a")
callee.send("200")
# Wait until call is connected in both endpoints
time.sleep(0.2)
caller.expect(const.STATE_CONFIRMED)
callee.expect(const.STATE_CONFIRMED)
# Synchronize stdout
caller.sync_stdout()
callee.sync_stdout()
time.sleep(0.1)
caller.sync_stdout()
callee.sync_stdout()
# Test that media is okay
time.sleep(0.3)
check_media(caller, callee)
check_media(callee, caller)
# Hold call by caller
caller.send("H")
caller.expect("INVITE sip:")
callee.expect("INVITE sip:")
caller.expect(const.MEDIA_HOLD)
callee.expect(const.MEDIA_HOLD)
# Synchronize stdout
caller.sync_stdout()
callee.sync_stdout()
# Release hold
time.sleep(0.5)
caller.send("v")
caller.expect("INVITE sip:")
callee.expect("INVITE sip:")
caller.expect(const.MEDIA_ACTIVE, title="waiting for media active after call hold")
callee.expect(const.MEDIA_ACTIVE, title="waiting for media active after call hold")
# Synchronize stdout
caller.sync_stdout()
callee.sync_stdout()
# Test that media is okay
check_media(caller, callee)
check_media(callee, caller)
# Synchronize stdout
caller.sync_stdout()
callee.sync_stdout()
# Hold call by callee
callee.send("H")
callee.expect("INVITE sip:")
caller.expect("INVITE sip:")
caller.expect(const.MEDIA_HOLD)
callee.expect(const.MEDIA_HOLD)
# Synchronize stdout
caller.sync_stdout()
callee.sync_stdout()
# Release hold
time.sleep(0.1)
callee.send("v")
callee.expect("INVITE sip:")
caller.expect("INVITE sip:")
callee.expect(const.MEDIA_ACTIVE, title="waiting for media active after call hold")
caller.expect(const.MEDIA_ACTIVE, title="waiting for media active after call hold")
# Synchronize stdout
caller.sync_stdout()
callee.sync_stdout()
# Test that media is okay
# Wait for some time for ICE negotiation
time.sleep(0.6)
check_media(caller, callee)
check_media(callee, caller)
# Synchronize stdout
caller.sync_stdout()
callee.sync_stdout()
# UPDATE (by caller)
caller.send("U")
#caller.sync_stdout()
callee.expect(const.MEDIA_ACTIVE, title="waiting for media active with UPDATE")
caller.expect(const.MEDIA_ACTIVE, title="waiting for media active with UPDATE")
# Synchronize stdout
caller.sync_stdout()
callee.sync_stdout()
# Test that media is okay
time.sleep(0.1)
check_media(caller, callee)
check_media(callee, caller)
# UPDATE (by callee)
callee.send("U")
callee.expect("UPDATE sip:")
caller.expect("UPDATE sip:")
caller.expect(const.MEDIA_ACTIVE, title="waiting for media active with UPDATE")
callee.expect(const.MEDIA_ACTIVE, title="waiting for media active with UPDATE")
# Synchronize stdout
caller.sync_stdout()
callee.sync_stdout()
# Test that media is okay
time.sleep(0.1)
check_media(caller, callee)
check_media(callee, caller)
# Synchronize stdout
caller.sync_stdout()
callee.sync_stdout()
# Set codecs in both caller and callee so that there is
# no common codec between them.
# In caller we only enable PCMU, in callee we only enable PCMA
caller.send("Cp")
caller.expect("Enter codec")
caller.send("* 0")
caller.send("Cp")
caller.expect("Enter codec")
caller.send("pcmu 120")
callee.send("Cp")
callee.expect("Enter codec")
callee.send("* 0")
callee.send("Cp")
callee.expect("Enter codec")
callee.send("pcma 120")
# Test when UPDATE fails (by callee)
callee.send("U")
caller.expect("SIP/2.0 488")
callee.expect("SIP/2.0 488")
callee.sync_stdout()
caller.sync_stdout()
# Test that media is still okay
time.sleep(0.1)
check_media(caller, callee)
check_media(callee, caller)
# Test when UPDATE fails (by caller)
caller.send("U")
caller.expect("UPDATE sip:")
callee.expect("UPDATE sip:")
callee.expect("SIP/2.0 488")
caller.expect("SIP/2.0 488")
caller.sync_stdout()
callee.sync_stdout()
# Test that media is still okay
time.sleep(0.1)
check_media(callee, caller)
check_media(caller, callee)
# Hangup call
time.sleep(0.1)
caller.send("h")
# Wait until calls are cleared in both endpoints
caller.expect(const.STATE_DISCONNECTED)
callee.expect(const.STATE_DISCONNECTED)
# Here where it all comes together
test = cfg_file.test_param
test.test_func = test_func
|
gpl-2.0
|
DistributedSystemsGroup/zoe
|
zoe_master/backends/docker/backend.py
|
1
|
7197
|
# Copyright (c) 2017, Daniele Venzano
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Zoe backend implementation for one or more Docker Engines."""
import logging
import re
import time
from typing import Union
from zoe_lib.config import get_conf
from zoe_lib.state import Service
import zoe_master.backends.base
from zoe_master.backends.docker.api_client import DockerClient
from zoe_master.backends.docker.config import DockerConfig, DockerHostConfig # pylint: disable=unused-import
from zoe_master.backends.docker.threads import DockerStateSynchronizer
from zoe_master.backends.service_instance import ServiceInstance
from zoe_master.exceptions import ZoeStartExecutionRetryException, ZoeStartExecutionFatalException, ZoeException, ZoeNotEnoughResourcesException
from zoe_master.stats import ClusterStats
log = logging.getLogger(__name__)
# This module-level variable holds the reference to the state synchronizer thread
_checker = None
class DockerEngineBackend(zoe_master.backends.base.BaseBackend):
"""Zoe backend implementation for old-style stand-alone Docker Swarm."""
def __init__(self, opts):
super().__init__(opts)
self.docker_config = DockerConfig(get_conf().backend_docker_config_file).read_config()
def _get_config(self, host) -> Union[DockerHostConfig, None]:
for conf in self.docker_config:
if conf.name == host:
return conf
return None
@classmethod
def init(cls, state):
"""Initializes Swarm backend starting the event monitoring thread."""
global _checker
_checker = DockerStateSynchronizer(state)
@classmethod
def shutdown(cls):
"""Performs a clean shutdown of the resources used by Swarm backend."""
_checker.quit()
def spawn_service(self, service_instance: ServiceInstance):
"""Spawn a service, translating a Zoe Service into a Docker container."""
parsed_name = re.search(r'^(?:([^/]+)/)?(?:([^/]+)/)?([^@:/]+)(?:[@:](.+))?$', service_instance.image_name)
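        # Illustrative parse: 'registry.local/team/app:1.0' yields the groups
        # ('registry.local', 'team', 'app', '1.0'); group(4) is the version
        # tag checked below.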
if parsed_name.group(4) is None:
raise ZoeStartExecutionFatalException('Image {} does not have a version tag'.format(service_instance.image_name))
conf = self._get_config(service_instance.backend_host)
try:
engine = DockerClient(conf)
cont_info = engine.spawn_container(service_instance)
except ZoeNotEnoughResourcesException:
raise ZoeStartExecutionRetryException('Not enough free resources to satisfy reservation request for service {}'.format(service_instance.name))
except ZoeException as e:
raise ZoeStartExecutionFatalException(str(e))
return cont_info["id"], cont_info['external_address'], cont_info['ports']
def terminate_service(self, service: Service) -> None:
"""Terminate and delete a container."""
conf = self._get_config(service.backend_host)
service.set_terminating()
try:
engine = DockerClient(conf)
except ZoeException as e:
log.error('Cannot terminate service {}: {}'.format(service.id, str(e)))
return
if service.backend_id is not None:
engine.terminate_container(service.backend_id, delete=True)
else:
log.error('Cannot terminate service {}, since it has no backend ID'.format(service.name))
service.set_backend_status(service.BACKEND_DESTROY_STATUS)
def platform_state(self) -> ClusterStats:
"""Get the platform state."""
platform_stats = ClusterStats()
for host_conf in self.docker_config: # type: DockerHostConfig
try:
node_stats = _checker.host_stats[host_conf.name]
except KeyError:
continue
platform_stats.nodes.append(node_stats)
platform_stats.timestamp = time.time()
return platform_stats
def node_list(self):
"""Return a list of node names."""
return [node.name for node in self.docker_config]
def service_log(self, service: Service):
"""Get the log."""
conf = self._get_config(service.backend_host)
engine = DockerClient(conf)
return engine.logs(service.backend_id, True, False)
def preload_image(self, image_name):
"""Pull an image from a Docker registry into each host. We shuffle the list to prevent the scheduler to find always the first host in the list."""
parsed_name = re.search(r'^(?:([^/]+)/)?(?:([^/]+)/)?([^@:/]+)(?:[@:](.+))?$', image_name)
if parsed_name.group(4) is None:
raise ZoeException('Image {} does not have a version tag'.format(image_name))
one_success = False
for host_conf in self.docker_config:
log.debug('Pre-loading image {} on host {}'.format(image_name, host_conf.name))
time_start = time.time()
my_engine = DockerClient(host_conf)
try:
my_engine.pull_image(image_name)
except ZoeException:
log.error('Image {} pre-loading failed on host {}'.format(image_name, host_conf.name))
continue
else:
one_success = True
log.debug('Image {} pre-loaded on host {} in {:.2f}s'.format(image_name, host_conf.name, time.time() - time_start))
if not one_success:
raise ZoeException('Cannot pull image {}'.format(image_name))
def list_available_images(self, node_name):
"""List the images available on the specified node."""
node_stats = _checker.host_stats[node_name]
if node_stats.status == 'offline':
return []
return node_stats.images
def update_service(self, service, cores=None, memory=None):
"""Update a service reservation."""
conf = self._get_config(service.backend_host)
try:
engine = DockerClient(conf)
except ZoeException as e:
log.error(str(e))
return
if service.backend_id is not None:
info = engine.info()
if cores is not None and cores > info['NCPU']:
cores = info['NCPU']
if memory is not None and memory > info['MemTotal']:
memory = info['MemTotal']
            # cores may be None for memory-only updates; guard before computing the quota
            cpu_quota = int(cores * 100000) if cores is not None else None
engine.update(service.backend_id, cpu_quota=cpu_quota, mem_reservation=memory)
else:
log.error('Cannot update reservations for service {} ({}), since it has no backend ID'.format(service.name, service.id))
if service.status == service.INACTIVE_STATUS:
service.set_backend_status(service.BACKEND_UNDEFINED_STATUS)
|
apache-2.0
|
okfn/hashtag-listener
|
controllers.py
|
1
|
1538
|
from flask import render_template, request, json, send_from_directory
from bleach import linkify
from app import app, db
import models as m
@app.route('/')
def index():
params = {
'title': 'Hashtag Listener',
}
return render_template('index.html', **params)
@app.route('/api', methods=['POST'])
def api():
auth = request.headers.get('Authorization')
if auth != app.config['APIKEY']:
return json.dumps({'success': False, 'message': 'Not authorized'}), 400
data = request.get_json(force=True)
if not data:
return json.dumps({'success': False, 'message': 'JSON data not found '
'or mimetype not set'}), 400
try:
entry = m.Entry(
username=data.get('username'),
entry_type=data.get('type'),
entry_text=data.get('text')
)
except m.ValidationError:
return json.dumps({'success': False, 'message': 'Validation error with'
' the data provided'}), 400
db.session.add(entry)
db.session.commit()
return json.dumps({'success': True})
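# Illustrative request against /api (key and host are hypothetical):
#   curl -X POST http://localhost:5000/api \
#        -H 'Authorization: <APIKEY>' -H 'Content-Type: application/json' \
#        -d '{"username": "alice", "type": "news", "text": "#news hello"}'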
@app.route('/entry/<name>')
def entry_show(name=None):
params = {
'title': ''.join(['#', name]),
'data': m.Entry.query.filter_by(entry_type=name)
.order_by(m.Entry.created.desc()).all(),
'linkify': linkify,
}
return render_template('entry.html', **params)
@app.route('/robots.txt')
def robots():
return send_from_directory(app.static_folder, request.path[1:])
|
mit
|
gdetor/SI-RF-Structure
|
DiCarloProtocol/figure2-dicarlo.py
|
1
|
18122
|
# Copyright (c) 2014, Georgios Is. Detorakis (gdetor@gmail.com) and
# Nicolas P. Rougier (nicolas.rougier@inria.fr)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# This file is part of the source code accompany the peer-reviewed article:
# [1] "Structure of Receptive Fields in a Computational Model of Area 3b of
# Primary Sensory Cortex", Georgios Is. Detorakis and Nicolas P. Rougier,
# Frontiers in Computational Neuroscience, 2014.
#
# This script reproduces the second figure of DiCarlo et al., 1998 using a
# computational method given in [1].
import math
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from scipy.ndimage import zoom
from scipy.spatial.distance import cdist
from scipy.ndimage.filters import gaussian_filter
from numpy.fft import rfft2, ifftshift, irfft2
def extract(Z, position, shape, fill=0):
# assert(len(position) == len(Z.shape))
# if len(shape) < len(Z.shape):
# shape = shape + Z.shape[len(Z.shape)-len(shape):]
R = np.ones(shape, dtype=Z.dtype)*fill
P = np.array(list(position)).astype(int)
Rs = np.array(list(R.shape)).astype(int)
Zs = np.array(list(Z.shape)).astype(int)
R_start = np.zeros((len(shape),)).astype(int)
R_stop = np.array(list(shape)).astype(int)
Z_start = (P-Rs//2)
Z_stop = (P+Rs//2)+Rs%2
R_start = (R_start - np.minimum(Z_start,0)).tolist()
Z_start = (np.maximum(Z_start,0)).tolist()
#R_stop = (R_stop - np.maximum(Z_stop-Zs,0)).tolist()
R_stop = np.maximum(R_start, (R_stop - np.maximum(Z_stop-Zs,0))).tolist()
Z_stop = (np.minimum(Z_stop,Zs)).tolist()
r = [slice(start,stop) for start,stop in zip(R_start,R_stop)]
z = [slice(start,stop) for start,stop in zip(Z_start,Z_stop)]
R[r] = Z[z]
return R
# -----------------------------------------------------------------------------
def thresholded(data, threshold):
return np.where(abs(data) < threshold, 0.0,data)
def locate_noise( input ):
n = input.shape[0]
data = input.copy()
for i in range(1,n-1):
for j in range(1,n-1):
count = 0
if data[i,j] != 0:
if data[i+1,j] != 0 and np.sign(data[i+1,j])==np.sign(data[i,j]):
count += 1
if data[i-1,j] != 0 and np.sign(data[i-1,j])==np.sign(data[i,j]):
count += 1
if data[i,j-1] != 0 and np.sign(data[i,j-1])==np.sign(data[i,j]):
count += 1
if data[i,j+1] != 0 and np.sign(data[i,j+1])==np.sign(data[i,j]):
count += 1
if count < 2:
data[i,j] = 0
return data
def cleanup(RF):
size = RF.shape[0]
#RF = gaussian_filter(RF, sigma=1.5)
#threshold = 0.05*np.abs(RF.max())
#RF = thresholded(RF.ravel(), threshold)
#RF = locate_noise(RF.reshape(size,size))
return RF
# -------------------------------------
def grid(n, xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0, noise=0.0):
_X = (np.resize(np.linspace(xmin,xmax,n),(n,n))).ravel()
_Y = (np.resize(np.linspace(ymin,ymax,n),(n,n)).T).ravel()
X = _X + np.random.uniform(-noise, noise, n*n)
Y = _Y + np.random.uniform(-noise, noise, n*n)
Imin, Imax = np.argwhere(X < xmin), np.argwhere(X > xmax)
while len(Imin) or len(Imax):
X[Imin] = _X[Imin] + np.random.uniform(-noise, noise, len(Imin))
X[Imax] = _X[Imax] + np.random.uniform(-noise, noise, len(Imax))
Imin, Imax = np.argwhere(X < xmin), np.argwhere(X > xmax)
Imin, Imax = np.argwhere(Y < ymin), np.argwhere(Y > ymax)
while len(Imin) or len(Imax):
Y[Imin] = _Y[Imin] + np.random.uniform(-noise, noise, len(Imin))
Y[Imax] = _Y[Imax] + np.random.uniform(-noise, noise, len(Imax))
Imin, Imax = np.argwhere(Y < ymin), np.argwhere(Y > ymax)
Z = np.zeros((n*n, 2))
Z[:,0], Z[:,1] = X.ravel(), Y.ravel()
return Z
def g(x,sigma = 0.1):
return np.exp(-x**2/sigma**2)
def fromdistance(fn, shape, center=None, dtype=float):
def distance(*args):
d = 0
for i in range(len(shape)):
d += ((args[i]-center[i])/float(max(1,shape[i]-1)))**2
return np.sqrt(d)/np.sqrt(len(shape))
    if center is None:
center = np.array(list(shape))//2
return fn(np.fromfunction(distance,shape,dtype=dtype))
def Gaussian(shape,center,sigma=0.5):
def g(x):
return np.exp(-x**2/sigma**2)
return fromdistance(g,shape,center)
def generate_input(R,S):
"""
Given a grid of receptors and a list of stimuli positions, return the
corresponding input
"""
if len(S):
dX = np.abs(R[:,0].reshape(1,len(R)) - S[:,0].reshape(len(S),1))
dY = np.abs(R[:,1].reshape(1,len(R)) - S[:,1].reshape(len(S),1))
C = np.sqrt(dX*dX+dY*dY) / np.sqrt(2)
return g(C).max(axis=0)
return np.zeros(R.shape[0])
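# Illustrative sanity check (made-up numbers): a single stimulus at (0.5, 0.5)
# on the unit square activates all four receptors of grid(2) equally, since
# each corner receptor sits at the same normalized distance from it.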
def dnf_response( n, Rn, stimulus, w, we, wi, time, dt ):
alpha, tau = 0.1, 1.0
U = np.random.random((n,n)) * .01
V = np.random.random((n,n)) * .01
V_shape = np.array(V.shape)
# Computes field input accordingly
D = (( np.abs( w - stimulus )).sum(axis=-1))/float(Rn*Rn)
I = ( 1.0 - D.reshape(n,n) ) * alpha
for j in range( int(time/dt) ):
Z = rfft2( V * alpha )
Le = irfft2( Z * we, V_shape).real
Li = irfft2( Z * wi, V_shape).real
U += ( -U + ( Le - Li ) + I )* dt * tau
V = np.maximum( U, 0.0 )
return V
def h(x, sigma=1.0):
return np.exp(-0.5*(x/sigma)**2)
# -----------------------------------------------------------------------------
if __name__ == '__main__':
    # Seed for reproducibility
# -------------------------
np.random.seed(137)
# Standard units
# --------------
second = 1.0
millisecond = 1e-3 * second
ms = millisecond
minute = 60 * second
meter = 1.0
millimeter = 1e-3 * meter
mm = millimeter
micrometer = 1e-6 * meter
# Simulation parameters
# ---------------------
dots_number = 750
drum_length = 250*mm
drum_width = 30*mm
drum_shift = 200*micrometer
drum_velocity = 40*mm / second
simulation_time = 5*minute
sampling_rate = 5*ms
dt = sampling_rate
skinpatch = 10*mm,10*mm # width x height
RF_sampling = 25,25
learning_som = False
learning = False
Rn = 16
# R = grid(Rn,noise=0.15)
# Generate the drum pattern
# -------------------------
drum = np.zeros( (dots_number,2) )
drum[:,0] = np.random.uniform(0,drum_length,dots_number)
drum[:,1] = np.random.uniform(0,drum_width, dots_number)
drum_x,drum_y = drum[:,0], drum[:,1]
# Show the drum
# -------------
if 0:
plt.figure(figsize = (16, 1+10 * drum_width/drum_length))
plt.subplot(111,aspect=1)
plt.scatter(drum_x, drum_y, s=10, facecolor='k', edgecolor='k')
plt.xlim(0,drum_length)
plt.xlabel("mm")
plt.ylim(0,drum_width)
plt.ylabel("mm")
plt.show()
print "Estimated number of samples: %d" % (simulation_time/dt)
# SOM learning
# -------------
Sn = 32
folder = '/home/Local/SOM/Attention/REF/'
W = np.load( folder+'weights050000.npy' )
R = np.zeros((Rn*Rn,2))
R[:,0] = np.load( folder+'gridxcoord.npy' )
R[:,1] = np.load( folder+'gridycoord.npy' )
RF_count = np.zeros((Sn,Sn,25,25))
RF_sum = np.zeros((Sn,Sn,25,25))
global_count = np.zeros((Sn,Sn))
global_sum = np.zeros((Sn,Sn))
scale = 960.0/(Sn*Sn)
x_inf, x_sup, y_inf, y_sup = 0.0, 1.0, 0.0, 1.0
X, Y = np.meshgrid( np.linspace(x_inf,x_sup,Sn+1,endpoint=True)[1:],
np.linspace(y_inf,y_sup,Sn+1,endpoint=True)[1:] )
D = np.sqrt( (X-0.5)**2 + (Y-0.5)**2 )
We = 3.65 * scale * h( D, 0.1 )
Wi = 2.40 * scale * h( D, 1.0 )
We_fft = rfft2( ifftshift( We[::-1,::-1] ) )
Wi_fft = rfft2( ifftshift( Wi[::-1,::-1] ) )
if learning:
# Run the simulated drum
for t in np.arange(0.0,simulation_time,dt):
z = t * drum_velocity
x = z % (drum_length - skinpatch[0])
y = int(z / (drum_length - skinpatch[0])) * drum_shift
# Maybe this should be adjusted since a stimulus lying outside the skin
# patch may still have influence on the input (for example, if it lies
# very near the border)
xmin, xmax = x, x+skinpatch[0]
ymin, ymax = y, y+skinpatch[1]
# Get dots contained on the skin patch (and normalize coordinates)
dots = drum[(drum_x > (xmin)) *
(drum_x < (xmax)) *
(drum_y > (ymin)) *
(drum_y < (ymax))]
dots -= (x,y)
dots /= skinpatch[0],skinpatch[1]
# Compute RF mask
RF_mask = np.zeros(RF_sampling)
for dot in dots:
index = (np.floor(dot*RF_sampling)).astype(int)
RF_mask[index[1],index[0]] = 1
# Compute corresponding input (according to receptors)
I = generate_input(R,dots)
# Generate the som answer
V = dnf_response( Sn, Rn, I, W, We_fft, Wi_fft, 10.0, 25.0*.001 )
# Compute the mean firing rate
global_sum += V
global_count += 1
# Compute the local mean firing rate
RF_sum += V.reshape(Sn,Sn,1,1)*RF_mask
RF_count += RF_mask
# Display current skin patch dots and mask
if 0:
plt.figure(figsize=(10,10))
plt.subplot(111,aspect=1)
plt.scatter(dots[:,0],dots[:,1], s=50, facecolor='w', edgecolor='k')
plt.xlim(0.0, 1.0)
plt.ylim(0.0, 1.0)
plt.show()
mean = global_sum/(global_count+1)
RFs = RF_sum/(RF_count+1) - mean.reshape(Sn,Sn,1,1)
if learning: np.save( folder+"RFs.npy", RFs)
RFs = np.load( folder+"RFs.npy")
    # Reconstitute the drum from model answers. This does not make much sense
    # as such; we should rather use the RF of a given neuron and modulate it
    # according to its answer, or convolve the RF with the current dot pattern.
if 1:
Rc_y = (drum_length/skinpatch[0]) * Sn
Rc_x = (drum_width/skinpatch[1]) * Sn
Rc = np.zeros((Rc_x,Rc_y))
for t in np.arange(0.0,simulation_time,dt):
z = t * drum_velocity
x = z % (drum_length - skinpatch[0])
y = int(z / (drum_length - skinpatch[0])) * drum_shift
# Maybe this should be adjusted since a stimulus lying outside the skin
# patch may still have influence on the input (for example, if it lies
# very near the border)
xmin, xmax = x, x+skinpatch[0]
ymin, ymax = y, y+skinpatch[1]
# Get dots contained on the skin patch (and normalize coordinates)
dots = drum[(drum_x > (xmin)) *
(drum_x < (xmax)) *
(drum_y > (ymin)) *
(drum_y < (ymax))]
dots -= (x,y)
dots /= skinpatch[0],skinpatch[1]
# Compute RF mask
RF_mask = np.zeros(RF_sampling)
for dot in dots:
index = (np.floor(dot*RF_sampling)).astype(int)
RF_mask[index[1],index[0]] = 1
# Compute corresponding input (according to receptors)
I = generate_input(R,dots)
# Generate the neural field answer
V = dnf_response( Sn, Rn, I, W, We_fft, Wi_fft, 10.0, 25.0*.001 )
x = int((x/float(drum_length))*Rc_y)
y = int((y/float(drum_width))*Rc_x)
Rc[y:y+Sn,x:x+Sn] = np.maximum(V,Rc[y:y+Sn,x:x+Sn])
# Rc[y:y+Rn,x:x+Rn] += V
# Compute y limit (we may have ended before end of drum)
t = simulation_time
z = t * drum_velocity
x = z % (drum_length - skinpatch[0])
ymax = int(z / (drum_length - skinpatch[0])) * drum_shift + skinpatch[0]
plt.figure(figsize = (16, 1+10 * drum_width/drum_length))
plt.subplot(111,aspect=1)
plt.imshow(Rc, origin='lower', interpolation='bicubic', alpha=1,
cmap = plt.cm.gray_r, extent = [0, drum_length, 0, drum_width])
plt.scatter(drum_x, drum_y, s=5, facecolor='w', edgecolor='k', alpha=.5)
plt.xlim(0,drum_length)
plt.xlabel("mm")
#plt.ylim(0,drum_width)
plt.ylim(0,ymax)
plt.ylabel("mm")
plt.show()
# Show all RFs
if 0:
Z = np.zeros((Sn,25,Sn,25))
for i in range(Sn):
for j in range(Sn):
RF = cleanup(RFs[i,j])
# R = np.where(R<0, R/np.abs(R.min()),R/np.abs(R.max()))
Z[i,:,j,:] = RF
Z = Z.reshape(Sn*25,Sn*25)
plt.figure(figsize=(14,10))
plt.imshow(Z, interpolation='bicubic', origin='lower', cmap=plt.cm.PuOr_r, extent=(0,Sn,0,Sn))
plt.colorbar()
plt.xlim(0,Sn), plt.xticks(np.arange(Sn))
plt.ylim(0,Sn), plt.yticks(np.arange(Sn))
plt.grid()
plt.title("Normalized Receptive fields", fontsize=16)
plt.show()
# Show a random RF
if 0:
i,j = np.random.randint(0,Sn,2)
i,j = 8,8
RF = cleanup(RFs[i,j])
plt.figure(figsize=(8,6))
plt.imshow(RF, interpolation='nearest', origin='lower',
cmap=plt.cm.gray_r, extent=[0,10,0,10])
plt.colorbar()
lmin = 0.50 * RF.min()
lmax = 0.50 * RF.max()
#CS = plt.contour(zoom(RF,10), levels=[lmin,lmax], colors='w',
# origin='lower', extent=[0,10,0,10], linewidths=1, alpha=1.0)
#plt.clabel(CS, inline=1, fontsize=12)
plt.xlim(0,10), plt.xlabel("mm")
plt.ylim(0,10), plt.ylabel("mm")
plt.title("Normalized Receptive Field [%d,%d]" % (i,j), fontsize=16)
plt.show()
# Show excitatory/inhibitory ratio (scatter plot)
if 0:
matplotlib.rc('xtick', direction = 'out')
matplotlib.rc('ytick', direction = 'out')
matplotlib.rc('xtick.major', size = 8, width=1)
matplotlib.rc('xtick.minor', size = 4, width=1)
matplotlib.rc('ytick.major', size = 8, width=1)
matplotlib.rc('ytick.minor', size = 4, width=1)
Z = []
for i in range(Sn):
for j in range(Sn):
p = 25
RF = RFs[i,j]
RF_max = np.abs(RF.max())
#winner = np.unravel_index(np.argmax(RF), RF.shape)
#RF = extract(RF,winner,(p,p))
RF = cleanup(RFs[i,j])
exc = 100 * ((RF >= +0.1*RF_max).sum()/ float(p*p))
inh = 50 * ((RF <= -0.1*RF_max).sum()/ float(p*p))
Z.append([exc,inh])
Z = np.array(Z)
X,Y = Z[:,0], Z[:,1]
fig = plt.figure(figsize=(8,8), facecolor="white")
ax = plt.subplot(1,1,1,aspect=1)
plt.scatter(X+0.01,Y+0.01,s=5,color='k',alpha=0.25)
# Show some points
# I = [3,143,149,189,1,209,192,167,64,87,10,40,68,185,61,198]
# plt.scatter(X[I],Y[I],s=5,color='k')
# for i in range(len(I)):
# plt.annotate(" %c" % (chr(ord('A')+i)), (X[I[i]],Y[I[i]]), weight='bold')
# Select some points by cliking them
# letter = ord('A')
# def onclick(event):
# global letter
# #print 'button=%d, x=%d, y=%d, xdata=%f, ydata=%f'%(
# # event.button, event.x, event.y, event.xdata, event.ydata)
# C = (X-event.xdata)**2 + (Y-event.ydata)**2
# I = np.argmin(C)
# print I
# plt.ion()
# x,y = X[I],Y[I]
# plt.scatter(x,y,s=5,color='k')
# plt.annotate(" %c" % (chr(letter)), (x,y), weight='bold')
# plt.ioff()
# letter = letter+1
# cid = fig.canvas.mpl_connect('button_press_event', onclick)
plt.xlabel("Excitatory area (mm2)")
plt.ylabel("Inhibitory area (mm2)")
plt.xlim(1,100)
plt.ylim(1,100)
plt.xscale('log')
plt.yscale('log')
plt.xticks([1,10,100], ['1','10','100'])
plt.yticks([1,10,100], ['1','10','100'])
plt.plot([1,100],[1,100], ls='--', color='k')
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
plt.show()
|
gpl-3.0
|
ccomb/OpenUpgrade
|
addons/stock/product.py
|
6
|
25318
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools.safe_eval import safe_eval as eval
import openerp.addons.decimal_precision as dp
class product_product(osv.osv):
_inherit = "product.product"
def _stock_move_count(self, cr, uid, ids, field_name, arg, context=None):
res = dict([(id, {'reception_count': 0, 'delivery_count': 0}) for id in ids])
move_pool=self.pool.get('stock.move')
moves = move_pool.read_group(cr, uid, [
('product_id', 'in', ids),
('location_id.usage', '!=', 'internal'),
('location_dest_id.usage', '=', 'internal'),
('state','in',('confirmed','assigned','pending'))
], ['product_id'], ['product_id'])
for move in moves:
product_id = move['product_id'][0]
res[product_id]['reception_count'] = move['product_id_count']
moves = move_pool.read_group(cr, uid, [
('product_id', 'in', ids),
('location_id.usage', '=', 'internal'),
('location_dest_id.usage', '!=', 'internal'),
('state','in',('confirmed','assigned','pending'))
], ['product_id'], ['product_id'])
for move in moves:
product_id = move['product_id'][0]
res[product_id]['delivery_count'] = move['product_id_count']
return res
def view_header_get(self, cr, user, view_id, view_type, context=None):
if context is None:
context = {}
res = super(product_product, self).view_header_get(cr, user, view_id, view_type, context)
if res: return res
if (context.get('active_id', False)) and (context.get('active_model') == 'stock.location'):
return _('Products: ')+self.pool.get('stock.location').browse(cr, user, context['active_id'], context).name
return res
def _get_domain_locations(self, cr, uid, ids, context=None):
'''
Parses the context and returns a list of location_ids based on it.
It will return all stock locations when no parameters are given
Possible parameters are shop, warehouse, location, force_company, compute_child
'''
context = context or {}
location_obj = self.pool.get('stock.location')
warehouse_obj = self.pool.get('stock.warehouse')
location_ids = []
if context.get('location', False):
if type(context['location']) == type(1):
location_ids = [context['location']]
elif type(context['location']) in (type(''), type(u'')):
domain = [('complete_name','ilike',context['location'])]
if context.get('force_company', False):
domain += [('company_id', '=', context['force_company'])]
location_ids = location_obj.search(cr, uid, domain, context=context)
else:
location_ids = context['location']
else:
if context.get('warehouse', False):
wids = [context['warehouse']]
else:
wids = warehouse_obj.search(cr, uid, [], context=context)
for w in warehouse_obj.browse(cr, uid, wids, context=context):
location_ids.append(w.view_location_id.id)
operator = context.get('compute_child', True) and 'child_of' or 'in'
domain = context.get('force_company', False) and ['&', ('company_id', '=', context['force_company'])] or []
return (
domain + [('location_id', operator, location_ids)],
domain + ['&', ('location_dest_id', operator, location_ids), '!', ('location_id', operator, location_ids)],
domain + ['&', ('location_id', operator, location_ids), '!', ('location_dest_id', operator, location_ids)]
)
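    # Illustrative sketch (editorial, not in the original source): with a
    # context such as {'warehouse': 1, 'compute_child': True} the three
    # domains returned above are, schematically,
    #   quants:    [('location_id', 'child_of', location_ids)]
    #   moves in:  ['&', ('location_dest_id', 'child_of', location_ids),
    #               '!', ('location_id', 'child_of', location_ids)]
    #   moves out: ['&', ('location_id', 'child_of', location_ids),
    #               '!', ('location_dest_id', 'child_of', location_ids)]
    # i.e. stock on hand, incoming moves and outgoing moves respectively.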
def _get_domain_dates(self, cr, uid, ids, context):
from_date = context.get('from_date', False)
to_date = context.get('to_date', False)
domain = []
if from_date:
domain.append(('date', '>=', from_date))
if to_date:
domain.append(('date', '<=', to_date))
return domain
def _product_available(self, cr, uid, ids, field_names=None, arg=False, context=None):
context = context or {}
field_names = field_names or []
domain_products = [('product_id', 'in', ids)]
domain_quant, domain_move_in, domain_move_out = self._get_domain_locations(cr, uid, ids, context=context)
domain_move_in += self._get_domain_dates(cr, uid, ids, context=context) + [('state', 'not in', ('done', 'cancel'))] + domain_products
domain_move_out += self._get_domain_dates(cr, uid, ids, context=context) + [('state', 'not in', ('done', 'cancel'))] + domain_products
domain_quant += domain_products
if context.get('lot_id') or context.get('owner_id') or context.get('package_id'):
if context.get('lot_id'):
domain_quant.append(('lot_id', '=', context['lot_id']))
if context.get('owner_id'):
domain_quant.append(('owner_id', '=', context['owner_id']))
if context.get('package_id'):
domain_quant.append(('package_id', '=', context['package_id']))
moves_in = []
moves_out = []
else:
moves_in = self.pool.get('stock.move').read_group(cr, uid, domain_move_in, ['product_id', 'product_qty'], ['product_id'], context=context)
moves_out = self.pool.get('stock.move').read_group(cr, uid, domain_move_out, ['product_id', 'product_qty'], ['product_id'], context=context)
quants = self.pool.get('stock.quant').read_group(cr, uid, domain_quant, ['product_id', 'qty'], ['product_id'], context=context)
quants = dict(map(lambda x: (x['product_id'][0], x['qty']), quants))
moves_in = dict(map(lambda x: (x['product_id'][0], x['product_qty']), moves_in))
moves_out = dict(map(lambda x: (x['product_id'][0], x['product_qty']), moves_out))
res = {}
for id in ids:
res[id] = {
'qty_available': quants.get(id, 0.0),
'incoming_qty': moves_in.get(id, 0.0),
'outgoing_qty': moves_out.get(id, 0.0),
'virtual_available': quants.get(id, 0.0) + moves_in.get(id, 0.0) - moves_out.get(id, 0.0),
}
return res
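    # Worked example (editorial): with quants={42: 10.0}, moves_in={42: 4.0}
    # and moves_out={42: 3.0}, product 42 gets qty_available=10.0,
    # incoming_qty=4.0, outgoing_qty=3.0 and
    # virtual_available = 10.0 + 4.0 - 3.0 = 11.0.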
def _search_product_quantity(self, cr, uid, obj, name, domain, context):
res = []
for field, operator, value in domain:
            # to prevent SQL injection
assert field in ('qty_available', 'virtual_available', 'incoming_qty', 'outgoing_qty'), 'Invalid domain left operand'
assert operator in ('<', '>', '=', '<=', '>='), 'Invalid domain operator'
assert isinstance(value, (float, int)), 'Invalid domain right operand'
if operator == '=':
operator = '=='
product_ids = self.search(cr, uid, [], context=context)
ids = []
if product_ids:
            # TODO: use a query instead of these browse records, which probably
            # issue far too many requests; but don't forget the context that can
            # be set with a location, an owner...
for element in self.browse(cr, uid, product_ids, context=context):
if eval(str(element[field]) + operator + str(value)):
ids.append(element.id)
res.append(('id', 'in', ids))
return res
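    # Hypothetical usage (editorial): a search on any computed quantity field
    # is routed through this function, e.g.
    #   ids = product_obj.search(cr, uid, [('qty_available', '>', 0.0)],
    #                            context={'warehouse': warehouse_id})
    # evaluates each product's quantity in that context and reduces the
    # criterion to an ('id', 'in', ids) domain.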
_columns = {
'reception_count': fields.function(_stock_move_count, string="Reception", type='integer', multi='pickings'),
'delivery_count': fields.function(_stock_move_count, string="Delivery", type='integer', multi='pickings'),
'qty_available': fields.function(_product_available, multi='qty_available',
type='float', digits_compute=dp.get_precision('Product Unit of Measure'),
string='Quantity On Hand',
fnct_search=_search_product_quantity,
help="Current quantity of products.\n"
"In a context with a single Stock Location, this includes "
"goods stored at this Location, or any of its children.\n"
"In a context with a single Warehouse, this includes "
"goods stored in the Stock Location of this Warehouse, or any "
"of its children.\n"
"stored in the Stock Location of the Warehouse of this Shop, "
"or any of its children.\n"
"Otherwise, this includes goods stored in any Stock Location "
"with 'internal' type."),
'virtual_available': fields.function(_product_available, multi='qty_available',
type='float', digits_compute=dp.get_precision('Product Unit of Measure'),
string='Forecast Quantity',
fnct_search=_search_product_quantity,
help="Forecast quantity (computed as Quantity On Hand "
"- Outgoing + Incoming)\n"
"In a context with a single Stock Location, this includes "
"goods stored in this location, or any of its children.\n"
"In a context with a single Warehouse, this includes "
"goods stored in the Stock Location of this Warehouse, or any "
"of its children.\n"
"Otherwise, this includes goods stored in any Stock Location "
"with 'internal' type."),
'incoming_qty': fields.function(_product_available, multi='qty_available',
type='float', digits_compute=dp.get_precision('Product Unit of Measure'),
string='Incoming',
fnct_search=_search_product_quantity,
help="Quantity of products that are planned to arrive.\n"
"In a context with a single Stock Location, this includes "
"goods arriving to this Location, or any of its children.\n"
"In a context with a single Warehouse, this includes "
"goods arriving to the Stock Location of this Warehouse, or "
"any of its children.\n"
"Otherwise, this includes goods arriving to any Stock "
"Location with 'internal' type."),
'outgoing_qty': fields.function(_product_available, multi='qty_available',
type='float', digits_compute=dp.get_precision('Product Unit of Measure'),
string='Outgoing',
fnct_search=_search_product_quantity,
help="Quantity of products that are planned to leave.\n"
"In a context with a single Stock Location, this includes "
"goods leaving this Location, or any of its children.\n"
"In a context with a single Warehouse, this includes "
"goods leaving the Stock Location of this Warehouse, or "
"any of its children.\n"
"Otherwise, this includes goods leaving any Stock "
"Location with 'internal' type."),
'location_id': fields.dummy(string='Location', relation='stock.location', type='many2one'),
'warehouse_id': fields.dummy(string='Warehouse', relation='stock.warehouse', type='many2one'),
'orderpoint_ids': fields.one2many('stock.warehouse.orderpoint', 'product_id', 'Minimum Stock Rules'),
}
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
res = super(product_product,self).fields_view_get(cr, uid, view_id, view_type, context, toolbar=toolbar, submenu=submenu)
if context is None:
context = {}
if ('location' in context) and context['location']:
location_info = self.pool.get('stock.location').browse(cr, uid, context['location'])
fields=res.get('fields',{})
if fields:
if location_info.usage == 'supplier':
if fields.get('virtual_available'):
res['fields']['virtual_available']['string'] = _('Future Receptions')
if fields.get('qty_available'):
res['fields']['qty_available']['string'] = _('Received Qty')
if location_info.usage == 'internal':
if fields.get('virtual_available'):
res['fields']['virtual_available']['string'] = _('Future Stock')
if location_info.usage == 'customer':
if fields.get('virtual_available'):
res['fields']['virtual_available']['string'] = _('Future Deliveries')
if fields.get('qty_available'):
res['fields']['qty_available']['string'] = _('Delivered Qty')
if location_info.usage == 'inventory':
if fields.get('virtual_available'):
res['fields']['virtual_available']['string'] = _('Future P&L')
if fields.get('qty_available'):
res['fields']['qty_available']['string'] = _('P&L Qty')
if location_info.usage == 'procurement':
if fields.get('virtual_available'):
res['fields']['virtual_available']['string'] = _('Future Qty')
if fields.get('qty_available'):
res['fields']['qty_available']['string'] = _('Unplanned Qty')
if location_info.usage == 'production':
if fields.get('virtual_available'):
res['fields']['virtual_available']['string'] = _('Future Productions')
if fields.get('qty_available'):
res['fields']['qty_available']['string'] = _('Produced Qty')
return res
class product_template(osv.osv):
_name = 'product.template'
_inherit = 'product.template'
def _product_available(self, cr, uid, ids, name, arg, context=None):
res = dict.fromkeys(ids, 0)
for product in self.browse(cr, uid, ids, context=context):
res[product.id] = {
# "reception_count": sum([p.reception_count for p in product.product_variant_ids]),
# "delivery_count": sum([p.delivery_count for p in product.product_variant_ids]),
"qty_available": sum([p.qty_available for p in product.product_variant_ids]),
"virtual_available": sum([p.virtual_available for p in product.product_variant_ids]),
"incoming_qty": sum([p.incoming_qty for p in product.product_variant_ids]),
"outgoing_qty": sum([p.outgoing_qty for p in product.product_variant_ids]),
}
return res
def _search_product_quantity(self, cr, uid, obj, name, domain, context):
prod = self.pool.get("product.product")
res = []
for field, operator, value in domain:
            # to prevent SQL injection
assert field in ('qty_available', 'virtual_available', 'incoming_qty', 'outgoing_qty'), 'Invalid domain left operand'
assert operator in ('<', '>', '=', '<=', '>='), 'Invalid domain operator'
assert isinstance(value, (float, int)), 'Invalid domain right operand'
if operator == '=':
operator = '=='
product_ids = prod.search(cr, uid, [], context=context)
ids = []
if product_ids:
            # TODO: use a query instead of these browse records, which probably
            # issue far too many requests; but don't forget the context that can
            # be set with a location, an owner...
for element in prod.browse(cr, uid, product_ids, context=context):
if eval(str(element[field]) + operator + str(value)):
ids.append(element.id)
res.append(('product_variant_ids', 'in', ids))
return res
_columns = {
'valuation':fields.selection([('manual_periodic', 'Periodical (manual)'),
('real_time','Real Time (automated)'),], 'Inventory Valuation',
help="If real-time valuation is enabled for a product, the system will automatically write journal entries corresponding to stock moves." \
"The inventory variation account set on the product category will represent the current inventory value, and the stock input and stock output account will hold the counterpart moves for incoming and outgoing products."
, required=True),
'type': fields.selection([('product', 'Stockable Product'), ('consu', 'Consumable'), ('service', 'Service')], 'Product Type', required=True, help="Consumable: Will not imply stock management for this product. \nStockable product: Will imply stock management for this product."),
'property_stock_procurement': fields.property(
type='many2one',
relation='stock.location',
string="Procurement Location",
domain=[('usage','like','procurement')],
help="This stock location will be used, instead of the default one, as the source location for stock moves generated by procurements."),
'property_stock_production': fields.property(
type='many2one',
relation='stock.location',
string="Production Location",
domain=[('usage','like','production')],
help="This stock location will be used, instead of the default one, as the source location for stock moves generated by manufacturing orders."),
'property_stock_inventory': fields.property(
type='many2one',
relation='stock.location',
string="Inventory Location",
domain=[('usage','like','inventory')],
help="This stock location will be used, instead of the default one, as the source location for stock moves generated when you do an inventory."),
'sale_delay': fields.float('Customer Lead Time', help="The average delay in days between the confirmation of the customer order and the delivery of the finished products. It's the time you promise to your customers."),
'loc_rack': fields.char('Rack', size=16),
'loc_row': fields.char('Row', size=16),
'loc_case': fields.char('Case', size=16),
'track_incoming': fields.boolean('Track Incoming Lots', help="Forces to specify a Serial Number for all moves containing this product and coming from a Supplier Location"),
'track_outgoing': fields.boolean('Track Outgoing Lots', help="Forces to specify a Serial Number for all moves containing this product and going to a Customer Location"),
'track_all': fields.boolean('Full Lots Traceability', help="Forces to specify a Serial Number on each and every operation related to this product"),
# sum of product variant qty
# 'reception_count': fields.function(_product_available, multi='qty_available',
# fnct_search=_search_product_quantity, type='float', string='Quantity On Hand'),
# 'delivery_count': fields.function(_product_available, multi='qty_available',
# fnct_search=_search_product_quantity, type='float', string='Quantity On Hand'),
'qty_available': fields.function(_product_available, multi='qty_available',
fnct_search=_search_product_quantity, type='float', string='Quantity On Hand'),
'virtual_available': fields.function(_product_available, multi='qty_available',
fnct_search=_search_product_quantity, type='float', string='Quantity Available'),
'incoming_qty': fields.function(_product_available, multi='qty_available',
fnct_search=_search_product_quantity, type='float', string='Incoming'),
'outgoing_qty': fields.function(_product_available, multi='qty_available',
fnct_search=_search_product_quantity, type='float', string='Outgoing'),
'route_ids': fields.many2many('stock.location.route', 'stock_route_product', 'product_id', 'route_id', 'Routes', domain="[('product_selectable', '=', True)]",
help="Depending on the modules installed, this will allow you to define the route of the product: whether it will be bought, manufactured, MTO/MTS,..."),
}
_defaults = {
'sale_delay': 7,
'valuation': 'manual_periodic',
}
def action_view_routes(self, cr, uid, ids, context=None):
route_obj = self.pool.get("stock.location.route")
act_obj = self.pool.get('ir.actions.act_window')
mod_obj = self.pool.get('ir.model.data')
product_route_ids = set()
for product in self.browse(cr, uid, ids, context=context):
product_route_ids |= set([r.id for r in product.route_ids])
product_route_ids |= set([r.id for r in product.categ_id.total_route_ids])
route_ids = route_obj.search(cr, uid, ['|', ('id', 'in', list(product_route_ids)), ('warehouse_selectable', '=', True)], context=context)
result = mod_obj.get_object_reference(cr, uid, 'stock', 'action_routes_form')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
result['domain'] = "[('id','in',[" + ','.join(map(str, route_ids)) + "])]"
return result
class product_removal_strategy(osv.osv):
_name = 'product.removal'
_description = 'Removal Strategy'
_columns = {
'name': fields.char('Name', required=True),
'method': fields.char("Method", required=True, help="FIFO, LIFO..."),
}
class product_putaway_strategy(osv.osv):
_name = 'product.putaway'
_description = 'Put Away Strategy'
def _get_putaway_options(self, cr, uid, context=None):
return [('fixed', 'Fixed Location')]
_columns = {
'name': fields.char('Name', required=True),
'method': fields.selection(_get_putaway_options, "Method", required=True),
'fixed_location_ids': fields.one2many('stock.fixed.putaway.strat', 'putaway_id', 'Fixed Locations Per Product Category', help="When the method is fixed, this location will be used to store the products"),
}
_defaults = {
'method': 'fixed',
}
def putaway_apply(self, cr, uid, putaway_strat, product, context=None):
if putaway_strat.method == 'fixed':
for strat in putaway_strat.fixed_location_ids:
categ = product.categ_id
while categ:
if strat.category_id.id == categ.id:
return strat.fixed_location_id.id
categ = categ.parent_id
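    # Worked example (editorial): with a fixed-location line defined on
    # category "All / Saleable" and a product categorized under
    # "All / Saleable / Phones", the loop above walks Phones -> Saleable,
    # matches the strategy line there and returns its fixed_location_id;
    # if no ancestor matches, the method falls through and returns None.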
class fixed_putaway_strat(osv.osv):
_name = 'stock.fixed.putaway.strat'
_order = 'sequence'
_columns = {
'putaway_id': fields.many2one('product.putaway', 'Put Away Method', required=True),
'category_id': fields.many2one('product.category', 'Product Category', required=True),
'fixed_location_id': fields.many2one('stock.location', 'Location', required=True),
        'sequence': fields.integer('Priority', help="Give the more specialized categories a higher priority to put them at the top of the list."),
}
class product_category(osv.osv):
_inherit = 'product.category'
def calculate_total_routes(self, cr, uid, ids, name, args, context=None):
res = {}
for categ in self.browse(cr, uid, ids, context=context):
categ2 = categ
routes = [x.id for x in categ.route_ids]
while categ2.parent_id:
categ2 = categ2.parent_id
routes += [x.id for x in categ2.route_ids]
res[categ.id] = routes
return res
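    # Illustrative example (editorial): for a category chain
    # "All / Consumable / Food", the loop collects the route_ids set on Food
    # plus those set on Consumable and All, so total_route_ids is the union
    # of the category's own routes and all inherited ancestor routes.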
_columns = {
'route_ids': fields.many2many('stock.location.route', 'stock_location_route_categ', 'categ_id', 'route_id', 'Routes', domain="[('product_categ_selectable', '=', True)]"),
'removal_strategy_id': fields.many2one('product.removal', 'Force Removal Strategy', help="Set a specific removal strategy that will be used regardless of the source location for this product category"),
'total_route_ids': fields.function(calculate_total_routes, relation='stock.location.route', type='many2many', string='Total routes', readonly=True),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
jmhsi/justin_tinker
|
data_science/courses/learning_dl_packages/models/research/pcl_rl/gym_wrapper.py
|
7
|
3481
|
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper around gym env.
Allows for using batches of possibly identically seeded environments.
"""
import gym
import numpy as np
import random
import env_spec
def get_env(env_str):
return gym.make(env_str)
class GymWrapper(object):
def __init__(self, env_str, distinct=1, count=1, seeds=None):
self.distinct = distinct
self.count = count
self.total = self.distinct * self.count
self.seeds = seeds or [random.randint(0, 1e12)
for _ in xrange(self.distinct)]
self.envs = []
for seed in self.seeds:
for _ in xrange(self.count):
env = get_env(env_str)
env.seed(seed)
if hasattr(env, 'last'):
env.last = 100 # for algorithmic envs
self.envs.append(env)
self.dones = [True] * self.total
self.num_episodes_played = 0
one_env = self.get_one()
self.use_action_list = hasattr(one_env.action_space, 'spaces')
self.env_spec = env_spec.EnvSpec(self.get_one())
def get_seeds(self):
return self.seeds
def reset(self):
self.dones = [False] * self.total
self.num_episodes_played += len(self.envs)
# reset seeds to be synchronized
self.seeds = [random.randint(0, 1e12) for _ in xrange(self.distinct)]
counter = 0
for seed in self.seeds:
for _ in xrange(self.count):
self.envs[counter].seed(seed)
counter += 1
return [self.env_spec.convert_obs_to_list(env.reset())
for env in self.envs]
def reset_if(self, predicate=None):
if predicate is None:
predicate = self.dones
if self.count != 1:
assert np.all(predicate)
return self.reset()
self.num_episodes_played += sum(predicate)
output = [self.env_spec.convert_obs_to_list(env.reset())
if pred else None
for env, pred in zip(self.envs, predicate)]
for i, pred in enumerate(predicate):
if pred:
self.dones[i] = False
return output
def all_done(self):
return all(self.dones)
def step(self, actions):
def env_step(action):
action = self.env_spec.convert_action_to_gym(action)
obs, reward, done, tt = env.step(action)
obs = self.env_spec.convert_obs_to_list(obs)
return obs, reward, done, tt
actions = zip(*actions)
outputs = [env_step(action)
if not done else (self.env_spec.initial_obs(None), 0, True, None)
for action, env, done in zip(actions, self.envs, self.dones)]
for i, (_, _, done, _) in enumerate(outputs):
self.dones[i] = self.dones[i] or done
obs, reward, done, tt = zip(*outputs)
obs = [list(oo) for oo in zip(*obs)]
return [obs, reward, done, tt]
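  # Note (editorial): `actions` arrives batched per action dimension (one
  # sequence per dimension, each of length len(self.envs)); zip(*actions)
  # transposes it into one action tuple per environment. Environments whose
  # `done` flag is already set receive a dummy transition rather than a real
  # env.step() call, keeping the batch shapes aligned.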
def get_one(self):
return random.choice(self.envs)
def __len__(self):
return len(self.envs)
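# Usage sketch (editorial illustration, not part of the original file; assumes
# the env_spec module imported above is on the path and `policy` is some
# hypothetical batched policy):
#   wrapper = GymWrapper('CartPole-v0', distinct=2, count=3)
#   obs = wrapper.reset()  # 6 envs: 3 copies for each of 2 random seeds
#   while not wrapper.all_done():
#       actions = policy(obs)  # one list per action dimension (see step())
#       obs, rewards, dones, infos = wrapper.step(actions)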
|
apache-2.0
|